From e76b5fa6896c09257181675bbf4cf47789d32927 Mon Sep 17 00:00:00 2001
From: Travis Oliphant
Date: Sat, 15 Dec 2007 18:54:52 +0000
Subject: Create a branch for io work in NumPy

---
 numpy/__init__.py | 111 -
 numpy/_import_tools.py | 357 -
 numpy/add_newdocs.py | 1444 -
 numpy/core/__init__.py | 38 -
 numpy/core/_internal.py | 289 -
 numpy/core/arrayprint.py | 452 -
 numpy/core/blasdot/_dotblas.c | 1113 -
 numpy/core/blasdot/cblas.h | 578 -
 numpy/core/code_generators/array_api_order.txt | 85 -
 numpy/core/code_generators/genapi.py | 295 -
 numpy/core/code_generators/generate_array_api.py | 208 -
 numpy/core/code_generators/generate_ufunc_api.py | 125 -
 numpy/core/code_generators/generate_umath.py | 679 -
 .../core/code_generators/multiarray_api_order.txt | 83 -
 numpy/core/code_generators/ufunc_api_order.txt | 30 -
 numpy/core/defchararray.py | 340 -
 numpy/core/defmatrix.py | 494 -
 numpy/core/fromnumeric.py | 1519 -
 numpy/core/include/numpy/arrayobject.h | 21 -
 numpy/core/include/numpy/arrayscalars.h | 152 -
 numpy/core/include/numpy/fenv/fenv.c | 38 -
 numpy/core/include/numpy/fenv/fenv.h | 224 -
 numpy/core/include/numpy/ndarrayobject.h | 1998 -
 numpy/core/include/numpy/noprefix.h | 191 -
 numpy/core/include/numpy/npy_interrupt.h | 117 -
 numpy/core/include/numpy/old_defines.h | 169 -
 numpy/core/include/numpy/oldnumeric.h | 23 -
 numpy/core/include/numpy/ufuncobject.h | 379 -
 numpy/core/info.py | 86 -
 numpy/core/ma.py | 2255 --
 numpy/core/memmap.py | 103 -
 numpy/core/numeric.py | 1051 -
 numpy/core/numerictypes.py | 488 -
 numpy/core/records.py | 588 -
 numpy/core/setup.py | 347 -
 numpy/core/src/_isnan.c | 46 -
 numpy/core/src/_signbit.c | 32 -
 numpy/core/src/_sortmodule.c.src | 490 -
 numpy/core/src/arraymethods.c | 1942 -
 numpy/core/src/arrayobject.c | 12081 -------
 numpy/core/src/arraytypes.inc.src | 2545 --
 numpy/core/src/multiarraymodule.c | 7666 ----
 numpy/core/src/scalarmathmodule.c.src | 1249 -
 numpy/core/src/scalartypes.inc.src | 2779 --
 numpy/core/src/ucsnarrow.c | 108 -
 numpy/core/src/ufuncobject.c | 3893 --
 numpy/core/src/umathmodule.c.src | 2334 --
 numpy/core/tests/test_defmatrix.py | 184 -
 numpy/core/tests/test_errstate.py | 62 -
 numpy/core/tests/test_ma.py | 873 -
 numpy/core/tests/test_multiarray.py | 551 -
 numpy/core/tests/test_numeric.py | 734 -
 numpy/core/tests/test_numerictypes.py | 342 -
 numpy/core/tests/test_records.py | 114 -
 numpy/core/tests/test_regression.py | 773 -
 numpy/core/tests/test_scalarmath.py | 66 -
 numpy/core/tests/test_ufunc.py | 16 -
 numpy/core/tests/test_umath.py | 208 -
 numpy/core/tests/test_unicode.py | 304 -
 numpy/core/tests/testdata.fits | Bin 8640 -> 0 bytes
 numpy/ctypeslib.py | 165 -
 numpy/distutils/__init__.py | 20 -
 numpy/distutils/__version__.py | 4 -
 numpy/distutils/ccompiler.py | 403 -
 numpy/distutils/command/__init__.py | 31 -
 numpy/distutils/command/bdist_rpm.py | 22 -
 numpy/distutils/command/build.py | 34 -
 numpy/distutils/command/build_clib.py | 259 -
 numpy/distutils/command/build_ext.py | 474 -
 numpy/distutils/command/build_py.py | 25 -
 numpy/distutils/command/build_scripts.py | 49 -
 numpy/distutils/command/build_src.py | 716 -
 numpy/distutils/command/config.py | 158 -
 numpy/distutils/command/config_compiler.py | 123 -
 numpy/distutils/command/develop.py | 15 -
 numpy/distutils/command/egg_info.py | 9 -
 numpy/distutils/command/install.py | 36 -
 numpy/distutils/command/install_data.py | 13 -
 numpy/distutils/command/install_headers.py | 25 -
 numpy/distutils/command/sdist.py | 27 -
 numpy/distutils/conv_template.py | 202 -
 numpy/distutils/core.py | 219 -
numpy/distutils/cpuinfo.py | 681 - numpy/distutils/environment.py | 70 - numpy/distutils/exec_command.py | 641 - numpy/distutils/extension.py | 74 - numpy/distutils/fcompiler/__init__.py | 952 - numpy/distutils/fcompiler/absoft.py | 157 - numpy/distutils/fcompiler/compaq.py | 115 - numpy/distutils/fcompiler/g95.py | 44 - numpy/distutils/fcompiler/gnu.py | 387 - numpy/distutils/fcompiler/hpux.py | 40 - numpy/distutils/fcompiler/ibm.py | 95 - numpy/distutils/fcompiler/intel.py | 248 - numpy/distutils/fcompiler/lahey.py | 47 - numpy/distutils/fcompiler/mips.py | 56 - numpy/distutils/fcompiler/nag.py | 43 - numpy/distutils/fcompiler/none.py | 30 - numpy/distutils/fcompiler/pg.py | 41 - numpy/distutils/fcompiler/sun.py | 50 - numpy/distutils/fcompiler/vast.py | 54 - numpy/distutils/from_template.py | 256 - numpy/distutils/info.py | 5 - numpy/distutils/intelccompiler.py | 29 - numpy/distutils/interactive.py | 187 - numpy/distutils/lib2def.py | 113 - numpy/distutils/line_endings.py | 74 - numpy/distutils/log.py | 73 - numpy/distutils/mingw32ccompiler.py | 227 - numpy/distutils/misc_util.py | 1530 - numpy/distutils/setup.py | 15 - numpy/distutils/system_info.py | 1947 - numpy/distutils/tests/f2py_ext/__init__.py | 0 numpy/distutils/tests/f2py_ext/setup.py | 11 - numpy/distutils/tests/f2py_ext/src/fib1.f | 18 - numpy/distutils/tests/f2py_ext/src/fib2.pyf | 9 - numpy/distutils/tests/f2py_ext/tests/test_fib2.py | 13 - numpy/distutils/tests/f2py_f90_ext/__init__.py | 0 .../distutils/tests/f2py_f90_ext/include/body.f90 | 5 - numpy/distutils/tests/f2py_f90_ext/setup.py | 16 - .../distutils/tests/f2py_f90_ext/src/foo_free.f90 | 6 - .../distutils/tests/f2py_f90_ext/tests/test_foo.py | 13 - numpy/distutils/tests/gen_ext/__init__.py | 0 numpy/distutils/tests/gen_ext/setup.py | 47 - numpy/distutils/tests/gen_ext/tests/test_fib3.py | 13 - numpy/distutils/tests/pyrex_ext/__init__.py | 0 numpy/distutils/tests/pyrex_ext/primes.pyx | 22 - numpy/distutils/tests/pyrex_ext/setup.py | 12 - .../distutils/tests/pyrex_ext/tests/test_primes.py | 13 - numpy/distutils/tests/setup.py | 14 - numpy/distutils/tests/swig_ext/__init__.py | 0 numpy/distutils/tests/swig_ext/setup.py | 18 - numpy/distutils/tests/swig_ext/src/example.c | 14 - numpy/distutils/tests/swig_ext/src/example.i | 14 - numpy/distutils/tests/swig_ext/src/zoo.cc | 23 - numpy/distutils/tests/swig_ext/src/zoo.h | 9 - numpy/distutils/tests/swig_ext/src/zoo.i | 10 - .../distutils/tests/swig_ext/tests/test_example.py | 18 - .../tests/swig_ext/tests/test_example2.py | 17 - numpy/distutils/tests/test_fcompiler_gnu.py | 52 - numpy/distutils/tests/test_misc_util.py | 60 - numpy/distutils/unixccompiler.py | 69 - numpy/doc/CAPI.txt | 313 - numpy/doc/DISTUTILS.txt | 569 - numpy/doc/HOWTO_DOCUMENT.txt | 216 - numpy/doc/README.txt | 15 - numpy/doc/example.py | 98 - numpy/doc/html/api-objects.txt | 4 - numpy/doc/html/crarr.png | Bin 340 -> 0 bytes numpy/doc/html/epydoc.css | 315 - numpy/doc/html/epydoc.js | 280 - numpy/doc/html/example-module.html | 316 - numpy/doc/html/example-pysrc.html | 208 - numpy/doc/html/frames.html | 17 - numpy/doc/html/help.html | 268 - numpy/doc/html/identifier-index.html | 180 - numpy/doc/html/index.html | 17 - numpy/doc/html/module-tree.html | 101 - numpy/doc/html/redirect.html | 38 - numpy/doc/html/toc-everything.html | 34 - numpy/doc/html/toc-example-module.html | 34 - numpy/doc/html/toc.html | 34 - numpy/doc/pep_buffer.txt | 869 - numpy/doc/pyrex/MANIFEST | 2 - numpy/doc/pyrex/Makefile | 9 - numpy/doc/pyrex/c_numpy.pxd | 125 - 
numpy/doc/pyrex/c_python.pxd | 20 - numpy/doc/pyrex/notes | 3 - numpy/doc/pyrex/numpyx.c | 1037 - numpy/doc/pyrex/numpyx.pyx | 97 - numpy/doc/pyrex/run_test.py | 3 - numpy/doc/pyrex/setup.py | 42 - numpy/doc/records.txt | 87 - numpy/doc/swig/Makefile | 36 - numpy/doc/swig/README | 130 - numpy/doc/swig/doc/Makefile | 51 - numpy/doc/swig/doc/numpy_swig.html | 1244 - numpy/doc/swig/doc/numpy_swig.pdf | Bin 168839 -> 0 bytes numpy/doc/swig/doc/numpy_swig.txt | 950 - numpy/doc/swig/doc/testing.html | 482 - numpy/doc/swig/doc/testing.pdf | Bin 72439 -> 0 bytes numpy/doc/swig/doc/testing.txt | 173 - numpy/doc/swig/numpy.i | 1581 - numpy/doc/swig/pyfragments.swg | 174 - numpy/doc/swig/test/Array.i | 107 - numpy/doc/swig/test/Array1.cxx | 131 - numpy/doc/swig/test/Array1.h | 55 - numpy/doc/swig/test/Array2.cxx | 168 - numpy/doc/swig/test/Array2.h | 63 - numpy/doc/swig/test/Farray.cxx | 122 - numpy/doc/swig/test/Farray.h | 56 - numpy/doc/swig/test/Farray.i | 73 - numpy/doc/swig/test/Makefile | 32 - numpy/doc/swig/test/Matrix.cxx | 112 - numpy/doc/swig/test/Matrix.h | 52 - numpy/doc/swig/test/Matrix.i | 45 - numpy/doc/swig/test/Tensor.cxx | 131 - numpy/doc/swig/test/Tensor.h | 52 - numpy/doc/swig/test/Tensor.i | 49 - numpy/doc/swig/test/Vector.cxx | 100 - numpy/doc/swig/test/Vector.h | 58 - numpy/doc/swig/test/Vector.i | 47 - numpy/doc/swig/test/setup.py | 58 - numpy/doc/swig/test/testArray.py | 285 - numpy/doc/swig/test/testFarray.py | 158 - numpy/doc/swig/test/testMatrix.py | 365 - numpy/doc/swig/test/testTensor.py | 405 - numpy/doc/swig/test/testVector.py | 384 - numpy/doc/ufuncs.txt | 103 - numpy/dual.py | 57 - numpy/f2py/BUGS.txt | 55 - numpy/f2py/Makefile | 173 - numpy/f2py/NEWS.txt | 2 - numpy/f2py/README.txt | 5 - numpy/f2py/TODO.txt | 67 - numpy/f2py/__init__.py | 42 - numpy/f2py/__version__.py | 8 - numpy/f2py/auxfuncs.py | 664 - numpy/f2py/capi_maps.py | 755 - numpy/f2py/cb_rules.py | 541 - numpy/f2py/cfuncs.py | 1156 - numpy/f2py/common_rules.py | 134 - numpy/f2py/crackfortran.py | 2750 -- numpy/f2py/diagnose.py | 166 - numpy/f2py/doc/Makefile | 76 - numpy/f2py/doc/Release-1.x.txt | 27 - numpy/f2py/doc/Release-2.x.txt | 77 - numpy/f2py/doc/Release-3.x.txt | 87 - numpy/f2py/doc/Release-4.x.txt | 91 - numpy/f2py/doc/apps.tex | 71 - numpy/f2py/doc/bugs.tex | 109 - numpy/f2py/doc/collectinput.py | 78 - numpy/f2py/doc/commands.tex | 20 - numpy/f2py/doc/ex1/arr.f | 4 - numpy/f2py/doc/ex1/bar.f | 4 - numpy/f2py/doc/ex1/foo.f | 5 - numpy/f2py/doc/ex1/foobar-smart.f90 | 24 - numpy/f2py/doc/ex1/foobar.f90 | 16 - numpy/f2py/doc/ex1/foobarmodule.tex | 36 - numpy/f2py/doc/ex1/runme | 18 - numpy/f2py/doc/f2py2e.tex | 50 - numpy/f2py/doc/f2python9-final/README.txt | 38 - numpy/f2py/doc/f2python9-final/aerostructure.jpg | Bin 72247 -> 0 bytes numpy/f2py/doc/f2python9-final/flow.jpg | Bin 13266 -> 0 bytes numpy/f2py/doc/f2python9-final/mk_html.sh | 13 - numpy/f2py/doc/f2python9-final/mk_pdf.sh | 13 - numpy/f2py/doc/f2python9-final/mk_ps.sh | 14 - numpy/f2py/doc/f2python9-final/src/examples/exp1.f | 26 - .../doc/f2python9-final/src/examples/exp1mess.txt | 17 - .../f2python9-final/src/examples/exp1session.txt | 20 - .../f2py/doc/f2python9-final/src/examples/foo.pyf | 13 - .../f2py/doc/f2python9-final/src/examples/foom.pyf | 14 - numpy/f2py/doc/f2python9-final/structure.jpg | Bin 17860 -> 0 bytes numpy/f2py/doc/fortranobject.tex | 574 - numpy/f2py/doc/index.html | 264 - numpy/f2py/doc/intro.tex | 158 - numpy/f2py/doc/multiarray/array_from_pyobj.c | 323 - numpy/f2py/doc/multiarray/bar.c | 15 - 
numpy/f2py/doc/multiarray/foo.f | 13 - .../doc/multiarray/fortran_array_from_pyobj.txt | 284 - numpy/f2py/doc/multiarray/fun.pyf | 89 - numpy/f2py/doc/multiarray/run.pyf | 91 - numpy/f2py/doc/multiarray/transpose.txt | 1127 - numpy/f2py/doc/multiarrays.txt | 120 - numpy/f2py/doc/notes.tex | 310 - numpy/f2py/doc/oldnews.html | 121 - numpy/f2py/doc/options.tex | 63 - numpy/f2py/doc/python9.tex | 1046 - numpy/f2py/doc/signaturefile.tex | 368 - numpy/f2py/doc/using_F_compiler.txt | 147 - numpy/f2py/doc/win32_notes.txt | 85 - numpy/f2py/docs/FAQ.txt | 615 - numpy/f2py/docs/HISTORY.txt | 1044 - numpy/f2py/docs/OLDNEWS.txt | 63 - numpy/f2py/docs/README.txt | 461 - numpy/f2py/docs/TESTING.txt | 108 - numpy/f2py/docs/THANKS.txt | 63 - numpy/f2py/docs/default.css | 180 - numpy/f2py/docs/docutils.conf | 16 - numpy/f2py/docs/hello.f | 7 - numpy/f2py/docs/pyforttest.pyf | 5 - numpy/f2py/docs/pytest.py | 10 - numpy/f2py/docs/simple.f | 13 - numpy/f2py/docs/simple_session.dat | 51 - numpy/f2py/docs/usersguide/allocarr.f90 | 16 - numpy/f2py/docs/usersguide/allocarr_session.dat | 27 - numpy/f2py/docs/usersguide/array.f | 17 - numpy/f2py/docs/usersguide/array_session.dat | 65 - numpy/f2py/docs/usersguide/calculate.f | 14 - numpy/f2py/docs/usersguide/calculate_session.dat | 6 - numpy/f2py/docs/usersguide/callback.f | 12 - numpy/f2py/docs/usersguide/callback2.pyf | 19 - numpy/f2py/docs/usersguide/callback_session.dat | 23 - numpy/f2py/docs/usersguide/common.f | 13 - numpy/f2py/docs/usersguide/common_session.dat | 27 - numpy/f2py/docs/usersguide/compile_session.dat | 11 - numpy/f2py/docs/usersguide/default.css | 180 - numpy/f2py/docs/usersguide/docutils.conf | 16 - numpy/f2py/docs/usersguide/extcallback.f | 14 - numpy/f2py/docs/usersguide/extcallback_session.dat | 19 - numpy/f2py/docs/usersguide/fib1.f | 18 - numpy/f2py/docs/usersguide/fib1.pyf | 12 - numpy/f2py/docs/usersguide/fib2.pyf | 9 - numpy/f2py/docs/usersguide/fib3.f | 21 - numpy/f2py/docs/usersguide/ftype.f | 9 - numpy/f2py/docs/usersguide/ftype_session.dat | 21 - numpy/f2py/docs/usersguide/index.txt | 1772 - numpy/f2py/docs/usersguide/moddata.f90 | 18 - numpy/f2py/docs/usersguide/moddata_session.dat | 23 - numpy/f2py/docs/usersguide/run_main_session.dat | 14 - numpy/f2py/docs/usersguide/scalar.f | 12 - numpy/f2py/docs/usersguide/scalar_session.dat | 21 - numpy/f2py/docs/usersguide/setup_example.py | 19 - numpy/f2py/docs/usersguide/spam.pyf | 19 - numpy/f2py/docs/usersguide/spam_session.dat | 5 - numpy/f2py/docs/usersguide/string.f | 21 - numpy/f2py/docs/usersguide/string_session.dat | 27 - numpy/f2py/docs/usersguide/var.pyf | 11 - numpy/f2py/docs/usersguide/var_session.dat | 3 - numpy/f2py/f2py.1 | 209 - numpy/f2py/f2py2e.py | 568 - numpy/f2py/f2py_testing.py | 43 - numpy/f2py/f90mod_rules.py | 243 - numpy/f2py/func2subr.py | 170 - numpy/f2py/info.py | 5 - numpy/f2py/lib/__init__.py | 14 - numpy/f2py/lib/api.py | 14 - numpy/f2py/lib/doc.txt | 239 - numpy/f2py/lib/extgen/__init__.py | 27 - numpy/f2py/lib/extgen/base.py | 543 - numpy/f2py/lib/extgen/c_support.py | 293 - numpy/f2py/lib/extgen/doc.txt | 449 - numpy/f2py/lib/extgen/py_support.py | 1104 - numpy/f2py/lib/extgen/setup_py.py | 124 - numpy/f2py/lib/extgen/utils.py | 126 - numpy/f2py/lib/main.py | 534 - numpy/f2py/lib/nary.py | 32 - numpy/f2py/lib/parser/Fortran2003.py | 5890 --- numpy/f2py/lib/parser/__init__.py | 14 - numpy/f2py/lib/parser/api.py | 73 - numpy/f2py/lib/parser/base_classes.py | 819 - numpy/f2py/lib/parser/block_statements.py | 1229 - numpy/f2py/lib/parser/doc.txt | 395 - 
numpy/f2py/lib/parser/parsefortran.py | 197 - numpy/f2py/lib/parser/pattern_tools.py | 401 - numpy/f2py/lib/parser/readfortran.py | 857 - numpy/f2py/lib/parser/sourceinfo.py | 81 - numpy/f2py/lib/parser/splitline.py | 426 - numpy/f2py/lib/parser/statements.py | 1856 - numpy/f2py/lib/parser/test_Fortran2003.py | 2101 -- numpy/f2py/lib/parser/test_parser.py | 496 - numpy/f2py/lib/parser/typedecl_statements.py | 563 - numpy/f2py/lib/parser/utils.py | 177 - numpy/f2py/lib/py_wrap.py | 128 - numpy/f2py/lib/py_wrap_subprogram.py | 210 - numpy/f2py/lib/py_wrap_type.py | 753 - numpy/f2py/lib/setup.py | 13 - numpy/f2py/lib/src/F_FUNC.cpp | 34 - numpy/f2py/lib/src/pyobj_to_string_len.c | 11 - numpy/f2py/lib/tests/test_derived_scalar.py | 74 - numpy/f2py/lib/tests/test_module_module.py | 61 - numpy/f2py/lib/tests/test_module_scalar.py | 58 - numpy/f2py/lib/tests/test_scalar_function_in.py | 532 - numpy/f2py/lib/tests/test_scalar_in_out.py | 529 - numpy/f2py/lib/wrapper_base.py | 178 - numpy/f2py/rules.py | 1361 - numpy/f2py/setup.cfg | 3 - numpy/f2py/setup.py | 130 - numpy/f2py/src/fortranobject.c | 816 - numpy/f2py/src/fortranobject.h | 124 - numpy/f2py/src/test/Makefile | 96 - numpy/f2py/src/test/bar.f | 11 - numpy/f2py/src/test/foo.f | 11 - numpy/f2py/src/test/foo90.f90 | 13 - numpy/f2py/src/test/foomodule.c | 144 - numpy/f2py/src/test/wrap.f | 70 - numpy/f2py/tests/array_from_pyobj/__init__.py | 0 numpy/f2py/tests/array_from_pyobj/setup.py | 25 - .../tests/test_array_from_pyobj.py | 515 - numpy/f2py/tests/array_from_pyobj/wrapmodule.c | 196 - numpy/f2py/tests/c/return_real.py | 107 - numpy/f2py/tests/f77/callback.py | 98 - numpy/f2py/tests/f77/return_character.py | 99 - numpy/f2py/tests/f77/return_complex.py | 124 - numpy/f2py/tests/f77/return_integer.py | 147 - numpy/f2py/tests/f77/return_logical.py | 133 - numpy/f2py/tests/f77/return_real.py | 126 - numpy/f2py/tests/f90/return_character.py | 98 - numpy/f2py/tests/f90/return_complex.py | 126 - numpy/f2py/tests/f90/return_integer.py | 151 - numpy/f2py/tests/f90/return_logical.py | 137 - numpy/f2py/tests/f90/return_real.py | 129 - numpy/f2py/tests/mixed/foo.f | 5 - numpy/f2py/tests/mixed/foo_fixed.f90 | 8 - numpy/f2py/tests/mixed/foo_free.f90 | 8 - numpy/f2py/tests/mixed/run.py | 49 - numpy/f2py/tests/run_all.py | 55 - numpy/f2py/use_rules.py | 112 - numpy/fft/__init__.py | 9 - numpy/fft/fftpack.c | 1501 - numpy/fft/fftpack.h | 28 - numpy/fft/fftpack.py | 328 - numpy/fft/fftpack_litemodule.c | 275 - numpy/fft/helper.py | 66 - numpy/fft/info.py | 29 - numpy/fft/setup.py | 19 - numpy/fft/tests/test_fftpack.py | 24 - numpy/fft/tests/test_helper.py | 45 - numpy/lib/__init__.py | 35 - numpy/lib/arraysetops.py | 329 - numpy/lib/convdtype.py | 65 - numpy/lib/function_base.py | 1492 - numpy/lib/getlimits.py | 175 - numpy/lib/index_tricks.py | 457 - numpy/lib/info.py | 136 - numpy/lib/machar.py | 285 - numpy/lib/polynomial.py | 670 - numpy/lib/scimath.py | 86 - numpy/lib/setup.py | 21 - numpy/lib/shape_base.py | 633 - numpy/lib/src/_compiled_base.c | 590 - numpy/lib/tests/test_arraysetops.py | 171 - numpy/lib/tests/test_function_base.py | 454 - numpy/lib/tests/test_getlimits.py | 55 - numpy/lib/tests/test_index_tricks.py | 51 - numpy/lib/tests/test_polynomial.py | 98 - numpy/lib/tests/test_shape_base.py | 412 - numpy/lib/tests/test_twodim_base.py | 200 - numpy/lib/tests/test_type_check.py | 280 - numpy/lib/tests/test_ufunclike.py | 66 - numpy/lib/twodim_base.py | 184 - numpy/lib/type_check.py | 233 - numpy/lib/ufunclike.py | 60 - numpy/lib/user_array.py | 
217 - numpy/lib/utils.py | 432 - numpy/linalg/__init__.py | 8 - numpy/linalg/blas_lite.c | 10659 ------ numpy/linalg/dlamch.c | 951 - numpy/linalg/dlapack_lite.c | 36005 ------------------- numpy/linalg/f2c.h | 217 - numpy/linalg/f2c_lite.c | 492 - numpy/linalg/info.py | 25 - numpy/linalg/lapack_lite/README | 40 - numpy/linalg/lapack_lite/clapack_scrub.py | 275 - numpy/linalg/lapack_lite/fortran.py | 114 - numpy/linalg/lapack_lite/make_lite.py | 264 - numpy/linalg/lapack_lite/wrapped_routines | 19 - numpy/linalg/lapack_litemodule.c | 836 - numpy/linalg/linalg.py | 975 - numpy/linalg/setup.py | 31 - numpy/linalg/tests/test_linalg.py | 99 - numpy/linalg/zlapack_lite.c | 26018 -------------- numpy/matlib.py | 65 - numpy/numarray/__init__.py | 26 - numpy/numarray/_capi.c | 3341 -- numpy/numarray/alter_code1.py | 265 - numpy/numarray/alter_code2.py | 70 - numpy/numarray/compat.py | 4 - numpy/numarray/convolve.py | 14 - numpy/numarray/fft.py | 7 - numpy/numarray/functions.py | 490 - numpy/numarray/image.py | 14 - numpy/numarray/linear_algebra.py | 15 - numpy/numarray/ma.py | 2 - numpy/numarray/matrix.py | 7 - numpy/numarray/mlab.py | 7 - numpy/numarray/nd_image.py | 14 - numpy/numarray/numerictypes.py | 551 - numpy/numarray/numpy/arraybase.h | 71 - numpy/numarray/numpy/cfunc.h | 78 - numpy/numarray/numpy/ieeespecial.h | 124 - numpy/numarray/numpy/libnumarray.h | 611 - numpy/numarray/numpy/numcomplex.h | 252 - numpy/numarray/numpy/nummacro.h | 447 - numpy/numarray/random_array.py | 9 - numpy/numarray/session.py | 348 - numpy/numarray/setup.py | 17 - numpy/numarray/ufuncs.py | 22 - numpy/numarray/util.py | 40 - numpy/oldnumeric/__init__.py | 41 - numpy/oldnumeric/alter_code1.py | 240 - numpy/oldnumeric/alter_code2.py | 146 - numpy/oldnumeric/array_printer.py | 16 - numpy/oldnumeric/arrayfns.py | 96 - numpy/oldnumeric/compat.py | 66 - numpy/oldnumeric/fft.py | 21 - numpy/oldnumeric/fix_default_axis.py | 291 - numpy/oldnumeric/functions.py | 124 - numpy/oldnumeric/linear_algebra.py | 83 - numpy/oldnumeric/ma.py | 14 - numpy/oldnumeric/matrix.py | 67 - numpy/oldnumeric/misc.py | 42 - numpy/oldnumeric/mlab.py | 122 - numpy/oldnumeric/precision.py | 169 - numpy/oldnumeric/random_array.py | 266 - numpy/oldnumeric/rng.py | 135 - numpy/oldnumeric/rng_stats.py | 35 - numpy/oldnumeric/setup.py | 8 - numpy/oldnumeric/tests/test_oldnumeric.py | 86 - numpy/oldnumeric/typeconv.py | 60 - numpy/oldnumeric/ufuncs.py | 19 - numpy/oldnumeric/user_array.py | 9 - numpy/random/__init__.py | 18 - numpy/random/info.py | 55 - numpy/random/mtrand/Python.pxi | 54 - numpy/random/mtrand/distributions.c | 853 - numpy/random/mtrand/distributions.h | 185 - numpy/random/mtrand/generate_mtrand_c.py | 37 - numpy/random/mtrand/initarray.c | 136 - numpy/random/mtrand/initarray.h | 6 - numpy/random/mtrand/mtrand.c | 10907 ------ numpy/random/mtrand/mtrand.pyx | 1724 - numpy/random/mtrand/numpy.pxi | 133 - numpy/random/mtrand/randomkit.c | 365 - numpy/random/mtrand/randomkit.h | 189 - numpy/random/setup.py | 53 - numpy/random/tests/test_random.py | 19 - numpy/setup.py | 22 - numpy/testing/__init__.py | 5 - numpy/testing/info.py | 30 - numpy/testing/numpytest.py | 691 - numpy/testing/parametric.py | 300 - numpy/testing/setup.py | 16 - numpy/testing/utils.py | 272 - numpy/tests/test_ctypeslib.py | 63 - numpy/version.py | 15 - 526 files changed, 239549 deletions(-) delete mode 100644 numpy/__init__.py delete mode 100644 numpy/_import_tools.py delete mode 100644 numpy/add_newdocs.py delete mode 100644 numpy/core/__init__.py delete mode 
100644 numpy/core/_internal.py delete mode 100644 numpy/core/arrayprint.py delete mode 100644 numpy/core/blasdot/_dotblas.c delete mode 100644 numpy/core/blasdot/cblas.h delete mode 100644 numpy/core/code_generators/array_api_order.txt delete mode 100644 numpy/core/code_generators/genapi.py delete mode 100644 numpy/core/code_generators/generate_array_api.py delete mode 100644 numpy/core/code_generators/generate_ufunc_api.py delete mode 100644 numpy/core/code_generators/generate_umath.py delete mode 100644 numpy/core/code_generators/multiarray_api_order.txt delete mode 100644 numpy/core/code_generators/ufunc_api_order.txt delete mode 100644 numpy/core/defchararray.py delete mode 100644 numpy/core/defmatrix.py delete mode 100644 numpy/core/fromnumeric.py delete mode 100644 numpy/core/include/numpy/arrayobject.h delete mode 100644 numpy/core/include/numpy/arrayscalars.h delete mode 100644 numpy/core/include/numpy/fenv/fenv.c delete mode 100644 numpy/core/include/numpy/fenv/fenv.h delete mode 100644 numpy/core/include/numpy/ndarrayobject.h delete mode 100644 numpy/core/include/numpy/noprefix.h delete mode 100644 numpy/core/include/numpy/npy_interrupt.h delete mode 100644 numpy/core/include/numpy/old_defines.h delete mode 100644 numpy/core/include/numpy/oldnumeric.h delete mode 100644 numpy/core/include/numpy/ufuncobject.h delete mode 100644 numpy/core/info.py delete mode 100644 numpy/core/ma.py delete mode 100644 numpy/core/memmap.py delete mode 100644 numpy/core/numeric.py delete mode 100644 numpy/core/numerictypes.py delete mode 100644 numpy/core/records.py delete mode 100644 numpy/core/setup.py delete mode 100644 numpy/core/src/_isnan.c delete mode 100644 numpy/core/src/_signbit.c delete mode 100644 numpy/core/src/_sortmodule.c.src delete mode 100644 numpy/core/src/arraymethods.c delete mode 100644 numpy/core/src/arrayobject.c delete mode 100644 numpy/core/src/arraytypes.inc.src delete mode 100644 numpy/core/src/multiarraymodule.c delete mode 100644 numpy/core/src/scalarmathmodule.c.src delete mode 100644 numpy/core/src/scalartypes.inc.src delete mode 100644 numpy/core/src/ucsnarrow.c delete mode 100644 numpy/core/src/ufuncobject.c delete mode 100644 numpy/core/src/umathmodule.c.src delete mode 100644 numpy/core/tests/test_defmatrix.py delete mode 100644 numpy/core/tests/test_errstate.py delete mode 100644 numpy/core/tests/test_ma.py delete mode 100644 numpy/core/tests/test_multiarray.py delete mode 100644 numpy/core/tests/test_numeric.py delete mode 100644 numpy/core/tests/test_numerictypes.py delete mode 100644 numpy/core/tests/test_records.py delete mode 100644 numpy/core/tests/test_regression.py delete mode 100644 numpy/core/tests/test_scalarmath.py delete mode 100644 numpy/core/tests/test_ufunc.py delete mode 100644 numpy/core/tests/test_umath.py delete mode 100644 numpy/core/tests/test_unicode.py delete mode 100644 numpy/core/tests/testdata.fits delete mode 100644 numpy/ctypeslib.py delete mode 100644 numpy/distutils/__init__.py delete mode 100644 numpy/distutils/__version__.py delete mode 100644 numpy/distutils/ccompiler.py delete mode 100644 numpy/distutils/command/__init__.py delete mode 100644 numpy/distutils/command/bdist_rpm.py delete mode 100644 numpy/distutils/command/build.py delete mode 100644 numpy/distutils/command/build_clib.py delete mode 100644 numpy/distutils/command/build_ext.py delete mode 100644 numpy/distutils/command/build_py.py delete mode 100644 numpy/distutils/command/build_scripts.py delete mode 100644 numpy/distutils/command/build_src.py delete mode 100644 
numpy/distutils/command/config.py delete mode 100644 numpy/distutils/command/config_compiler.py delete mode 100644 numpy/distutils/command/develop.py delete mode 100644 numpy/distutils/command/egg_info.py delete mode 100644 numpy/distutils/command/install.py delete mode 100644 numpy/distutils/command/install_data.py delete mode 100644 numpy/distutils/command/install_headers.py delete mode 100644 numpy/distutils/command/sdist.py delete mode 100644 numpy/distutils/conv_template.py delete mode 100644 numpy/distutils/core.py delete mode 100644 numpy/distutils/cpuinfo.py delete mode 100644 numpy/distutils/environment.py delete mode 100644 numpy/distutils/exec_command.py delete mode 100644 numpy/distutils/extension.py delete mode 100644 numpy/distutils/fcompiler/__init__.py delete mode 100644 numpy/distutils/fcompiler/absoft.py delete mode 100644 numpy/distutils/fcompiler/compaq.py delete mode 100644 numpy/distutils/fcompiler/g95.py delete mode 100644 numpy/distutils/fcompiler/gnu.py delete mode 100644 numpy/distutils/fcompiler/hpux.py delete mode 100644 numpy/distutils/fcompiler/ibm.py delete mode 100644 numpy/distutils/fcompiler/intel.py delete mode 100644 numpy/distutils/fcompiler/lahey.py delete mode 100644 numpy/distutils/fcompiler/mips.py delete mode 100644 numpy/distutils/fcompiler/nag.py delete mode 100644 numpy/distutils/fcompiler/none.py delete mode 100644 numpy/distutils/fcompiler/pg.py delete mode 100644 numpy/distutils/fcompiler/sun.py delete mode 100644 numpy/distutils/fcompiler/vast.py delete mode 100644 numpy/distutils/from_template.py delete mode 100644 numpy/distutils/info.py delete mode 100644 numpy/distutils/intelccompiler.py delete mode 100644 numpy/distutils/interactive.py delete mode 100644 numpy/distutils/lib2def.py delete mode 100644 numpy/distutils/line_endings.py delete mode 100644 numpy/distutils/log.py delete mode 100644 numpy/distutils/mingw32ccompiler.py delete mode 100644 numpy/distutils/misc_util.py delete mode 100644 numpy/distutils/setup.py delete mode 100644 numpy/distutils/system_info.py delete mode 100644 numpy/distutils/tests/f2py_ext/__init__.py delete mode 100644 numpy/distutils/tests/f2py_ext/setup.py delete mode 100644 numpy/distutils/tests/f2py_ext/src/fib1.f delete mode 100644 numpy/distutils/tests/f2py_ext/src/fib2.pyf delete mode 100644 numpy/distutils/tests/f2py_ext/tests/test_fib2.py delete mode 100644 numpy/distutils/tests/f2py_f90_ext/__init__.py delete mode 100644 numpy/distutils/tests/f2py_f90_ext/include/body.f90 delete mode 100644 numpy/distutils/tests/f2py_f90_ext/setup.py delete mode 100644 numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 delete mode 100644 numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py delete mode 100644 numpy/distutils/tests/gen_ext/__init__.py delete mode 100644 numpy/distutils/tests/gen_ext/setup.py delete mode 100644 numpy/distutils/tests/gen_ext/tests/test_fib3.py delete mode 100644 numpy/distutils/tests/pyrex_ext/__init__.py delete mode 100644 numpy/distutils/tests/pyrex_ext/primes.pyx delete mode 100644 numpy/distutils/tests/pyrex_ext/setup.py delete mode 100644 numpy/distutils/tests/pyrex_ext/tests/test_primes.py delete mode 100644 numpy/distutils/tests/setup.py delete mode 100644 numpy/distutils/tests/swig_ext/__init__.py delete mode 100644 numpy/distutils/tests/swig_ext/setup.py delete mode 100644 numpy/distutils/tests/swig_ext/src/example.c delete mode 100644 numpy/distutils/tests/swig_ext/src/example.i delete mode 100644 numpy/distutils/tests/swig_ext/src/zoo.cc delete mode 100644 
numpy/distutils/tests/swig_ext/src/zoo.h delete mode 100644 numpy/distutils/tests/swig_ext/src/zoo.i delete mode 100644 numpy/distutils/tests/swig_ext/tests/test_example.py delete mode 100644 numpy/distutils/tests/swig_ext/tests/test_example2.py delete mode 100644 numpy/distutils/tests/test_fcompiler_gnu.py delete mode 100644 numpy/distutils/tests/test_misc_util.py delete mode 100644 numpy/distutils/unixccompiler.py delete mode 100644 numpy/doc/CAPI.txt delete mode 100644 numpy/doc/DISTUTILS.txt delete mode 100644 numpy/doc/HOWTO_DOCUMENT.txt delete mode 100644 numpy/doc/README.txt delete mode 100644 numpy/doc/example.py delete mode 100644 numpy/doc/html/api-objects.txt delete mode 100644 numpy/doc/html/crarr.png delete mode 100644 numpy/doc/html/epydoc.css delete mode 100644 numpy/doc/html/epydoc.js delete mode 100644 numpy/doc/html/example-module.html delete mode 100644 numpy/doc/html/example-pysrc.html delete mode 100644 numpy/doc/html/frames.html delete mode 100644 numpy/doc/html/help.html delete mode 100644 numpy/doc/html/identifier-index.html delete mode 100644 numpy/doc/html/index.html delete mode 100644 numpy/doc/html/module-tree.html delete mode 100644 numpy/doc/html/redirect.html delete mode 100644 numpy/doc/html/toc-everything.html delete mode 100644 numpy/doc/html/toc-example-module.html delete mode 100644 numpy/doc/html/toc.html delete mode 100644 numpy/doc/pep_buffer.txt delete mode 100644 numpy/doc/pyrex/MANIFEST delete mode 100644 numpy/doc/pyrex/Makefile delete mode 100644 numpy/doc/pyrex/c_numpy.pxd delete mode 100644 numpy/doc/pyrex/c_python.pxd delete mode 100644 numpy/doc/pyrex/notes delete mode 100644 numpy/doc/pyrex/numpyx.c delete mode 100644 numpy/doc/pyrex/numpyx.pyx delete mode 100755 numpy/doc/pyrex/run_test.py delete mode 100644 numpy/doc/pyrex/setup.py delete mode 100644 numpy/doc/records.txt delete mode 100644 numpy/doc/swig/Makefile delete mode 100644 numpy/doc/swig/README delete mode 100644 numpy/doc/swig/doc/Makefile delete mode 100644 numpy/doc/swig/doc/numpy_swig.html delete mode 100644 numpy/doc/swig/doc/numpy_swig.pdf delete mode 100644 numpy/doc/swig/doc/numpy_swig.txt delete mode 100644 numpy/doc/swig/doc/testing.html delete mode 100644 numpy/doc/swig/doc/testing.pdf delete mode 100644 numpy/doc/swig/doc/testing.txt delete mode 100644 numpy/doc/swig/numpy.i delete mode 100644 numpy/doc/swig/pyfragments.swg delete mode 100644 numpy/doc/swig/test/Array.i delete mode 100644 numpy/doc/swig/test/Array1.cxx delete mode 100644 numpy/doc/swig/test/Array1.h delete mode 100644 numpy/doc/swig/test/Array2.cxx delete mode 100644 numpy/doc/swig/test/Array2.h delete mode 100644 numpy/doc/swig/test/Farray.cxx delete mode 100644 numpy/doc/swig/test/Farray.h delete mode 100644 numpy/doc/swig/test/Farray.i delete mode 100644 numpy/doc/swig/test/Makefile delete mode 100644 numpy/doc/swig/test/Matrix.cxx delete mode 100644 numpy/doc/swig/test/Matrix.h delete mode 100644 numpy/doc/swig/test/Matrix.i delete mode 100644 numpy/doc/swig/test/Tensor.cxx delete mode 100644 numpy/doc/swig/test/Tensor.h delete mode 100644 numpy/doc/swig/test/Tensor.i delete mode 100644 numpy/doc/swig/test/Vector.cxx delete mode 100644 numpy/doc/swig/test/Vector.h delete mode 100644 numpy/doc/swig/test/Vector.i delete mode 100755 numpy/doc/swig/test/setup.py delete mode 100755 numpy/doc/swig/test/testArray.py delete mode 100755 numpy/doc/swig/test/testFarray.py delete mode 100755 numpy/doc/swig/test/testMatrix.py delete mode 100755 numpy/doc/swig/test/testTensor.py delete mode 100755 
numpy/doc/swig/test/testVector.py delete mode 100644 numpy/doc/ufuncs.txt delete mode 100644 numpy/dual.py delete mode 100644 numpy/f2py/BUGS.txt delete mode 100644 numpy/f2py/Makefile delete mode 100644 numpy/f2py/NEWS.txt delete mode 100644 numpy/f2py/README.txt delete mode 100644 numpy/f2py/TODO.txt delete mode 100644 numpy/f2py/__init__.py delete mode 100644 numpy/f2py/__version__.py delete mode 100644 numpy/f2py/auxfuncs.py delete mode 100644 numpy/f2py/capi_maps.py delete mode 100644 numpy/f2py/cb_rules.py delete mode 100644 numpy/f2py/cfuncs.py delete mode 100644 numpy/f2py/common_rules.py delete mode 100755 numpy/f2py/crackfortran.py delete mode 100644 numpy/f2py/diagnose.py delete mode 100644 numpy/f2py/doc/Makefile delete mode 100644 numpy/f2py/doc/Release-1.x.txt delete mode 100644 numpy/f2py/doc/Release-2.x.txt delete mode 100644 numpy/f2py/doc/Release-3.x.txt delete mode 100644 numpy/f2py/doc/Release-4.x.txt delete mode 100644 numpy/f2py/doc/apps.tex delete mode 100644 numpy/f2py/doc/bugs.tex delete mode 100755 numpy/f2py/doc/collectinput.py delete mode 100644 numpy/f2py/doc/commands.tex delete mode 100644 numpy/f2py/doc/ex1/arr.f delete mode 100644 numpy/f2py/doc/ex1/bar.f delete mode 100644 numpy/f2py/doc/ex1/foo.f delete mode 100644 numpy/f2py/doc/ex1/foobar-smart.f90 delete mode 100644 numpy/f2py/doc/ex1/foobar.f90 delete mode 100644 numpy/f2py/doc/ex1/foobarmodule.tex delete mode 100755 numpy/f2py/doc/ex1/runme delete mode 100644 numpy/f2py/doc/f2py2e.tex delete mode 100644 numpy/f2py/doc/f2python9-final/README.txt delete mode 100644 numpy/f2py/doc/f2python9-final/aerostructure.jpg delete mode 100644 numpy/f2py/doc/f2python9-final/flow.jpg delete mode 100755 numpy/f2py/doc/f2python9-final/mk_html.sh delete mode 100755 numpy/f2py/doc/f2python9-final/mk_pdf.sh delete mode 100755 numpy/f2py/doc/f2python9-final/mk_ps.sh delete mode 100644 numpy/f2py/doc/f2python9-final/src/examples/exp1.f delete mode 100644 numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt delete mode 100644 numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt delete mode 100644 numpy/f2py/doc/f2python9-final/src/examples/foo.pyf delete mode 100644 numpy/f2py/doc/f2python9-final/src/examples/foom.pyf delete mode 100644 numpy/f2py/doc/f2python9-final/structure.jpg delete mode 100644 numpy/f2py/doc/fortranobject.tex delete mode 100644 numpy/f2py/doc/index.html delete mode 100644 numpy/f2py/doc/intro.tex delete mode 100644 numpy/f2py/doc/multiarray/array_from_pyobj.c delete mode 100644 numpy/f2py/doc/multiarray/bar.c delete mode 100644 numpy/f2py/doc/multiarray/foo.f delete mode 100644 numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt delete mode 100644 numpy/f2py/doc/multiarray/fun.pyf delete mode 100644 numpy/f2py/doc/multiarray/run.pyf delete mode 100644 numpy/f2py/doc/multiarray/transpose.txt delete mode 100644 numpy/f2py/doc/multiarrays.txt delete mode 100644 numpy/f2py/doc/notes.tex delete mode 100644 numpy/f2py/doc/oldnews.html delete mode 100644 numpy/f2py/doc/options.tex delete mode 100644 numpy/f2py/doc/python9.tex delete mode 100644 numpy/f2py/doc/signaturefile.tex delete mode 100644 numpy/f2py/doc/using_F_compiler.txt delete mode 100644 numpy/f2py/doc/win32_notes.txt delete mode 100644 numpy/f2py/docs/FAQ.txt delete mode 100644 numpy/f2py/docs/HISTORY.txt delete mode 100644 numpy/f2py/docs/OLDNEWS.txt delete mode 100644 numpy/f2py/docs/README.txt delete mode 100644 numpy/f2py/docs/TESTING.txt delete mode 100644 numpy/f2py/docs/THANKS.txt delete mode 100644 
numpy/f2py/docs/default.css delete mode 100644 numpy/f2py/docs/docutils.conf delete mode 100644 numpy/f2py/docs/hello.f delete mode 100644 numpy/f2py/docs/pyforttest.pyf delete mode 100644 numpy/f2py/docs/pytest.py delete mode 100644 numpy/f2py/docs/simple.f delete mode 100644 numpy/f2py/docs/simple_session.dat delete mode 100644 numpy/f2py/docs/usersguide/allocarr.f90 delete mode 100644 numpy/f2py/docs/usersguide/allocarr_session.dat delete mode 100644 numpy/f2py/docs/usersguide/array.f delete mode 100644 numpy/f2py/docs/usersguide/array_session.dat delete mode 100644 numpy/f2py/docs/usersguide/calculate.f delete mode 100644 numpy/f2py/docs/usersguide/calculate_session.dat delete mode 100644 numpy/f2py/docs/usersguide/callback.f delete mode 100644 numpy/f2py/docs/usersguide/callback2.pyf delete mode 100644 numpy/f2py/docs/usersguide/callback_session.dat delete mode 100644 numpy/f2py/docs/usersguide/common.f delete mode 100644 numpy/f2py/docs/usersguide/common_session.dat delete mode 100644 numpy/f2py/docs/usersguide/compile_session.dat delete mode 100644 numpy/f2py/docs/usersguide/default.css delete mode 100644 numpy/f2py/docs/usersguide/docutils.conf delete mode 100644 numpy/f2py/docs/usersguide/extcallback.f delete mode 100644 numpy/f2py/docs/usersguide/extcallback_session.dat delete mode 100644 numpy/f2py/docs/usersguide/fib1.f delete mode 100644 numpy/f2py/docs/usersguide/fib1.pyf delete mode 100644 numpy/f2py/docs/usersguide/fib2.pyf delete mode 100644 numpy/f2py/docs/usersguide/fib3.f delete mode 100644 numpy/f2py/docs/usersguide/ftype.f delete mode 100644 numpy/f2py/docs/usersguide/ftype_session.dat delete mode 100644 numpy/f2py/docs/usersguide/index.txt delete mode 100644 numpy/f2py/docs/usersguide/moddata.f90 delete mode 100644 numpy/f2py/docs/usersguide/moddata_session.dat delete mode 100644 numpy/f2py/docs/usersguide/run_main_session.dat delete mode 100644 numpy/f2py/docs/usersguide/scalar.f delete mode 100644 numpy/f2py/docs/usersguide/scalar_session.dat delete mode 100644 numpy/f2py/docs/usersguide/setup_example.py delete mode 100644 numpy/f2py/docs/usersguide/spam.pyf delete mode 100644 numpy/f2py/docs/usersguide/spam_session.dat delete mode 100644 numpy/f2py/docs/usersguide/string.f delete mode 100644 numpy/f2py/docs/usersguide/string_session.dat delete mode 100644 numpy/f2py/docs/usersguide/var.pyf delete mode 100644 numpy/f2py/docs/usersguide/var_session.dat delete mode 100644 numpy/f2py/f2py.1 delete mode 100755 numpy/f2py/f2py2e.py delete mode 100644 numpy/f2py/f2py_testing.py delete mode 100644 numpy/f2py/f90mod_rules.py delete mode 100644 numpy/f2py/func2subr.py delete mode 100644 numpy/f2py/info.py delete mode 100644 numpy/f2py/lib/__init__.py delete mode 100644 numpy/f2py/lib/api.py delete mode 100644 numpy/f2py/lib/doc.txt delete mode 100644 numpy/f2py/lib/extgen/__init__.py delete mode 100644 numpy/f2py/lib/extgen/base.py delete mode 100644 numpy/f2py/lib/extgen/c_support.py delete mode 100644 numpy/f2py/lib/extgen/doc.txt delete mode 100644 numpy/f2py/lib/extgen/py_support.py delete mode 100644 numpy/f2py/lib/extgen/setup_py.py delete mode 100644 numpy/f2py/lib/extgen/utils.py delete mode 100644 numpy/f2py/lib/main.py delete mode 100644 numpy/f2py/lib/nary.py delete mode 100644 numpy/f2py/lib/parser/Fortran2003.py delete mode 100644 numpy/f2py/lib/parser/__init__.py delete mode 100644 numpy/f2py/lib/parser/api.py delete mode 100644 numpy/f2py/lib/parser/base_classes.py delete mode 100644 numpy/f2py/lib/parser/block_statements.py delete mode 100644 
numpy/f2py/lib/parser/doc.txt delete mode 100644 numpy/f2py/lib/parser/parsefortran.py delete mode 100644 numpy/f2py/lib/parser/pattern_tools.py delete mode 100644 numpy/f2py/lib/parser/readfortran.py delete mode 100644 numpy/f2py/lib/parser/sourceinfo.py delete mode 100644 numpy/f2py/lib/parser/splitline.py delete mode 100644 numpy/f2py/lib/parser/statements.py delete mode 100644 numpy/f2py/lib/parser/test_Fortran2003.py delete mode 100644 numpy/f2py/lib/parser/test_parser.py delete mode 100644 numpy/f2py/lib/parser/typedecl_statements.py delete mode 100644 numpy/f2py/lib/parser/utils.py delete mode 100644 numpy/f2py/lib/py_wrap.py delete mode 100644 numpy/f2py/lib/py_wrap_subprogram.py delete mode 100644 numpy/f2py/lib/py_wrap_type.py delete mode 100644 numpy/f2py/lib/setup.py delete mode 100644 numpy/f2py/lib/src/F_FUNC.cpp delete mode 100644 numpy/f2py/lib/src/pyobj_to_string_len.c delete mode 100644 numpy/f2py/lib/tests/test_derived_scalar.py delete mode 100644 numpy/f2py/lib/tests/test_module_module.py delete mode 100644 numpy/f2py/lib/tests/test_module_scalar.py delete mode 100644 numpy/f2py/lib/tests/test_scalar_function_in.py delete mode 100644 numpy/f2py/lib/tests/test_scalar_in_out.py delete mode 100644 numpy/f2py/lib/wrapper_base.py delete mode 100644 numpy/f2py/rules.py delete mode 100644 numpy/f2py/setup.cfg delete mode 100755 numpy/f2py/setup.py delete mode 100644 numpy/f2py/src/fortranobject.c delete mode 100644 numpy/f2py/src/fortranobject.h delete mode 100644 numpy/f2py/src/test/Makefile delete mode 100644 numpy/f2py/src/test/bar.f delete mode 100644 numpy/f2py/src/test/foo.f delete mode 100644 numpy/f2py/src/test/foo90.f90 delete mode 100644 numpy/f2py/src/test/foomodule.c delete mode 100644 numpy/f2py/src/test/wrap.f delete mode 100644 numpy/f2py/tests/array_from_pyobj/__init__.py delete mode 100644 numpy/f2py/tests/array_from_pyobj/setup.py delete mode 100644 numpy/f2py/tests/array_from_pyobj/tests/test_array_from_pyobj.py delete mode 100644 numpy/f2py/tests/array_from_pyobj/wrapmodule.c delete mode 100644 numpy/f2py/tests/c/return_real.py delete mode 100644 numpy/f2py/tests/f77/callback.py delete mode 100644 numpy/f2py/tests/f77/return_character.py delete mode 100644 numpy/f2py/tests/f77/return_complex.py delete mode 100644 numpy/f2py/tests/f77/return_integer.py delete mode 100644 numpy/f2py/tests/f77/return_logical.py delete mode 100644 numpy/f2py/tests/f77/return_real.py delete mode 100644 numpy/f2py/tests/f90/return_character.py delete mode 100644 numpy/f2py/tests/f90/return_complex.py delete mode 100644 numpy/f2py/tests/f90/return_integer.py delete mode 100644 numpy/f2py/tests/f90/return_logical.py delete mode 100644 numpy/f2py/tests/f90/return_real.py delete mode 100644 numpy/f2py/tests/mixed/foo.f delete mode 100644 numpy/f2py/tests/mixed/foo_fixed.f90 delete mode 100644 numpy/f2py/tests/mixed/foo_free.f90 delete mode 100644 numpy/f2py/tests/mixed/run.py delete mode 100755 numpy/f2py/tests/run_all.py delete mode 100644 numpy/f2py/use_rules.py delete mode 100644 numpy/fft/__init__.py delete mode 100644 numpy/fft/fftpack.c delete mode 100644 numpy/fft/fftpack.h delete mode 100644 numpy/fft/fftpack.py delete mode 100644 numpy/fft/fftpack_litemodule.c delete mode 100644 numpy/fft/helper.py delete mode 100644 numpy/fft/info.py delete mode 100644 numpy/fft/setup.py delete mode 100644 numpy/fft/tests/test_fftpack.py delete mode 100644 numpy/fft/tests/test_helper.py delete mode 100644 numpy/lib/__init__.py delete mode 100644 numpy/lib/arraysetops.py delete mode 100644 
numpy/lib/convdtype.py delete mode 100644 numpy/lib/function_base.py delete mode 100644 numpy/lib/getlimits.py delete mode 100644 numpy/lib/index_tricks.py delete mode 100644 numpy/lib/info.py delete mode 100644 numpy/lib/machar.py delete mode 100644 numpy/lib/polynomial.py delete mode 100644 numpy/lib/scimath.py delete mode 100644 numpy/lib/setup.py delete mode 100644 numpy/lib/shape_base.py delete mode 100644 numpy/lib/src/_compiled_base.c delete mode 100644 numpy/lib/tests/test_arraysetops.py delete mode 100644 numpy/lib/tests/test_function_base.py delete mode 100644 numpy/lib/tests/test_getlimits.py delete mode 100644 numpy/lib/tests/test_index_tricks.py delete mode 100644 numpy/lib/tests/test_polynomial.py delete mode 100644 numpy/lib/tests/test_shape_base.py delete mode 100644 numpy/lib/tests/test_twodim_base.py delete mode 100644 numpy/lib/tests/test_type_check.py delete mode 100644 numpy/lib/tests/test_ufunclike.py delete mode 100644 numpy/lib/twodim_base.py delete mode 100644 numpy/lib/type_check.py delete mode 100644 numpy/lib/ufunclike.py delete mode 100644 numpy/lib/user_array.py delete mode 100644 numpy/lib/utils.py delete mode 100644 numpy/linalg/__init__.py delete mode 100644 numpy/linalg/blas_lite.c delete mode 100644 numpy/linalg/dlamch.c delete mode 100644 numpy/linalg/dlapack_lite.c delete mode 100644 numpy/linalg/f2c.h delete mode 100644 numpy/linalg/f2c_lite.c delete mode 100644 numpy/linalg/info.py delete mode 100644 numpy/linalg/lapack_lite/README delete mode 100644 numpy/linalg/lapack_lite/clapack_scrub.py delete mode 100644 numpy/linalg/lapack_lite/fortran.py delete mode 100755 numpy/linalg/lapack_lite/make_lite.py delete mode 100644 numpy/linalg/lapack_lite/wrapped_routines delete mode 100644 numpy/linalg/lapack_litemodule.c delete mode 100644 numpy/linalg/linalg.py delete mode 100644 numpy/linalg/setup.py delete mode 100644 numpy/linalg/tests/test_linalg.py delete mode 100644 numpy/linalg/zlapack_lite.c delete mode 100644 numpy/matlib.py delete mode 100644 numpy/numarray/__init__.py delete mode 100644 numpy/numarray/_capi.c delete mode 100644 numpy/numarray/alter_code1.py delete mode 100644 numpy/numarray/alter_code2.py delete mode 100644 numpy/numarray/compat.py delete mode 100644 numpy/numarray/convolve.py delete mode 100644 numpy/numarray/fft.py delete mode 100644 numpy/numarray/functions.py delete mode 100644 numpy/numarray/image.py delete mode 100644 numpy/numarray/linear_algebra.py delete mode 100644 numpy/numarray/ma.py delete mode 100644 numpy/numarray/matrix.py delete mode 100644 numpy/numarray/mlab.py delete mode 100644 numpy/numarray/nd_image.py delete mode 100644 numpy/numarray/numerictypes.py delete mode 100644 numpy/numarray/numpy/arraybase.h delete mode 100644 numpy/numarray/numpy/cfunc.h delete mode 100644 numpy/numarray/numpy/ieeespecial.h delete mode 100644 numpy/numarray/numpy/libnumarray.h delete mode 100644 numpy/numarray/numpy/numcomplex.h delete mode 100644 numpy/numarray/numpy/nummacro.h delete mode 100644 numpy/numarray/random_array.py delete mode 100644 numpy/numarray/session.py delete mode 100644 numpy/numarray/setup.py delete mode 100644 numpy/numarray/ufuncs.py delete mode 100644 numpy/numarray/util.py delete mode 100644 numpy/oldnumeric/__init__.py delete mode 100644 numpy/oldnumeric/alter_code1.py delete mode 100644 numpy/oldnumeric/alter_code2.py delete mode 100644 numpy/oldnumeric/array_printer.py delete mode 100644 numpy/oldnumeric/arrayfns.py delete mode 100644 numpy/oldnumeric/compat.py delete mode 100644 
numpy/oldnumeric/fft.py delete mode 100644 numpy/oldnumeric/fix_default_axis.py delete mode 100644 numpy/oldnumeric/functions.py delete mode 100644 numpy/oldnumeric/linear_algebra.py delete mode 100644 numpy/oldnumeric/ma.py delete mode 100644 numpy/oldnumeric/matrix.py delete mode 100644 numpy/oldnumeric/misc.py delete mode 100644 numpy/oldnumeric/mlab.py delete mode 100644 numpy/oldnumeric/precision.py delete mode 100644 numpy/oldnumeric/random_array.py delete mode 100644 numpy/oldnumeric/rng.py delete mode 100644 numpy/oldnumeric/rng_stats.py delete mode 100644 numpy/oldnumeric/setup.py delete mode 100644 numpy/oldnumeric/tests/test_oldnumeric.py delete mode 100644 numpy/oldnumeric/typeconv.py delete mode 100644 numpy/oldnumeric/ufuncs.py delete mode 100644 numpy/oldnumeric/user_array.py delete mode 100644 numpy/random/__init__.py delete mode 100644 numpy/random/info.py delete mode 100644 numpy/random/mtrand/Python.pxi delete mode 100644 numpy/random/mtrand/distributions.c delete mode 100644 numpy/random/mtrand/distributions.h delete mode 100644 numpy/random/mtrand/generate_mtrand_c.py delete mode 100644 numpy/random/mtrand/initarray.c delete mode 100644 numpy/random/mtrand/initarray.h delete mode 100644 numpy/random/mtrand/mtrand.c delete mode 100644 numpy/random/mtrand/mtrand.pyx delete mode 100644 numpy/random/mtrand/numpy.pxi delete mode 100644 numpy/random/mtrand/randomkit.c delete mode 100644 numpy/random/mtrand/randomkit.h delete mode 100644 numpy/random/setup.py delete mode 100644 numpy/random/tests/test_random.py delete mode 100644 numpy/setup.py delete mode 100644 numpy/testing/__init__.py delete mode 100644 numpy/testing/info.py delete mode 100644 numpy/testing/numpytest.py delete mode 100644 numpy/testing/parametric.py delete mode 100755 numpy/testing/setup.py delete mode 100644 numpy/testing/utils.py delete mode 100644 numpy/tests/test_ctypeslib.py delete mode 100644 numpy/version.py (limited to 'numpy') diff --git a/numpy/__init__.py b/numpy/__init__.py deleted file mode 100644 index 1a9271bce..000000000 --- a/numpy/__init__.py +++ /dev/null @@ -1,111 +0,0 @@ -"""\ -NumPy -========== - -You can support the development of NumPy and SciPy by purchasing -the book "Guide to NumPy" at - - http://www.trelgol.com - -It is being distributed for a fee for only a few years to -cover some of the costs of development. After the restriction period -it will also be freely available. - -Additional documentation is available in the docstrings and at - -http://www.scipy.org. -""" - -try: - from numpy.__config__ import show as show_config -except ImportError: - show_config = None - -if show_config is None: - import sys as _sys - print >> _sys.stderr, 'Running from numpy source directory.' 
- del _sys -else: - from version import version as __version__ - - from _import_tools import PackageLoader - - def pkgload(*packages, **options): - loader = PackageLoader(infunc=True) - return loader(*packages, **options) - - import testing - from testing import ScipyTest, NumpyTest - import core - from core import * - import lib - from lib import * - import linalg - import fft - import random - import ctypeslib - - # Make these accessible from numpy name-space - # but not imported in from numpy import * - from __builtin__ import bool, int, long, float, complex, \ - object, unicode, str - from core import round, abs, max, min - - __all__ = ['__version__', 'pkgload', 'PackageLoader', - 'ScipyTest', 'NumpyTest', 'show_config'] - __all__ += core.__all__ - __all__ += lib.__all__ - __all__ += ['linalg', 'fft', 'random', 'ctypeslib'] - - if __doc__ is not None: - __doc__ += """ - -Available subpackages ---------------------- -core --- Defines a multi-dimensional array and useful procedures - for Numerical computation. -lib --- Basic functions used by several sub-packages and useful - to have in the main name-space. -random --- Core Random Tools -linalg --- Core Linear Algebra Tools -fft --- Core FFT routines -testing --- Numpy testing tools - - These packages require explicit import -f2py --- Fortran to Python Interface Generator. -distutils --- Enhancements to distutils with support for - Fortran compilers support and more. - - -Global symbols from subpackages -------------------------------- -core --> * -lib --> * -testing --> NumpyTest -""" - - def test(*args, **kw): - import os, sys - print 'Numpy is installed in %s' % (os.path.split(__file__)[0],) - print 'Numpy version %s' % (__version__,) - print 'Python version %s' % (sys.version.replace('\n', '',),) - return NumpyTest().test(*args, **kw) - test.__doc__ = NumpyTest.test.__doc__ - - import add_newdocs - - __all__.extend(['add_newdocs','test']) - - if __doc__ is not None: - __doc__ += """ - -Utility tools -------------- - - test --- Run numpy unittests - pkgload --- Load numpy packages - show_config --- Show numpy build configuration - dual --- Overwrite certain functions with high-performance Scipy tools - matlib --- Make everything matrices. - __version__ --- Numpy version string -""" diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py deleted file mode 100644 index 7940e84ff..000000000 --- a/numpy/_import_tools.py +++ /dev/null @@ -1,357 +0,0 @@ - -import os -import sys -import imp -from glob import glob - -__all__ = ['PackageLoader'] - -class PackageLoader: - def __init__(self, verbose=False, infunc=False): - """ Manages loading packages. - """ - - if infunc: - _level = 2 - else: - _level = 1 - self.parent_frame = frame = sys._getframe(_level) - self.parent_name = eval('__name__',frame.f_globals,frame.f_locals) - parent_path = eval('__path__',frame.f_globals,frame.f_locals) - if isinstance(parent_path, str): - parent_path = [parent_path] - self.parent_path = parent_path - if '__all__' not in frame.f_locals: - exec('__all__ = []',frame.f_globals,frame.f_locals) - self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals) - - self.info_modules = {} - self.imported_packages = [] - self.verbose = None - - def _get_info_files(self, package_dir, parent_path, parent_package=None): - """ Return list of (package name,info.py file) from parent_path subdirectories. 
- """ - from glob import glob - files = glob(os.path.join(parent_path,package_dir,'info.py')) - for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')): - if info_file[:-1] not in files: - files.append(info_file) - info_files = [] - for info_file in files: - package_name = os.path.dirname(info_file[len(parent_path)+1:])\ - .replace(os.sep,'.') - if parent_package: - package_name = parent_package + '.' + package_name - info_files.append((package_name,info_file)) - info_files.extend(self._get_info_files('*', - os.path.dirname(info_file), - package_name)) - return info_files - - def _init_info_modules(self, packages=None): - """Initialize info_modules = {: }. - """ - import imp - info_files = [] - info_modules = self.info_modules - - if packages is None: - for path in self.parent_path: - info_files.extend(self._get_info_files('*',path)) - else: - for package_name in packages: - package_dir = os.path.join(*package_name.split('.')) - for path in self.parent_path: - names_files = self._get_info_files(package_dir, path) - if names_files: - info_files.extend(names_files) - break - else: - try: - exec 'import %s.info as info' % (package_name) - info_modules[package_name] = info - except ImportError, msg: - self.warn('No scipy-style subpackage %r found in %s. '\ - 'Ignoring: %s'\ - % (package_name,':'.join(self.parent_path), msg)) - - for package_name,info_file in info_files: - if package_name in info_modules: - continue - fullname = self.parent_name +'.'+ package_name - if info_file[-1]=='c': - filedescriptor = ('.pyc','rb',2) - else: - filedescriptor = ('.py','U',1) - - try: - info_module = imp.load_module(fullname+'.info', - open(info_file,filedescriptor[1]), - info_file, - filedescriptor) - except Exception,msg: - self.error(msg) - info_module = None - - if info_module is None or getattr(info_module,'ignore',False): - info_modules.pop(package_name,None) - else: - self._init_info_modules(getattr(info_module,'depends',[])) - info_modules[package_name] = info_module - - return - - def _get_sorted_names(self): - """ Return package names sorted in the order as they should be - imported due to dependence relations between packages. - """ - - depend_dict = {} - for name,info_module in self.info_modules.items(): - depend_dict[name] = getattr(info_module,'depends',[]) - package_names = [] - - for name in depend_dict.keys(): - if not depend_dict[name]: - package_names.append(name) - del depend_dict[name] - - while depend_dict: - for name, lst in depend_dict.items(): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - package_names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - - return package_names - - def __call__(self,*packages, **options): - """Load one or more packages into parent package top-level namespace. - - Usage: - - This function is intended to shorten the need to import many - subpackages, say of scipy, constantly with statements such as - - import scipy.linalg, scipy.fftpack, scipy.etc... - - Instead, you can say: - - import scipy - scipy.pkgload('linalg','fftpack',...) - - or - - scipy.pkgload() - - to load all of them in one call. - - If a name which doesn't exist in scipy's namespace is - given, a warning is shown. - - Inputs: - - - the names (one or more strings) of all the numpy modules one - wishes to load into the top-level namespace. - - Optional keyword inputs: - - - verbose - integer specifying verbosity level [default: -1]. - verbose=-1 will suspend also warnings. 
- - force - when True, force reloading loaded packages - [default: False]. - - postpone - when True, don't load packages [default: False] - - If no input arguments are given, then all of scipy's subpackages - are imported. - - """ - frame = self.parent_frame - self.info_modules = {} - if options.get('force',False): - self.imported_packages = [] - self.verbose = verbose = options.get('verbose',-1) - postpone = options.get('postpone',None) - self._init_info_modules(packages or None) - - self.log('Imports to %r namespace\n----------------------------'\ - % self.parent_name) - - for package_name in self._get_sorted_names(): - if package_name in self.imported_packages: - continue - info_module = self.info_modules[package_name] - global_symbols = getattr(info_module,'global_symbols',[]) - postpone_import = getattr(info_module,'postpone_import',False) - if (postpone and not global_symbols) \ - or (postpone_import and postpone is not None): - self.log('__all__.append(%r)' % (package_name)) - if '.' not in package_name: - self.parent_export_names.append(package_name) - continue - - old_object = frame.f_locals.get(package_name,None) - - cmdstr = 'import '+package_name - if self._execcmd(cmdstr): - continue - self.imported_packages.append(package_name) - - if verbose!=-1: - new_object = frame.f_locals.get(package_name) - if old_object is not None and old_object is not new_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (package_name,self._obj2repr(new_object), - self._obj2repr(old_object))) - - if '.' not in package_name: - self.parent_export_names.append(package_name) - - for symbol in global_symbols: - if symbol=='*': - symbols = eval('getattr(%s,"__all__",None)'\ - % (package_name), - frame.f_globals,frame.f_locals) - if symbols is None: - symbols = eval('dir(%s)' % (package_name), - frame.f_globals,frame.f_locals) - symbols = filter(lambda s:not s.startswith('_'),symbols) - else: - symbols = [symbol] - - if verbose!=-1: - old_objects = {} - for s in symbols: - if s in frame.f_locals: - old_objects[s] = frame.f_locals[s] - - cmdstr = 'from '+package_name+' import '+symbol - if self._execcmd(cmdstr): - continue - - if verbose!=-1: - for s,old_object in old_objects.items(): - new_object = frame.f_locals[s] - if new_object is not old_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (s,self._obj2repr(new_object), - self._obj2repr(old_object))) - - if symbol=='*': - self.parent_export_names.extend(symbols) - else: - self.parent_export_names.append(symbol) - - return - - def _execcmd(self,cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - try: - exec (cmdstr, frame.f_globals,frame.f_locals) - except Exception,msg: - self.error('%s -> failed: %s' % (cmdstr,msg)) - return True - else: - self.log('%s -> success' % (cmdstr)) - return - - def _obj2repr(self,obj): - """ Return repr(obj) with""" - module = getattr(obj,'__module__',None) - file = getattr(obj,'__file__',None) - if module is not None: - return repr(obj) + ' from ' + module - if file is not None: - return repr(obj) + ' from ' + file - return repr(obj) - - def log(self,mess): - if self.verbose>1: - print >> sys.stderr, str(mess) - def warn(self,mess): - if self.verbose>=0: - print >> sys.stderr, str(mess) - def error(self,mess): - if self.verbose!=-1: - print >> sys.stderr, str(mess) - - def _get_doc_title(self, info_module): - """ Get the title from a package info.py file. 
- """ - title = getattr(info_module,'__doc_title__',None) - if title is not None: - return title - title = getattr(info_module,'__doc__',None) - if title is not None: - title = title.lstrip().split('\n',1)[0] - return title - return '* Not Available *' - - def _format_titles(self,titles,colsep='---'): - display_window_width = 70 # How to determine the correct value in runtime?? - lengths = [len(name)-name.find('.')-1 for (name,title) in titles]+[0] - max_length = max(lengths) - lines = [] - for (name,title) in titles: - name = name[name.find('.')+1:] - w = max_length - len(name) - words = title.split() - line = '%s%s %s' % (name,w*' ',colsep) - tab = len(line) * ' ' - while words: - word = words.pop(0) - if len(line)+len(word)>display_window_width: - lines.append(line) - line = tab - line += ' ' + word - else: - lines.append(line) - return '\n'.join(lines) - - def get_pkgdocs(self): - """ Return documentation summary of subpackages. - """ - import sys - self.info_modules = {} - self._init_info_modules(None) - - titles = [] - symbols = [] - for package_name, info_module in self.info_modules.items(): - global_symbols = getattr(info_module,'global_symbols',[]) - fullname = self.parent_name +'.'+ package_name - note = '' - if fullname not in sys.modules: - note = ' [*]' - titles.append((fullname,self._get_doc_title(info_module) + note)) - if global_symbols: - symbols.append((package_name,', '.join(global_symbols))) - - retstr = self._format_titles(titles) +\ - '\n [*] - using a package requires explicit import (see pkgload)' - - - if symbols: - retstr += """\n\nGlobal symbols from subpackages"""\ - """\n-------------------------------\n""" +\ - self._format_titles(symbols,'-->') - - return retstr - -class PackageLoaderDebug(PackageLoader): - def _execcmd(self,cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - print 'Executing',`cmdstr`,'...', - sys.stdout.flush() - exec (cmdstr, frame.f_globals,frame.f_locals) - print 'ok' - sys.stdout.flush() - return - -if int(os.environ.get('NUMPY_IMPORT_DEBUG','0')): - PackageLoader = PackageLoaderDebug diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py deleted file mode 100644 index ce4b43107..000000000 --- a/numpy/add_newdocs.py +++ /dev/null @@ -1,1444 +0,0 @@ -from lib import add_newdoc - -add_newdoc('numpy.core','dtype', - [('fields', "Fields of the data-type or None if no fields"), - ('names', "Names of fields or None if no fields"), - ('alignment', "Needed alignment for this data-type"), - ('byteorder', - "Little-endian (<), big-endian (>), native (=), or "\ - "not-applicable (|)"), - ('char', "Letter typecode for this data-type"), - ('type', "Type object associated with this data-type"), - ('kind', "Character giving type-family of this data-type"), - ('itemsize', "Size of each item"), - ('hasobject', "Non-zero if Python objects are in "\ - "this data-type"), - ('num', "Internally-used number for builtin base"), - ('newbyteorder', -"""self.newbyteorder() -returns a copy of the dtype object with altered byteorders. -If is not given all byteorders are swapped. -Otherwise endian can be '>', '<', or '=' to force a particular -byteorder. Data-types in all fields are also updated in the -new dtype object. 
-"""), - ("__reduce__", "self.__reduce__() for pickling"), - ("__setstate__", "self.__setstate__() for pickling"), - ("subdtype", "A tuple of (descr, shape) or None"), - ("descr", "The array_interface data-type descriptor."), - ("str", "The array interface typestring."), - ("name", "The name of the true data-type"), - ("base", "The base data-type or self if no subdtype"), - ("shape", "The shape of the subdtype or (1,)"), - ("isbuiltin", "Is this a built-in data-type?"), - ("isnative", "Is the byte-order of this data-type native?") - ] - ) - -############################################################################### -# -# flatiter -# -# flatiter needs a toplevel description -# -############################################################################### - -# attributes -add_newdoc('numpy.core', 'flatiter', ('base', - """documentation needed - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('coords', - """An N-d tuple of current coordinates. - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('index', - """documentation needed - - """)) - - - -# functions -add_newdoc('numpy.core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator - - """)) - - -add_newdoc('numpy.core', 'flatiter', ('copy', - """copy() Get a copy of the iterator as a 1-d array - - """)) - - -############################################################################### -# -# broadcast -# -############################################################################### - -# attributes -add_newdoc('numpy.core', 'broadcast', ('index', - """current index in broadcasted result - - """)) - - -add_newdoc('numpy.core', 'broadcast', ('iters', - """tuple of individual iterators - - """)) - - -add_newdoc('numpy.core', 'broadcast', ('nd', - """number of dimensions of broadcasted result - - """)) - - -add_newdoc('numpy.core', 'broadcast', ('numiter', - """number of iterators - - """)) - - -add_newdoc('numpy.core', 'broadcast', ('shape', - """shape of broadcasted result - - """)) - - -add_newdoc('numpy.core', 'broadcast', ('size', - """total size of broadcasted result - - """)) - - -############################################################################### -# -# numpy functions -# -############################################################################### - -add_newdoc('numpy.core.multiarray','array', - """array(object, dtype=None, copy=1,order=None, subok=0,ndmin=0) - - Return an array from object with the specified date-type. - - Inputs: - object - an array, any object exposing the array interface, any - object whose __array__ method returns an array, or any - (nested) sequence. - dtype - The desired data-type for the array. If not given, then - the type will be determined as the minimum type required - to hold the objects in the sequence. This argument can only - be used to 'upcast' the array. For downcasting, use the - .astype(t) method. - copy - If true, then force a copy. Otherwise a copy will only occur - if __array__ returns a copy, obj is a nested sequence, or - a copy is needed to satisfy any of the other requirements - order - Specify the order of the array. If order is 'C', then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'FORTRAN', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). If order is None, then the returned array may - be in either C-, or Fortran-contiguous order or even - discontiguous. 
- subok - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array - ndmin - Specifies the minimum number of dimensions that the resulting - array should have. 1's will be pre-pended to the shape as - needed to meet this requirement. - - """) - -add_newdoc('numpy.core.multiarray','empty', - """empty((d1,...,dn),dtype=float,order='C') - - Return a new array of shape (d1,...,dn) and given type with all its - entries uninitialized. This can be faster than zeros. - - """) - - -add_newdoc('numpy.core.multiarray','scalar', - """scalar(dtype,obj) - - Return a new scalar array of the given type initialized with - obj. Mainly for pickle support. The dtype must be a valid data-type - descriptor. If dtype corresponds to an OBJECT descriptor, then obj - can be any object, otherwise obj must be a string. If obj is not given - it will be interpreted as None for object type and zeros for all other - types. - - """) - -add_newdoc('numpy.core.multiarray','zeros', - """zeros((d1,...,dn),dtype=float,order='C') - - Return a new array of shape (d1,...,dn) and type typecode with all - it's entries initialized to zero. - - """) - -add_newdoc('numpy.core.multiarray','set_typeDict', - """set_typeDict(dict) - - Set the internal dictionary that can look up an array type using a - registered code. - - """) - -add_newdoc('numpy.core.multiarray','fromstring', - """fromstring(string, dtype=float, count=-1, sep='') - - Return a new 1d array initialized from the raw binary data in string. - - If count is positive, the new array will have count elements, otherwise its - size is determined by the size of string. If sep is not empty then the - string is interpreted in ASCII mode and converted to the desired number type - using sep as the separator between elements (extra whitespace is ignored). - - """) - -add_newdoc('numpy.core.multiarray','fromiter', - """fromiter(iterable, dtype, count=-1) - - Return a new 1d array initialized from iterable. If count is - nonegative, the new array will have count elements, otherwise it's - size is determined by the generator. - - """) - -add_newdoc('numpy.core.multiarray','fromfile', - """fromfile(file=, dtype=float, count=-1, sep='') -> array. - - Required arguments: - file -- open file object or string containing file name. - - Keyword arguments: - dtype -- type and order of the returned array (default float) - count -- number of items to input (default all) - sep -- separater between items if file is a text file (default "") - - Return an array of the given data type from a text or binary file. The - 'file' argument can be an open file or a string with the name of a file to - read from. If 'count' == -1 the entire file is read, otherwise count is the - number of items of the given type to read in. If 'sep' is "" it means to - read binary data from the file using the specified dtype, otherwise it gives - the separator between elements in a text file. The 'dtype' value is also - used to determine the size and order of the items in binary files. - - - Data written using the tofile() method can be conveniently recovered using - this function. - - WARNING: This function should be used sparingly as the binary files are not - platform independent. In particular, they contain no endianess or datatype - information. Nevertheless it can be useful for reading in simply formatted - or binary data quickly. 
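Editor's note: a small round trip illustrating the fromfile warning above, and why the dtype must be supplied on the way back in (the raw file carries no dtype or endianness information). The path is a temporary, illustrative one:

    import os
    import tempfile

    import numpy as np

    a = np.arange(6, dtype=np.float64)
    path = os.path.join(tempfile.mkdtemp(), 'data.bin')
    a.tofile(path)                        # raw bytes, no header
    b = np.fromfile(path, dtype=np.float64)
    print(np.array_equal(a, b))           # True, as long as dtype and byte order match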
- - """) - -add_newdoc('numpy.core.multiarray','frombuffer', - """frombuffer(buffer=, dtype=float, count=-1, offset=0) - - Returns a 1-d array of data type dtype from buffer. The buffer - argument must be an object that exposes the buffer interface. If - count is -1 then the entire buffer is used, otherwise, count is the - size of the output. If offset is given then jump that far into the - buffer. If the buffer has data that is out not in machine byte-order, - than use a propert data type descriptor. The data will not be - byteswapped, but the array will manage it in future operations. - - """) - -add_newdoc('numpy.core.multiarray','concatenate', - """concatenate((a1, a2, ...), axis=0) - - Join arrays together. - - The tuple of sequences (a1, a2, ...) are joined along the given axis - (default is the first one) into a single numpy array. - - Example: - - >>> concatenate( ([0,1,2], [5,6,7]) ) - array([0, 1, 2, 5, 6, 7]) - - """) - -add_newdoc('numpy.core.multiarray','inner', - """inner(a,b) - - Returns the dot product of two arrays, which has shape a.shape[:-1] + - b.shape[:-1] with elements computed by the product of the elements - from the last dimensions of a and b. - - """) - -add_newdoc('numpy.core','fastCopyAndTranspose', - """_fastCopyAndTranspose(a)""") - -add_newdoc('numpy.core.multiarray','correlate', - """cross_correlate(a,v, mode=0)""") - -add_newdoc('numpy.core.multiarray','arange', - """arange([start,] stop[, step,], dtype=None) - - For integer arguments, just like range() except it returns an array - whose type can be specified by the keyword argument dtype. If dtype - is not specified, the type of the result is deduced from the type of - the arguments. - - For floating point arguments, the length of the result is ceil((stop - - start)/step). This rule may result in the last element of the result - being greater than stop. - - """) - -add_newdoc('numpy.core.multiarray','_get_ndarray_c_version', - """_get_ndarray_c_version() - - Return the compile time NDARRAY_VERSION number. - - """) - -add_newdoc('numpy.core.multiarray','_reconstruct', - """_reconstruct(subtype, shape, dtype) - - Construct an empty array. Used by Pickles. - - """) - - -add_newdoc('numpy.core.multiarray','set_string_function', - """set_string_function(f, repr=1) - - Set the python function f to be the function used to obtain a pretty - printable string version of an array whenever an array is printed. - f(M) should expect an array argument M, and should return a string - consisting of the desired representation of M for printing. - - """) - -add_newdoc('numpy.core.multiarray','set_numeric_ops', - """set_numeric_ops(op=func, ...) - - Set some or all of the number methods for all array objects. Do not - forget **dict can be used as the argument list. Return the functions - that were replaced, which can be stored and set later. - - """) - -add_newdoc('numpy.core.multiarray','where', - """where(condition, x, y) or where(condition) - - Return elements from `x` or `y`, depending on `condition`. - - *Parameters*: - condition : array of bool - When True, yield x, otherwise yield y. - x,y : 1-dimensional arrays - Values from which to choose. - - *Notes* - This is equivalent to - - [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] - - The result is shaped like `condition` and has elements of `x` - or `y` where `condition` is respectively True or False. - - In the special case, where only `condition` is given, the - tuple condition.nonzero() is returned, instead. 
- - *Examples* - >>> where([True,False,True],[1,2,3],[4,5,6]) - array([1, 5, 3]) - - """) - - -add_newdoc('numpy.core.multiarray','lexsort', - """lexsort(keys=, axis=-1) -> array of indices. Argsort with list of keys. - - Perform an indirect sort using a list of keys. The first key is sorted, - then the second, and so on through the list of keys. At each step the - previous order is preserved when equal keys are encountered. The result is - a sort on multiple keys. If the keys represented columns of a spreadsheet, - for example, this would sort using multiple columns (the last key being - used for the primary sort order, the second-to-last key for the secondary - sort order, and so on). The keys argument must be a sequence of things - that can be converted to arrays of the same shape. - - *Parameters*: - - keys : (k,N) array or tuple of (N,) sequences - Array containing values that the returned indices should sort. - - axis : integer - Axis to be indirectly sorted. Default is -1 (i.e. last axis). - - *Returns*: - - indices : (N,) integer array - Array of indices that sort the keys along the specified axis. - - *See Also*: - - `argsort` : indirect sort - `sort` : inplace sort - - *Examples* - - >>> a = [1,5,1,4,3,6,7] - >>> b = [9,4,0,4,0,4,3] - >>> ind = lexsort((b,a)) - >>> print ind - [2 0 4 3 1 5 6] - >>> print take(a,ind) - [1 1 3 4 5 6 7] - >>> print take(b,ind) - [0 9 0 4 4 4 3] - - """) - -add_newdoc('numpy.core.multiarray','can_cast', - """can_cast(from=d1, to=d2) - - Returns True if data type d1 can be cast to data type d2 without - losing precision. - - """) - -add_newdoc('numpy.core.multiarray','newbuffer', - """newbuffer(size) - - Return a new uninitialized buffer object of size bytes - - """) - -add_newdoc('numpy.core.multiarray','getbuffer', - """getbuffer(obj [,offset[, size]]) - - Create a buffer object from the given object referencing a slice of - length size starting at offset. Default is the entire buffer. A - read-write buffer is attempted followed by a read-only buffer. - - """) - -############################################################################## -# -# Documentation for ndarray attributes and methods -# -############################################################################## - - -############################################################################## -# -# ndarray object -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', - """An array object represents a multidimensional, homogeneous array - of fixed-size items. An associated data-type-descriptor object - details the data-type in an array (including byteorder and any - fields). An array can be constructed using the numpy.array - command. Arrays are sequence, mapping and numeric objects. - More information is available in the numpy module and by looking - at the methods and attributes of an array. - - ndarray.__new__(subtype, shape=, dtype=float, buffer=None, - offset=0, strides=None, order=None) - - There are two modes of creating an array using __new__: - 1) If buffer is None, then only shape, dtype, and order - are used - 2) If buffer is an object exporting the buffer interface, then - all keywords are interpreted. - The dtype parameter can be any object that can be interpreted - as a numpy.dtype object. - - No __init__ method is needed because the array is fully - initialized after the __new__ method. 
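Editor's note: the two construction modes described for ndarray.__new__ above, sketched with current NumPy; mode 1 leaves the contents uninitialized, mode 2 wraps an existing buffer:

    import numpy as np

    # Mode 1: no buffer -- only shape, dtype and order are used; contents are arbitrary.
    a = np.ndarray(shape=(2, 3), dtype=float)
    print(a.shape, a.dtype)                # (2, 3) float64

    # Mode 2: buffer given -- offset and strides are interpreted as well.
    buf = np.arange(12, dtype=np.int32)
    b = np.ndarray(shape=(3,), dtype=np.int32, buffer=buf,
                   offset=4 * buf.itemsize)
    print(b)                               # [4 5 6]: a view four items into buf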
- - """) - - -############################################################################## -# -# ndarray attributes -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', - """Array protocol: Python side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', - """None.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', - """Array priority.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', - """Array protocol: C-struct side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', - """Allow the array to be interpreted as a ctypes object by returning the - data-memory location as an integer - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('base', - """Base object if memory is from some other object. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', - """A ctypes interface object. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('data', - """Buffer object pointing to the start of the data. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', - """Data-type for the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', - """Imaginary part of the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', - """Length of one element in bytes. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', - """Special object providing array flags. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', - """A 1-d flat iterator. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', - """Number of bytes in the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', - """Number of array dimensions. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('real', - """Real part of the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', - """Tuple of array dimensions. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('size', - """Number of elements in the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', - """Tuple of bytes to step in each dimension. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('T', - """Same as self.transpose() except self is returned for self.ndim < 2. - - """)) - - -############################################################################## -# -# ndarray methods -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', - """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. - - Returns either a new reference to self if dtype is not given or a new array - of provided data type if dtype is different from the current dtype of the - array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', - """a.__array_wrap__(obj) -> Object of same type as a from ndarray obj. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', - """a.__copy__(|order) -> copy, possibly with different order. - - Return a copy of the array. - - Argument: - order -- Order of returned copy (default 'C') - If order is 'C' (False) then the result is contiguous (default). - If order is 'Fortran' (True) then the result has fortran order. 
- If order is 'Any' (None) then the result has fortran order - only if m is already in fortran order.; - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__() -> Deep copy of array. - - Used if copy.deepcopy is called on an array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', - """a.__reduce__() - - For pickling. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', - """a.__setstate__(version, shape, typecode, isfortran, rawdata) - - For unpickling. - - Arguments: - version -- optional pickle version. If omitted defaults to 0. - shape -- a tuple giving the shape - typecode -- a typecode - isFortran -- a bool stating if Fortran or no - rawdata -- a binary string with the data (or a list if Object array) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('all', - """ a.all(axis=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('any', - """ a.any(axis=None, out=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', - """ a.argmax(axis=None, out=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', - """ a.argmin(axis=None, out=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', - """a.argsort(axis=-1, kind='quicksort', order=None) -> indices - - Perform an indirect sort along the given axis using the algorithm specified - by the kind keyword. It returns an array of indices of the same shape as - 'a' that index data along the given axis in sorted order. - - :Parameters: - - axis : integer - Axis to be indirectly sorted. None indicates that the flattened - array should be used. Default is -1. - - kind : string - Sorting algorithm to use. Possible values are 'quicksort', - 'mergesort', or 'heapsort'. Default is 'quicksort'. - - order : list type or None - When a is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - :Returns: - - indices : integer array - Array of indices that sort 'a' along the specified axis. - - :SeeAlso: - - - lexsort : indirect stable sort with multiple keys - - sort : inplace sort - - :Notes: - ------ - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A stable - sort keeps items with the same key in the same relative order. The three - available algorithms have the following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when the sort is not - along the last axis. Consequently, sorts along the last axis are faster and use - less space than sorts along other axis. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', - """a.astype(t) -> Copy of array cast to type t. - - Cast array m to type t. t can be either a string representing a typecode, - or a python type object of type int, float, or complex. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', - """a.byteswap(False) -> View or copy. Swap the bytes in the array. - - Swap the bytes in the array. Return the byteswapped array. 
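Editor's note: a quick illustration of the argsort docstring above; 'mergesort' is the stable choice in the table:

    import numpy as np

    a = np.array([3, 1, 2])
    ind = a.argsort(kind='mergesort')   # stable sort, O(n*log(n)), ~n/2 workspace
    print(ind)                          # [1 2 0]
    print(a[ind])                       # [1 2 3]: the indices put a in sorted order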
If the first - argument is True, byteswap in-place and return a reference to self. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', - """ a.choose(b0, b1, ..., bn, out=None, mode='raise') - - Return an array that merges the b_i arrays together using 'a' as - the index The b_i arrays and 'a' must all be broadcastable to the - same shape. The output at a particular position is the input - array b_i at that position depending on the value of 'a' at that - position. Therefore, 'a' must be an integer array with entries - from 0 to n+1.; - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', - """a.clip(min=, max=, out=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', - """a.compress(condition=, axis=None, out=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', - """a.conj() - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', - """a.conjugate() - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', - """a.copy(|order) -> copy, possibly with different order. - - Return a copy of the array. - - Argument: - order -- Order of returned copy (default 'C') - If order is 'C' (False) then the result is contiguous (default). - If order is 'Fortran' (True) then the result has fortran order. - If order is 'Any' (None) then the result has fortran order - only if m is already in fortran order.; - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', - """a.cumprod(axis=None, dtype=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', - """a.cumsum(axis=None, dtype=None, out=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', - """a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals - - If a is 2-d, return the diagonal of self with the given offset, i.e., the - collection of elements of the form a[i,i+offset]. If a is n-d with n > 2, - then the axes specified by axis1 and axis2 are used to determine the 2-d - subarray whose diagonal is returned. The shape of the resulting array can - be determined by removing axis1 and axis2 and appending an index to the - right equal to the size of the resulting diagonals. - - :Parameters: - offset : integer - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to main diagonal. - axis1 : integer - Axis to be used as the first axis of the 2-d subarrays from which - the diagonals should be taken. Defaults to first index. - axis2 : integer - Axis to be used as the second axis of the 2-d subarrays from which - the diagonals should be taken. Defaults to second index. - - :Returns: - array_of_diagonals : same type as original array - If a is 2-d, then a 1-d array containing the diagonal is returned. - If a is n-d, n > 2, then an array of diagonals is returned. - - :SeeAlso: - - diag : matlab workalike for 1-d and 2-d arrays. - - diagflat : creates diagonal arrays - - trace : sum along diagonals - - Examples - -------- - - >>> a = arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> a.diagonal() - array([0, 3]) - >>> a.diagonal(1) - array([1]) - - >>> a = arange(8).reshape(2,2,2) - >>> a - array([[[0, 1], - [2, 3]], - - [[4, 5], - [6, 7]]]) - >>> a.diagonal(0,-2,-1) - array([[0, 3], - [4, 7]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', - """a.dump(file) Dump a pickle of the array to the specified file. 
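Editor's note: choose, as described above, picks each output element from one of the given arrays according to an integer index array; current ndarray.choose takes the choice arrays as a single sequence:

    import numpy as np

    index = np.array([0, 1, 1, 0])
    choices = [np.array([10, 11, 12, 13]),
               np.array([20, 21, 22, 23])]
    print(index.choose(choices))   # [10 21 22 13]: entry i comes from choices[index[i]][i]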
- - The array can be read back with pickle.load or numpy.load - - Arguments: - file -- string naming the dump file. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', - """a.dumps() returns the pickle of the array as a string. - - pickle.loads or numpy.loads will convert the string back to an array. - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', - """a.fill(value) -> None. Fill the array with the scalar value. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', - """a.flatten([fortran]) return a 1-d array (always copy) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', - """a.getfield(dtype, offset) -> field of array as given type. - - Returns a field of the given array as a certain type. A field is a view of - the array data with each itemsize determined by the given type and the - offset into the current array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('item', - """a.item() -> copy of first array item as Python scalar. - - Copy the first element of array to a standard Python scalar and return - it. The array must be of size one. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('max', - """a.max(axis=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', - """a.mean(axis=None, dtype=None, out=None) -> mean - - Returns the average of the array elements. The average is taken over the - flattened array by default, otherwise over the specified axis. - - :Parameters: - - axis : integer - Axis along which the means are computed. The default is - to compute the standard deviation of the flattened array. - - dtype : type - Type to use in computing the means. For arrays of - integer type the default is float32, for arrays of float types it - is the same as the array type. - - out : ndarray - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - :Returns: - - mean : The return type varies, see above. - A new array holding the result is returned unless out is specified, - in which case a reference to out is returned. - - :SeeAlso: - - - var : variance - - std : standard deviation - - Notes - ----- - - The mean is the sum of the elements along the axis divided by the - number of elements. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('min', - """a.min(axis=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', - """a.newbyteorder() is equivalent to - a.view(a.dtype.newbytorder()) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', - """a.nonzero() returns a tuple of arrays - - Returns a tuple of arrays, one for each dimension of a, - containing the indices of the non-zero elements in that - dimension. The corresponding non-zero values can be obtained - with - a[a.nonzero()]. - - To group the indices by element, rather than dimension, use - transpose(a.nonzero()) - instead. The result of this is always a 2d array, with a row for - each non-zero element.; - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', - """a.prod(axis=None, dtype=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', - """a.ptp(axis=None) a.max(axis)-a.min(axis) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('put', - """a.put(indices, values, mode) sets a.flat[n] = values[n] for - each n in indices. If values is shorter than indices then it - will repeat. 
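Editor's note: the two ways of using nonzero described above, per-dimension index arrays versus one (row, col) pair per element after transposing:

    import numpy as np

    a = np.array([[3, 0, 0],
                  [0, 4, 0],
                  [5, 6, 0]])
    print(a.nonzero())                # (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
    print(a[a.nonzero()])             # [3 4 5 6]: the non-zero values themselves
    print(np.transpose(a.nonzero()))  # one (row, col) pair per non-zero element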
- """)) - - -add_newdoc('numpy.core.multiarray', 'putmask', - """putmask(a, mask, values) sets a.flat[n] = values[n] for each n where - mask.flat[n] is true. If values is not the same size of a and mask then - it will repeat. This gives different behavior than a[mask] = values. - """) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', - """a.ravel([fortran]) return a 1-d array (copy only if needed) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', - """a.repeat(repeats=, axis=none) - - copy elements of a, repeats times. the repeats argument must be a sequence - of length a.shape[axis] or a scalar. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', - """a.reshape(d1, d2, ..., dn, order='c') - - Return a new array from this one. The new array must have the same number - of elements as self. Also always returns a view or raises a ValueError if - that is impossible. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', - """a.resize(new_shape, refcheck=True, order=False) -> None. Change array shape. - - Change size and shape of self inplace. Array must own its own memory and - not be referenced by other arrays. Returns None. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('round', - """a.round(decimals=0, out=None) -> out (a). Rounds to 'decimals' places. - - Keyword arguments: - decimals -- number of decimals to round to (default 0). May be negative. - out -- existing array to use for output (default a). - - Return: - Reference to out, where None specifies the original array a. - - Round to the specified number of decimals. When 'decimals' is negative it - specifies the number of positions to the left of the decimal point. The - real and imaginary parts of complex numbers are rounded separately. Nothing - is done if the array is not of float type and 'decimals' is >= 0. - - The keyword 'out' may be used to specify a different array to hold the - result rather than the default 'a'. If the type of the array specified by - 'out' differs from that of 'a', the result is cast to the new type, - otherwise the original type is kept. Floats round to floats by default. - - Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to - 0.0, etc. Results may also be surprising due to the inexact representation - of decimal fractions in IEEE floating point and the errors introduced in - scaling the numbers when 'decimals' is something other than 0. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', - """a.searchsorted(v, side='left') -> index array. - - Find the indices into a sorted array such that if the corresponding keys in - v were inserted before the indices the order of a would be preserved. If - side='left', then the first such index is returned. If side='right', then - the last such index is returned. If there is no such index because the key - is out of bounds, then the length of a is returned, i.e., the key would - need to be appended. The returned index array has the same shape as v. - - :Parameters: - - v : array or list type - Array of keys to be searched for in a. - - side : string - Possible values are : 'left', 'right'. Default is 'left'. Return - the first or last index where the key could be inserted. - - :Returns: - - indices : integer array - The returned array has the same shape as v. - - :SeeAlso: - - - sort - - histogram - - :Notes: - ------- - - The array a must be 1-d and is assumed to be sorted in ascending order. 
- Searchsorted uses binary search to find the required insertion points. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', - """m.setfield(value, dtype, offset) -> None. - places val into field of the given array defined by the data type and offset. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', - """a.setflags(write=None, align=None, uic=None) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', - """a.sort(axis=-1, kind='quicksort', order=None) -> None. - - Perform an inplace sort along the given axis using the algorithm specified - by the kind keyword. - - :Parameters: - - axis : integer - Axis to be sorted along. None indicates that the flattened array - should be used. Default is -1. - - kind : string - Sorting algorithm to use. Possible values are 'quicksort', - 'mergesort', or 'heapsort'. Default is 'quicksort'. - - order : list type or None - When a is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - :Returns: - - None - - :SeeAlso: - - - argsort : indirect sort - - lexsort : indirect stable sort on multiple keys - - searchsorted : find keys in sorted array - - :Notes: - ------ - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A stable - sort keeps items with the same key in the same relative order. The three - available algorithms have the following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when the sort is not - along the last axis. Consequently, sorts along the last axis are faster and use - less space than sorts along other axis. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', - """m.squeeze() eliminate all length-1 dimensions - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('std', - """a.std(axis=None, dtype=None, out=None) -> standard deviation. - - Returns the standard deviation of the array elements, a measure of the - spread of a distribution. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - :Parameters: - - axis : integer - Axis along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. - - dtype : type - Type to use in computing the standard deviation. For arrays of - integer type the default is float32, for arrays of float types it - is the same as the array type. - - out : ndarray - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - :Returns: - - standard deviation : The return type varies, see above. - A new array holding the result is returned unless out is specified, - in which case a reference to out is returned. - - :SeeAlso: - - - var : variance - - mean : average - - Notes - ----- - - The standard deviation is the square root of the average of the squared - deviations from the mean, i.e. var = sqrt(mean((x - x.mean())**2)). 
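Editor's note: the order keyword of sort, described above, is most useful on arrays with named fields; an in-place example (the field names here are made up for illustration):

    import numpy as np

    a = np.array([('carol', 33), ('alice', 25), ('bob', 25)],
                 dtype=[('name', 'U10'), ('age', int)])
    a.sort(order=['age', 'name'])   # in place; ties on 'age' fall back to 'name'
    print(a['name'])                # ['alice' 'bob' 'carol']
    print(a['age'])                 # [25 25 33]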
The - computed standard deviation is biased, i.e., the mean is computed by - dividing by the number of elements, N, rather than by N-1. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', - """a.sum(axis=None, dtype=None) -> Sum of array over given axis. - - Sum the array over the given axis. If the axis is None, sum over - all dimensions of the array. - - The optional dtype argument is the data type for the returned - value and intermediate calculations. The default is to upcast - (promote) smaller integer types to the platform-dependent int. - For example, on 32-bit platforms: - - a.dtype default sum dtype - --------------------------------------------------- - bool, int8, int16, int32 int32 - - Warning: The arithmetic is modular and no error is raised on overflow. - - Examples: - - >>> array([0.5, 1.5]).sum() - 2.0 - >>> array([0.5, 1.5]).sum(dtype=int32) - 1 - >>> array([[0, 1], [0, 5]]).sum(axis=0) - array([0, 6]) - >>> array([[0, 1], [0, 5]]).sum(axis=1) - array([1, 5]) - >>> ones(128, dtype=int8).sum(dtype=int8) # overflow! - -128 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', - """a.swapaxes(axis1, axis2) -> new view with axes swapped. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('take', - """a.take(indices, axis=None, out=None, mode='raise') -> new array. - - The new array is formed from the elements of a indexed by indices along the - given axis. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', - """a.tofile(fid, sep="", format="%s") -> None. Write the data to a file. - - Required arguments: - file -- an open file object or a string containing a filename - - Keyword arguments: - sep -- separator for text output. Write binary if empty (default "") - format -- format string for text file output (default "%s") - - A convenience function for quick storage of array data. Information on - endianess and precision is lost, so this method is not a good choice for - files intended to archive data or transport data between machines with - different endianess. Some of these problems can be overcome by outputting - the data as text files at the expense of speed and file size. - - If 'sep' is empty this method is equivalent to file.write(a.tostring()). If - 'sep' is not empty each data item is converted to the nearest Python type - and formatted using "format"%item. The resulting strings are written to the - file separated by the contents of 'sep'. The data is always written in "C" - (row major) order independent of the order of 'a'. - - The data produced by this method can be recovered by using the function - fromfile(). - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', - """a.tolist() -> Array as hierarchical list. - - Copy the data portion of the array to a hierarchical python list and return - that list. Data items are converted to the nearest compatible Python type. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', - """a.tostring(order='C') -> raw copy of array data as a Python string. - - Keyword arguments: - order -- order of the data item in the copy {"C","F","A"} (default "C") - - Construct a Python string containing the raw bytes in the array. The order - of the data in arrays with ndim > 1 is specified by the 'order' keyword and - this keyword overrides the order of the array. The - choices are: - - "C" -- C order (row major) - "Fortran" -- Fortran order (column major) - "Any" -- Current order of array. 
- None -- Same as "Any" - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', - """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) - return the sum along the offset diagonal of the array's indicated - axis1 and axis2. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', - """a.transpose(*axes) - - Returns a view of 'a' with axes transposed. If no axes are given, - or None is passed, switches the order of the axes. For a 2-d - array, this is the usual matrix transpose. If axes are given, - they describe how the axes are permuted. - - Example: - >>> a = array([[1,2],[3,4]]) - >>> a - array([[1, 2], - [3, 4]]) - >>> a.transpose() - array([[1, 3], - [2, 4]]) - >>> a.transpose((1,0)) - array([[1, 3], - [2, 4]]) - >>> a.transpose(1,0) - array([[1, 3], - [2, 4]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('var', - """a.var(axis=None, dtype=None, out=None) -> variance - - Returns the variance of the array elements, a measure of the spread of a - distribution. The variance is computed for the flattened array by default, - otherwise over the specified axis. - - :Parameters: - - axis : integer - Axis along which the variance is computed. The default is to - compute the variance of the flattened array. - - dtype : type - Type to use in computing the variance. For arrays of integer type - the default is float32, for arrays of float types it is the same as - the array type. - - out : ndarray - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - :Returns: - - variance : The return type varies, see above. - A new array holding the result is returned unless out is specified, - in which case a reference to out is returned. - - :SeeAlso: - - - std : standard deviation - - mean: average - - Notes - ----- - - The variance is the average of the squared deviations from the mean, i.e. - var = mean((x - x.mean())**2). The computed variance is biased, i.e., - the mean is computed by dividing by the number of elements, N, rather - than by N-1. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('view', - """a.view() -> new view of array with same data. 
- - Type can be either a new sub-type object or a data-descriptor object - - """)) diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py deleted file mode 100644 index 4d22394d5..000000000 --- a/numpy/core/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ - -from info import __doc__ -from numpy.version import version as __version__ - -import multiarray -import umath -import _internal # for freeze programs -import numerictypes as nt -multiarray.set_typeDict(nt.sctypeDict) -import _sort -from numeric import * -from fromnumeric import * -from defmatrix import * -import ma -import defchararray as char -import records as rec -from records import * -from memmap import * -from defchararray import * -import scalarmath -del nt - -from fromnumeric import amax as max, amin as min, \ - round_ as round -from numeric import absolute as abs - -__all__ = ['char','rec','memmap','ma'] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += defmatrix.__all__ -__all__ += rec.__all__ -__all__ += char.__all__ - - - -def test(level=1, verbosity=1): - from numpy.testing import NumpyTest - return NumpyTest().test(level, verbosity) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py deleted file mode 100644 index b6e153580..000000000 --- a/numpy/core/_internal.py +++ /dev/null @@ -1,289 +0,0 @@ -#A place for code to be called from C-code -# that implements more complicated stuff. - -import re -import sys - -if (sys.byteorder == 'little'): - _nbo = '<' -else: - _nbo = '>' - -def _makenames_list(adict): - from multiarray import dtype - allfields = [] - fnames = adict.keys() - for fname in fnames: - obj = adict[fname] - n = len(obj) - if not isinstance(obj, tuple) or n not in [2,3]: - raise ValueError, "entry not a 2- or 3- tuple" - if (n > 2) and (obj[2] == fname): - continue - num = int(obj[1]) - if (num < 0): - raise ValueError, "invalid offset." - format = dtype(obj[0]) - if (format.itemsize == 0): - raise ValueError, "all itemsizes must be fixed." - if (n > 2): - title = obj[2] - else: - title = None - allfields.append((fname, format, num, title)) - # sort by offsets - allfields.sort(lambda x,y: cmp(x[2],y[2])) - names = [x[0] for x in allfields] - formats = [x[1] for x in allfields] - offsets = [x[2] for x in allfields] - titles = [x[3] for x in allfields] - - return names, formats, offsets, titles - -# Called in PyArray_DescrConverter function when -# a dictionary without "names" and "formats" -# fields is used as a data-type descriptor. 
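Editor's note: the dictionary form handled by _makenames_list above and _usefields just below (each field name mapping to a (format, offset[, title]) tuple) is, at the time of writing, still accepted by numpy.dtype, and the resulting field names come out sorted by offset:

    import numpy as np

    # {name: (format, byte offset)} -- the dict form these helpers parse.
    dt = np.dtype({'y': (np.float64, 8), 'x': (np.int32, 0)})
    print(dt.names)        # ('x', 'y'): ordered by offset, not by dict order
    print(dt.fields['y'])  # (dtype('float64'), 8)
    print(dt.itemsize)     # 16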
-def _usefields(adict, align): - from multiarray import dtype - try: - names = adict[-1] - except KeyError: - names = None - if names is None: - names, formats, offsets, titles = _makenames_list(adict) - else: - formats = [] - offsets = [] - titles = [] - for name in names: - res = adict[name] - formats.append(res[0]) - offsets.append(res[1]) - if (len(res) > 2): - titles.append(res[2]) - else: - titles.append(None) - - return dtype({"names" : names, - "formats" : formats, - "offsets" : offsets, - "titles" : titles}, align) - - -# construct an array_protocol descriptor list -# from the fields attribute of a descriptor -# This calls itself recursively but should eventually hit -# a descriptor that has no fields and then return -# a simple typestring - -def _array_descr(descriptor): - fields = descriptor.fields - if fields is None: - return descriptor.str - - names = descriptor.names - ordered_fields = [fields[x] + (x,) for x in names] - result = [] - offset = 0 - for field in ordered_fields: - if field[1] > offset: - num = field[1] - offset - result.append(('','|V%d' % num)) - offset += num - if len(field) > 3: - name = (field[2],field[3]) - else: - name = field[2] - if field[0].subdtype: - tup = (name, _array_descr(field[0].subdtype[0]), - field[0].subdtype[1]) - else: - tup = (name, _array_descr(field[0])) - offset += field[0].itemsize - result.append(tup) - - return result - -# Build a new array from the information in a pickle. -# Note that the name numpy.core._internal._reconstruct is embedded in -# pickles of ndarrays made with NumPy before release 1.0 -# so don't remove the name here, or you'll -# break backward compatibilty. -def _reconstruct(subtype, shape, dtype): - from multiarray import ndarray - return ndarray.__new__(subtype, shape, dtype) - - -# format_re and _split were taken from numarray by J. Todd Miller - -def _split(input): - """Split the input formats string into field formats without splitting - the tuple used to specify multi-dimensional arrays.""" - - newlist = [] - hold = '' - - listinput = input.split(',') - for element in listinput: - if hold != '': - item = hold + ',' + element - else: - item = element - left = item.count('(') - right = item.count(')') - - # if the parenthesis is not balanced, hold the string - if left > right : - hold = item - - # when balanced, append to the output list and reset the hold - elif left == right: - newlist.append(item.strip()) - hold = '' - - # too many close parenthesis is unacceptable - else: - raise SyntaxError, item - - # if there is string left over in hold - if hold != '': - raise SyntaxError, hold - - return newlist - -format_re = re.compile(r'(?P[<>|=]?)(?P *[(]?[ ,0-9]*[)]? 
*)(?P[<>|=]?)(?P[A-Za-z0-9.]*)') - -# astr is a string (perhaps comma separated) - -_convorder = {'=': _nbo, - '|': '|', - '>': '>', - '<': '<'} - -def _commastring(astr): - res = _split(astr) - if (len(res)) < 1: - raise ValueError, "unrecognized formant" - result = [] - for k,item in enumerate(res): - # convert item - try: - (order1, repeats, order2, dtype) = format_re.match(item).groups() - except (TypeError, AttributeError): - raise ValueError('format %s is not recognized' % item) - - if order2 == '': - order = order1 - elif order1 == '': - order = order2 - else: - order1 = _convorder[order1] - order2 = _convorder[order2] - if (order1 != order2): - raise ValueError('in-consistent byte-order specification %s and %s' % (order1, order2)) - order = order1 - - if order in ['|', '=', _nbo]: - order = '' - dtype = '%s%s' % (order, dtype) - if (repeats == ''): - newitem = dtype - else: - newitem = (dtype, eval(repeats)) - result.append(newitem) - - return result - -def _getintp_ctype(): - from multiarray import dtype - val = _getintp_ctype.cache - if val is not None: - return val - char = dtype('p').char - import ctypes - if (char == 'i'): - val = ctypes.c_int - elif char == 'l': - val = ctypes.c_long - elif char == 'q': - val = ctypes.c_longlong - else: - val = ctypes.c_long - _getintp_ctype.cache = val - return val -_getintp_ctype.cache = None - -# Used for .ctypes attribute of ndarray - -class _missing_ctypes(object): - def cast(self, num, obj): - return num - - def c_void_p(self, num): - return num - -class _ctypes(object): - def __init__(self, array, ptr=None): - try: - import ctypes - self._ctypes = ctypes - except ImportError: - self._ctypes = _missing_ctypes() - self._arr = array - self._data = ptr - if self._arr.ndim == 0: - self._zerod = True - else: - self._zerod = False - - def data_as(self, obj): - return self._ctypes.cast(self._data, obj) - - def shape_as(self, obj): - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.shape) - - def strides_as(self, obj): - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.strides) - - def get_data(self): - return self._data - - def get_shape(self): - if self._zerod: - return None - return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape) - - def get_strides(self): - if self._zerod: - return None - return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides) - - def get_as_parameter(self): - return self._ctypes.c_void_p(self._data) - - data = property(get_data, None, doc="c-types data") - shape = property(get_shape, None, doc="c-types shape") - strides = property(get_strides, None, doc="c-types strides") - _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") - - -# Given a datatype and an order object -# return a new names tuple -# with the order indicated -def _newnames(datatype, order): - oldnames = datatype.names - nameslist = list(oldnames) - if isinstance(order, str): - order = [order] - if isinstance(order, (list, tuple)): - for name in order: - try: - nameslist.remove(name) - except ValueError: - raise ValueError, "unknown field name: %s" % (name,) - return tuple(list(order) + nameslist) - raise ValueError, "unsupported order value: %s" % (order,) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py deleted file mode 100644 index 30035bbff..000000000 --- a/numpy/core/arrayprint.py +++ /dev/null @@ -1,452 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", 
"get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -import numeric as _nc -import numerictypes as _nt -from umath import maximum, minimum, absolute, not_equal, isnan, isinf -from multiarray import format_longfloat -from fromnumeric import ravel - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'NaN' -_inf_str = 'Inf' - - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None): - """Set options associated with printing. - - :Parameters: - precision : int - Number of digits of precision for floating point output (default 8). - threshold : int - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : string - String representation of floating point not-a-number (default nan). - infstr : string - String representation of floating point infinity (default inf). - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - -def get_printoptions(): - """Return the current print options. 
- - :Returns: - dictionary of current print options with keys - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : string - - infstr : string - - :SeeAlso: - - set_printoptions : parameter descriptions - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str) - return d - -def _leading_trailing(a): - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: return ' True' - else: return 'False' - - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix=""): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - try: - format_function = a._format - except AttributeError: - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - # make sure True and False line up. - format_function = _boolFormatter - elif issubclass(dtypeobj, _nt.integer): - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - format = '%' + str(max_str_len) + 'd' - format_function = lambda x: _formatInteger(x, format) - elif issubclass(dtypeobj, _nt.floating): - if issubclass(dtypeobj, _nt.longfloat): - format_function = _longfloatFormatter(precision) - else: - format_function = FloatFormat(data, precision, suppress_small) - elif issubclass(dtypeobj, _nt.complexfloating): - if issubclass(dtypeobj, _nt.clongfloat): - format_function = _clongfloatFormatter(precision) - else: - format_function = ComplexFormat(data, precision, suppress_small) - elif issubclass(dtypeobj, _nt.unicode_) or \ - issubclass(dtypeobj, _nt.string_): - format_function = repr - else: - format_function = str - - next_line_prefix = " " # skip over "[" - next_line_prefix += " "*len(prefix) # skip over array( - - lst = _formatArray(a, format_function, len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - - return lst - -def _convert_arrays(obj): - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width = None, precision = None, - suppress_small = None, separator=' ', prefix="", - style=repr): - """Return a string representation of an array. - - :Parameters: - a : ndarray - Input array. - max_line_width : int - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int - Floating point precision. - suppress_small : bool - Represent very small numbers as zero. - separator : string - Inserted between elements. 
- prefix : string - An array is typically printed as - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function - - Examples - -------- - - >>> x = N.array([1e-16,1,2,3]) - >>> print array2string(x,precision=2,separator=',',suppress_small=True) - [ 0., 1., 2., 3.] - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in xrange(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in xrange(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in xrange(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in xrange(trailing_items, 1, -1): - if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = precision - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - self.max_str_len = 0 - self.fillFormat(data) - - def fillFormat(self, data): - errstate = _nc.seterr(all='ignore') - try: - special = isnan(data) | isinf(data) - non_zero = absolute(data.compress(not_equal(data, 0) & ~special)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. 
- else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): - self.exp_format = True - finally: - _nc.seterr(**errstate) - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' - else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) - else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) - else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' - else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) - self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format - - def __call__(self, x, strip_zeros=True): - if isnan(x): - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + _inf_str,) - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s - - -def _digits(x, precision, format): - s = format % x - z = s.rstrip('0') - return precision - len(s) + len(z) - - -_MAXINT = sys.maxint -_MININT = -sys.maxint-1 -def _formatInteger(x, format): - if _MININT < x < _MAXINT: - return format % x - else: - return "%s" % x - -def _longfloatFormatter(precision): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def formatter(x): - if isnan(x): - return _nan_str - elif isinf(x): - if x > 0: - return _inf_str - else: - return '-' + _inf_str - return format_longfloat(x, precision) - return formatter - -def _clongfloatFormatter(precision): - def formatter(x): - r = format_longfloat(x.real, precision) - i = format_longfloat(x.imag, precision) - return '%s+%sj' % (r, i) - return formatter - -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) - - def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not self.imag_format.exp_format: - z = i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) - else: - i = i + 'j' - return r + i - -## end diff --git a/numpy/core/blasdot/_dotblas.c b/numpy/core/blasdot/_dotblas.c deleted file mode 100644 index 933b21137..000000000 --- a/numpy/core/blasdot/_dotblas.c +++ /dev/null @@ -1,1113 +0,0 @@ -static char module_doc[] = -"This module provides a BLAS optimized\nmatrix multiply, inner product and dot for numpy arrays"; - -#include "Python.h" -#include "numpy/noprefix.h" -#ifndef CBLAS_HEADER -#define CBLAS_HEADER "cblas.h" -#endif -#include CBLAS_HEADER - -#include - -static PyArray_DotFunc *oldFunctions[PyArray_NTYPES]; - -static void 
-FLOAT_dot(void *a, intp stridea, void *b, intp strideb, void *res, - intp n, void *tmp) -{ - register int na = stridea / sizeof(float); - register int nb = strideb / sizeof(float); - - if ((sizeof(float) * na == stridea) && - (sizeof(float) * nb == strideb) && - (na >= 0) && (nb >= 0)) - *((float *)res) = cblas_sdot((int)n, (float *)a, na, (float *)b, nb); - - else - oldFunctions[PyArray_FLOAT](a, stridea, b, strideb, res, n, tmp); -} - -static void -DOUBLE_dot(void *a, intp stridea, void *b, intp strideb, void *res, - intp n, void *tmp) -{ - register int na = stridea / sizeof(double); - register int nb = strideb / sizeof(double); - - if ((sizeof(double) * na == stridea) && - (sizeof(double) * nb == strideb) && - (na >= 0) && (nb >= 0)) - *((double *)res) = cblas_ddot((int)n, (double *)a, na, (double *)b, nb); - else - oldFunctions[PyArray_DOUBLE](a, stridea, b, strideb, res, n, tmp); -} - -static void -CFLOAT_dot(void *a, intp stridea, void *b, intp strideb, void *res, - intp n, void *tmp) -{ - - register int na = stridea / sizeof(cfloat); - register int nb = strideb / sizeof(cfloat); - - if ((sizeof(cfloat) * na == stridea) && - (sizeof(cfloat) * nb == strideb) && - (na >= 0) && (nb >= 0)) - cblas_cdotu_sub((int)n, (float *)a, na, (float *)b, nb, (float *)res); - else - oldFunctions[PyArray_CFLOAT](a, stridea, b, strideb, res, n, tmp); -} - -static void -CDOUBLE_dot(void *a, intp stridea, void *b, intp strideb, void *res, - intp n, void *tmp) -{ - register int na = stridea / sizeof(cdouble); - register int nb = strideb / sizeof(cdouble); - - if ((sizeof(cdouble) * na == stridea) && - (sizeof(cdouble) * nb == strideb) && - (na >= 0) && (nb >= 0)) - cblas_zdotu_sub((int)n, (double *)a, na, (double *)b, nb, (double *)res); - else - oldFunctions[PyArray_CDOUBLE](a, stridea, b, strideb, res, n, tmp); -} - - -static Bool altered=FALSE; - -static char doc_alterdot[] = "alterdot() changes all dot functions to use blas."; - -static PyObject * -dotblas_alterdot(PyObject *dummy, PyObject *args) -{ - PyArray_Descr *descr; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - /* Replace the dot functions to the ones using blas */ - - if (!altered) { - descr = PyArray_DescrFromType(PyArray_FLOAT); - oldFunctions[PyArray_FLOAT] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)FLOAT_dot; - - descr = PyArray_DescrFromType(PyArray_DOUBLE); - oldFunctions[PyArray_DOUBLE] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)DOUBLE_dot; - - descr = PyArray_DescrFromType(PyArray_CFLOAT); - oldFunctions[PyArray_CFLOAT] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)CFLOAT_dot; - - descr = PyArray_DescrFromType(PyArray_CDOUBLE); - oldFunctions[PyArray_CDOUBLE] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)CDOUBLE_dot; - - altered = TRUE; - } - - Py_INCREF(Py_None); - return Py_None; -} - -static char doc_restoredot[] = "restoredot() restores dots to defaults."; - -static PyObject * -dotblas_restoredot(PyObject *dummy, PyObject *args) -{ - PyArray_Descr *descr; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - if (altered) { - descr = PyArray_DescrFromType(PyArray_FLOAT); - descr->f->dotfunc = oldFunctions[PyArray_FLOAT]; - oldFunctions[PyArray_FLOAT] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(PyArray_DOUBLE); - descr->f->dotfunc = oldFunctions[PyArray_DOUBLE]; - oldFunctions[PyArray_DOUBLE] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(PyArray_CFLOAT); - descr->f->dotfunc = oldFunctions[PyArray_CFLOAT]; - 
oldFunctions[PyArray_CFLOAT] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(PyArray_CDOUBLE); - descr->f->dotfunc = oldFunctions[PyArray_CDOUBLE]; - oldFunctions[PyArray_CDOUBLE] = NULL; - Py_XDECREF(descr); - - altered = FALSE; - } - - Py_INCREF(Py_None); - return Py_None; -} - -typedef enum {_scalar, _column, _row, _matrix} MatrixShape; - -static MatrixShape -_select_matrix_shape(PyArrayObject *array) -{ - switch (array->nd) { - case 0: - return _scalar; - case 1: - if (array->dimensions[0] > 1) - return _column; - return _scalar; - case 2: - if (array->dimensions[0] > 1) { - if (array->dimensions[1] == 1) - return _column; - else - return _matrix; - } - if (array->dimensions[1] == 1) - return _scalar; - return _row; - } - return _matrix; -} - - -static int -_bad_strides(PyArrayObject *ap) -{ - register int itemsize = PyArray_ITEMSIZE(ap); - register int i, N=PyArray_NDIM(ap); - register intp *strides = PyArray_STRIDES(ap); - - for (i=0; ind > 2) || (ap2->nd > 2)) { - /* This function doesn't handle dimensions greater than 2 - (or negative striding) -- other - than to ensure the dot function is altered - */ - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - ret = (PyArrayObject *)PyArray_MatrixProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (_bad_strides(ap1)) { - op1 = PyArray_NewCopy(ap1, PyArray_ANYORDER); - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - if (ap1 == NULL) goto fail; - } - if (_bad_strides(ap2)) { - op2 = PyArray_NewCopy(ap2, PyArray_ANYORDER); - Py_DECREF(ap2); - ap2 = (PyArrayObject *)op2; - if (ap2 == NULL) goto fail; - } - ap1shape = _select_matrix_shape(ap1); - ap2shape = _select_matrix_shape(ap2); - - if (ap1shape == _scalar || ap2shape == _scalar) { - PyArrayObject *oap1, *oap2; - oap1 = ap1; oap2 = ap2; - /* One of ap1 or ap2 is a scalar */ - if (ap1shape == _scalar) { /* Make ap2 the scalar */ - PyArrayObject *t = ap1; - ap1 = ap2; - ap2 = t; - ap1shape = ap2shape; - ap2shape = _scalar; - } - - if (ap1shape == _row) ap1stride = ap1->strides[1]; - else if (ap1->nd > 0) ap1stride = ap1->strides[0]; - - if (ap1->nd == 0 || ap2->nd == 0) { - intp *thisdims; - if (ap1->nd == 0) { - nd = ap2->nd; - thisdims = ap2->dimensions; - } - else { - nd = ap1->nd; - thisdims = ap1->dimensions; - } - l = 1; - for (j=0; jdimensions[oap1->nd-1]; - - if (oap2->dimensions[0] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - nd = ap1->nd + ap2->nd - 2; - /* nd = 0 or 1 or 2 */ - /* If nd == 0 do nothing ... */ - if (nd == 1) { - /* Either ap1->nd is 1 dim or ap2->nd is 1 dim - and the other is 2-dim */ - dimensions[0] = (oap1->nd == 2) ? 
oap1->dimensions[0] : oap2->dimensions[1]; - l = dimensions[0]; - /* Fix it so that dot(shape=(N,1), shape=(1,)) - and dot(shape=(1,), shape=(1,N)) both return - an (N,) array (but use the fast scalar code) - */ - } - else if (nd == 2) { - dimensions[0] = oap1->dimensions[0]; - dimensions[1] = oap2->dimensions[1]; - /* We need to make sure that dot(shape=(1,1), shape=(1,N)) - and dot(shape=(N,1),shape=(1,1)) uses - scalar multiplication appropriately - */ - if (ap1shape == _row) l = dimensions[1]; - else l = dimensions[0]; - } - } - } - else { /* (ap1->nd <= 2 && ap2->nd <= 2) */ - /* Both ap1 and ap2 are vectors or matrices */ - l = ap1->dimensions[ap1->nd-1]; - - if (ap2->dimensions[0] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - nd = ap1->nd+ap2->nd-2; - - if (nd == 1) - dimensions[0] = (ap1->nd == 2) ? ap1->dimensions[0] : ap2->dimensions[1]; - else if (nd == 2) { - dimensions[0] = ap1->dimensions[0]; - dimensions[1] = ap2->dimensions[1]; - } - } - - /* Choose which subtype to return */ - if (ap1->ob_type != ap2->ob_type) { - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type); - } - else { - prior1 = prior2 = 0.0; - subtype = ap1->ob_type; - } - - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? ap2 : ap1)); - - if (ret == NULL) goto fail; - numbytes = PyArray_NBYTES(ret); - memset(ret->data, 0, numbytes); - if (numbytes==0 || l == 0) { - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - - if (ap2shape == _scalar) { - /* Multiplication by a scalar -- Level 1 BLAS */ - /* if ap1shape is a matrix and we are not contiguous, then we can't - just blast through the entire array using a single - striding factor */ - NPY_BEGIN_ALLOW_THREADS - - if (typenum == PyArray_DOUBLE) { - if (l == 1) { - *((double *)ret->data) = *((double *)ap2->data) * \ - *((double *)ap1->data); - } - else if (ap1shape != _matrix) { - cblas_daxpy(l, *((double *)ap2->data), (double *)ap1->data, - ap1stride/sizeof(double), (double *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - double val; - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - val = *((double *)ap2->data); - a1s = ap1->strides[maxind] / sizeof(double); - rets = ret->strides[maxind] / sizeof(double); - for (i=0; i < ap1->dimensions[oind]; i++) { - cblas_daxpy(l, val, (double *)ptr, a1s, - (double *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - else if (typenum == PyArray_CDOUBLE) { - if (l == 1) { - cdouble *ptr1, *ptr2, *res; - ptr1 = (cdouble *)ap2->data; - ptr2 = (cdouble *)ap1->data; - res = (cdouble *)ret->data; - res->real = ptr1->real * ptr2->real - ptr1->imag * ptr2->imag; - res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; - } - else if (ap1shape != _matrix) { - cblas_zaxpy(l, (double *)ap2->data, (double *)ap1->data, - ap1stride/sizeof(cdouble), (double *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - double *pval; - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 
0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - pval = (double *)ap2->data; - a1s = ap1->strides[maxind] / sizeof(cdouble); - rets = ret->strides[maxind] / sizeof(cdouble); - for (i=0; i < ap1->dimensions[oind]; i++) { - cblas_zaxpy(l, pval, (double *)ptr, a1s, - (double *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - else if (typenum == PyArray_FLOAT) { - if (l == 1) { - *((float *)ret->data) = *((float *)ap2->data) * \ - *((float *)ap1->data); - } - else if (ap1shape != _matrix) { - cblas_saxpy(l, *((float *)ap2->data), (float *)ap1->data, - ap1stride/sizeof(float), (float *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - float val; - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - val = *((float *)ap2->data); - a1s = ap1->strides[maxind] / sizeof(float); - rets = ret->strides[maxind] / sizeof(float); - for (i=0; i < ap1->dimensions[oind]; i++) { - cblas_saxpy(l, val, (float *)ptr, a1s, - (float *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - else if (typenum == PyArray_CFLOAT) { - if (l == 1) { - cfloat *ptr1, *ptr2, *res; - ptr1 = (cfloat *)ap2->data; - ptr2 = (cfloat *)ap1->data; - res = (cfloat *)ret->data; - res->real = ptr1->real * ptr2->real - ptr1->imag * ptr2->imag; - res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; - } - else if (ap1shape != _matrix) { - cblas_caxpy(l, (float *)ap2->data, (float *)ap1->data, - ap1stride/sizeof(cfloat), (float *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - float *pval; - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 
0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - pval = (float *)ap2->data; - a1s = ap1->strides[maxind] / sizeof(cfloat); - rets = ret->strides[maxind] / sizeof(cfloat); - for (i=0; i < ap1->dimensions[oind]; i++) { - cblas_caxpy(l, pval, (float *)ptr, a1s, - (float *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - NPY_END_ALLOW_THREADS - } - else if ((ap2shape == _column) && (ap1shape != _matrix)) { - int ap1s, ap2s; - NPY_BEGIN_ALLOW_THREADS - - ap2s = ap2->strides[0] / ap2->descr->elsize; - if (ap1shape == _row) { - ap1s = ap1->strides[1] / ap1->descr->elsize; - } - else { - ap1s = ap1->strides[0] / ap1->descr->elsize; - } - - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - double result = cblas_ddot(l, (double *)ap1->data, ap1s, - (double *)ap2->data, ap2s); - *((double *)ret->data) = result; - } - else if (typenum == PyArray_FLOAT) { - float result = cblas_sdot(l, (float *)ap1->data, ap1s, - (float *)ap2->data, ap2s); - *((float *)ret->data) = result; - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zdotu_sub(l, (double *)ap1->data, ap1s, - (double *)ap2->data, ap2s, (double *)ret->data); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cdotu_sub(l, (float *)ap1->data, ap1s, - (float *)ap2->data, ap2s, (float *)ret->data); - } - NPY_END_ALLOW_THREADS - } - else if (ap1shape == _matrix && ap2shape != _matrix) { - /* Matrix vector multiplication -- Level 2 BLAS */ - /* lda must be MAX(M,1) */ - enum CBLAS_ORDER Order; - int ap2s; - - if (!PyArray_ISONESEGMENT(ap1)) { - PyObject *new; - new = PyArray_Copy(ap1); - Py_DECREF(ap1); - ap1 = (PyArrayObject *)new; - if (new == NULL) goto fail; - } - NPY_BEGIN_ALLOW_THREADS - if (PyArray_ISCONTIGUOUS(ap1)) { - Order = CblasRowMajor; - lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - } - else { - Order = CblasColMajor; - lda = (ap1->dimensions[0] > 1 ? ap1->dimensions[0] : 1); - } - ap2s = ap2->strides[0] / ap2->descr->elsize; - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(Order, CblasNoTrans, - ap1->dimensions[0], ap1->dimensions[1], - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, ap2s, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(Order, CblasNoTrans, - ap1->dimensions[0], ap1->dimensions[1], - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, ap2s, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(Order, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneD, (double *)ap1->data, lda, - (double *)ap2->data, ap2s, zeroD, - (double *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(Order, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneF, (float *)ap1->data, lda, - (float *)ap2->data, ap2s, zeroF, - (float *)ret->data, 1); - } - NPY_END_ALLOW_THREADS - } - else if (ap1shape != _matrix && ap2shape == _matrix) { - /* Vector matrix multiplication -- Level 2 BLAS */ - enum CBLAS_ORDER Order; - int ap1s; - - if (!PyArray_ISONESEGMENT(ap2)) { - PyObject *new; - new = PyArray_Copy(ap2); - Py_DECREF(ap2); - ap2 = (PyArrayObject *)new; - if (new == NULL) goto fail; - } - NPY_BEGIN_ALLOW_THREADS - if (PyArray_ISCONTIGUOUS(ap2)) { - Order = CblasRowMajor; - lda = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1); - } - else { - Order = CblasColMajor; - lda = (ap2->dimensions[0] > 1 ? 
ap2->dimensions[0] : 1); - } - if (ap1shape == _row) { - ap1s = ap1->strides[1] / ap1->descr->elsize; - } - else { - ap1s = ap1->strides[0] / ap1->descr->elsize; - } - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (double *)ap2->data, lda, - (double *)ap1->data, ap1s, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (float *)ap2->data, lda, - (float *)ap1->data, ap1s, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - oneD, (double *)ap2->data, lda, - (double *)ap1->data, ap1s, zeroD, (double *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - oneF, (float *)ap2->data, lda, - (float *)ap1->data, ap1s, zeroF, (float *)ret->data, 1); - } - NPY_END_ALLOW_THREADS - } - else { /* (ap1->nd == 2 && ap2->nd == 2) */ - /* Matrix matrix multiplication -- Level 3 BLAS */ - /* L x M multiplied by M x N */ - enum CBLAS_ORDER Order; - enum CBLAS_TRANSPOSE Trans1, Trans2; - int M, N, L; - - /* Optimization possible: */ - /* We may be able to handle single-segment arrays here - using appropriate values of Order, Trans1, and Trans2. - */ - - if (!PyArray_ISCONTIGUOUS(ap2)) { - PyObject *new; - new = PyArray_Copy(ap2); - Py_DECREF(ap2); - ap2 = (PyArrayObject *)new; - if (new == NULL) goto fail; - } - if (!PyArray_ISCONTIGUOUS(ap1)) { - PyObject *new; - new = PyArray_Copy(ap1); - Py_DECREF(ap1); - ap1 = (PyArrayObject *)new; - if (new == NULL) goto fail; - } - - NPY_BEGIN_ALLOW_THREADS - - Order = CblasRowMajor; - Trans1 = CblasNoTrans; - Trans2 = CblasNoTrans; - L = ap1->dimensions[0]; - N = ap2->dimensions[1]; - M = ap2->dimensions[0]; - lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - ldb = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1); - ldc = (ret->dimensions[1] > 1 ? 
ret->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemm(Order, Trans1, Trans2, - L, N, M, - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - 0.0, (double *)ret->data, ldc); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemm(Order, Trans1, Trans2, - L, N, M, - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - 0.0, (float *)ret->data, ldc); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemm(Order, Trans1, Trans2, - L, N, M, - oneD, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - zeroD, (double *)ret->data, ldc); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemm(Order, Trans1, Trans2, - L, N, M, - oneF, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - zeroF, (float *)ret->data, ldc); - } - NPY_END_ALLOW_THREADS - } - - - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -static char doc_innerproduct[] = "innerproduct(a,b)\nReturns the inner product of a and b for arrays of floating point types.\nLike the generic NumPy equivalent the product sum is over\nthe last dimension of a and b.\nNB: The first argument is not conjugated."; - -static PyObject * -dotblas_innerproduct(PyObject *dummy, PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *ap1, *ap2, *ret; - int j, l, lda, ldb, ldc; - int typenum, nd; - intp dimensions[MAX_DIMS]; - static const float oneF[2] = {1.0, 0.0}; - static const float zeroF[2] = {0.0, 0.0}; - static const double oneD[2] = {1.0, 0.0}; - static const double zeroD[2] = {0.0, 0.0}; - PyTypeObject *subtype; - double prior1, prior2; - - if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL; - - /* - * Inner product using the BLAS. The product sum is taken along the last - * dimensions of the two arrays. - * Only speeds things up for float double and complex types. 
- */ - - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - /* This function doesn't handle other types */ - if ((typenum != PyArray_DOUBLE && typenum != PyArray_CDOUBLE && - typenum != PyArray_FLOAT && typenum != PyArray_CFLOAT)) { - return PyArray_Return((PyArrayObject *)PyArray_InnerProduct(op1, op2)); - } - - ret = NULL; - ap1 = (PyArrayObject *)PyArray_ContiguousFromObject(op1, typenum, 0, 0); - if (ap1 == NULL) return NULL; - ap2 = (PyArrayObject *)PyArray_ContiguousFromObject(op2, typenum, 0, 0); - if (ap2 == NULL) goto fail; - - if ((ap1->nd > 2) || (ap2->nd > 2)) { - /* This function doesn't handle dimensions greater than 2 -- other - than to ensure the dot function is altered - */ - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (ap1->nd == 0 || ap2->nd == 0) { - /* One of ap1 or ap2 is a scalar */ - if (ap1->nd == 0) { /* Make ap2 the scalar */ - PyArrayObject *t = ap1; - ap1 = ap2; - ap2 = t; - } - for (l = 1, j = 0; j < ap1->nd; j++) { - dimensions[j] = ap1->dimensions[j]; - l *= dimensions[j]; - } - nd = ap1->nd; - } - else { /* (ap1->nd <= 2 && ap2->nd <= 2) */ - /* Both ap1 and ap2 are vectors or matrices */ - l = ap1->dimensions[ap1->nd-1]; - - if (ap2->dimensions[ap2->nd-1] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - nd = ap1->nd+ap2->nd-2; - - if (nd == 1) - dimensions[0] = (ap1->nd == 2) ? ap1->dimensions[0] : ap2->dimensions[0]; - else if (nd == 2) { - dimensions[0] = ap1->dimensions[0]; - dimensions[1] = ap2->dimensions[0]; - } - } - - /* Choose which subtype to return */ - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type); - - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *)\ - (prior2 > prior1 ? 
ap2 : ap1)); - - if (ret == NULL) goto fail; - NPY_BEGIN_ALLOW_THREADS - memset(ret->data, 0, PyArray_NBYTES(ret)); - - if (ap2->nd == 0) { - /* Multiplication by a scalar -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - cblas_daxpy(l, *((double *)ap2->data), (double *)ap1->data, 1, - (double *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zaxpy(l, (double *)ap2->data, (double *)ap1->data, 1, - (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_saxpy(l, *((float *)ap2->data), (float *)ap1->data, 1, - (float *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_caxpy(l, (float *)ap2->data, (float *)ap1->data, 1, - (float *)ret->data, 1); - } - } - else if (ap1->nd == 1 && ap2->nd == 1) { - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - double result = cblas_ddot(l, (double *)ap1->data, 1, - (double *)ap2->data, 1); - *((double *)ret->data) = result; - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zdotu_sub(l, (double *)ap1->data, 1, - (double *)ap2->data, 1, (double *)ret->data); - } - else if (typenum == PyArray_FLOAT) { - float result = cblas_sdot(l, (float *)ap1->data, 1, - (float *)ap2->data, 1); - *((float *)ret->data) = result; - } - else if (typenum == PyArray_CFLOAT) { - cblas_cdotu_sub(l, (float *)ap1->data, 1, - (float *)ap2->data, 1, (float *)ret->data); - } - } - else if (ap1->nd == 2 && ap2->nd == 1) { - /* Matrix-vector multiplication -- Level 2 BLAS */ - lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, 1, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneD, (double *)ap1->data, lda, - (double *)ap2->data, 1, zeroD, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, 1, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneF, (float *)ap1->data, lda, - (float *)ap2->data, 1, zeroF, (float *)ret->data, 1); - } - } - else if (ap1->nd == 1 && ap2->nd == 2) { - /* Vector matrix multiplication -- Level 2 BLAS */ - lda = (ap2->dimensions[1] > 1 ? 
ap2->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (double *)ap2->data, lda, - (double *)ap1->data, 1, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - oneD, (double *)ap2->data, lda, - (double *)ap1->data, 1, zeroD, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (float *)ap2->data, lda, - (float *)ap1->data, 1, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - oneF, (float *)ap2->data, lda, - (float *)ap1->data, 1, zeroF, (float *)ret->data, 1); - } - } - else { /* (ap1->nd == 2 && ap2->nd == 2) */ - /* Matrix matrix multiplication -- Level 3 BLAS */ - lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - ldb = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1); - ldc = (ret->dimensions[1] > 1 ? ret->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - 0.0, (double *)ret->data, ldc); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - 0.0, (float *)ret->data, ldc); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - oneD, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - zeroD, (double *)ret->data, ldc); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - oneF, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - zeroF, (float *)ret->data, ldc); - } - } - NPY_END_ALLOW_THREADS - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -static char doc_vdot[] = "vdot(a,b)\nReturns the dot product of a and b for scalars and vectors\nof floating point and complex types. The first argument, a, is conjugated."; - - -static PyObject *dotblas_vdot(PyObject *dummy, PyObject *args) { - PyObject *op1, *op2; - PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL; - int l; - int typenum; - intp dimensions[MAX_DIMS]; - PyArray_Descr *type; - - if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL; - - /* - * Conjugating dot product using the BLAS for vectors. - * Multiplies op1 and op2, each of which must be vector. 
- */ - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - type = PyArray_DescrFromType(typenum); - Py_INCREF(type); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0, NULL); - if (ap1==NULL) {Py_DECREF(type); goto fail;} - op1 = PyArray_Flatten(ap1, 0); - if (op1==NULL) {Py_DECREF(type); goto fail;} - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - - ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0, NULL); - if (ap2==NULL) goto fail; - op2 = PyArray_Flatten(ap2, 0); - if (op2 == NULL) goto fail; - Py_DECREF(ap2); - ap2 = (PyArrayObject *)op2; - - if (typenum != PyArray_FLOAT && typenum != PyArray_DOUBLE && - typenum != PyArray_CFLOAT && typenum != PyArray_CDOUBLE) { - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - if (PyTypeNum_ISCOMPLEX(typenum)) { - op1 = PyArray_Conjugate(ap1, NULL); - if (op1==NULL) goto fail; - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - } - ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (ap2->dimensions[0] != ap1->dimensions[ap1->nd-1]) { - PyErr_SetString(PyExc_ValueError, "vectors have different lengths"); - goto fail; - } - l = ap1->dimensions[ap1->nd-1]; - - ret = (PyArrayObject *)PyArray_SimpleNew(0, dimensions, typenum); - if (ret == NULL) goto fail; - - NPY_BEGIN_ALLOW_THREADS - - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - *((double *)ret->data) = cblas_ddot(l, (double *)ap1->data, 1, - (double *)ap2->data, 1); - } - else if (typenum == PyArray_FLOAT) { - *((float *)ret->data) = cblas_sdot(l, (float *)ap1->data, 1, - (float *)ap2->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zdotc_sub(l, (double *)ap1->data, 1, - (double *)ap2->data, 1, (double *)ret->data); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cdotc_sub(l, (float *)ap1->data, 1, - (float *)ap2->data, 1, (float *)ret->data); - } - - NPY_END_ALLOW_THREADS - - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -static struct PyMethodDef dotblas_module_methods[] = { - {"dot", (PyCFunction)dotblas_matrixproduct, 1, doc_matrixproduct}, - {"inner", (PyCFunction)dotblas_innerproduct, 1, doc_innerproduct}, - {"vdot", (PyCFunction)dotblas_vdot, 1, doc_vdot}, - {"alterdot", (PyCFunction)dotblas_alterdot, 1, doc_alterdot}, - {"restoredot", (PyCFunction)dotblas_restoredot, 1, doc_restoredot}, - {NULL, NULL, 0} /* sentinel */ -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC init_dotblas(void) { - int i; - PyObject *d, *s; - - /* Create the module and add the functions */ - Py_InitModule3("_dotblas", dotblas_module_methods, module_doc); - - /* Import the array object */ - import_array(); - - /* Initialise the array of dot functions */ - for (i = 0; i < PyArray_NTYPES; i++) - oldFunctions[i] = NULL; - - /* alterdot at load */ - d = PyTuple_New(0); - s = dotblas_alterdot(NULL, d); - Py_DECREF(d); - Py_DECREF(s); - -} diff --git a/numpy/core/blasdot/cblas.h b/numpy/core/blasdot/cblas.h deleted file mode 100644 index 25de09edf..000000000 --- a/numpy/core/blasdot/cblas.h +++ /dev/null @@ -1,578 +0,0 @@ -#ifndef CBLAS_H -#define CBLAS_H -#include - -/* Allow the use in C++ code. 
*/ -#ifdef __cplusplus -extern "C" -{ -#endif - -/* - * Enumerated and derived types - */ -#define CBLAS_INDEX size_t /* this may vary between platforms */ - -enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102}; -enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113}; -enum CBLAS_UPLO {CblasUpper=121, CblasLower=122}; -enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132}; -enum CBLAS_SIDE {CblasLeft=141, CblasRight=142}; - -/* - * =========================================================================== - * Prototypes for level 1 BLAS functions (complex are recast as routines) - * =========================================================================== - */ -float cblas_sdsdot(const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY); -double cblas_dsdot(const int N, const float *X, const int incX, const float *Y, - const int incY); -float cblas_sdot(const int N, const float *X, const int incX, - const float *Y, const int incY); -double cblas_ddot(const int N, const double *X, const int incX, - const double *Y, const int incY); - -/* - * Functions having prefixes Z and C only - */ -void cblas_cdotu_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotu); -void cblas_cdotc_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotc); - -void cblas_zdotu_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotu); -void cblas_zdotc_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotc); - - -/* - * Functions having prefixes S D SC DZ - */ -float cblas_snrm2(const int N, const float *X, const int incX); -float cblas_sasum(const int N, const float *X, const int incX); - -double cblas_dnrm2(const int N, const double *X, const int incX); -double cblas_dasum(const int N, const double *X, const int incX); - -float cblas_scnrm2(const int N, const void *X, const int incX); -float cblas_scasum(const int N, const void *X, const int incX); - -double cblas_dznrm2(const int N, const void *X, const int incX); -double cblas_dzasum(const int N, const void *X, const int incX); - - -/* - * Functions having standard 4 prefixes (S D C Z) - */ -CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX); -CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX); -CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX); -CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX); - -/* - * =========================================================================== - * Prototypes for level 1 BLAS routines - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (s, d, c, z) - */ -void cblas_sswap(const int N, float *X, const int incX, - float *Y, const int incY); -void cblas_scopy(const int N, const float *X, const int incX, - float *Y, const int incY); -void cblas_saxpy(const int N, const float alpha, const float *X, - const int incX, float *Y, const int incY); - -void cblas_dswap(const int N, double *X, const int incX, - double *Y, const int incY); -void cblas_dcopy(const int N, const double *X, const int incX, - double *Y, const int incY); -void cblas_daxpy(const int N, const double alpha, const double *X, - const int incX, double *Y, const int incY); - -void cblas_cswap(const int N, void *X, const int incX, - void *Y, const int incY); -void cblas_ccopy(const int N, const void *X, const int 
incX, - void *Y, const int incY); -void cblas_caxpy(const int N, const void *alpha, const void *X, - const int incX, void *Y, const int incY); - -void cblas_zswap(const int N, void *X, const int incX, - void *Y, const int incY); -void cblas_zcopy(const int N, const void *X, const int incX, - void *Y, const int incY); -void cblas_zaxpy(const int N, const void *alpha, const void *X, - const int incX, void *Y, const int incY); - - -/* - * Routines with S and D prefix only - */ -void cblas_srotg(float *a, float *b, float *c, float *s); -void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P); -void cblas_srot(const int N, float *X, const int incX, - float *Y, const int incY, const float c, const float s); -void cblas_srotm(const int N, float *X, const int incX, - float *Y, const int incY, const float *P); - -void cblas_drotg(double *a, double *b, double *c, double *s); -void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P); -void cblas_drot(const int N, double *X, const int incX, - double *Y, const int incY, const double c, const double s); -void cblas_drotm(const int N, double *X, const int incX, - double *Y, const int incY, const double *P); - - -/* - * Routines with S D C Z CS and ZD prefixes - */ -void cblas_sscal(const int N, const float alpha, float *X, const int incX); -void cblas_dscal(const int N, const double alpha, double *X, const int incX); -void cblas_cscal(const int N, const void *alpha, void *X, const int incX); -void cblas_zscal(const int N, const void *alpha, void *X, const int incX); -void cblas_csscal(const int N, const float alpha, void *X, const int incX); -void cblas_zdscal(const int N, const double alpha, void *X, const int incX); - -/* - * =========================================================================== - * Prototypes for level 2 BLAS - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (S, D, C, Z) - */ -void cblas_sgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *X, const int incX, const float beta, - float *Y, const int incY); -void cblas_sgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const float alpha, - const float *A, const int lda, const float *X, - const int incX, const float beta, float *Y, const int incY); -void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *A, const int lda, - float *X, const int incX); -void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const float *A, const int lda, - float *X, const int incX); -void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *Ap, float *X, const int incX); -void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *A, const int lda, float *X, - const int incX); -void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const float *A, 
const int lda, - float *X, const int incX); -void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *Ap, float *X, const int incX); - -void cblas_dgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const double alpha, const double *A, const int lda, - const double *X, const int incX, const double beta, - double *Y, const int incY); -void cblas_dgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const double alpha, - const double *A, const int lda, const double *X, - const int incX, const double beta, double *Y, const int incY); -void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *A, const int lda, - double *X, const int incX); -void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const double *A, const int lda, - double *X, const int incX); -void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *Ap, double *X, const int incX); -void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *A, const int lda, double *X, - const int incX); -void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const double *A, const int lda, - double *X, const int incX); -void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *Ap, double *X, const int incX); - -void cblas_cgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *X, const int incX, const void *beta, - void *Y, const int incY); -void cblas_cgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const void *alpha, - const void *A, const int lda, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, - void *X, const int incX); -void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); -void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, void *X, - const int incX); -void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, 
const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); - -void cblas_zgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *X, const int incX, const void *beta, - void *Y, const int incY); -void cblas_zgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const void *alpha, - const void *A, const int lda, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, - void *X, const int incX); -void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); -void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, void *X, - const int incX); -void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); - - -/* - * Routines with S and D prefixes only - */ -void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *A, - const int lda, const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const float alpha, const float *A, - const int lda, const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *Ap, - const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N, - const float alpha, const float *X, const int incX, - const float *Y, const int incY, float *A, const int lda); -void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, float *A, const int lda); -void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, float *Ap); -void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY, float *A, - const int lda); -void 
cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY, float *A); - -void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *Ap, - const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N, - const double alpha, const double *X, const int incX, - const double *Y, const int incY, double *A, const int lda); -void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, double *A, const int lda); -void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, double *Ap); -void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, const double *Y, const int incY, double *A, - const int lda); -void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, const double *Y, const int incY, double *A); - - -/* - * Routines with C and Z prefixes only - */ -void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *Ap, - const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const void *X, const int incX, - void *A, const int lda); -void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const void *X, - const int incX, void *A); -void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, 
void *Ap); - -void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *Ap, - const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const void *X, const int incX, - void *A, const int lda); -void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const void *X, - const int incX, void *A); -void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *Ap); - -/* - * =========================================================================== - * Prototypes for level 3 BLAS - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (S, D, C, Z) - */ -void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const float alpha, const float *A, - const int lda, const float *B, const int ldb, - const float beta, float *C, const int ldc); -void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, const float beta, - float *C, const int ldc); -void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const float *A, const int lda, - const float beta, float *C, const int ldc); -void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, const float beta, - float *C, const int ldc); -void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const float alpha, const float *A, const int lda, - float *B, const int ldb); -void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const float 
alpha, const float *A, const int lda, - float *B, const int ldb); - -void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const double alpha, const double *A, - const int lda, const double *B, const int ldb, - const double beta, double *C, const int ldc); -void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const double alpha, const double *A, const int lda, - const double *B, const int ldb, const double beta, - double *C, const int ldc); -void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const double *A, const int lda, - const double beta, double *C, const int ldc); -void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const double *A, const int lda, - const double *B, const int ldb, const double beta, - double *C, const int ldc); -void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const double alpha, const double *A, const int lda, - double *B, const int ldb); -void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const double alpha, const double *A, const int lda, - double *B, const int ldb); - -void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const void *alpha, const void *A, - const int lda, const void *B, const int ldb, - const void *beta, void *C, const int ldc); -void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *beta, void *C, const int ldc); -void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); -void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); - -void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const void *alpha, const void *A, - const int lda, const void *B, const int ldb, - 
const void *beta, void *C, const int ldc); -void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *beta, void *C, const int ldc); -void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); -void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); - - -/* - * Routines with prefixes C and Z only - */ -void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const void *A, const int lda, - const float beta, void *C, const int ldc); -void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const float beta, - void *C, const int ldc); - -void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const void *A, const int lda, - const double beta, void *C, const int ldc); -void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const double beta, - void *C, const int ldc); - -void cblas_xerbla(int p, const char *rout, const char *form, ...); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/numpy/core/code_generators/array_api_order.txt b/numpy/core/code_generators/array_api_order.txt deleted file mode 100644 index 858b89684..000000000 --- a/numpy/core/code_generators/array_api_order.txt +++ /dev/null @@ -1,85 +0,0 @@ -# The functions in the numpy_core C API -# They are defined here so that the order is set. 
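The order files in this patch (array_api_order.txt here, plus the multiarray and ufunc order files further down) exist only to pin each exported function to a stable slot: genapi.read_order turns the file into a name-to-index mapping, and the generators add a fixed offset before emitting the PyArray_API pointer table. A minimal sketch of that consumption step, assuming a file with one function name per line and '#' comment lines (read_api_order is an illustrative name, not taken from these sources):

    def read_api_order(path):
        # Map each function name to its position in the file; that position,
        # plus the fixed offset the generator adds, becomes the slot in the
        # exported C-API pointer table.
        order = {}
        index = 0
        with open(path) as fh:
            for line in fh:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue            # comments and blank lines are not counted here
                order[line] = index
                index += 1
        return order

    # e.g. read_api_order('array_api_order.txt')['PyArray_INCREF'] would be 2,
    # counting from the top of the list that follows.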
-PyArray_SetNumericOps -PyArray_GetNumericOps -PyArray_INCREF -PyArray_XDECREF -PyArray_SetStringFunction -PyArray_DescrFromType -PyArray_TypeObjectFromType -PyArray_Zero -PyArray_One -PyArray_CastToType -PyArray_CastTo -PyArray_CastAnyTo -PyArray_CanCastSafely -PyArray_CanCastTo -PyArray_ObjectType -PyArray_DescrFromObject -PyArray_ConvertToCommonType -PyArray_DescrFromScalar -PyArray_DescrFromTypeObject -PyArray_Size -PyArray_Scalar -PyArray_FromScalar -PyArray_ScalarAsCtype -PyArray_CastScalarToCtype -PyArray_CastScalarDirect -PyArray_ScalarFromObject -PyArray_GetCastFunc -PyArray_FromDims -PyArray_FromDimsAndDataAndDescr -PyArray_FromAny -PyArray_EnsureArray -PyArray_EnsureAnyArray -PyArray_FromFile -PyArray_FromString -PyArray_FromBuffer -PyArray_FromIter -PyArray_Return -PyArray_GetField -PyArray_SetField -PyArray_Byteswap -PyArray_Resize -PyArray_MoveInto -PyArray_CopyInto -PyArray_CopyAnyInto -PyArray_CopyObject -PyArray_NewCopy -PyArray_ToList -PyArray_ToString -PyArray_ToFile -PyArray_Dump -PyArray_Dumps -PyArray_ValidType -PyArray_UpdateFlags -PyArray_New -PyArray_NewFromDescr -PyArray_DescrNew -PyArray_DescrNewFromType -PyArray_GetPriority -PyArray_IterNew -PyArray_MultiIterNew -PyArray_PyIntAsInt -PyArray_PyIntAsIntp -PyArray_Broadcast -PyArray_FillObjectArray -PyArray_FillWithScalar -PyArray_CheckStrides -PyArray_DescrNewByteorder -PyArray_IterAllButAxis -PyArray_CheckFromAny -PyArray_FromArray -PyArray_FromInterface -PyArray_FromStructInterface -PyArray_FromArrayAttr -PyArray_ScalarKind -PyArray_CanCoerceScalar -PyArray_NewFlagsObject -PyArray_CanCastScalar -PyArray_CompareUCS4 -PyArray_RemoveSmallest -PyArray_ElementStrides -PyArray_Item_INCREF -PyArray_Item_XDECREF -PyArray_FieldNames diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py deleted file mode 100644 index 6c57b6de0..000000000 --- a/numpy/core/code_generators/genapi.py +++ /dev/null @@ -1,295 +0,0 @@ -""" -Get API information encoded in C files. - -See ``find_function`` for how functions should be formatted, and -``read_order`` for how the order of the functions should be -specified. 
-""" -import sys, os, re -import md5 -import textwrap - -__docformat__ = 'restructuredtext' - -# The files under src/ that are scanned for API functions -API_FILES = ['arraymethods.c', - 'arrayobject.c', - 'arraytypes.inc.src', - 'multiarraymodule.c', - 'scalartypes.inc.src', - 'ufuncobject.c', - ] -THIS_DIR = os.path.dirname(__file__) -API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES] - -def file_in_this_dir(filename): - return os.path.join(THIS_DIR, filename) - -def remove_whitespace(s): - return ''.join(s.split()) - -def _repl(str): - return str.replace('intp', 'npy_intp').replace('Bool','npy_bool') - -class Function(object): - def __init__(self, name, return_type, args, doc=''): - self.name = name - self.return_type = _repl(return_type) - self.args = args - self.doc = doc - - def _format_arg(self, (typename, name)): - if typename.endswith('*'): - return typename + name - else: - return typename + ' ' + name - - def argtypes_string(self): - if not self.args: - return 'void' - argstr = ', '.join([_repl(a[0]) for a in self.args]) - return argstr - - def __str__(self): - argstr = ', '.join([self._format_arg(a) for a in self.args]) - if self.doc: - doccomment = '/* %s */\n' % self.doc - else: - doccomment = '' - return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr) - - def to_ReST(self): - lines = ['::', '', ' ' + self.return_type] - argstr = ',\000'.join([self._format_arg(a) for a in self.args]) - name = ' %s' % (self.name,) - s = textwrap.wrap('(%s)' % (argstr,), width=72, - initial_indent=name, - subsequent_indent=' ' * (len(name)+1), - break_long_words=False) - for l in s: - lines.append(l.replace('\000', ' ').rstrip()) - lines.append('') - if self.doc: - lines.append(textwrap.dedent(self.doc)) - return '\n'.join(lines) - - def api_hash(self): - m = md5.new() - m.update(remove_whitespace(self.return_type)) - m.update('\000') - m.update(self.name) - m.update('\000') - for typename, name in self.args: - m.update(remove_whitespace(typename)) - m.update('\000') - return m.hexdigest()[:8] - -class ParseError(Exception): - def __init__(self, filename, lineno, msg): - self.filename = filename - self.lineno = lineno - self.msg = msg - - def __str__(self): - return '%s:%s:%s' % (self.filename, self.lineno, self.msg) - -def skip_brackets(s, lbrac, rbrac): - count = 0 - for i, c in enumerate(s): - if c == lbrac: - count += 1 - elif c == rbrac: - count -= 1 - if count == 0: - return i - raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s)) - -def split_arguments(argstr): - arguments = [] - bracket_counts = {'(': 0, '[': 0} - current_argument = [] - state = 0 - i = 0 - def finish_arg(): - if current_argument: - argstr = ''.join(current_argument).strip() - m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr) - if m: - typename = m.group(1).strip() - name = m.group(3) - else: - typename = argstr - name = '' - arguments.append((typename, name)) - del current_argument[:] - while i < len(argstr): - c = argstr[i] - if c == ',': - finish_arg() - elif c == '(': - p = skip_brackets(argstr[i:], '(', ')') - current_argument += argstr[i:i+p] - i += p-1 - else: - current_argument += c - i += 1 - finish_arg() - return arguments - - -def find_functions(filename, tag='API'): - """ - Scan the file, looking for tagged functions. 
- - Assuming ``tag=='API'``, a tagged function looks like:: - - /*API*/ - static returntype* - function_name(argtype1 arg1, argtype2 arg2) - { - } - - where the return type must be on a separate line, the function - name must start the line, and the opening ``{`` must start the line. - - An optional documentation comment in ReST format may follow the tag, - as in:: - - /*API - This function does foo... - */ - """ - fo = open(filename, 'r') - functions = [] - return_type = None - function_name = None - function_args = [] - doclist = [] - SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5) - state = SCANNING - tagcomment = '/*' + tag - for lineno, line in enumerate(fo): - try: - line = line.strip() - if state == SCANNING: - if line.startswith(tagcomment): - if line.endswith('*/'): - state = STATE_RETTYPE - else: - state = STATE_DOC - elif state == STATE_DOC: - if line.startswith('*/'): - state = STATE_RETTYPE - else: - line = line.lstrip(' *') - doclist.append(line) - elif state == STATE_RETTYPE: - # first line of declaration with return type - m = re.match(r'static\s+(.*)$', line) - if m: - line = m.group(1) - return_type = line - state = STATE_NAME - elif state == STATE_NAME: - # second line, with function name - m = re.match(r'(\w+)\s*\(', line) - if m: - function_name = m.group(1) - else: - raise ParseError(filename, lineno+1, - 'could not find function name') - function_args.append(line[m.end():]) - state = STATE_ARGS - elif state == STATE_ARGS: - if line.startswith('{'): - # finished - fargs_str = ' '.join(function_args).rstrip(' )') - fargs = split_arguments(fargs_str) - f = Function(function_name, return_type, fargs, - '\n'.join(doclist)) - functions.append(f) - return_type = None - function_name = None - function_args = [] - doclist = [] - state = SCANNING - else: - function_args.append(line) - except: - print filename, lineno+1 - raise - fo.close() - return functions - -def read_order(order_file): - """ - Read the order of the API functions from a file. - - Comments can be put on lines starting with # - """ - fo = open(order_file, 'r') - order = {} - i = 0 - for line in fo: - line = line.strip() - if not line.startswith('#'): - order[line] = i - i += 1 - fo.close() - return order - -def get_api_functions(tagname, order_file): - if not os.path.exists(order_file): - order_file = file_in_this_dir(order_file) - order = read_order(order_file) - functions = [] - for f in API_FILES: - functions.extend(find_functions(f, tagname)) - dfunctions = [] - for func in functions: - o = order[func.name] - dfunctions.append( (o, func) ) - dfunctions.sort() - return [a[1] for a in dfunctions] - -def add_api_list(offset, APIname, api_list, - module_list, extension_list, init_list): - """Add the API function declarations to the appropiate lists for use in - the headers. 
- """ - for k, func in enumerate(api_list): - num = offset + k - astr = "static %s %s \\\n (%s);" % \ - (func.return_type, func.name, func.argtypes_string()) - module_list.append(astr) - astr = "#define %s \\\n (*(%s (*)(%s)) \\\n"\ - " %s[%d])" % (func.name,func.return_type, - func.argtypes_string(), APIname, num) - extension_list.append(astr) - astr = " (void *) %s," % func.name - init_list.append(astr) - -def should_rebuild(targets, source_files): - from distutils.dep_util import newer_group - for t in targets: - if not os.path.exists(t): - return True - sources = API_FILES + list(source_files) + [__file__] - if newer_group(sources, targets[0], missing='newer'): - return True - return False - -def main(): - tagname = sys.argv[1] - order_file = sys.argv[2] - functions = get_api_functions(tagname, order_file) - m = md5.new(tagname) - for func in functions: - print func - ah = func.api_hash() - m.update(ah) - print hex(int(ah,16)) - print hex(int(m.hexdigest()[:8],16)) - -if __name__ == '__main__': - main() diff --git a/numpy/core/code_generators/generate_array_api.py b/numpy/core/code_generators/generate_array_api.py deleted file mode 100644 index c6f73c33f..000000000 --- a/numpy/core/code_generators/generate_array_api.py +++ /dev/null @@ -1,208 +0,0 @@ -import os -import genapi - -OBJECT_API_ORDER = 'array_api_order.txt' -MULTIARRAY_API_ORDER = 'multiarray_api_order.txt' - -types = ['Generic','Number','Integer','SignedInteger','UnsignedInteger', - 'Inexact', - 'Floating', 'ComplexFloating', 'Flexible', 'Character', - 'Byte','Short','Int', 'Long', 'LongLong', 'UByte', 'UShort', - 'UInt', 'ULong', 'ULongLong', 'Float', 'Double', 'LongDouble', - 'CFloat', 'CDouble', 'CLongDouble', 'Object', 'String', 'Unicode', - 'Void'] - -h_template = r""" -#ifdef _MULTIARRAYMODULE - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - - -static unsigned int PyArray_GetNDArrayCVersion (void); -static PyTypeObject PyBigArray_Type; -static PyTypeObject PyArray_Type; -static PyTypeObject PyArrayDescr_Type; -static PyTypeObject PyArrayFlags_Type; -static PyTypeObject PyArrayIter_Type; -static PyTypeObject PyArrayMapIter_Type; -static PyTypeObject PyArrayMultiIter_Type; -static int NPY_NUMUSERTYPES=0; -static PyTypeObject PyBoolArrType_Type; -static PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -%s - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -#define PyArray_GetNDArrayCVersion (*(unsigned int (*)(void)) PyArray_API[0]) -#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) -#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) -#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) -#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) -#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) -#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) - -%s - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); - PyObject *c_api = NULL; - if (numpy == NULL) return -1; - c_api = PyObject_GetAttrString(numpy, 
"_ARRAY_API"); - if (c_api == NULL) {Py_DECREF(numpy); return -1;} - if (PyCObject_Check(c_api)) { - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); - } - Py_DECREF(c_api); - Py_DECREF(numpy); - if (PyArray_API == NULL) return -1; - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "version %%x of C-API but this version of numpy is %%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - return 0; -} - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif -""" - - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyArray_API[] = { - (void *) PyArray_GetNDArrayCVersion, - (void *) &PyBigArray_Type, - (void *) &PyArray_Type, - (void *) &PyArrayDescr_Type, - (void *) &PyArrayFlags_Type, - (void *) &PyArrayIter_Type, - (void *) &PyArrayMultiIter_Type, - (int *) &NPY_NUMUSERTYPES, - (void *) &PyBoolArrType_Type, - (void *) &_PyArrayScalar_BoolValues, -%s -}; -""" - -def generate_api(output_dir, force=False): - header_file = os.path.join(output_dir, '__multiarray_api.h') - c_file = os.path.join(output_dir,'__multiarray_api.c') - doc_file = os.path.join(output_dir, 'multiarray_api.txt') - - targets = (header_file, c_file, doc_file) - if (not force - and not genapi.should_rebuild(targets, - [OBJECT_API_ORDER, - MULTIARRAY_API_ORDER, - __file__])): - return targets - - objectapi_list = genapi.get_api_functions('OBJECT_API', - OBJECT_API_ORDER) - multiapi_list = genapi.get_api_functions('MULTIARRAY_API', - MULTIARRAY_API_ORDER) - # API fixes for __arrayobject_api.h - - fixed = 10 - numtypes = len(types) + fixed - numobject = len(objectapi_list) + numtypes - nummulti = len(multiapi_list) - numtotal = numobject + nummulti - - module_list = [] - extension_list = [] - init_list = [] - - # setup types - for k, atype in enumerate(types): - num = fixed + k - astr = " (void *) &Py%sArrType_Type," % types[k] - init_list.append(astr) - astr = "static PyTypeObject Py%sArrType_Type;" % types[k] - module_list.append(astr) - astr = "#define Py%sArrType_Type (*(PyTypeObject *)PyArray_API[%d])" % \ - (types[k], num) - extension_list.append(astr) - - # set up object API - genapi.add_api_list(numtypes, 'PyArray_API', objectapi_list, - module_list, extension_list, init_list) - - # set up multiarray module API - genapi.add_api_list(numobject, 'PyArray_API', multiapi_list, - module_list, extension_list, init_list) - - - # Write to header - fid = open(header_file, 'w') - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - fid.write(s) - fid.close() - - # Write to c-code - fid = open(c_file, 'w') - s = c_template % '\n'.join(init_list) - fid.write(s) - fid.close() - - # write to documentation - fid = open(doc_file, 'w') - fid.write(''' -=========== -Numpy C-API -=========== - -Object API -========== -''') - for func in objectapi_list: - fid.write(func.to_ReST()) - fid.write('\n\n') - fid.write(''' - -Multiarray API -============== -''') - for func in multiapi_list: - 
fid.write(func.to_ReST()) - fid.write('\n\n') - fid.close() - - return targets diff --git a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py deleted file mode 100644 index 96bb47cae..000000000 --- a/numpy/core/code_generators/generate_ufunc_api.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -import genapi - -UFUNC_API_ORDER = 'ufunc_api_order.txt' - -h_template = r""" -#ifdef _UMATHMODULE - -static PyTypeObject PyUFunc_Type; - -%s - -#else - -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; -#else -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; -#else -static void **PyUFunc_API=NULL; -#endif -#endif - -#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) - -%s - -static int -_import_umath(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) return -1; - c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); - if (c_api == NULL) {Py_DECREF(numpy); return -1;} - if (PyCObject_Check(c_api)) { - PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); - } - Py_DECREF(c_api); - Py_DECREF(numpy); - if (PyUFunc_API == NULL) return -1; - return 0; -} - -#define import_umath() { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); return; }} - -#define import_umath1(ret) { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); return ret; }} - -#define import_umath2(msg, ret) { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; }} - -#define import_ufunc() { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); }} - - -#endif -""" - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyUFunc_API[] = { - (void *) &PyUFunc_Type, -%s -}; -""" - -def generate_api(output_dir, force=False): - header_file = os.path.join(output_dir, '__ufunc_api.h') - c_file = os.path.join(output_dir, '__ufunc_api.c') - doc_file = os.path.join(output_dir, 'ufunc_api.txt') - - targets = (header_file, c_file, doc_file) - if (not force - and not genapi.should_rebuild(targets, - [UFUNC_API_ORDER, __file__])): - return targets - - ufunc_api_list = genapi.get_api_functions('UFUNC_API', UFUNC_API_ORDER) - - # API fixes for __arrayobject_api.h - - fixed = 1 - nummulti = len(ufunc_api_list) - numtotal = fixed + nummulti - - module_list = [] - extension_list = [] - init_list = [] - - # set up object API - genapi.add_api_list(fixed, 'PyUFunc_API', ufunc_api_list, - module_list, extension_list, init_list) - - # Write to header - fid = open(header_file, 'w') - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - fid.write(s) - fid.close() - - # Write to c-code - fid = open(c_file, 'w') - s = c_template % '\n'.join(init_list) - fid.write(s) - fid.close() - - # Write to documentation - fid = open(doc_file, 'w') - fid.write(''' -================= -Numpy Ufunc C-API -================= -''') - for func in ufunc_api_list: - fid.write(func.to_ReST()) - fid.write('\n\n') - fid.close() - - return targets diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py deleted file mode 100644 index 
0150417f0..000000000 --- a/numpy/core/code_generators/generate_umath.py +++ /dev/null @@ -1,679 +0,0 @@ -import re - -Zero = "PyUFunc_Zero" -One = "PyUFunc_One" -None_ = "PyUFunc_None" - -class TypeDescription(object): - """Type signature for a ufunc - - Attributes - ---------- - - type: character representing the type - func_data: - in_: - out: - """ - def __init__(self, type, f=None, in_=None, out=None): - self.type = type - self.func_data = f - if in_ is not None: - in_ = in_.replace('.', type) - self.in_ = in_ - if out is not None: - out = out.replace('.', type) - self.out = out - - def finish_signature(self, nin, nout): - if self.in_ is None: - self.in_ = self.type * nin - assert len(self.in_) == nin - if self.out is None: - self.out = self.type * nout - assert len(self.out) == nout - -_fdata_map = dict(f='%sf', d='%s', g='%sl', - F='nc_%sf', D='nc_%s', G='nc_%sl') -def build_func_data(types, f): - func_data = [] - for t in types: - d = _fdata_map.get(t, '%s') % (f,) - func_data.append(d) - return func_data - -def TD(types, f=None, in_=None, out=None): - if f is not None: - if isinstance(f, str): - func_data = build_func_data(types, f) - else: - assert len(f) == len(types) - func_data = f - else: - func_data = (None,) * len(types) - if isinstance(in_, str): - in_ = (in_,) * len(types) - elif in_ is None: - in_ = (None,) * len(types) - if isinstance(out, str): - out = (out,) * len(types) - elif out is None: - out = (None,) * len(types) - tds = [] - for t, fd, i, o in zip(types, func_data, in_, out): - tds.append(TypeDescription(t, f=fd, in_=i, out=o)) - return tds - -class Ufunc(object): - """Description of a ufunc. - - Attributes - ---------- - - nin: number of input arguments - nout: number of output arguments - identity: identity element for a two-argument function - docstring: docstring for the ufunc - type_descriptions: list of TypeDescription objects - """ - def __init__(self, nin, nout, identity, docstring, - *type_descriptions): - self.nin = nin - self.nout = nout - if identity is None: - identity = None_ - self.identity = identity - self.docstring = docstring - self.type_descriptions = [] - for td in type_descriptions: - self.type_descriptions.extend(td) - for td in self.type_descriptions: - td.finish_signature(self.nin, self.nout) - -#each entry in defdict is a Ufunc object. - -#name: [string of chars for which it is defined, -# string of characters using func interface, -# tuple of strings giving funcs for data, -# (in, out), or (instr, outstr) giving the signature as character codes, -# identity, -# docstring, -# output specification (optional) -# ] - -all = '?bBhHiIlLqQfdgFDGO' -O = 'O' -M = 'M' -ints = 'bBhHiIlLqQ' -intsO = ints + O -bints = '?' 
+ ints -bintsO = bints + O -flts = 'fdg' -fltsO = flts + O -fltsM = flts + M -cmplx = 'FDG' -cmplxO = cmplx + O -cmplxM = cmplx + M -inexact = flts + cmplx -noint = inexact+O -nointM = inexact+M -allM = bints+flts+cmplxM -nobool = all[1:] -nobool_or_obj = all[1:-1] -intflt = ints+flts -intfltcmplx = nobool_or_obj -nocmplx = bints+flts -nocmplxO = nocmplx+O -nocmplxM = nocmplx+M -noobj = all[:-1] - -defdict = { -'add' : - Ufunc(2, 1, Zero, - 'adds the arguments elementwise.', - TD(noobj), - TD(O, f='PyNumber_Add'), - ), -'subtract' : - Ufunc(2, 1, Zero, - 'subtracts the arguments elementwise.', - TD(noobj), - TD(O, f='PyNumber_Subtract'), - ), -'multiply' : - Ufunc(2, 1, One, - 'multiplies the arguments elementwise.', - TD(noobj), - TD(O, f='PyNumber_Multiply'), - ), -'divide' : - Ufunc(2, 1, One, - 'divides the arguments elementwise.', - TD(intfltcmplx), - TD(O, f='PyNumber_Divide'), - ), -'floor_divide' : - Ufunc(2, 1, One, - 'floor divides the arguments elementwise.', - TD(intfltcmplx), - TD(O, f='PyNumber_FloorDivide'), - ), -'true_divide' : - Ufunc(2, 1, One, - 'true divides the arguments elementwise.', - TD('bBhH', out='f'), - TD('iIlLqQ', out='d'), - TD(flts+cmplx), - TD(O, f='PyNumber_TrueDivide'), - ), -'conjugate' : - Ufunc(1, 1, None, - 'takes the conjugate of x elementwise.', - TD(nobool_or_obj), - TD(M, f='conjugate'), - ), -'fmod' : - Ufunc(2, 1, Zero, - 'computes (C-like) x1 % x2 elementwise.', - TD(ints), - TD(flts, f='fmod'), - TD(M, f='fmod'), - ), -'square' : - Ufunc(1, 1, None, - 'compute x**2.', - TD(nobool_or_obj), - TD(O, f='Py_square'), - ), -'reciprocal' : - Ufunc(1, 1, None, - 'compute 1/x', - TD(nobool_or_obj), - TD(O, f='Py_reciprocal'), - ), -'ones_like' : - Ufunc(1, 1, None, - 'returns an array of ones of the shape and typecode of x.', - TD(nobool_or_obj), - TD(O, f='Py_get_one'), - ), -'power' : - Ufunc(2, 1, One, - 'computes x1**x2 elementwise.', - TD(ints), - TD(inexact, f='pow'), - TD(O, f='PyNumber_Power'), - ), -'absolute' : - Ufunc(1, 1, None, - 'takes |x| elementwise.', - TD(nocmplx), - TD(cmplx, out=('f', 'd', 'g')), - TD(O, f='PyNumber_Absolute'), - ), -'negative' : - Ufunc(1, 1, None, - 'determines -x elementwise', - TD(nocmplx), - TD(cmplx, f='neg'), - TD(O, f='PyNumber_Negative'), - ), -'sign' : - Ufunc(1, 1, None, - 'returns -1 if x < 0 and 0 if x==0 and 1 if x > 0', - TD(nobool), - ), -'greater' : - Ufunc(2, 1, None, - 'returns elementwise x1 > x2 in a bool array.', - TD(all, out='?'), - ), -'greater_equal' : - Ufunc(2, 1, None, - 'returns elementwise x1 >= x2 in a bool array.', - TD(all, out='?'), - ), -'less' : - Ufunc(2, 1, None, - 'returns elementwise x1 < x2 in a bool array.', - TD(all, out='?'), - ), -'less_equal' : - Ufunc(2, 1, None, - 'returns elementwise x1 <= x2 in a bool array', - TD(all, out='?'), - ), -'equal' : - Ufunc(2, 1, None, - 'returns elementwise x1 == x2 in a bool array', - TD(all, out='?'), - ), -'not_equal' : - Ufunc(2, 1, None, - 'returns elementwise x1 |= x2', - TD(all, out='?'), - ), -'logical_and' : - Ufunc(2, 1, One, - 'returns x1 and x2 elementwise.', - TD(noobj, out='?'), - TD(M, f='logical_and', out='?'), - ), -'logical_not' : - Ufunc(1, 1, None, - 'returns not x elementwise.', - TD(noobj, out='?'), - TD(M, f='logical_not', out='?'), - ), -'logical_or' : - Ufunc(2, 1, Zero, - 'returns x1 or x2 elementwise.', - TD(noobj, out='?'), - TD(M, f='logical_or', out='?'), - ), -'logical_xor' : - Ufunc(2, 1, None, - 'returns x1 xor x2 elementwise.', - TD(noobj, out='?'), - TD(M, f='logical_xor', out='?'), - ), -'maximum' : 
- Ufunc(2, 1, None, - 'returns maximum (if x1 > x2: x1; else: x2) elementwise.', - TD(noobj), - TD(O, f='_npy_ObjectMax') - ), -'minimum' : - Ufunc(2, 1, None, - 'returns minimum (if x1 < x2: x1; else: x2) elementwise', - TD(noobj), - TD(O, f='_npy_ObjectMin') - ), -'bitwise_and' : - Ufunc(2, 1, One, - 'computes x1 & x2 elementwise.', - TD(bints), - TD(O, f='PyNumber_And'), - ), -'bitwise_or' : - Ufunc(2, 1, Zero, - 'computes x1 | x2 elementwise.', - TD(bints), - TD(O, f='PyNumber_Or'), - ), -'bitwise_xor' : - Ufunc(2, 1, None, - 'computes x1 ^ x2 elementwise.', - TD(bints), - TD(O, f='PyNumber_Xor'), - ), -'invert' : - Ufunc(1, 1, None, - 'computes ~x (bit inversion) elementwise.', - TD(bints), - TD(O, f='PyNumber_Invert'), - ), -'left_shift' : - Ufunc(2, 1, None, - 'computes x1 << x2 (x1 shifted to left by x2 bits) elementwise.', - TD(ints), - TD(O, f='PyNumber_Lshift'), - ), -'right_shift' : - Ufunc(2, 1, None, - 'computes x1 >> x2 (x1 shifted to right by x2 bits) elementwise.', - TD(ints), - TD(O, f='PyNumber_Rshift'), - ), -'degrees' : - Ufunc(1, 1, None, - 'converts angle from radians to degrees', - TD(fltsM, f='degrees'), - ), -'radians' : - Ufunc(1, 1, None, - 'converts angle from degrees to radians', - TD(fltsM, f='radians'), - ), -'arccos' : - Ufunc(1, 1, None, - 'inverse cosine elementwise.', - TD(inexact, f='acos'), - TD(M, f='arccos'), - ), -'arccosh' : - Ufunc(1, 1, None, - 'inverse hyperbolic cosine elementwise.', - TD(inexact, f='acosh'), - TD(M, f='arccosh'), - ), -'arcsin' : - Ufunc(1, 1, None, - 'inverse sine elementwise.', - TD(inexact, f='asin'), - TD(M, f='arcsin'), - ), -'arcsinh' : - Ufunc(1, 1, None, - 'inverse hyperbolic sine elementwise.', - TD(inexact, f='asinh'), - TD(M, f='arcsinh'), - ), -'arctan' : - Ufunc(1, 1, None, - 'inverse tangent elementwise.', - TD(inexact, f='atan'), - TD(M, f='arctan'), - ), -'arctanh' : - Ufunc(1, 1, None, - 'inverse hyperbolic tangent elementwise.', - TD(inexact, f='atanh'), - TD(M, f='arctanh'), - ), -'cos' : - Ufunc(1, 1, None, - 'cosine elementwise.', - TD(inexact, f='cos'), - TD(M, f='cos'), - ), -'sin' : - Ufunc(1, 1, None, - 'sine elementwise.', - TD(inexact, f='sin'), - TD(M, f='sin'), - ), -'tan' : - Ufunc(1, 1, None, - 'tangent elementwise.', - TD(inexact, f='tan'), - TD(M, f='tan'), - ), -'cosh' : - Ufunc(1, 1, None, - 'hyperbolic cosine elementwise.', - TD(inexact, f='cosh'), - TD(M, f='cosh'), - ), -'sinh' : - Ufunc(1, 1, None, - 'hyperbolic sine elementwise.', - TD(inexact, f='sinh'), - TD(M, f='sinh'), - ), -'tanh' : - Ufunc(1, 1, None, - 'hyperbolic tangent elementwise.', - TD(inexact, f='tanh'), - TD(M, f='tanh'), - ), -'exp' : - Ufunc(1, 1, None, - 'e**x elementwise.', - TD(inexact, f='exp'), - TD(M, f='exp'), - ), -'expm1' : - Ufunc(1, 1, None, - 'e**x-1 elementwise.', - TD(inexact, f='expm1'), - TD(M, f='expm1'), - ), -'log' : - Ufunc(1, 1, None, - 'logarithm base e elementwise.', - TD(inexact, f='log'), - TD(M, f='log'), - ), -'log10' : - Ufunc(1, 1, None, - 'logarithm base 10 elementwise.', - TD(inexact, f='log10'), - TD(M, f='log10'), - ), -'log1p' : - Ufunc(1, 1, None, - 'log(1+x) to base e elementwise.', - TD(inexact, f='log1p'), - TD(M, f='log1p'), - ), -'sqrt' : - Ufunc(1, 1, None, - 'square-root elementwise. 
For real x, the domain is restricted to x>=0.', - TD(inexact, f='sqrt'), - TD(M, f='sqrt'), - ), -'ceil' : - Ufunc(1, 1, None, - 'elementwise smallest integer >= x.', - TD(flts, f='ceil'), - TD(M, f='ceil'), - ), -'fabs' : - Ufunc(1, 1, None, - 'absolute values.', - TD(flts, f='fabs'), - TD(M, f='fabs'), - ), -'floor' : - Ufunc(1, 1, None, - 'elementwise largest integer <= x', - TD(flts, f='floor'), - TD(M, f='floor'), - ), -'rint' : - Ufunc(1, 1, None, - 'round x elementwise to the nearest integer, round halfway cases away from zero', - TD(inexact, f='rint'), - TD(M, f='rint'), - ), -'arctan2' : - Ufunc(2, 1, None, - 'a safe and correct arctan(x1/x2)', - TD(flts, f='atan2'), - TD(M, f='arctan2'), - ), -'remainder' : - Ufunc(2, 1, None, - 'computes x1-n*x2 where n is floor(x1 / x2)', - TD(intflt), - TD(O, f='PyNumber_Remainder'), - ), -'hypot' : - Ufunc(2, 1, None, - 'sqrt(x1**2 + x2**2) elementwise', - TD(flts, f='hypot'), - TD(M, f='hypot'), - ), -'isnan' : - Ufunc(1, 1, None, - 'returns True where x is Not-A-Number', - TD(inexact, out='?'), - ), -'isinf' : - Ufunc(1, 1, None, - 'returns True where x is +inf or -inf', - TD(inexact, out='?'), - ), -'isfinite' : - Ufunc(1, 1, None, - 'returns True where x is finite', - TD(inexact, out='?'), - ), -'signbit' : - Ufunc(1, 1, None, - 'returns True where signbit of x is set (x<0).', - TD(flts, out='?'), - ), -'modf' : - Ufunc(1, 2, None, - 'breaks x into fractional (y1) and integral (y2) parts.\\n\\n Each output has the same sign as the input.', - TD(flts), - ), -} - -def indent(st,spaces): - indention = ' '*spaces - indented = indention + st.replace('\n','\n'+indention) - # trim off any trailing spaces - indented = re.sub(r' +$',r'',indented) - return indented - -chartoname = {'?': 'bool', - 'b': 'byte', - 'B': 'ubyte', - 'h': 'short', - 'H': 'ushort', - 'i': 'int', - 'I': 'uint', - 'l': 'long', - 'L': 'ulong', - 'q': 'longlong', - 'Q': 'ulonglong', - 'f': 'float', - 'd': 'double', - 'g': 'longdouble', - 'F': 'cfloat', - 'D': 'cdouble', - 'G': 'clongdouble', - 'O': 'OBJECT', - 'M': 'OBJECT', - } - -chartotype1 = {'f': 'f_f', - 'd': 'd_d', - 'g': 'g_g', - 'F': 'F_F', - 'D': 'D_D', - 'G': 'G_G', - 'O': 'O_O', - 'M': 'O_O_method'} - -chartotype2 = {'f': 'ff_f', - 'd': 'dd_d', - 'g': 'gg_g', - 'F': 'FF_F', - 'D': 'DD_D', - 'G': 'GG_G', - 'O': 'OO_O', - 'M': 'OO_O_method'} -#for each name -# 1) create functions, data, and signature -# 2) fill in functions and data in InitOperators -# 3) add function. 
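As a rough illustration of steps 1) and 2) above, the tables that make_arrays() emits for a single ufunc can be mocked up independently. The snippet below is a simplified sketch only: it covers loops without special func_data, omits the _data array, and uses an abbreviated chartoname plus a hypothetical two-type 'add' entry.

    chartoname = {'f': 'float', 'd': 'double'}   # abbreviated version of the table above

    def expand(name, types, nin=2, nout=1):
        # One inner-loop function per type character, named TYPE_name ...
        funcs = ['%s_%s' % (chartoname[t].upper(), name) for t in types]
        # ... and nin+nout type codes per loop in the signature array.
        sigs = []
        for t in types:
            sigs.extend(['PyArray_%s' % chartoname[t].upper()] * (nin + nout))
        return (
            'static PyUFuncGenericFunction %s_functions[] = { %s };' % (name, ', '.join(funcs)),
            'static char %s_signatures[] = { %s };' % (name, ', '.join(sigs)),
        )

    # expand('add', 'fd') produces strings equivalent to:
    #   static PyUFuncGenericFunction add_functions[] = { FLOAT_add, DOUBLE_add };
    #   static char add_signatures[] = { PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT,
    #                                    PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE };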
- -def make_arrays(funcdict): - # functions array contains an entry for every type implemented - # NULL should be placed where PyUfunc_ style function will be filled in later - # - code1list = [] - code2list = [] - names = funcdict.keys() - names.sort() - for name in names: - uf = funcdict[name] - funclist = [] - datalist = [] - siglist = [] - k = 0 - sub = 0 - - if uf.nin > 1: - assert uf.nin == 2 - thedict = chartotype2 # two inputs and one output - else: - thedict = chartotype1 # one input and one output - - for t in uf.type_descriptions: - if t.func_data is not None: - funclist.append('NULL') - astr = '%s_functions[%d] = PyUFunc_%s;' % \ - (name, k, thedict[t.type]) - code2list.append(astr) - if t.type == 'O': - astr = '%s_data[%d] = (void *) %s;' % \ - (name, k, t.func_data) - code2list.append(astr) - datalist.append('(void *)NULL') - elif t.type == 'M': - datalist.append('(void *)"%s"' % t.func_data) - else: - astr = '%s_data[%d] = (void *) %s;' % \ - (name, k, t.func_data) - code2list.append(astr) - datalist.append('(void *)NULL') - #datalist.append('(void *)%s' % t.func_data) - sub += 1 - else: - datalist.append('(void *)NULL'); - tname = chartoname[t.type].upper() - funclist.append('%s_%s' % (tname, name)) - - for x in t.in_ + t.out: - siglist.append('PyArray_%s' % (chartoname[x].upper(),)) - - k += 1 - - funcnames = ', '.join(funclist) - signames = ', '.join(siglist) - datanames = ', '.join(datalist) - code1list.append("static PyUFuncGenericFunction %s_functions[] = { %s };" \ - % (name, funcnames)) - code1list.append("static void * %s_data[] = { %s };" \ - % (name, datanames)) - code1list.append("static char %s_signatures[] = { %s };" \ - % (name, signames)) - return "\n".join(code1list),"\n".join(code2list) - -def make_ufuncs(funcdict): - code3list = [] - names = funcdict.keys() - names.sort() - for name in names: - uf = funcdict[name] - mlist = [] - mlist.append(\ -r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d, - %d, %d, %s, "%s", - "%s", 0);""" % (name, name, name, - len(uf.type_descriptions), - uf.nin, uf.nout, - uf.identity, - name, uf.docstring)) - mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) - mlist.append(r"""Py_DECREF(f);""") - code3list.append('\n'.join(mlist)) - return '\n'.join(code3list) - - -def make_code(funcdict,filename): - code1, code2 = make_arrays(funcdict) - code3 = make_ufuncs(funcdict) - code2 = indent(code2,4) - code3 = indent(code3,4) - code = r""" - -/** Warning this file is autogenerated!!! 
- - Please make changes to the code generator program (%s) -**/ - -%s - -static void -InitOperators(PyObject *dictionary) { - PyObject *f; - -%s -%s -} -""" % (filename, code1, code2, code3) - return code; - - -if __name__ == "__main__": - filename = __file__ - fid = open('__umath_generated.c','w') - code = make_code(defdict, filename) - fid.write(code) - fid.close() diff --git a/numpy/core/code_generators/multiarray_api_order.txt b/numpy/core/code_generators/multiarray_api_order.txt deleted file mode 100644 index 03a75a576..000000000 --- a/numpy/core/code_generators/multiarray_api_order.txt +++ /dev/null @@ -1,83 +0,0 @@ -PyArray_Transpose -PyArray_TakeFrom -PyArray_PutTo -PyArray_PutMask -PyArray_Repeat -PyArray_Choose -PyArray_Sort -PyArray_ArgSort -PyArray_SearchSorted -PyArray_ArgMax -PyArray_ArgMin -PyArray_Reshape -PyArray_Newshape -PyArray_Squeeze -PyArray_View -PyArray_SwapAxes -PyArray_Max -PyArray_Min -PyArray_Ptp -PyArray_Mean -PyArray_Trace -PyArray_Diagonal -PyArray_Clip -PyArray_Conjugate -PyArray_Nonzero -PyArray_Std -PyArray_Sum -PyArray_CumSum -PyArray_Prod -PyArray_CumProd -PyArray_All -PyArray_Any -PyArray_Compress -PyArray_Flatten -PyArray_Ravel -PyArray_MultiplyList -PyArray_MultiplyIntList -PyArray_GetPtr -PyArray_CompareLists -PyArray_AsCArray -PyArray_As1D -PyArray_As2D -PyArray_Free -PyArray_Converter -PyArray_IntpFromSequence -PyArray_Concatenate -PyArray_InnerProduct -PyArray_MatrixProduct -PyArray_CopyAndTranspose -PyArray_Correlate -PyArray_TypestrConvert -PyArray_DescrConverter -PyArray_DescrConverter2 -PyArray_IntpConverter -PyArray_BufferConverter -PyArray_AxisConverter -PyArray_BoolConverter -PyArray_ByteorderConverter -PyArray_OrderConverter -PyArray_EquivTypes -PyArray_Zeros -PyArray_Empty -PyArray_Where -PyArray_Arange -PyArray_ArangeObj -PyArray_SortkindConverter -PyArray_LexSort -PyArray_Round -PyArray_EquivTypenums -PyArray_RegisterDataType -PyArray_RegisterCastFunc -PyArray_RegisterCanCast -PyArray_InitArrFuncs -PyArray_IntTupleFromIntp -PyArray_TypeNumFromName -PyArray_ClipmodeConverter -PyArray_OutputConverter -PyArray_BroadcastToShape -_PyArray_SigintHandler -_PyArray_GetSigintBuf -PyArray_DescrAlignConverter -PyArray_DescrAlignConverter2 -PyArray_SearchsideConverter diff --git a/numpy/core/code_generators/ufunc_api_order.txt b/numpy/core/code_generators/ufunc_api_order.txt deleted file mode 100644 index 816d3121d..000000000 --- a/numpy/core/code_generators/ufunc_api_order.txt +++ /dev/null @@ -1,30 +0,0 @@ -PyUFunc_FromFuncAndData -PyUFunc_RegisterLoopForType -PyUFunc_GenericFunction -PyUFunc_f_f_As_d_d -PyUFunc_d_d -PyUFunc_f_f -PyUFunc_g_g -PyUFunc_F_F_As_D_D -PyUFunc_F_F -PyUFunc_D_D -PyUFunc_G_G -PyUFunc_O_O -PyUFunc_ff_f_As_dd_d -PyUFunc_ff_f -PyUFunc_dd_d -PyUFunc_gg_g -PyUFunc_FF_F_As_DD_D -PyUFunc_DD_D -PyUFunc_FF_F -PyUFunc_GG_G -PyUFunc_OO_O -PyUFunc_O_O_method -PyUFunc_OO_O_method -PyUFunc_On_Om -PyUFunc_GetPyValues -PyUFunc_checkfperr -PyUFunc_clearfperr -PyUFunc_getfperr -PyUFunc_handlefperr -PyUFunc_ReplaceLoopBySignature \ No newline at end of file diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py deleted file mode 100644 index 33e90c965..000000000 --- a/numpy/core/defchararray.py +++ /dev/null @@ -1,340 +0,0 @@ -import sys -from numerictypes import string_, unicode_, integer, object_ -from numeric import ndarray, broadcast, empty, compare_chararrays -from numeric import array as narray - -__all__ = ['chararray'] - -_globalvar = 0 -_unicode = unicode - -# special sub-class for character arrays (string_ and 
unicode_) -# This adds + and * operations and methods of str and unicode types -# which operate on an element-by-element basis - -# It also strips white-space on element retrieval and on -# comparisons - -class chararray(ndarray): - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, - offset=0, strides=None, order='C'): - global _globalvar - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - _globalvar = 1 - if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - order=order) - else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - buffer=buffer, - offset=offset, strides=strides, - order=order) - _globalvar = 0 - return self - - def __array_finalize__(self, obj): - # The b is a special case because it is used for reconstructing. - if not _globalvar and self.dtype.char not in 'SUb': - raise ValueError, "Can only create a chararray from string data." - - def __getitem__(self, obj): - val = ndarray.__getitem__(self, obj) - if isinstance(val, (string_, unicode_)): - temp = val.rstrip() - if len(temp) == 0: - val = '' - else: - val = temp - return val - - def __eq__(self, other): - return compare_chararrays(self, other, '==', True) - - def __ne__(self, other): - return compare_chararrays(self, other, '!=', True) - - def __ge__(self, other): - return compare_chararrays(self, other, '>=', True) - - def __le__(self, other): - return compare_chararrays(self, other, '<=', True) - - def __gt__(self, other): - return compare_chararrays(self, other, '>', True) - - def __lt__(self, other): - return compare_chararrays(self, other, '<', True) - - def __add__(self, other): - b = broadcast(self, other) - arr = b.iters[1].base - outitem = self.itemsize + arr.itemsize - result = chararray(b.shape, outitem, self.dtype is unicode_) - res = result.flat - for k, val in enumerate(b): - res[k] = (val[0] + val[1]) - return result - - def __radd__(self, other): - b = broadcast(other, self) - outitem = b.iters[0].base.itemsize + \ - b.iters[1].base.itemsize - result = chararray(b.shape, outitem, self.dtype is unicode_) - res = result.flat - for k, val in enumerate(b): - res[k] = (val[0] + val[1]) - return result - - def __mul__(self, other): - b = broadcast(self, other) - arr = b.iters[1].base - if not issubclass(arr.dtype.type, integer): - raise ValueError, "Can only multiply by integers" - outitem = b.iters[0].base.itemsize * arr.max() - result = chararray(b.shape, outitem, self.dtype is unicode_) - res = result.flat - for k, val in enumerate(b): - res[k] = val[0]*val[1] - return result - - def __rmul__(self, other): - b = broadcast(self, other) - arr = b.iters[1].base - if not issubclass(arr.dtype.type, integer): - raise ValueError, "Can only multiply by integers" - outitem = b.iters[0].base.itemsize * arr.max() - result = chararray(b.shape, outitem, self.dtype is unicode_) - res = result.flat - for k, val in enumerate(b): - res[k] = val[0]*val[1] - return result - - def __mod__(self, other): - b = broadcast(self, other) - res = [None]*b.size - maxsize = -1 - for k,val in enumerate(b): - newval = val[0] % val[1] - maxsize = max(len(newval), maxsize) - res[k] = newval - newarr = chararray(b.shape, maxsize, self.dtype is unicode_) - newarr[:] = res - return newarr - - def __rmod__(self, other): - return NotImplemented - - def argsort(self, axis=-1, kind='quicksort', order=None): - return self.__array__().argsort(axis, kind, order) - - def _generalmethod(self, name, myiter): - res = [None]*myiter.size - maxsize = -1 - for k, val in enumerate(myiter): - 
newval = [] - for chk in val[1:]: - if not chk or (chk.dtype is object_ and chk.item() is None): - break - newval.append(chk) - newitem = getattr(val[0],name)(*newval) - maxsize = max(len(newitem), maxsize) - res[k] = newitem - newarr = chararray(myiter.shape, maxsize, self.dtype is unicode_) - newarr[:] = res - return newarr - - def _typedmethod(self, name, myiter, dtype): - result = empty(myiter.shape, dtype=dtype) - res = result.flat - for k, val in enumerate(myiter): - newval = [] - for chk in val[1:]: - if not chk or (chk.dtype is object_ and chk.item() is None): - break - newval.append(chk) - this_str = val[0].rstrip('\x00') - newitem = getattr(this_str,name)(*newval) - res[k] = newitem - return result - - def _samemethod(self, name): - result = self.copy() - res = result.flat - for k, val in enumerate(self.flat): - res[k] = getattr(val, name)() - return result - - def capitalize(self): - return self._samemethod('capitalize') - - if sys.version[:3] >= '2.4': - def center(self, width, fillchar=' '): - return self._generalmethod('center', - broadcast(self, width, fillchar)) - def ljust(self, width, fillchar=' '): - return self._generalmethod('ljust', - broadcast(self, width, fillchar)) - def rjust(self, width, fillchar=' '): - return self._generalmethod('rjust', - broadcast(self, width, fillchar)) - def rsplit(self, sep=None, maxsplit=None): - return self._typedmethod('rsplit', broadcast(self, sep, maxsplit), - object) - else: - def ljust(self, width): - return self._generalmethod('ljust', broadcast(self, width)) - def rjust(self, width): - return self._generalmethod('rjust', broadcast(self, width)) - def center(self, width): - return self._generalmethod('center', broadcast(self, width)) - - def count(self, sub, start=None, end=None): - return self._typedmethod('count', broadcast(self, sub, start, end), int) - - def decode(self,encoding=None,errors=None): - return self._generalmethod('decode', broadcast(self, encoding, errors)) - - def encode(self,encoding=None,errors=None): - return self._generalmethod('encode', broadcast(self, encoding, errors)) - - def endswith(self, suffix, start=None, end=None): - return self._typedmethod('endswith', broadcast(self, suffix, start, end), bool) - - def expandtabs(self, tabsize=None): - return self._generalmethod('endswith', broadcast(self, tabsize)) - - def find(self, sub, start=None, end=None): - return self._typedmethod('find', broadcast(self, sub, start, end), int) - - def index(self, sub, start=None, end=None): - return self._typedmethod('index', broadcast(self, sub, start, end), int) - - def _ismethod(self, name): - result = empty(self.shape, dtype=bool) - res = result.flat - for k, val in enumerate(self.flat): - item = val.rstrip('\x00') - res[k] = getattr(item, name)() - return result - - def isalnum(self): - return self._ismethod('isalnum') - - def isalpha(self): - return self._ismethod('isalpha') - - def isdigit(self): - return self._ismethod('isdigit') - - def islower(self): - return self._ismethod('islower') - - def isspace(self): - return self._ismethod('isspace') - - def istitle(self): - return self._ismethod('istitle') - - def isupper(self): - return self._ismethod('isupper') - - def join(self, seq): - return self._generalmethod('join', broadcast(self, seq)) - - def lower(self): - return self._samemethod('lower') - - def lstrip(self, chars): - return self._generalmethod('lstrip', broadcast(self, chars)) - - def replace(self, old, new, count=None): - return self._generalmethod('replace', broadcast(self, old, new, count)) - - def 
rfind(self, sub, start=None, end=None): - return self._typedmethod('rfind', broadcast(self, sub, start, end), int) - - def rindex(self, sub, start=None, end=None): - return self._typedmethod('rindex', broadcast(self, sub, start, end), int) - - def rstrip(self, chars=None): - return self._generalmethod('rstrip', broadcast(self, chars)) - - def split(self, sep=None, maxsplit=None): - return self._typedmethod('split', broadcast(self, sep, maxsplit), object) - - def splitlines(self, keepends=None): - return self._typedmethod('splitlines', broadcast(self, keepends), object) - - def startswith(self, prefix, start=None, end=None): - return self._typedmethod('startswith', broadcast(self, prefix, start, end), bool) - - def strip(self, chars=None): - return self._generalmethod('strip', broadcast(self, chars)) - - def swapcase(self): - return self._samemethod('swapcase') - - def title(self): - return self._samemethod('title') - - def translate(self, table, deletechars=None): - if self.dtype is unicode_: - return self._generalmethod('translate', broadcast(self, table)) - else: - return self._generalmethod('translate', broadcast(self, table, deletechars)) - - def upper(self): - return self._samemethod('upper') - - def zfill(self, width): - return self._generalmethod('zfill', broadcast(self, width)) - - -def array(obj, itemsize=None, copy=True, unicode=False, order=None): - - if isinstance(obj, chararray): - if itemsize is None: - itemsize = obj.itemsize - if copy or (itemsize != obj.itemsize) \ - or (not unicode and obj.dtype == unicode_) \ - or (unicode and obj.dtype == string_): - return obj.astype("%s%d" % (obj.dtype.char, itemsize)) - else: - return obj - - if isinstance(obj, ndarray) and (obj.dtype in [unicode_, string_]): - new = obj.view(chararray) - if unicode and obj.dtype == string_: - return new.astype((unicode_, obj.itemsize)) - elif obj.dtype == unicode_: - return new.astype((string_, obj.itemsize)) - - if copy: return new.copy() - else: return new - - if unicode: dtype = "U" - else: dtype = "S" - - if itemsize is not None: - dtype += str(itemsize) - - if isinstance(obj, (str, _unicode)): - if itemsize is None: - itemsize = len(obj) - shape = len(obj) / itemsize - return chararray(shape, itemsize=itemsize, unicode=unicode, - buffer=obj) - - # default - val = narray(obj, dtype=dtype, order=order, subok=1) - - return val.view(chararray) - -def asarray(obj, itemsize=None, unicode=False, order=None): - return array(obj, itemsize, copy=False, - unicode=unicode, order=order) diff --git a/numpy/core/defmatrix.py b/numpy/core/defmatrix.py deleted file mode 100644 index 37cef8fc0..000000000 --- a/numpy/core/defmatrix.py +++ /dev/null @@ -1,494 +0,0 @@ -__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] - -import sys -import numeric as N -from numeric import concatenate, isscalar, binary_repr - -# make translation table -_table = [None]*256 -for k in range(256): - _table[k] = chr(k) -_table = ''.join(_table) - -_numchars = '0123456789.-+jeEL' -_todelete = [] -for k in _table: - if k not in _numchars: - _todelete.append(k) -_todelete = ''.join(_todelete) -del k - -def _eval(astr): - return eval(astr.translate(_table,_todelete)) - -def _convert_from_string(data): - rows = data.split(';') - newdata = [] - count = 0 - for row in rows: - trow = row.split(',') - newrow = [] - for col in trow: - temp = col.split() - newrow.extend(map(_eval,temp)) - if count == 0: - Ncols = len(newrow) - elif len(newrow) != Ncols: - raise ValueError, "Rows not the same size." 
- count += 1 - newdata.append(newrow) - return newdata - -def asmatrix(data, dtype=None): - """ Returns 'data' as a matrix. Unlike matrix(), no copy is performed - if 'data' is already a matrix or array. Equivalent to: - matrix(data, copy=False) - """ - return matrix(data, dtype=dtype, copy=False) - - - -class matrix(N.ndarray): - __array_priority__ = 10.0 - def __new__(subtype, data, dtype=None, copy=True): - if isinstance(data, matrix): - dtype2 = data.dtype - if (dtype is None): - dtype = dtype2 - if (dtype2 == dtype) and (not copy): - return data - return data.astype(dtype) - - if isinstance(data, N.ndarray): - if dtype is None: - intype = data.dtype - else: - intype = N.dtype(dtype) - new = data.view(subtype) - if intype != data.dtype: - return new.astype(intype) - if copy: return new.copy() - else: return new - - if isinstance(data, str): - data = _convert_from_string(data) - - # now convert data to an array - arr = N.array(data, dtype=dtype, copy=copy) - ndim = arr.ndim - shape = arr.shape - if (ndim > 2): - raise ValueError, "matrix must be 2-dimensional" - elif ndim == 0: - shape = (1,1) - elif ndim == 1: - shape = (1,shape[0]) - - order = False - if (ndim == 2) and arr.flags.fortran: - order = True - - if not (order or arr.flags.contiguous): - arr = arr.copy() - - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) - return ret - - def __array_finalize__(self, obj): - self._getitem = False - if (isinstance(obj, matrix) and obj._getitem): return - ndim = self.ndim - if (ndim == 2): - return - if (ndim > 2): - newshape = tuple([x for x in self.shape if x > 1]) - ndim = len(newshape) - if ndim == 2: - self.shape = newshape - return - elif (ndim > 2): - raise ValueError, "shape too large to be a matrix." - else: - newshape = self.shape - if ndim == 0: - self.shape = (1,1) - elif ndim == 1: - self.shape = (1,newshape[0]) - return - - def __getitem__(self, index): - self._getitem = True - try: - out = N.ndarray.__getitem__(self, index) - finally: - self._getitem = False - - if not isinstance(out, N.ndarray): - return out - - if out.ndim == 0: - return out[()] - if out.ndim == 1: - sh = out.shape[0] - # Determine when we should have a column array - try: - n = len(index) - except: - n = 0 - if n > 1 and isscalar(index[1]): - out.shape = (sh,1) - else: - out.shape = (1,sh) - return out - - def _get_truendim(self): - shp = self.shape - truend = 0 - for val in shp: - if (val > 1): truend += 1 - return truend - - - def __mul__(self, other): - if isinstance(other,(N.ndarray, list, tuple)) : - # This promotes 1-D vectors to row vectors - return N.dot(self, asmatrix(other)) - if N.isscalar(other) or not hasattr(other, '__rmul__') : - return N.dot(self, other) - return NotImplemented - - def __rmul__(self, other): - return N.dot(other, self) - - def __imul__(self, other): - self[:] = self * other - return self - - def __pow__(self, other): - shape = self.shape - if len(shape) != 2 or shape[0] != shape[1]: - raise TypeError, "matrix is not square" - if type(other) in (type(1), type(1L)): - if other==0: - return matrix(N.identity(shape[0])) - if other<0: - x = self.I - other=-other - else: - x=self - result = x - if other <= 3: - while(other>1): - result=result*x - other=other-1 - return result - # binary decomposition to reduce the number of Matrix - # Multiplies for other > 3. 
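    # The binary-decomposition step just below is square-and-multiply
    # exponentiation.  A self-contained sketch of the same idea with plain
    # NumPy arrays (matrix_power_sketch is an illustrative name, not part
    # of this module):
    import numpy as np

    def matrix_power_sketch(x, n):
        # x: square 2-D array, n: positive integer exponent.
        result = np.identity(x.shape[0], dtype=x.dtype)
        square = x.copy()
        while n > 0:
            if n & 1:                        # this binary digit of n is 1
                result = np.dot(result, square)
            square = np.dot(square, square)  # square for the next binary digit
            n >>= 1
        return result

    # This needs O(log n) matrix products instead of the n-1 required by
    # repeated multiplication, which is what the decomposition below buys.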
- beta = binary_repr(other) - t = len(beta) - Z,q = x.copy(),0 - while beta[t-q-1] == '0': - Z *= Z - q += 1 - result = Z.copy() - for k in range(q+1,t): - Z *= Z - if beta[t-k-1] == '1': - result *= Z - return result - else: - raise TypeError, "exponent must be an integer" - - def __rpow__(self, other): - return NotImplemented - - def __repr__(self): - s = repr(self.__array__()).replace('array', 'matrix') - # now, 'matrix' has 6 letters, and 'array' 5, so the columns don't - # line up anymore. We need to add a space. - l = s.splitlines() - for i in range(1, len(l)): - if l[i]: - l[i] = ' ' + l[i] - return '\n'.join(l) - - def __str__(self): - return str(self.__array__()) - - def _align(self, axis): - """A convenience function for operations that need to preserve axis - orientation. - """ - if axis is None: - return self[0,0] - elif axis==0: - return self - elif axis==1: - return self.transpose() - else: - raise ValueError, "unsupported axis" - - # To preserve orientation of result... - def sum(self, axis=None, dtype=None, out=None): - """Sum the matrix over the given axis. If the axis is None, sum - over all dimensions. This preserves the orientation of the - result as a row or column. - """ - return N.ndarray.sum(self, axis, dtype, out)._align(axis) - - def mean(self, axis=None, out=None): - """Compute the mean along the specified axis. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - - :Parameters: - - axis : integer - Axis along which the means are computed. The default is - to compute the standard deviation of the flattened array. - - dtype : type - Type to use in computing the means. For arrays of integer type - the default is float32, for arrays of float types it is the - same as the array type. - - out : ndarray - Alternative output array in which to place the result. It must - have the same shape as the expected output but the type will be - cast if necessary. - - :Returns: - - mean : The return type varies, see above. - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. - - :SeeAlso: - - - var : variance - - std : standard deviation - - Notes - ----- - - The mean is the sum of the elements along the axis divided by the - number of elements. - - """ - return N.ndarray.mean(self, axis, out)._align(axis) - - def std(self, axis=None, dtype=None, out=None): - """Compute the standard deviation along the specified axis. - - Returns the standard deviation of the array elements, a measure of the - spread of a distribution. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - :Parameters: - - axis : integer - Axis along which the standard deviation is computed. The - default is to compute the standard deviation of the flattened - array. - - dtype : type - Type to use in computing the standard deviation. For arrays of - integer type the default is float32, for arrays of float types - it is the same as the array type. - - out : ndarray - Alternative output array in which to place the result. It must - have the same shape as the expected output but the type will be - cast if necessary. - - :Returns: - - standard deviation : The return type varies, see above. - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. 
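The exponentiation branch above uses binary (square-and-multiply) decomposition so that only O(log n) matrix products are needed for exponents larger than 3. A minimal standalone sketch of the same idea for plain ndarrays (the helper name matpow is made up for illustration):

import numpy as np

def matpow(a, n):
    # square-and-multiply: walk the bits of n, squaring at each step
    result = np.eye(a.shape[0], dtype=a.dtype)
    square = a
    while n > 0:
        if n & 1:
            result = np.dot(result, square)
        square = np.dot(square, square)
        n >>= 1
    return result

a = np.array([[2, 0], [1, 3]])
assert np.array_equal(matpow(a, 5), np.linalg.matrix_power(a, 5))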
- - :SeeAlso: - - - var : variance - - mean : average - - Notes - ----- - - The standard deviation is the square root of the average of the - squared deviations from the mean, i.e. var = sqrt(mean((x - - x.mean())**2)). The computed standard deviation is biased, i.e., the - mean is computed by dividing by the number of elements, N, rather - than by N-1. - - """ - return N.ndarray.std(self, axis, dtype, out)._align(axis) - - def var(self, axis=None, dtype=None, out=None): - """Compute the variance along the specified axis. - - Returns the variance of the array elements, a measure of the spread of - a distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - :Parameters: - - axis : integer - Axis along which the variance is computed. The default is to - compute the variance of the flattened array. - - dtype : type - Type to use in computing the variance. For arrays of integer - type the default is float32, for arrays of float types it is - the same as the array type. - - out : ndarray - Alternative output array in which to place the result. It must - have the same shape as the expected output but the type will be - cast if necessary. - - :Returns: - - variance : depends, see above - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. - - :SeeAlso: - - - std : standard deviation - - mean : average - - Notes - ----- - - The variance is the average of the squared deviations from the mean, - i.e. var = mean((x - x.mean())**2). The computed variance is - biased, i.e., the mean is computed by dividing by the number of - elements, N, rather than by N-1. - - """ - return N.ndarray.var(self, axis, dtype, out)._align(axis) - - def prod(self, axis=None, dtype=None, out=None): - return N.ndarray.prod(self, axis, dtype, out)._align(axis) - - def any(self, axis=None, out=None): - return N.ndarray.any(self, axis, out)._align(axis) - - def all(self, axis=None, out=None): - return N.ndarray.all(self, axis, out)._align(axis) - - def max(self, axis=None, out=None): - return N.ndarray.max(self, axis, out)._align(axis) - - def argmax(self, axis=None, out=None): - return N.ndarray.argmax(self, axis, out)._align(axis) - - def min(self, axis=None, out=None): - return N.ndarray.min(self, axis, out)._align(axis) - - def argmin(self, axis=None, out=None): - return N.ndarray.argmin(self, axis, out)._align(axis) - - def ptp(self, axis=None, out=None): - return N.ndarray.ptp(self, axis, out)._align(axis) - - # Needed becase tolist method expects a[i] - # to have dimension a.ndim-1 - def tolist(self): - return self.__array__().tolist() - - def getI(self): - M,N = self.shape - if M == N: - from numpy.dual import inv as func - else: - from numpy.dual import pinv as func - return asmatrix(func(self)) - - def getA(self): - return self.__array__() - - def getA1(self): - return self.__array__().ravel() - - def getT(self): - return self.transpose() - - def getH(self): - if issubclass(self.dtype.type, N.complexfloating): - return self.transpose().conjugate() - else: - return self.transpose() - - T = property(getT, None, doc="transpose") - A = property(getA, None, doc="base array") - A1 = property(getA1, None, doc="1-d base array") - H = property(getH, None, doc="hermitian (conjugate) transpose") - I = property(getI, None, doc="inverse") - -def _from_string(str,gdict,ldict): - rows = str.split(';') - rowtup = [] - for row in rows: - trow = row.split(',') - newrow = [] - for x in trow: - newrow.extend(x.split()) 
- trow = newrow - coltup = [] - for col in trow: - col = col.strip() - try: - thismat = ldict[col] - except KeyError: - try: - thismat = gdict[col] - except KeyError: - raise KeyError, "%s not found" % (col,) - - coltup.append(thismat) - rowtup.append(concatenate(coltup,axis=-1)) - return concatenate(rowtup,axis=0) - - -def bmat(obj, ldict=None, gdict=None): - """Build a matrix object from string, nested sequence, or array. - - Ex: F = bmat('A, B; C, D') - F = bmat([[A,B],[C,D]]) - F = bmat(r_[c_[A,B],c_[C,D]]) - - all produce the same Matrix Object [ A B ] - [ C D ] - - if A, B, C, and D are appropriately shaped 2-d arrays. - """ - if isinstance(obj, str): - if gdict is None: - # get previous frame - frame = sys._getframe().f_back - glob_dict = frame.f_globals - loc_dict = frame.f_locals - else: - glob_dict = gdict - loc_dict = ldict - - return matrix(_from_string(obj, glob_dict, loc_dict)) - - if isinstance(obj, (tuple, list)): - # [[A,B],[C,D]] - arr_rows = [] - for row in obj: - if isinstance(row, N.ndarray): # not 2-d - return matrix(concatenate(obj,axis=-1)) - else: - arr_rows.append(concatenate(row,axis=-1)) - return matrix(concatenate(arr_rows,axis=0)) - if isinstance(obj, N.ndarray): - return matrix(obj) - -mat = asmatrix diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py deleted file mode 100644 index fa2c6d337..000000000 --- a/numpy/core/fromnumeric.py +++ /dev/null @@ -1,1519 +0,0 @@ -# Module containing non-deprecated functions borrowed from Numeric. -__docformat__ = "restructuredtext en" - -# functions that are now methods -__all__ = ['take', 'reshape', 'choose', 'repeat', 'put', - 'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin', - 'searchsorted', 'alen', - 'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape', - 'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue', - 'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim', - 'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze', - 'amax', 'amin', - ] - -import multiarray as mu -import umath as um -import numerictypes as nt -from numeric import asarray, array, asanyarray, concatenate -_dt_ = nt.sctype2char - -import types - -try: - _gentype = types.GeneratorType -except AttributeError: - _gentype = types.NoneType - -# save away Python sum -_sum_ = sum - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj),method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def take(a, indices, axis=None, out=None, mode='raise'): - """Return an array formed from the elements of a at the given indices. - - This function does the same thing as "fancy" indexing; however, it can - be easier to use if you need to specify a given axis. - - *Parameters*: - - a : array - The source array - indices : int array - The indices of the values to extract. - axis : {None, int}, optional - The axis over which to select values. None signifies that the - operation should be performed over the flattened array. - out : {None, array}, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. 
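bmat, defined just above, builds a block matrix either from nested sequences or from a string whose names are resolved against the caller's namespace. A small usage sketch:

import numpy as np

A = np.matrix('1 0; 0 1')
B = np.matrix('2 2; 2 2')
F1 = np.bmat([[A, B], [B, A]])   # nested-sequence form
F2 = np.bmat('A B; B A')         # string form, names looked up in this frame
assert (F1 == F2).all()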
- 'raise' -- raise an error - 'wrap' -- wrap around - 'clip' -- clip to the range - - *Returns*: - - subarray : array - The returned array has the same type as a. - - *See Also*: - - `ndarray.take` : equivalent method - - """ - try: - take = a.take - except AttributeError: - return _wrapit(a, 'take', indices, axis, out, mode) - return take(indices, axis, out, mode) - - -# not deprecated --- copy if necessary, view otherwise -def reshape(a, newshape, order='C'): - """Returns an array containing the data of a, but with a new shape. - - *Parameters*: - - a : array - Array to be reshaped. - newshape : shape tuple or int - The new shape should be compatible with the original shape. If an - integer, then the result will be a 1D array of that length. - order : {'C', 'FORTRAN'}, optional - Determines whether the array data should be viewed as in C - (row-major) order or FORTRAN (column-major) order. - - *Returns*: - - reshaped_array : array - This will be a new view object if possible; otherwise, it will - return a copy. - - *See Also*: - - `ndarray.reshape` : Equivalent method. - - """ - try: - reshape = a.reshape - except AttributeError: - return _wrapit(a, 'reshape', newshape, order=order) - return reshape(newshape, order=order) - - -def choose(a, choices, out=None, mode='raise'): - """Use an index array to construct a new array from a set of choices. - - Given an array of integers in {0, 1, ..., n-1} and a set of n choice arrays, - this function will create a new array that merges each of the choice arrays. - Where a value in `a` is i, then the new array will have the value that - choices[i] contains in the same place. - - *Parameters*: - - a : int array - This array must contain integers in [0, n-1], where n is the number - of choices. - choices : sequence of arrays - Each of the choice arrays should have the same shape as the index - array. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - 'raise' : raise an error - 'wrap' : wrap around - 'clip' : clip to the range - - *Returns*: - - merged_array : array - - *See Also*: - - `ndarray.choose` : equivalent method - - *Examples* - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> choose([2, 3, 1, 0], choices) - array([20, 31, 12, 3]) - >>> choose([2, 4, 1, 0], choices, mode='clip') - array([20, 31, 12, 3]) - >>> choose([2, 4, 1, 0], choices, mode='wrap') - array([20, 1, 12, 3]) - - """ - try: - choose = a.choose - except AttributeError: - return _wrapit(a, 'choose', choices, out=out, mode=mode) - return choose(choices, out=out, mode=mode) - - -def repeat(a, repeats, axis=None): - """Repeat elements of an array. - - *Parameters*: - - a : {array_like} - Blah. - repeats : {integer, integer_array} - The number of repetitions for each element. If a plain integer, then - it is applied to all elements. If an array, it needs to be of the - same length as the chosen axis. - axis : {None, integer}, optional - The axis along which to repeat values. If None, then this function - will operated on the flattened array `a` and return a similarly flat - result. 
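A quick sketch of the take and choose behaviour described above, including the out-of-bounds modes:

import numpy as np

a = np.array([10, 20, 30, 40])
np.take(a, [0, 3])                   # array([10, 40])
np.take(a, [0, 5], mode='clip')      # index 5 clipped to 3 -> array([10, 40])
np.take(a, [0, 5], mode='wrap')      # index 5 wraps to 1   -> array([10, 20])

choices = [[0, 1, 2, 3], [10, 11, 12, 13]]
np.choose([1, 0, 1, 0], choices)     # array([10,  1, 12,  3])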
- - *Returns*: - - repeated_array : array - - *See Also*: - - `ndarray.repeat` : equivalent method - - *Examples* - - >>> repeat([0, 1, 2], 2) - array([0, 0, 1, 1, 2, 2]) - >>> repeat([0, 1, 2], [2, 3, 4]) - array([0, 0, 1, 1, 1, 2, 2, 2, 2]) - - """ - try: - repeat = a.repeat - except AttributeError: - return _wrapit(a, 'repeat', repeats, axis) - return repeat(repeats, axis) - - -def put (a, ind, v, mode='raise'): - """Set a[n] = v[n] for all n in ind. - - If v is shorter than mask it will be repeated as necessary. In particular v - can be a scalar or length 1 array. The routine put is the equivalent of the - following (although the loop is in C for speed): - - ind = array(indices, copy=False) - v = array(values, copy=False).astype(a.dtype) - for i in ind: a.flat[i] = v[i] - - a must be a contiguous numpy array. - - """ - return a.put(ind, v, mode) - - -def swapaxes(a, axis1, axis2): - """Return array a with axis1 and axis2 interchanged. - - Blah, Blah. - - """ - try: - swapaxes = a.swapaxes - except AttributeError: - return _wrapit(a, 'swapaxes', axis1, axis2) - return swapaxes(axis1, axis2) - - -def transpose(a, axes=None): - """Return a view of the array with dimensions permuted. - - Permutes axis according to list axes. If axes is None (default) returns - array with dimensions reversed. - - """ - try: - transpose = a.transpose - except AttributeError: - return _wrapit(a, 'transpose', axes) - return transpose(axes) - - -def sort(a, axis=-1, kind='quicksort', order=None): - """Return copy of 'a' sorted along the given axis. - - Perform an inplace sort along the given axis using the algorithm - specified by the kind keyword. - - *Parameters*: - - a : array - Array to be sorted. - axis : {None, int} optional - Axis along which to sort. None indicates that the flattened - array should be used. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm to use. - order : {None, list type}, optional - When a is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - *Returns*: - - sorted_array : array of same type as a - - *See Also*: - - `argsort` : Indirect sort. - - `lexsort` : Indirect stable sort on multiple keys. - - `searchsorted` : Find keys in sorted array. - - *Notes* - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following - properties: - - +-----------+-------+-------------+------------+-------+ - | kind | speed | worst case | work space | stable| - +===========+=======+=============+============+=======+ - | quicksort | 1 | O(n^2) | 0 | no | - +-----------+-------+-------------+------------+-------+ - | mergesort | 2 | O(n*log(n)) | ~n/2 | yes | - +-----------+-------+-------------+------------+-------+ - | heapsort | 3 | O(n*log(n)) | 0 | no | - +-----------+-------+-------------+------------+-------+ - - All the sort algorithms make temporary copies of the data when - the sort is not along the last axis. Consequently, sorts along - the last axis are faster and use less space than sorts along - other axis. - - """ - if axis is None: - a = asanyarray(a).flatten() - axis = 0 - else: - a = asanyarray(a).copy() - a.sort(axis, kind, order) - return a - - -def argsort(a, axis=-1, kind='quicksort', order=None): - """Returns array of indices that index 'a' in sorted order. 
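sort returns a sorted copy; argsort, whose documentation continues below, returns the permutation that produces it, so indexing with that permutation recovers the sorted array. A minimal sketch:

import numpy as np

a = np.array([3, 1, 2])
order = np.argsort(a)                 # array([1, 2, 0])
assert np.array_equal(a[order], np.sort(a))
np.sort(a, kind='mergesort')          # request the stable algorithm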
- - Perform an indirect sort along the given axis using the algorithm specified - by the kind keyword. It returns an array of indices of the same shape as a - that index data along the given axis in sorted order. - - *Parameters*: - - a : array - Array to be sorted. - axis : {None, int} optional - Axis along which to sort. None indicates that the flattened - array should be used. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm to use. - order : {None, list type}, optional - When a is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - *Returns*: - - index_array : {integer_array} - Array of indices that sort 'a' along the specified axis. - - *See Also*: - - `lexsort` : Indirect stable sort with multiple keys. - - `sort` : Inplace sort. - - *Notes* - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following - properties: - - +-----------+-------+-------------+------------+-------+ - | kind | speed | worst case | work space | stable| - +===========+=======+=============+============+=======+ - | quicksort | 1 | O(n^2) | 0 | no | - +-----------+-------+-------------+------------+-------+ - | mergesort | 2 | O(n*log(n)) | ~n/2 | yes | - +-----------+-------+-------------+------------+-------+ - | heapsort | 3 | O(n*log(n)) | 0 | no | - +-----------+-------+-------------+------------+-------+ - - All the sort algorithms make temporary copies of the data when - the sort is not along the last axis. Consequently, sorts along - the last axis are faster and use less space than sorts along - other axis. - - """ - try: - argsort = a.argsort - except AttributeError: - return _wrapit(a, 'argsort', axis, kind, order) - return argsort(axis, kind, order) - - -def argmax(a, axis=None): - """Returns array of indices of the maximum values of along the given axis. - - *Parameters*: - - a : {array_like} - Array to look in. - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - - *Returns*: - - index_array : {integer_array} - - *Examples* - - >>> a = arange(6).reshape(2,3) - >>> argmax(a) - 5 - >>> argmax(a,0) - array([1, 1, 1]) - >>> argmax(a,1) - array([2, 2]) - - """ - try: - argmax = a.argmax - except AttributeError: - return _wrapit(a, 'argmax', axis) - return argmax(axis) - - -def argmin(a, axis=None): - """Return array of indices to the minimum values along the given axis. - - *Parameters*: - - a : {array_like} - Array to look in. - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - - *Returns*: - - index_array : {integer_array} - - *Examples* - - >>> a = arange(6).reshape(2,3) - >>> argmin(a) - 0 - >>> argmin(a,0) - array([0, 0, 0]) - >>> argmin(a,1) - array([0, 0]) - - """ - try: - argmin = a.argmin - except AttributeError: - return _wrapit(a, 'argmin', axis) - return argmin(axis) - - -def searchsorted(a, v, side='left'): - """Return indices where keys in v should be inserted to maintain order. - - Find the indices into a sorted array such that if the corresponding keys in - v were inserted before the indices the order of a would be preserved. If - side='left', then the first such index is returned. If side='right', then - the last such index is returned. 
If there is no such index because the key - is out of bounds, then the length of a is returned, i.e., the key would need - to be appended. The returned index array has the same shape as v. - - *Parameters*: - - a : 1-d array - Array must be sorted in ascending order. - v : array or list type - Array of keys to be searched for in a. - side : {'left', 'right'}, optional - If 'left', the index of the first location where the key could be - inserted is found, if 'right', the index of the last such element is - returned. If the is no such element, then either 0 or N is returned, - where N is the size of the array. - - *Returns*: - - indices : integer array - Array of insertion points with the same shape as v. - - *See Also*: - - `sort` : Inplace sort. - - `histogram` : Produce histogram from 1-d data. - - *Notes* - - The array a must be 1-d and is assumed to be sorted in ascending - order. Searchsorted uses binary search to find the required - insertion points. - - *Examples* - - >>> searchsorted([1,2,3,4,5],[6,4,0]) - array([5, 3, 0]) - - """ - try: - searchsorted = a.searchsorted - except AttributeError: - return _wrapit(a, 'searchsorted', v, side) - return searchsorted(v, side) - - -def resize(a, new_shape): - """Return a new array with the specified shape. - - The original array's total size can be any size. The new array is - filled with repeated copies of a. - - Note that a.resize(new_shape) will fill the array with 0's beyond - current definition of a. - - *Parameters*: - - a : {array_like} - Array to be reshaped. - - new_shape : {tuple} - Shape of reshaped array. - - *Returns*: - - reshaped_array : {array} - The new array is formed from the data in the old array, repeated if - necessary to fill out the required number of elements, with the new - shape. - - """ - if isinstance(new_shape, (int, nt.integer)): - new_shape = (new_shape,) - a = ravel(a) - Na = len(a) - if not Na: return mu.zeros(new_shape, a.dtype.char) - total_size = um.multiply.reduce(new_shape) - n_copies = int(total_size / Na) - extra = total_size % Na - - if total_size == 0: - return a[:0] - - if extra != 0: - n_copies = n_copies+1 - extra = Na-extra - - a = concatenate( (a,)*n_copies) - if extra > 0: - a = a[:-extra] - - return reshape(a, new_shape) - - -def squeeze(a): - """Remove single-dimensional entries from the shape of a. - - *Examples* - - >>> x = array([[[1,1,1],[2,2,2],[3,3,3]]]) - >>> x - array([[[1, 1, 1], - [2, 2, 2], - [3, 3, 3]]]) - >>> x.shape - (1, 3, 3) - >>> squeeze(x).shape - (3, 3) - - """ - try: - squeeze = a.squeeze - except AttributeError: - return _wrapit(a, 'squeeze') - return squeeze() - - -def diagonal(a, offset=0, axis1=0, axis2=1): - """Return specified diagonals. - - If a is 2-d, returns the diagonal of self with the given offset, i.e., the - collection of elements of the form a[i,i+offset]. If a has more than two - dimensions, then the axes specified by axis1 and axis2 are used to determine - the 2-d subarray whose diagonal is returned. The shape of the resulting - array can be determined by removing axis1 and axis2 and appending an index - to the right equal to the size of the resulting diagonals. - - *Parameters*: - - a : {array_like} - Array from whis the diagonals are taken. - offset : {0, integer}, optional - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to main diagonal. - axis1 : {0, integer}, optional - Axis to be used as the first axis of the 2-d subarrays from which - the diagonals should be taken. Defaults to first axis. 
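The insertion-point semantics of searchsorted, including the left/right sides, in a short sketch:

import numpy as np

a = np.array([1, 2, 3, 4, 5])
np.searchsorted(a, [6, 4, 0])        # array([5, 3, 0]), as in the example above
np.searchsorted(a, 3, side='left')   # 2: first slot where 3 keeps a sorted
np.searchsorted(a, 3, side='right')  # 3: slot just past the existing 3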
- axis2 : {1, integer}, optional - Axis to be used as the second axis of the 2-d subarrays from which - the diagonals should be taken. Defaults to second axis. - - *Returns*: - - array_of_diagonals : array of same type as a - If a is 2-d, a 1-d array containing the diagonal is - returned. If a has larger dimensions, then an array of - diagonals is returned. - - *See Also*: - - `diag` : Matlab workalike for 1-d and 2-d arrays. - - `diagflat` : Create diagonal arrays. - - `trace` : Sum along diagonals. - - *Examples* - - >>> a = arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> a.diagonal() - array([0, 3]) - >>> a.diagonal(1) - array([1]) - - >>> a = arange(8).reshape(2,2,2) - >>> a - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> a.diagonal(0,-2,-1) - array([[0, 3], - [4, 7]]) - - """ - return asarray(a).diagonal(offset, axis1, axis2) - - -def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """Return the sum along diagonals of the array. - - If a is 2-d, returns the sum along the diagonal of self with the given offset, i.e., the - collection of elements of the form a[i,i+offset]. If a has more than two - dimensions, then the axes specified by axis1 and axis2 are used to determine - the 2-d subarray whose trace is returned. The shape of the resulting - array can be determined by removing axis1 and axis2 and appending an index - to the right equal to the size of the resulting diagonals. Arrays of integer - type are summed - - *Parameters*: - - a : {array_like} - Array from whis the diagonals are taken. - offset : {0, integer}, optional - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to main diagonal. - axis1 : {0, integer}, optional - Axis to be used as the first axis of the 2-d subarrays from which - the diagonals should be taken. Defaults to first axis. - axis2 : {1, integer}, optional - Axis to be used as the second axis of the 2-d subarrays from which - the diagonals should be taken. Defaults to second axis. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and a is - of integer type of precision less than the default integer - precision, then the default integer precision is used. Otherwise, - the precision is the same as that of a. - out : {None, array}, optional - Array into which the sum can be placed. It's type is preserved and - it must be of the right shape to hold the output. - - *Returns*: - - sum_along_diagonals : array - If a is 2-d, a 0-d array containing the diagonal is - returned. If a has larger dimensions, then an array of - diagonals is returned. - - *Examples* - - >>> trace(eye(3)) - 3.0 - >>> a = arange(8).reshape((2,2,2)) - >>> trace(a) - array([6, 8]) - - """ - return asarray(a).trace(offset, axis1, axis2, dtype, out) - -def ravel(a, order='C'): - """Return a 1d array containing the elements of a. - - Returns the elements of a as a 1d array. The elements in the new array - are taken in the order specified by the order keyword. The new array is - a view of a if possible, otherwise it is a copy. - - *Parameters*: - - a : {array_like} - - order : {'C','F'}, optional - If order is 'C' the elements are taken in row major order. If order - is 'F' they are taken in column major order. - - *Returns*: - - 1d_array : {array} - - *See Also*: - - `ndarray.flat` : 1d iterator over the array. - - `ndarray.flatten` : 1d array copy of the elements of a in C order. 
- - *Examples* - - >>> x = array([[1,2,3],[4,5,6]]) - >>> x - array([[1, 2, 3], - [4, 5, 6]]) - >>> ravel(x) - array([1, 2, 3, 4, 5, 6]) - - """ - return asarray(a).ravel(order) - - -def nonzero(a): - """Return the indices of the elements of a which are not zero. - - *Parameters*: - - a : {array_like} - - *Returns*: - - tuple_of_arrays : {tuple} - - *Examples* - - >>> eye(3)[nonzero(eye(3))] - array([ 1., 1., 1.]) - >>> nonzero(eye(3)) - (array([0, 1, 2]), array([0, 1, 2])) - >>> eye(3)[nonzero(eye(3))] - array([ 1., 1., 1.]) - - """ - try: - nonzero = a.nonzero - except AttributeError: - res = _wrapit(a, 'nonzero') - else: - res = nonzero() - return res - - -def shape(a): - """Return the shape of a. - - *Parameters*: - - a : {array_like} - Array whose shape is desired. If a is not an array, a conversion is - attempted. - - *Returns*: - - tuple_of_integers : - The elements of the tuple are the length of the corresponding array - dimension. - - *Examples* - - >>> shape(eye(3)) - (3, 3) - >>> shape([[1,2]]) - (1, 2) - - """ - try: - result = a.shape - except AttributeError: - result = asarray(a).shape - return result - - -def compress(condition, a, axis=None, out=None): - """Return a where condition is true. - - Equivalent to a[condition]. - - """ - try: - compress = a.compress - except AttributeError: - return _wrapit(a, 'compress', condition, axis, out) - return compress(condition, axis, out) - - -def clip(a, a_min, a_max): - """Limit the values of a to [a_min, a_max]. Equivalent to - - a[a < a_min] = a_min - a[a > a_max] = a_max - - """ - try: - clip = a.clip - except AttributeError: - return _wrapit(a, 'clip', a_min, a_max) - return clip(a_min, a_max) - - -def sum(a, axis=None, dtype=None, out=None): - """Sum the array over the given axis. - - *Parameters*: - - a : {array_type} - Array containing elements whose sum is desired. If a is not an array, a - conversion is attempted. - axis : {None, integer} - Axis over which the sum is taken. If None is used, then the sum is - over all the array elements. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and the - type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. - out : {None, array}, optional - Array into which the sum can be placed. It's type is preserved and - it must be of the right shape to hold the output. - - *Returns*: - - sum_along_axis : {array, scalar}, see dtype parameter above. - Returns an array whose shape is the same as a with the specified - axis removed. Returns a 0d array when a is 1d or dtype=None. - Returns a reference to the specified output array if specified. - - *See Also*: - - `ndarray.sum` : equivalent method - - *Examples* - - >>> sum([0.5, 1.5]) - 2.0 - >>> sum([0.5, 1.5], dtype=N.int32) - 1 - >>> sum([[0, 1], [0, 5]]) - 6 - >>> sum([[0, 1], [0, 5]], axis=1) - array([1, 5]) - - """ - if isinstance(a, _gentype): - res = _sum_(a) - if out is not None: - out[...] = res - return out - return res - try: - sum = a.sum - except AttributeError: - return _wrapit(a, 'sum', axis, dtype, out) - return sum(axis, dtype, out) - - -def product (a, axis=None, dtype=None, out=None): - """Product of the array elements over the given axis. - - *Parameters*: - - a : {array_like} - Array containing elements whose product is desired. If a is not an array, a - conversion is attempted. 
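clip and sum as described above, in a short sketch (the generator special-case in sum above falls back to the Python builtin sum):

import numpy as np

a = np.arange(10)
np.clip(a, 2, 7)                   # values limited to the closed range [2, 7]
np.sum([[0, 1], [0, 5]])           # 6, over all elements when axis is None
np.sum([[0, 1], [0, 5]], axis=1)   # array([1, 5])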
- axis : {None, integer} - Axis over which the product is taken. If None is used, then the - product is over all the array elements. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If dtype has the value None and - the type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - *Returns*: - - product_along_axis : {array, scalar}, see dtype parameter above. - Returns an array whose shape is the same as a with the specified - axis removed. Returns a 0d array when a is 1d or dtype=None. - Returns a reference to the specified output array if specified. - - *See Also*: - - `ndarray.prod` : equivalent method - - *Examples* - - >>> product([1.,2.]) - 2.0 - >>> product([1.,2.], dtype=int32) - 2 - >>> product([[1.,2.],[3.,4.]]) - 24.0 - >>> product([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - - """ - try: - prod = a.prod - except AttributeError: - return _wrapit(a, 'prod', axis, dtype, out) - return prod(axis, dtype, out) - - -def sometrue (a, axis=None, out=None): - """Perform a logical_or over the given axis. - - *See Also*: - - `ndarray.any` : equivalent method - - """ - try: - any = a.any - except AttributeError: - return _wrapit(a, 'any', axis, out) - return any(axis, out) - - -def alltrue (a, axis=None, out=None): - """Perform a logical_and over the given axis. - - *See Also*: - - `ndarray.all` : equivalent method - - `all` : equivalent function - - """ - try: - all = a.all - except AttributeError: - return _wrapit(a, 'all', axis, out) - return all(axis, out) - - -def any(a,axis=None, out=None): - """Return true if any elements of x are true. - - *See Also*: - - `ndarray.any` : equivalent method - - """ - try: - any = a.any - except AttributeError: - return _wrapit(a, 'any', axis, out) - return any(axis, out) - - -def all(a,axis=None, out=None): - """Return true if all elements of x are true: - - *See Also*: - - `ndarray.all` : equivalent method - - `alltrue` : equivalent function - - """ - try: - all = a.all - except AttributeError: - return _wrapit(a, 'all', axis, out) - return all(axis, out) - - -def cumsum (a, axis=None, dtype=None, out=None): - """Sum the array over the given axis. - - Blah, Blah. - - """ - try: - cumsum = a.cumsum - except AttributeError: - return _wrapit(a, 'cumsum', axis, dtype, out) - return cumsum(axis, dtype, out) - - -def cumproduct (a, axis=None, dtype=None, out=None): - """Return the cumulative product over the given axis. - - Blah, Blah. - - """ - try: - cumprod = a.cumprod - except AttributeError: - return _wrapit(a, 'cumprod', axis, dtype, out) - return cumprod(axis, dtype, out) - - -def ptp(a, axis=None, out=None): - """Return maximum - minimum along the the given dimension. - - Blah, Blah. - - """ - try: - ptp = a.ptp - except AttributeError: - return _wrapit(a, 'ptp', axis, out) - return ptp(axis, out) - - -def amax(a, axis=None, out=None): - """Return the maximum of 'a' along dimension axis. - - Blah, Blah. - - """ - try: - amax = a.max - except AttributeError: - return _wrapit(a, 'max', axis, out) - return amax(axis, out) - - -def amin(a, axis=None, out=None): - """Return the minimum of a along dimension axis. - - Blah, Blah. 
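Nearly every wrapper in this module follows the same delegation pattern: try the ndarray method, and fall back to converting the argument via asarray (with __array_wrap__ handling in _wrapit). A simplified sketch of that pattern, with a made-up helper name:

import numpy as np

def sum_like(a, axis=None):
    # use the object's own method when it has one ...
    try:
        method = a.sum
    except AttributeError:
        # ... otherwise convert first, as _wrapit does
        return np.asarray(a).sum(axis)
    return method(axis)

sum_like([1, 5, 3])         # 9, works on a plain list
sum_like(np.arange(6), 0)   # 15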
- - """ - try: - amin = a.min - except AttributeError: - return _wrapit(a, 'min', axis, out) - return amin(axis, out) - - -def alen(a): - """Return the length of a Python object interpreted as an array - of at least 1 dimension. - - Blah, Blah. - - """ - try: - return len(a) - except TypeError: - return len(array(a,ndmin=1)) - - -def prod(a, axis=None, dtype=None, out=None): - """Return the product of the elements along the given axis. - - Blah, Blah. - - """ - try: - prod = a.prod - except AttributeError: - return _wrapit(a, 'prod', axis, dtype, out) - return prod(axis, dtype, out) - - -def cumprod(a, axis=None, dtype=None, out=None): - """Return the cumulative product of the elements along the given axis. - - Blah, Blah. - - """ - try: - cumprod = a.cumprod - except AttributeError: - return _wrapit(a, 'cumprod', axis, dtype, out) - return cumprod(axis, dtype, out) - - -def ndim(a): - """Return the number of dimensions of a. - - If a is not already an array, a conversion is attempted. Scalars are zero - dimensional. - - *Parameters*: - - a : {array_like} - Array whose number of dimensions are desired. If a is not an array, a - conversion is attempted. - - *Returns*: - - number_of_dimensions : {integer} - Returns the number of dimensions. - - *See Also*: - - `rank` : equivalent function. - - `ndarray.ndim` : equivalent method - - `shape` : dimensions of array - - `ndarray.shape` : dimensions of array - - *Examples* - - >>> ndim([[1,2,3],[4,5,6]]) - 2 - >>> ndim(array([[1,2,3],[4,5,6]])) - 2 - >>> ndim(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def rank(a): - """Return the number of dimensions of a. - - In old Numeric, rank was the term used for the number of dimensions. If a is - not already an array, a conversion is attempted. Scalars are zero - dimensional. - - *Parameters*: - - a : {array_like} - Array whose number of dimensions is desired. If a is not an array, a - conversion is attempted. - - *Returns*: - - number_of_dimensions : {integer} - Returns the number of dimensions. - - *See Also*: - - `ndim` : equivalent function - - `ndarray.ndim` : equivalent method - - `shape` : dimensions of array - - `ndarray.shape` : dimensions of array - - *Examples* - - >>> rank([[1,2,3],[4,5,6]]) - 2 - >>> rank(array([[1,2,3],[4,5,6]])) - 2 - >>> rank(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def size(a, axis=None): - """Return the number of elements along given axis. - - *Parameters*: - - a : {array_like} - Array whose axis size is desired. If a is not an array, a conversion - is attempted. - axis : {None, integer}, optional - Axis along which the elements are counted. None means all elements - in the array. - - *Returns*: - - element_count : {integer} - Count of elements along specified axis. - - *See Also*: - - `shape` : dimensions of array - - `ndarray.shape` : dimensions of array - - `ndarray.size` : number of elements in array - - *Examples* - - >>> a = array([[1,2,3],[4,5,6]]) - >>> size(a) - 6 - >>> size(a,1) - 3 - >>> size(a,0) - 2 - - """ - if axis is None: - try: - return a.size - except AttributeError: - return asarray(a).size - else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] - - -def around(a, decimals=0, out=None): - """Round a to the given number of decimals. - - The real and imaginary parts of complex numbers are rounded separately. The - result of rounding a float is a float so the type must be cast if integers - are desired. 
Nothing is done if the input is an integer array and the - decimals parameter has a value >= 0. - - *Parameters*: - - a : {array_like} - Array containing numbers whose rounded values are desired. If a is - not an array, a conversion is attempted. - decimals : {0, int}, optional - Number of decimal places to round to. When decimals is negative it - specifies the number of positions to the left of the decimal point. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. Numpy rounds floats to floats by default. - - *Returns*: - - rounded_array : {array} - If out=None, returns a new array of the same type as a containing - the rounded values, otherwise a reference to the output array is - returned. - - *See Also*: - - `round_` : equivalent function - - `ndarray.round` : equivalent method - - *Notes* - - Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round - to 0.0, etc. Results may also be surprising due to the inexact - representation of decimal fractions in IEEE floating point and the - errors introduced when scaling by powers of ten. - - *Examples* - - >>> around([.5, 1.5, 2.5, 3.5, 4.5]) - array([ 0., 2., 2., 4., 4.]) - >>> around([1,2,3,11], decimals=1) - array([ 1, 2, 3, 11]) - >>> around([1,2,3,11], decimals=-1) - array([ 0, 0, 0, 10]) - - """ - try: - round = a.round - except AttributeError: - return _wrapit(a, 'round', decimals, out) - return round(decimals, out) - - -def round_(a, decimals=0, out=None): - """Round a to the given number of decimals. - - The real and imaginary parts of complex numbers are rounded separately. The - result of rounding a float is a float so the type must be cast if integers - are desired. Nothing is done if the input is an integer array and the - decimals parameter has a value >= 0. - - *Parameters*: - - a : {array_like} - Array containing numbers whose rounded values are desired. If a is - not an array, a conversion is attempted. - decimals : {0, integer}, optional - Number of decimal places to round to. When decimals is negative it - specifies the number of positions to the left of the decimal point. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - *Returns*: - - rounded_array : {array} - If out=None, returns a new array of the same type as a containing - the rounded values, otherwise a reference to the output array is - returned. - - *See Also*: - - `around` : equivalent function - - `ndarray.round` : equivalent method - - *Notes* - - Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round - to 0.0, etc. Results may also be surprising due to the inexact - representation of decimal fractions in IEEE floating point and the - errors introduced when scaling by powers of ten. - - *Examples* - - >>> round_([.5, 1.5, 2.5, 3.5, 4.5]) - array([ 0., 2., 2., 4., 4.]) - >>> round_([1,2,3,11], decimals=1) - array([ 1, 2, 3, 11]) - >>> round_([1,2,3,11], decimals=-1) - array([ 0, 0, 0, 10]) - - """ - try: - round = a.round - except AttributeError: - return _wrapit(a, 'round', decimals, out) - return round(decimals, out) - - -def mean(a, axis=None, dtype=None, out=None): - """Compute the mean along the specified axis. - - Returns the average of the array elements. The average is taken - over the flattened array by default, otherwise over the specified - axis. 
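The round-half-to-even behaviour noted above, in a short sketch:

import numpy as np

np.around([0.5, 1.5, 2.5, 3.5, 4.5])   # array([ 0.,  2.,  2.,  4.,  4.])
np.around([1, 2, 3, 11], decimals=-1)  # array([ 0,  0,  0, 10])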
The dtype returned for integer type arrays is float - - *Parameters*: - - a : {array_like} - Array containing numbers whose mean is desired. If a is not an - array, a conversion is attempted. - axis : {None, integer}, optional - Axis along which the means are computed. The default is to compute - the standard deviation of the flattened array. - dtype : {None, dtype}, optional - Type to use in computing the means. For arrays of integer type the - default is float32, for arrays of float types it is the same as the - array type. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - *Returns*: - - mean : {array, scalar}, see dtype parameter above - If out=None, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - *See Also*: - - `var` : Variance - - `std` : Standard deviation - - *Notes* - - The mean is the sum of the elements along the axis divided by the number - of elements. - - *Examples* - - >>> a = array([[1,2],[3,4]]) - >>> mean(a) - 2.5 - >>> mean(a,0) - array([ 2., 3.]) - >>> mean(a,1) - array([ 1.5, 3.5]) - - """ - try: - mean = a.mean - except AttributeError: - return _wrapit(a, 'mean', axis, dtype, out) - return mean(axis, dtype, out) - - -def std(a, axis=None, dtype=None, out=None): - """Compute the standard deviation along the specified axis. - - Returns the standard deviation of the array elements, a measure of the - spread of a distribution. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - *Parameters*: - - a : {array_like} - Array containing numbers whose standard deviation is desired. If a - is not an array, a conversion is attempted. - axis : {None, integer}, optional - Axis along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. - dtype : {None, dtype}, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float32, for arrays of float types it is - the same as the array type. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - *Returns*: - - standard_deviation : {array, scalar}, see dtype parameter above. - If out=None, returns a new array containing the standard deviation, - otherwise a reference to the output array is returned. - - *See Also*: - - `var` : Variance - - `mean` : Average - - *Notes* - - The standard deviation is the square root of the average of the squared - deviations from the mean, i.e. var = sqrt(mean((x - x.mean())**2)). The - computed standard deviation is biased, i.e., the mean is computed by - dividing by the number of elements, N, rather than by N-1. - - *Examples* - - >>> a = array([[1,2],[3,4]]) - >>> std(a) - 1.1180339887498949 - >>> std(a,0) - array([ 1., 1.]) - >>> std(a,1) - array([ 0.5, 0.5]) - - """ - try: - std = a.std - except AttributeError: - return _wrapit(a, 'std', axis, dtype, out) - return std(axis, dtype, out) - - -def var(a, axis=None, dtype=None, out=None): - """Compute the variance along the specified axis. - - Returns the variance of the array elements, a measure of the spread of a - distribution. The variance is computed for the flattened array by default, - otherwise over the specified axis. 
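As the notes above state, std and var divide by N rather than N-1. Later NumPy releases expose this choice as the ddof argument; a quick comparison:

import numpy as np

a = np.array([[1, 2], [3, 4]])
np.std(a)           # 1.1180..., divides by N (biased, as documented above)
np.std(a, ddof=1)   # 1.2909..., divides by N-1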
- - *Parameters*: - - a : {array_like} - Array containing numbers whose variance is desired. If a is not an - array, a conversion is attempted. - axis : {None, integer}, optional - Axis along which the variance is computed. The default is to compute - the variance of the flattened array. - dtype : {None, dtype}, optional - Type to use in computing the variance. For arrays of integer type - the default is float32, for arrays of float types it is the same as - the array type. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - *Returns*: - - variance : {array, scalar}, see dtype parameter above - If out=None, returns a new array containing the variance, otherwise - a reference to the output array is returned. - - *See Also*: - - `std` : Standard deviation - - `mean` : Average - - *Notes* - - The variance is the average of the squared deviations from the mean, - i.e. var = mean((x - x.mean())**2). The computed variance is biased, - i.e., the mean is computed by dividing by the number of elements, N, - rather than by N-1. - - *Examples* - - >>> a = array([[1,2],[3,4]]) - >>> var(a) - 1.25 - >>> var(a,0) - array([ 1., 1.]) - >>> var(a,1) - array([ 0.25, 0.25]) - - """ - try: - var = a.var - except AttributeError: - return _wrapit(a, 'var', axis, dtype, out) - return var(axis, dtype, out) diff --git a/numpy/core/include/numpy/arrayobject.h b/numpy/core/include/numpy/arrayobject.h deleted file mode 100644 index f64d2a6c3..000000000 --- a/numpy/core/include/numpy/arrayobject.h +++ /dev/null @@ -1,21 +0,0 @@ - -/* This expects the following variables to be defined (besides - the usual ones from pyconfig.h - - SIZEOF_LONG_DOUBLE -- sizeof(long double) or sizeof(double) if no - long double is present on platform. 
- CHAR_BIT -- number of bits in a char (usually 8) - (should be in limits.h) - -*/ - -#ifndef Py_ARRAYOBJECT_H -#define Py_ARRAYOBJECT_H -#include "ndarrayobject.h" -#ifdef NPY_NO_PREFIX -#include "noprefix.h" -#endif - -#include "npy_interrupt.h" - -#endif diff --git a/numpy/core/include/numpy/arrayscalars.h b/numpy/core/include/numpy/arrayscalars.h deleted file mode 100644 index 4c1658f4c..000000000 --- a/numpy/core/include/numpy/arrayscalars.h +++ /dev/null @@ -1,152 +0,0 @@ -#ifndef _MULTIARRAYMODULE -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; -#endif - - -typedef struct { - PyObject_HEAD - signed char obval; -} PyByteScalarObject; - - -typedef struct { - PyObject_HEAD - short obval; -} PyShortScalarObject; - - -typedef struct { - PyObject_HEAD - int obval; -} PyIntScalarObject; - - -typedef struct { - PyObject_HEAD - long obval; -} PyLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longlong obval; -} PyLongLongScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned char obval; -} PyUByteScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned short obval; -} PyUShortScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned int obval; -} PyUIntScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned long obval; -} PyULongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_ulonglong obval; -} PyULongLongScalarObject; - - -typedef struct { - PyObject_HEAD - float obval; -} PyFloatScalarObject; - - -typedef struct { - PyObject_HEAD - double obval; -} PyDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longdouble obval; -} PyLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cfloat obval; -} PyCFloatScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cdouble obval; -} PyCDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_clongdouble obval; -} PyCLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - PyObject * obval; -} PyObjectScalarObject; - - -typedef struct { - PyObject_HEAD - char obval; -} PyScalarObject; - -#define PyStringScalarObject PyStringObject -#define PyUnicodeScalarObject PyUnicodeObject - -typedef struct { - PyObject_VAR_HEAD - char *obval; - PyArray_Descr *descr; - int flags; - PyObject *base; -} PyVoidScalarObject; - -/* Macros - PyScalarObject - PyArrType_Type - are defined in ndarrayobject.h -*/ - -#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) -#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) -#define PyArrayScalar_FromLong(i) \ - ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) -#define PyArrayScalar_RETURN_FALSE \ - return Py_INCREF(PyArrayScalar_False), \ - PyArrayScalar_False -#define PyArrayScalar_RETURN_TRUE \ - return Py_INCREF(PyArrayScalar_True), \ - PyArrayScalar_True - -#define PyArrayScalar_New(cls) \ - Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) -#define PyArrayScalar_VAL(obj, cls) \ - ((Py##cls##ScalarObject *)obj)->obval -#define PyArrayScalar_ASSIGN(obj, cls, val) \ - PyArrayScalar_VAL(obj, cls) = val diff --git a/numpy/core/include/numpy/fenv/fenv.c b/numpy/core/include/numpy/fenv/fenv.c deleted file mode 100644 index 169642ce1..000000000 --- a/numpy/core/include/numpy/fenv/fenv.c +++ /dev/null @@ -1,38 +0,0 @@ -/*- - * Copyright (c) 2004 David Schultz - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#include -#include "fenv.h" - -const fenv_t npy__fe_dfl_env = { - 0xffff0000, - 0xffff0000, - 0xffffffff, - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff } -}; diff --git a/numpy/core/include/numpy/fenv/fenv.h b/numpy/core/include/numpy/fenv/fenv.h deleted file mode 100644 index a1371770f..000000000 --- a/numpy/core/include/numpy/fenv/fenv.h +++ /dev/null @@ -1,224 +0,0 @@ -/*- - * Copyright (c) 2004 David Schultz - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD$ - */ - -#ifndef _FENV_H_ -#define _FENV_H_ - -#include -#include - -typedef struct { - __uint32_t __control; - __uint32_t __status; - __uint32_t __tag; - char __other[16]; -} fenv_t; - -typedef __uint16_t fexcept_t; - -/* Exception flags */ -#define FE_INVALID 0x01 -#define FE_DENORMAL 0x02 -#define FE_DIVBYZERO 0x04 -#define FE_OVERFLOW 0x08 -#define FE_UNDERFLOW 0x10 -#define FE_INEXACT 0x20 -#define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \ - FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW) - -/* Rounding modes */ -#define FE_TONEAREST 0x0000 -#define FE_DOWNWARD 0x0400 -#define FE_UPWARD 0x0800 -#define FE_TOWARDZERO 0x0c00 -#define _ROUND_MASK (FE_TONEAREST | FE_DOWNWARD | \ - FE_UPWARD | FE_TOWARDZERO) - -__BEGIN_DECLS - -/* Default floating-point environment */ -extern const fenv_t npy__fe_dfl_env; -#define FE_DFL_ENV (&npy__fe_dfl_env) - -#define __fldcw(__cw) __asm __volatile("fldcw %0" : : "m" (__cw)) -#define __fldenv(__env) __asm __volatile("fldenv %0" : : "m" (__env)) -#define __fnclex() __asm __volatile("fnclex") -#define __fnstenv(__env) __asm("fnstenv %0" : "=m" (*(__env))) -#define __fnstcw(__cw) __asm("fnstcw %0" : "=m" (*(__cw))) -#define __fnstsw(__sw) __asm("fnstsw %0" : "=am" (*(__sw))) -#define __fwait() __asm __volatile("fwait") - -static __inline int -feclearexcept(int __excepts) -{ - fenv_t __env; - - if (__excepts == FE_ALL_EXCEPT) { - __fnclex(); - } else { - __fnstenv(&__env); - __env.__status &= ~__excepts; - __fldenv(__env); - } - return (0); -} - -static __inline int -fegetexceptflag(fexcept_t *__flagp, int __excepts) -{ - int __status; - - __fnstsw(&__status); - *__flagp = __status & __excepts; - return (0); -} - -static __inline int -fesetexceptflag(const fexcept_t *__flagp, int __excepts) -{ - fenv_t __env; - - __fnstenv(&__env); - __env.__status &= ~__excepts; - __env.__status |= *__flagp & __excepts; - __fldenv(__env); - return (0); -} - -static __inline int -feraiseexcept(int __excepts) -{ - fexcept_t __ex = __excepts; - - fesetexceptflag(&__ex, __excepts); - __fwait(); - return (0); -} - -static __inline int -fetestexcept(int __excepts) -{ - int __status; - - __fnstsw(&__status); - return (__status & __excepts); -} - -static __inline int -fegetround(void) -{ - int __control; - - __fnstcw(&__control); - return (__control & _ROUND_MASK); -} - -static __inline int -fesetround(int __round) -{ - int __control; - - if (__round & ~_ROUND_MASK) - return (-1); - __fnstcw(&__control); - __control &= ~_ROUND_MASK; - __control |= __round; - __fldcw(__control); - return (0); -} - -static __inline int -fegetenv(fenv_t *__envp) -{ - int __control; - - /* - * fnstenv masks all exceptions, so we need to save and - * restore the control word to avoid this side effect. 
- */ - __fnstcw(&__control); - __fnstenv(__envp); - __fldcw(__control); - return (0); -} - -static __inline int -feholdexcept(fenv_t *__envp) -{ - - __fnstenv(__envp); - __fnclex(); - return (0); -} - -static __inline int -fesetenv(const fenv_t *__envp) -{ - - __fldenv(*__envp); - return (0); -} - -static __inline int -feupdateenv(const fenv_t *__envp) -{ - int __status; - - __fnstsw(&__status); - __fldenv(*__envp); - feraiseexcept(__status & FE_ALL_EXCEPT); - return (0); -} - -#if __BSD_VISIBLE - -static __inline int -fesetmask(int __mask) -{ - int __control; - - __fnstcw(&__control); - __mask = (__control | FE_ALL_EXCEPT) & ~__mask; - __fldcw(__mask); - return (~__control & FE_ALL_EXCEPT); -} - -static __inline int -fegetmask(void) -{ - int __control; - - __fnstcw(&__control); - return (~__control & FE_ALL_EXCEPT); -} - -#endif /* __BSD_VISIBLE */ - -__END_DECLS - -#endif /* !_FENV_H_ */ diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h deleted file mode 100644 index 62986a9eb..000000000 --- a/numpy/core/include/numpy/ndarrayobject.h +++ /dev/null @@ -1,1998 +0,0 @@ -/* DON'T INCLUDE THIS DIRECTLY. - */ - -#ifndef NPY_NDARRAYOBJECT_H -#define NPY_NDARRAYOBJECT_H -#ifdef __cplusplus -#define CONFUSE_EMACS { -#define CONFUSE_EMACS2 } -extern "C" CONFUSE_EMACS -#undef CONFUSE_EMACS -#undef CONFUSE_EMACS2 -/* ... otherwise a semi-smart identer (like emacs) tries to indent - everything when you're typing */ -#endif -/* This is auto-generated by the installer */ -#include "config.h" - -/* There are several places in the code where an array of dimensions is - * allocated statically. This is the size of that static allocation. - * - * The array creation itself could have arbitrary dimensions but - * all the places where static allocation is used would need to - * be changed to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - - /* Helpful to distinguish what is installed */ -#define NPY_VERSION 0x01000009 - - /* Some platforms don't define bool, long long, or long double. - Handle that here. - */ - -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else - /* #define LONGLONG_FMT "lld" Another possible variant - #define ULONGLONG_FMT "llu" - - #define LONGLONG_FMT "qd" -- BSD perhaps? 
- #define ULONGLONG_FMT "qu" - */ -# define NPY_LONGLONG_FMT "Ld" -# define NPY_ULONGLONG_FMT "Lu" -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - -#if SIZEOF_LONG_DOUBLE==SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - -/* These are for completeness */ -typedef float npy_float; -typedef double npy_double; -typedef short npy_short; -typedef int npy_int; -typedef long npy_long; - -typedef struct { float real, imag; } npy_cfloat; -typedef struct { double real, imag; } npy_cdouble; -typedef struct {npy_longdouble real, imag;} npy_clongdouble; - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256 /* leave room for characters */ -}; - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there */ -#define NPY_NUM_FLOATTYPE 3 - -/* We need to match npy_intp to a signed integer of the same size as - a pointer variable. 
npy_uintp to the equivalent unsigned integer -*/ - - -/* These characters correspond to the array type and the - struct module */ - -/* except 'p' -- signed integer for pointer type */ - -enum NPY_TYPECHAR { NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_CHARLTR = 'c', - - /* No Descriptor, just a define -- this let's - Python users specify an array of integers - large enough to hold a pointer on the platform*/ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1, -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR, -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -typedef enum { - NPY_ANYORDER=-1, - NPY_CORDER=0, - NPY_FORTRANORDER=1 -} NPY_ORDER; - - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - - /* Define bit-width array types and typedefs */ - -#define NPY_MAX_INT8 127 -#define NPY_MIN_INT8 -128 -#define NPY_MAX_UINT8 255 -#define NPY_MAX_INT16 32767 -#define NPY_MIN_INT16 -32768 -#define NPY_MAX_UINT16 65535 -#define NPY_MAX_INT32 2147483647 -#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) -#define NPY_MAX_UINT32 4294967295U -#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) -#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) -#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) - - /* Need to find the number of bits for each type and - make definitions accordingly. - - C states that sizeof(char) == 1 by definition - - So, just using the sizeof keyword won't help. - - It also looks like Python itself uses sizeof(char) quite a - bit, which by definition should be 1 all the time. 
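
For illustration, a minimal standalone sketch of the sizeof() * CHAR_BIT computation that the NPY_BITSOF_* macros below rely on; it needs only <limits.h> and <stdio.h>:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* The same quantity the NPY_BITSOF_* macros compute from the
       configure-detected SIZEOF_* constants; a typical LP64 platform
       prints 16, 32 and 64. */
    printf("short: %u bits\n", (unsigned)(sizeof(short) * CHAR_BIT));
    printf("int  : %u bits\n", (unsigned)(sizeof(int)   * CHAR_BIT));
    printf("long : %u bits\n", (unsigned)(sizeof(long)  * CHAR_BIT));
    return 0;
}
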
- - Idea: Make Use of CHAR_BIT which should tell us how many - BITS per CHARACTER - */ - - /* Include platform definitions -- These are in the C89/90 standard */ -#include -#define NPY_MAX_BYTE SCHAR_MAX -#define NPY_MIN_BYTE SCHAR_MIN -#define NPY_MAX_UBYTE UCHAR_MAX -#define NPY_MAX_SHORT SHRT_MAX -#define NPY_MIN_SHORT SHRT_MIN -#define NPY_MAX_USHORT USHRT_MAX -#define NPY_MAX_INT INT_MAX -#ifndef INT_MIN -#define INT_MIN (-INT_MAX - 1) -#endif -#define NPY_MIN_INT INT_MIN -#define NPY_MAX_UINT UINT_MAX -#define NPY_MAX_LONG LONG_MAX -#define NPY_MIN_LONG LONG_MIN -#define NPY_MAX_ULONG ULONG_MAX - - -#define NPY_SIZEOF_LONG SIZEOF_LONG -#define NPY_SIZEOF_INT SIZEOF_INT -#define NPY_SIZEOF_SHORT SIZEOF_SHORT -#define NPY_SIZEOF_FLOAT SIZEOF_FLOAT -#define NPY_SIZEOF_DOUBLE SIZEOF_DOUBLE -#define NPY_SIZEOF_LONGDOUBLE SIZEOF_LONG_DOUBLE -#define NPY_SIZEOF_LONGLONG SIZEOF_LONG_LONG -#define NPY_BITSOF_BOOL (sizeof(npy_bool)*CHAR_BIT) -#define NPY_BITSOF_CHAR CHAR_BIT -#define NPY_BITSOF_SHORT (SIZEOF_SHORT*CHAR_BIT) -#define NPY_BITSOF_INT (SIZEOF_INT*CHAR_BIT) -#define NPY_BITSOF_LONG (SIZEOF_LONG*CHAR_BIT) -#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG*CHAR_BIT) -#define NPY_BITSOF_FLOAT (SIZEOF_FLOAT*CHAR_BIT) -#define NPY_BITSOF_DOUBLE (SIZEOF_DOUBLE*CHAR_BIT) -#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE*CHAR_BIT) - -#if NPY_BITSOF_LONG == 8 -#define NPY_INT8 NPY_LONG -#define NPY_UINT8 NPY_ULONG - typedef long npy_int8; - typedef unsigned long npy_uint8; -#define PyInt8ScalarObject PyLongScalarObject -#define PyInt8ArrType_Type PyLongArrType_Type -#define PyUInt8ScalarObject PyULongScalarObject -#define PyUInt8ArrType_Type PyULongArrType_Type -#define NPY_INT8_FMT NPY_LONG_FMT -#define NPY_UINT8_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 16 -#define NPY_INT16 NPY_LONG -#define NPY_UINT16 NPY_ULONG - typedef long npy_int16; - typedef unsigned long npy_uint16; -#define PyInt16ScalarObject PyLongScalarObject -#define PyInt16ArrType_Type PyLongArrType_Type -#define PyUInt16ScalarObject PyULongScalarObject -#define PyUInt16ArrType_Type PyULongArrType_Type -#define NPY_INT16_FMT NPY_LONG_FMT -#define NPY_UINT16_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 32 -#define NPY_INT32 NPY_LONG -#define NPY_UINT32 NPY_ULONG - typedef long npy_int32; - typedef unsigned long npy_uint32; - typedef unsigned long npy_ucs4; -#define PyInt32ScalarObject PyLongScalarObject -#define PyInt32ArrType_Type PyLongArrType_Type -#define PyUInt32ScalarObject PyULongScalarObject -#define PyUInt32ArrType_Type PyULongArrType_Type -#define NPY_INT32_FMT NPY_LONG_FMT -#define NPY_UINT32_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 64 -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG - typedef long npy_int64; - typedef unsigned long npy_uint64; -#define PyInt64ScalarObject PyLongScalarObject -#define PyInt64ArrType_Type PyLongArrType_Type -#define PyUInt64ScalarObject PyULongScalarObject -#define PyUInt64ArrType_Type PyULongArrType_Type -#define NPY_INT64_FMT NPY_LONG_FMT -#define NPY_UINT64_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG -#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT -#endif - -#if NPY_BITSOF_LONGLONG == 8 -# ifndef NPY_INT8 -# define 
NPY_INT8 NPY_LONGLONG -# define NPY_UINT8 NPY_ULONGLONG - typedef npy_longlong npy_int8; - typedef npy_ulonglong npy_uint8; -# define PyInt8ScalarObject PyLongLongScalarObject -# define PyInt8ArrType_Type PyLongLongArrType_Type -# define PyUInt8ScalarObject PyULongLongScalarObject -# define PyUInt8ArrType_Type PyULongLongArrType_Type -#define NPY_INT8_FMT NPY_LONGLONG_FMT -#define NPY_UINT8_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT8 -# define NPY_MIN_LONGLONG NPY_MIN_INT8 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 -#elif NPY_BITSOF_LONGLONG == 16 -# ifndef NPY_INT16 -# define NPY_INT16 NPY_LONGLONG -# define NPY_UINT16 NPY_ULONGLONG - typedef npy_longlong npy_int16; - typedef npy_ulonglong npy_uint16; -# define PyInt16ScalarObject PyLongLongScalarObject -# define PyInt16ArrType_Type PyLongLongArrType_Type -# define PyUInt16ScalarObject PyULongLongScalarObject -# define PyUInt16ArrType_Type PyULongLongArrType_Type -#define NPY_INT16_FMT NPY_LONGLONG_FMT -#define NPY_UINT16_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT16 -# define NPY_MIN_LONGLONG NPY_MIN_INT16 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 -#elif NPY_BITSOF_LONGLONG == 32 -# ifndef NPY_INT32 -# define NPY_INT32 NPY_LONGLONG -# define NPY_UINT32 NPY_ULONGLONG - typedef npy_longlong npy_int32; - typedef npy_ulonglong npy_uint32; - typedef npy_ulonglong npy_ucs4; -# define PyInt32ScalarObject PyLongLongScalarObject -# define PyInt32ArrType_Type PyLongLongArrType_Type -# define PyUInt32ScalarObject PyULongLongScalarObject -# define PyUInt32ArrType_Type PyULongLongArrType_Type -#define NPY_INT32_FMT NPY_LONGLONG_FMT -#define NPY_UINT32_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT32 -# define NPY_MIN_LONGLONG NPY_MIN_INT32 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 -#elif NPY_BITSOF_LONGLONG == 64 -# ifndef NPY_INT64 -# define NPY_INT64 NPY_LONGLONG -# define NPY_UINT64 NPY_ULONGLONG - typedef npy_longlong npy_int64; - typedef npy_ulonglong npy_uint64; -# define PyInt64ScalarObject PyLongLongScalarObject -# define PyInt64ArrType_Type PyLongLongArrType_Type -# define PyUInt64ScalarObject PyULongLongScalarObject -# define PyUInt64ArrType_Type PyULongLongArrType_Type -#define NPY_INT64_FMT NPY_LONGLONG_FMT -#define NPY_UINT64_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT64 -# define NPY_MIN_LONGLONG NPY_MIN_INT64 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT 
NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 -#endif - -#if NPY_BITSOF_INT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_INT -#define NPY_UINT8 NPY_UINT - typedef int npy_int8; - typedef unsigned int npy_uint8; -# define PyInt8ScalarObject PyIntScalarObject -# define PyInt8ArrType_Type PyIntArrType_Type -# define PyUInt8ScalarObject PyUIntScalarObject -# define PyUInt8ArrType_Type PyUIntArrType_Type -#define NPY_INT8_FMT NPY_INT_FMT -#define NPY_UINT8_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_INT -#define NPY_UINT16 NPY_UINT - typedef int npy_int16; - typedef unsigned int npy_uint16; -# define PyInt16ScalarObject PyIntScalarObject -# define PyInt16ArrType_Type PyIntArrType_Type -# define PyUInt16ScalarObject PyIntUScalarObject -# define PyUInt16ArrType_Type PyIntUArrType_Type -#define NPY_INT16_FMT NPY_INT_FMT -#define NPY_UINT16_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT - typedef int npy_int32; - typedef unsigned int npy_uint32; - typedef unsigned int npy_ucs4; -# define PyInt32ScalarObject PyIntScalarObject -# define PyInt32ArrType_Type PyIntArrType_Type -# define PyUInt32ScalarObject PyUIntScalarObject -# define PyUInt32ArrType_Type PyUIntArrType_Type -#define NPY_INT32_FMT NPY_INT_FMT -#define NPY_UINT32_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_INT -#define NPY_UINT64 NPY_UINT - typedef int npy_int64; - typedef unsigned int npy_uint64; -# define PyInt64ScalarObject PyIntScalarObject -# define PyInt64ArrType_Type PyIntArrType_Type -# define PyUInt64ScalarObject PyUIntScalarObject -# define PyUInt64ArrType_Type PyUIntArrType_Type -#define NPY_INT64_FMT NPY_INT_FMT -#define NPY_UINT64_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT -#endif -#endif - -#if NPY_BITSOF_SHORT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_SHORT -#define NPY_UINT8 NPY_USHORT - typedef short npy_int8; - typedef unsigned short npy_uint8; -# define PyInt8ScalarObject PyShortScalarObject -# define PyInt8ArrType_Type PyShortArrType_Type -# define PyUInt8ScalarObject PyUShortScalarObject -# define PyUInt8ArrType_Type PyUShortArrType_Type -#define NPY_INT8_FMT NPY_SHORT_FMT -#define NPY_UINT8_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT - typedef short npy_int16; - typedef unsigned short npy_uint16; -# define PyInt16ScalarObject PyShortScalarObject -# define PyInt16ArrType_Type PyShortArrType_Type -# define PyUInt16ScalarObject PyUShortScalarObject -# define PyUInt16ArrType_Type PyUShortArrType_Type -#define NPY_INT16_FMT NPY_SHORT_FMT -#define NPY_UINT16_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_SHORT -#define NPY_UINT32 NPY_USHORT - typedef short npy_int32; - typedef unsigned short npy_uint32; - typedef unsigned short npy_ucs4; -# define PyInt32ScalarObject 
PyShortScalarObject -# define PyInt32ArrType_Type PyShortArrType_Type -# define PyUInt32ScalarObject PyUShortScalarObject -# define PyUInt32ArrType_Type PyUShortArrType_Type -#define NPY_INT32_FMT NPY_SHORT_FMT -#define NPY_UINT32_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_SHORT -#define NPY_UINT64 NPY_USHORT - typedef short npy_int64; - typedef unsigned short npy_uint64; -# define PyInt64ScalarObject PyShortScalarObject -# define PyInt64ArrType_Type PyShortArrType_Type -# define PyUInt64ScalarObject PyUShortScalarObject -# define PyUInt64ArrType_Type PyUShortArrType_Type -#define NPY_INT64_FMT NPY_SHORT_FMT -#define NPY_UINT64_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif -#endif - - -#if NPY_BITSOF_CHAR == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE - typedef signed char npy_int8; - typedef unsigned char npy_uint8; -# define PyInt8ScalarObject PyByteScalarObject -# define PyInt8ArrType_Type PyByteArrType_Type -# define PyUInt8ScalarObject PyUByteScalarObject -# define PyUInt8ArrType_Type PyUByteArrType_Type -#define NPY_INT8_FMT NPY_BYTE_FMT -#define NPY_UINT8_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_BYTE -#define NPY_UINT16 NPY_UBYTE - typedef signed char npy_int16; - typedef unsigned char npy_uint16; -# define PyInt16ScalarObject PyByteScalarObject -# define PyInt16ArrType_Type PyByteArrType_Type -# define PyUInt16ScalarObject PyUByteScalarObject -# define PyUInt16ArrType_Type PyUByteArrType_Type -#define NPY_INT16_FMT NPY_BYTE_FMT -#define NPY_UINT16_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_BYTE -#define NPY_UINT32 NPY_UBYTE - typedef signed char npy_int32; - typedef unsigned char npy_uint32; - typedef unsigned char npy_ucs4; -# define PyInt32ScalarObject PyByteScalarObject -# define PyInt32ArrType_Type PyByteArrType_Type -# define PyUInt32ScalarObject PyUByteScalarObject -# define PyUInt32ArrType_Type PyUByteArrType_Type -#define NPY_INT32_FMT NPY_BYTE_FMT -#define NPY_UINT32_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_BYTE -#define NPY_UINT64 NPY_UBYTE - typedef signed char npy_int64; - typedef unsigned char npy_uint64; -# define PyInt64ScalarObject PyByteScalarObject -# define PyInt64ArrType_Type PyByteArrType_Type -# define PyUInt64ScalarObject PyUByteScalarObject -# define PyUInt64ArrType_Type PyUByteArrType_Type -#define NPY_INT64_FMT NPY_BYTE_FMT -#define NPY_UINT64_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif -#endif - - - -#if 
NPY_BITSOF_DOUBLE == 16 -#ifndef NPY_FLOAT16 -#define NPY_FLOAT16 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CDOUBLE - typedef double npy_float16; - typedef npy_cdouble npy_complex32; -# define PyFloat16ScalarObject PyDoubleScalarObject -# define PyComplex32ScalarObject PyCDoubleScalarObject -# define PyFloat16ArrType_Type PyDoubleArrType_Type -# define PyComplex32ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT16_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX32_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_DOUBLE -#define NPY_COMPLEX64 NPY_CDOUBLE - typedef double npy_float32; - typedef npy_cdouble npy_complex64; -# define PyFloat32ScalarObject PyDoubleScalarObject -# define PyComplex64ScalarObject PyCDoubleScalarObject -# define PyFloat32ArrType_Type PyDoubleArrType_Type -# define PyComplex64ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX128 NPY_CDOUBLE - typedef double npy_float64; - typedef npy_cdouble npy_complex128; -# define PyFloat64ScalarObject PyDoubleScalarObject -# define PyComplex128ScalarObject PyCDoubleScalarObject -# define PyFloat64ArrType_Type PyDoubleArrType_Type -# define PyComplex128ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_DOUBLE -#define NPY_COMPLEX160 NPY_CDOUBLE - typedef double npy_float80; - typedef npy_cdouble npy_complex160; -# define PyFloat80ScalarObject PyDoubleScalarObject -# define PyComplex160ScalarObject PyCDoubleScalarObject -# define PyFloat80ArrType_Type PyDoubleArrType_Type -# define PyComplex160ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_DOUBLE -#define NPY_COMPLEX192 NPY_CDOUBLE - typedef double npy_float96; - typedef npy_cdouble npy_complex192; -# define PyFloat96ScalarObject PyDoubleScalarObject -# define PyComplex192ScalarObject PyCDoubleScalarObject -# define PyFloat96ArrType_Type PyDoubleArrType_Type -# define PyComplex192ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_DOUBLE -#define NPY_COMPLEX256 NPY_CDOUBLE - typedef double npy_float128; - typedef npy_cdouble npy_complex256; -# define PyFloat128ScalarObject PyDoubleScalarObject -# define PyComplex256ScalarObject PyCDoubleScalarObject -# define PyFloat128ArrType_Type PyDoubleArrType_Type -# define PyComplex256ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT -#endif -#endif - - - -#if NPY_BITSOF_FLOAT == 16 -#ifndef NPY_FLOAT16 -#define NPY_FLOAT16 NPY_FLOAT -#define NPY_COMPLEX32 NPY_CFLOAT - typedef float npy_float16; - typedef npy_cfloat npy_complex32; -# define PyFloat16ScalarObject PyFloatScalarObject -# define PyComplex32ScalarObject PyCFloatScalarObject -# define PyFloat16ArrType_Type PyFloatArrType_Type -# define PyComplex32ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT16_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX32_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 32 -#ifndef NPY_FLOAT32 -#define 
NPY_FLOAT32 NPY_FLOAT -#define NPY_COMPLEX64 NPY_CFLOAT - typedef float npy_float32; - typedef npy_cfloat npy_complex64; -# define PyFloat32ScalarObject PyFloatScalarObject -# define PyComplex64ScalarObject PyCFloatScalarObject -# define PyFloat32ArrType_Type PyFloatArrType_Type -# define PyComplex64ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT32_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_FLOAT -#define NPY_COMPLEX128 NPY_CFLOAT - typedef float npy_float64; - typedef npy_cfloat npy_complex128; -# define PyFloat64ScalarObject PyFloatScalarObject -# define PyComplex128ScalarObject PyCFloatScalarObject -# define PyFloat64ArrType_Type PyFloatArrType_Type -# define PyComplex128ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT64_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_FLOAT -#define NPY_COMPLEX160 NPY_CFLOAT - typedef float npy_float80; - typedef npy_cfloat npy_complex160; -# define PyFloat80ScalarObject PyFloatScalarObject -# define PyComplex160ScalarObject PyCFloatScalarObject -# define PyFloat80ArrType_Type PyFloatArrType_Type -# define PyComplex160ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT80_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_FLOAT -#define NPY_COMPLEX192 NPY_CFLOAT - typedef float npy_float96; - typedef npy_cfloat npy_complex192; -# define PyFloat96ScalarObject PyFloatScalarObject -# define PyComplex192ScalarObject PyCFloatScalarObject -# define PyFloat96ArrType_Type PyFloatArrType_Type -# define PyComplex192ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT96_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_FLOAT -#define NPY_COMPLEX256 NPY_CFLOAT - typedef float npy_float128; - typedef npy_cfloat npy_complex256; -# define PyFloat128ScalarObject PyFloatScalarObject -# define PyComplex256ScalarObject PyCFloatScalarObject -# define PyFloat128ArrType_Type PyFloatArrType_Type -# define PyComplex256ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT128_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT -#endif -#endif - - -#if NPY_BITSOF_LONGDOUBLE == 16 -#ifndef NPY_FLOAT16 -#define NPY_FLOAT16 NPY_LONGDOUBLE -#define NPY_COMPLEX32 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float16; - typedef npy_clongdouble npy_complex32; -# define PyFloat16ScalarObject PyLongDoubleScalarObject -# define PyComplex32ScalarObject PyCLongDoubleScalarObject -# define PyFloat16ArrType_Type PyLongDoubleArrType_Type -# define PyComplex32ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT16_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX32_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_LONGDOUBLE -#define NPY_COMPLEX64 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float32; - typedef npy_clongdouble npy_complex64; -# define PyFloat32ScalarObject PyLongDoubleScalarObject -# define PyComplex64ScalarObject PyCLongDoubleScalarObject -# define PyFloat32ArrType_Type PyLongDoubleArrType_Type -# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 64 -#ifndef NPY_FLOAT64 -#define 
NPY_FLOAT64 NPY_LONGDOUBLE -#define NPY_COMPLEX128 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float64; - typedef npy_clongdouble npy_complex128; -# define PyFloat64ScalarObject PyLongDoubleScalarObject -# define PyComplex128ScalarObject PyCLongDoubleScalarObject -# define PyFloat64ArrType_Type PyLongDoubleArrType_Type -# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_LONGDOUBLE -#define NPY_COMPLEX160 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float80; - typedef npy_clongdouble npy_complex160; -# define PyFloat80ScalarObject PyLongDoubleScalarObject -# define PyComplex160ScalarObject PyCLongDoubleScalarObject -# define PyFloat80ArrType_Type PyLongDoubleArrType_Type -# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_LONGDOUBLE -#define NPY_COMPLEX192 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float96; - typedef npy_clongdouble npy_complex192; -# define PyFloat96ScalarObject PyLongDoubleScalarObject -# define PyComplex192ScalarObject PyCLongDoubleScalarObject -# define PyFloat96ArrType_Type PyLongDoubleArrType_Type -# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_LONGDOUBLE -#define NPY_COMPLEX256 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float128; - typedef npy_clongdouble npy_complex256; -# define PyFloat128ScalarObject PyLongDoubleScalarObject -# define PyComplex256ScalarObject PyCLongDoubleScalarObject -# define PyFloat128ArrType_Type PyLongDoubleArrType_Type -# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT -#endif - -/* End of typedefs for numarray style bit-width names */ - -/* This is to typedef npy_intp to the appropriate pointer size for this - * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. 
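
For illustration, a small sketch of why a pointer-sized signed integer is used for shapes and strides: the npy_intp typedef that follows can index any array that fits in the address space. The helper name is hypothetical:

/* Total element count from a dims vector, in the spirit of
   PyArray_MultiplyList; npy_intp is wide enough for any in-memory array. */
static npy_intp count_elements(int nd, const npy_intp *dims)
{
    npy_intp n = 1;
    int i;

    for (i = 0; i < nd; i++) {
        n *= dims[i];
    }
    return n;
}
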
*/ -typedef Py_intptr_t npy_intp; -typedef Py_uintptr_t npy_uintp; -#define NPY_SIZEOF_INTP SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_UINTP SIZEOF_PY_INTPTR_T - -#ifdef constchar -#undef constchar -#endif - -#if (PY_VERSION_HEX < 0x02050000) - #ifndef PY_SSIZE_T_MIN - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #endif -#define NPY_SSIZE_T_PYFMT "i" -#undef PyIndex_Check -#define constchar const char -#define PyIndex_Check(op) 0 -#else -#define NPY_SSIZE_T_PYFMT "n" -#define constchar char -#endif - -#if SIZEOF_PY_INTPTR_T == SIZEOF_INT - #define NPY_INTP NPY_INT - #define NPY_UINTP NPY_UINT - #define PyIntpArrType_Type PyIntArrType_Type - #define PyUIntpArrType_Type PyUIntArrType_Type - #define NPY_MAX_INTP NPY_MAX_INT - #define NPY_MIN_INTP NPY_MIN_INT - #define NPY_MAX_UINTP NPY_MAX_UINT - #define NPY_INTP_FMT "d" -#elif SIZEOF_PY_INTPTR_T == SIZEOF_LONG - #define NPY_INTP NPY_LONG - #define NPY_UINTP NPY_ULONG - #define PyIntpArrType_Type PyLongArrType_Type - #define PyUIntpArrType_Type PyULongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONG - #define NPY_MIN_INTP MIN_LONG - #define NPY_MAX_UINTP NPY_MAX_ULONG - #define NPY_INTP_FMT "ld" -#elif defined(PY_LONG_LONG) && (SIZEOF_PY_INTPTR_T == SIZEOF_LONG_LONG) - #define NPY_INTP NPY_LONGLONG - #define NPY_UINTP NPY_ULONGLONG - #define PyIntpArrType_Type PyLongLongArrType_Type - #define PyUIntpArrType_Type PyULongLongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONGLONG - #define NPY_MIN_INTP NPY_MIN_LONGLONG - #define NPY_MAX_UINTP NPY_MAX_ULONGLONG - #define NPY_INTP_FMT "Ld" -#endif - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* Macros to define how array, and dimension/strides data is - allocated. 
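
For illustration, a sketch of how the data and dimension allocation macros defined just below are paired in practice; allocate_scratch is a hypothetical helper, and both FREE macros tolerate a NULL pointer since they reduce to free()/PyMem_Free():

static int allocate_scratch(int nd, npy_intp nitems, int itemsize)
{
    npy_intp *dims = PyDimMem_NEW(nd);               /* nd * sizeof(npy_intp) */
    char *data = PyDataMem_NEW(nitems * itemsize);   /* raw element buffer */

    if (dims == NULL || data == NULL) {
        PyDimMem_FREE(dims);
        PyDataMem_FREE(data);
        return -1;
    }
    /* ... fill dims and data ... */
    PyDataMem_FREE(data);
    PyDimMem_FREE(dims);
    return 0;
}
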
- */ - - /* Data buffer */ -#define PyDataMem_NEW(size) ((char *)malloc(size)) -#define PyDataMem_FREE(ptr) free(ptr) -#define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size)) - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - - /* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - - /* These assume aligned and notswapped data -- a buffer will be - used before or contiguous data will be obtained - */ -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* XXX the ignore argument should be removed next time the API version - is bumped. It used to be the separator. */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* Functions to cast to all other standard types*/ - /* Can have some NULL entries */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES]; - - /* The next four functions *cannot* be NULL */ - - /* Functions to get and set items with standard - Python types -- not array scalars */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* Copy and/or swap data. 
Memory areas may not overlap */ - /* Use memmove first if they might */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* Function to compare items */ - /* Can be NULL - */ - PyArray_CompareFunc *compare; - - /* Function to select largest - Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* Function to compute dot product */ - /* Can be NULL */ - PyArray_DotFunc *dotfunc; - - /* Function to scan an ASCII file and - place a single value plus possible separator - Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* Function to read a single value from a string */ - /* and adjust the pointer; Can be NULL */ - PyArray_FromStrFunc *fromstr; - - /* Function to determine if data is zero or not */ - /* If NULL a default version is */ - /* used at Registration time. */ - PyArray_NonzeroFunc *nonzero; - - /* Used for arange. Can be NULL.*/ - PyArray_FillFunc *fill; - - /* Function to fill arrays with scalar values - Can be NULL*/ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* Sorting functions; Can be NULL*/ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* Dictionary of additional casting functions - PyArray_VectorUnaryFuncs - which can be populated to support casting - to other registered types. Can be NULL*/ - PyObject *castdict; - - /* Functions useful for generalizing - the casting rules. Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; -} PyArray_ArrFuncs; - -#define NPY_ITEM_REFCOUNT 0x01 /* The item must be reference counted - when it is inserted or extracted. */ -#define NPY_ITEM_HASOBJECT 0x01 /* Same as needing REFCOUNT */ - -#define NPY_LIST_PICKLE 0x02 /* Convert to list for pickling */ -#define NPY_ITEM_IS_POINTER 0x04 /* The item is a POINTER */ - -#define NPY_NEEDS_INIT 0x08 /* memory needs to be initialized - for this data-type */ - -#define NPY_NEEDS_PYAPI 0x10 /* operations need Python C-API - so don't give-up thread. */ - -#define NPY_USE_GETITEM 0x20 /* Use f.getitem when extracting elements - of this data-type */ - -#define NPY_USE_SETITEM 0x40 /* Use f.setitem when setting creating - 0-d array from this data-type. - */ - -/* These are inherited for global data-type if any data-types in the field - have them */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->hasobject & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -/* Change dtype hasobject to 32-bit in 1.1 and change its name */ -typedef struct _PyArray_Descr { - PyObject_HEAD - PyTypeObject *typeobj; /* the type object representing an - instance of this type -- should not - be two type_numbers with the same type - object. */ - char kind; /* kind for this type */ - char type; /* unique-character representing this type */ - char byteorder; /* '>' (big), '<' (little), '|' - (not-applicable), or '=' (native). 
*/ - char hasobject; /* non-zero if it has object arrays - in fields */ - int type_num; /* number representing this type */ - int elsize; /* element size for this type */ - int alignment; /* alignment needed for this type */ - struct _arr_descr \ - *subarray; /* Non-NULL if this type is - is an array (C-contiguous) - of some other type - */ - PyObject *fields; /* The fields dictionary for this type */ - /* For statically defined descr this - is always Py_None */ - - PyObject *names; /* An ordered tuple of field names or NULL - if no fields are defined */ - - PyArray_ArrFuncs *f; /* a table of functions specific for each - basic data descriptor */ -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - The main array object structure. It is recommended to use the macros - defined below (PyArray_DATA and friends) access fields here, instead - of the members themselves. - */ - -typedef struct PyArrayObject { - PyObject_HEAD - char *data; /* pointer to raw data buffer */ - int nd; /* number of dimensions, also called ndim */ - npy_intp *dimensions; /* size in each dimension */ - npy_intp *strides; /* bytes to jump to get to the - next element in each dimension */ - PyObject *base; /* This object should be decref'd - upon deletion of array */ - /* For views it points to the original array */ - /* For creation from buffer object it points - to an object that shold be decref'd on - deletion */ - /* For UPDATEIFCOPY flag this is an array - to-be-updated upon deletion of this one */ - PyArray_Descr *descr; /* Pointer to type structure */ - int flags; /* Flags describing array -- see below*/ - PyObject *weakreflist; /* For weakreferences */ -} PyArrayObject; - -#define NPY_AO PyArrayObject - -#define fortran fortran_ /* For some compilers */ - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* Means c-style contiguous (last index varies the fastest). The - data elements right after each other. */ -#define NPY_CONTIGUOUS 0x0001 -/* set if array is a contiguous Fortran array: the first index - varies the fastest in memory (strides array is reverse of - C-contiguous array)*/ -#define NPY_FORTRAN 0x0002 - -#define NPY_C_CONTIGUOUS NPY_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_FORTRAN - -/* - Note: all 0-d arrays are CONTIGUOUS and FORTRAN contiguous. If a - 1-d array is CONTIGUOUS it is also FORTRAN contiguous -*/ - -/* If set, the array owns the data: it will be free'd when the array - is deleted. */ -#define NPY_OWNDATA 0x0004 - -/* An array never has the next four set; they're only used as parameter - flags to the the various FromAny functions */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_FORCECAST 0x0010 - -/* Always copy the array. Returned arrays are always CONTIGUOUS, ALIGNED, - and WRITEABLE. */ -#define NPY_ENSURECOPY 0x0020 - -/* Make sure the returned array is a base-class ndarray */ -#define NPY_ENSUREARRAY 0x0040 - -/* Make sure that the strides are in units of the element size - Needed for some operations with record-arrays. 
-*/ -#define NPY_ELEMENTSTRIDES 0x0080 - -/* Array data is aligned on the appropiate memory address for the - type stored according to how the compiler would align things - (e.g., an array of integers (4 bytes each) starts on - a memory address that's a multiple of 4) */ -#define NPY_ALIGNED 0x0100 -/* Array data has the native endianness */ -#define NPY_NOTSWAPPED 0x0200 -/* Array data is writeable */ -#define NPY_WRITEABLE 0x0400 -/* If this flag is set, then base contains a pointer to an array of - the same size that should be updated with the current contents of - this array when this array is deallocated -*/ -#define NPY_UPDATEIFCOPY 0x1000 - -/* This flag is for the array interface */ -#define NPY_ARR_HAS_DESCR 0x0800 - - -#define NPY_BEHAVED (NPY_ALIGNED | NPY_WRITEABLE) -#define NPY_BEHAVED_NS (NPY_ALIGNED | NPY_WRITEABLE | NPY_NOTSWAPPED) -#define NPY_CARRAY (NPY_CONTIGUOUS | NPY_BEHAVED) -#define NPY_CARRAY_RO (NPY_CONTIGUOUS | NPY_ALIGNED) -#define NPY_FARRAY (NPY_FORTRAN | NPY_BEHAVED) -#define NPY_FARRAY_RO (NPY_FORTRAN | NPY_ALIGNED) -#define NPY_DEFAULT NPY_CARRAY -#define NPY_IN_ARRAY NPY_CARRAY_RO -#define NPY_OUT_ARRAY NPY_CARRAY -#define NPY_INOUT_ARRAY (NPY_CARRAY | NPY_UPDATEIFCOPY) -#define NPY_IN_FARRAY NPY_FARRAY_RO -#define NPY_OUT_FARRAY NPY_FARRAY -#define NPY_INOUT_FARRAY (NPY_FARRAY | NPY_UPDATEIFCOPY) - -#define NPY_UPDATE_ALL (NPY_CONTIGUOUS | NPY_FORTRAN | NPY_ALIGNED) - - -/* Size of internal buffers used for alignment */ -/* Make BUFSIZE a multiple of sizeof(cdouble) -- ususally 16 */ -/* So that ufunc buffers are aligned */ -#define NPY_MIN_BUFSIZE ((int)sizeof(cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(cdouble))*1000000) -#define NPY_BUFSIZE 10000 -/* #define NPY_BUFSIZE 80*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined here. 
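
For illustration, a sketch combining the requirement flags above with the flag-check and GIL macros defined just below; scale_in_place is a hypothetical helper, and the accessor macros it uses (PyArray_TYPE, PyArray_SIZE, PyArray_DATA, PyArray_ISNOTSWAPPED) appear further down in this header:

static int scale_in_place(PyArrayObject *arr, double factor)
{
    npy_intp i, n;
    double *p;
    NPY_BEGIN_THREADS_DEF

    /* Only touch behaved, native-endian double arrays;
       NPY_CARRAY is contiguous | aligned | writeable. */
    if (!PyArray_CHKFLAGS(arr, NPY_CARRAY) ||
        !PyArray_ISNOTSWAPPED(arr) ||
        PyArray_TYPE(arr) != NPY_DOUBLE) {
        return -1;
    }
    n = PyArray_SIZE(arr);
    p = (double *)PyArray_DATA(arr);

    NPY_BEGIN_THREADS            /* release the GIL around the pure C loop */
    for (i = 0; i < n; i++) {
        p[i] *= factor;
    }
    NPY_END_THREADS              /* reacquire the GIL */
    return 0;
}
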
- */ - - -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject *)(m))->flags & (FLAGS)) == (FLAGS)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ALIGNED) - - -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS _save = PyEval_SaveThread(); -#define NPY_END_THREADS if (_save) PyEval_RestoreThread(_save); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS - -#define NPY_END_THREADS_DESCR(dtype) \ - if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API __save__ = PyGILState_Ensure(); -#define NPY_DISABLE_C_API PyGILState_Release(__save__); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -typedef struct { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; -} PyArrayIterObject; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} - -#define _PyArray_ITER_NEXT1(it) { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} - -#define _PyArray_ITER_NEXT2(it) { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} - -#define _PyArray_ITER_NEXT3(it) { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ - } \ -} - -#define PyArray_ITER_NEXT(it) { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += _PyAIT(it)->ao->descr->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - 
if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} - -#define PyArray_ITER_GOTO(it, destination) { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} - -#define PyArray_ITER_GOTO1D(it, ind) { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ - __npy_ind * _PyAIT(it)->ao->descr->elsize; \ - else { \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - Any object passed to PyArray_Broadcast must be binary compatible with - this structure. 
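
For illustration, the canonical flat-iteration loop built from the iterator macros above; sum_all is a hypothetical helper, PyArray_IterNew comes from the function API pulled in by __multiarray_api.h below, and the array is assumed to hold aligned, native NPY_DOUBLE data:

static double sum_all(PyArrayObject *arr)
{
    double total = 0.0;
    PyArrayIterObject *it;

    it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
    if (it == NULL) {
        return 0.0;                              /* error already set */
    }
    while (PyArray_ITER_NOTDONE(it)) {
        total += *(double *)PyArray_ITER_DATA(it);
        PyArray_ITER_NEXT(it);                   /* advance in C order */
    }
    Py_DECREF(it);
    return total;
}
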
-*/ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} - -#define PyArray_MultiIter_NEXT(multi) { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} - -#define PyArray_MultiIter_GOTO(multi, dest) { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} - -#define PyArray_MultiIter_GOTO1D(multi, ind) { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - -/* Store the information needed for fancy-indexing over an array */ - -typedef struct { - PyObject_HEAD - /* Multi-iterator portion --- needs to be present in this order to - work with PyArray_Broadcast */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object - iterators */ - PyArrayIterObject *ait; /* flat Iterator for - underlying array */ - - /* flat iterator for subspace (when numiter < nd) */ - PyArrayIterObject *subspace; - - /* if subspace iteration, then this is the array of - axes in the underlying array represented by the - index objects */ - int iteraxes[NPY_MAXDIMS]; - /* if subspace iteration, the these are the coordinates - to the start of the subspace. - */ - npy_intp bscoord[NPY_MAXDIMS]; - - PyObject *indexobj; /* creating obj */ - int consec; - char *dataptr; - -} PyArrayMapIterObject; - -/* The default array type - */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE -#define PyArray_DEFAULT NPY_DEFAULT_TYPE -/* All sorts of useful ways to look into a PyArrayObject. - These are the recommended over casting to PyArrayObject and accessing - the members directly. - */ - -#define PyArray_NDIM(obj) (((PyArrayObject *)(obj))->nd) -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_FORTRAN)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_FORTRAN) && \ - (PyArray_NDIM(m) > 1)) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_FORTRAN) ? 
\ - NPY_FORTRAN : 0)) - -#define FORTRAN_IF PyArray_FORTRAN_IF -#define PyArray_DATA(obj) ((void *)(((PyArrayObject *)(obj))->data)) -#define PyArray_BYTES(obj) (((PyArrayObject *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject *)(obj))->flags) -#define PyArray_ITEMSIZE(obj) (((PyArrayObject *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) (((PyArrayObject *)(obj))->descr->type_num) - -#define PyArray_GETITEM(obj,itemptr) \ - ((PyArrayObject *)(obj))->descr->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)); - -#define PyArray_SETITEM(obj,itemptr,v) \ - ((PyArrayObject *)(obj))->descr->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)); - - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) (((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) - -#define PyTypeNum_ISNUMBER(type) ((type) <= NPY_CLONGDOUBLE) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || \ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) 
PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) - -#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#ifdef WORDS_BIGENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) - - -/* This is the form of the struct that's returned pointed by the - PyCObject attribute of an array __array_struct__. See - http://numpy.scipy.org/array_interface.shtml for the full - documentation. */ -typedef struct { - int two; /* contains the integer 2 as a sanity check */ - int nd; /* number of dimensions */ - char typekind; /* kind in array --- character code of typestr */ - int itemsize; /* size of each element */ - int flags; /* how should be data interpreted. Valid - flags are CONTIGUOUS (1), FORTRAN (2), - ALIGNED (0x100), NOTSWAPPED (0x200), and - WRITEABLE (0x400). - ARR_HAS_DESCR (0x800) states that arrdescr - field is present in structure */ - npy_intp *shape; /* A length-nd array of shape information */ - npy_intp *strides; /* A length-nd array of stride information */ - void *data; /* A pointer to the first element of the array */ - PyObject *descr; /* A list of fields or NULL (ignored if flags - does not have ARR_HAS_DESCR flag set) */ -} PyArrayInterface; - - -/* Includes the "function" C-API -- these are all stored in a - list of pointers --- one for each file - The two lists are concatenated into one in multiarray. 
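
For illustration, the Python 2 style module boilerplate of this era that makes the function API usable from an extension: import_array() must run in the module init function before any PyArray_* call. The module and function names are hypothetical, and the public entry point is numpy/arrayobject.h rather than this header (which is not meant to be included directly):

#include "Python.h"
#include "numpy/arrayobject.h"

static PyMethodDef example_methods[] = {
    {NULL, NULL, 0, NULL}        /* sentinel */
};

PyMODINIT_FUNC initexample(void)
{
    (void)Py_InitModule("example", example_methods);
    import_array();              /* fills in the PyArray_API pointer table */
}
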
- - They are available as import_array() -*/ - -#include "__multiarray_api.h" - - -/* C-API that requries previous API to be defined */ - -#define PyArray_DescrCheck(op) ((op)->ob_type == &PyArrayDescr_Type) - -#define PyArray_Check(op) ((op)->ob_type == &PyArray_Type || \ - PyObject_TypeCheck(op, &PyArray_Type)) - -#define PyArray_CheckExact(op) ((op)->ob_type == &PyArray_Type) - -#define PyArray_HasArrayInterfaceType(op, type, context, out) \ - ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromArrayAttr(op, type, context)) != \ - Py_NotImplemented)) - -#define PyArray_HasArrayInterface(op, out) \ - PyArray_HasArrayInterfaceType(op, NULL, NULL, out) - -#define PyArray_IsZeroDim(op) (PyArray_Check(op) && (PyArray_NDIM(op) == 0)) - -#define PyArray_IsScalar(obj, cls) \ - (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) - -#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ - PyArray_IsZeroDim(m)) - -#define PyArray_IsPythonNumber(obj) \ - (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ - PyLong_Check(obj) || PyBool_Check(obj)) - -#define PyArray_IsPythonScalar(obj) \ - (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ - PyUnicode_Check(obj)) - -#define PyArray_IsAnyScalar(obj) \ - (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) - -#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ - PyArray_CheckScalar(obj)) - -#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ - Py_INCREF(m), (m) : \ - (PyArrayObject *)(PyArray_Copy(m))) - -#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ - PyArray_CompareLists(PyArray_DIMS(a1), \ - PyArray_DIMS(a2), \ - PyArray_NDIM(a1))) - -#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) -#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) -#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) - -#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ - NULL) - -#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ - PyArray_DescrFromType(type), 0, 0, 0, NULL); - -#define PyArray_FROM_OTF(m, type, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ - (((flags) & NPY_ENSURECOPY) ? \ - ((flags) | NPY_DEFAULT) : (flags)), NULL) - -#define PyArray_FROMANY(m, type, min, max, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ - (((flags) & NPY_ENSURECOPY) ? 
\ - (flags) | NPY_DEFAULT : (flags)), NULL) - -#define PyArray_ZEROS(m, dims, type, fortran) \ - PyArray_Zeros(m, dims, PyArray_DescrFromType(type), fortran) - -#define PyArray_EMPTY(m, dims, type, fortran) \ - PyArray_Empty(m, dims, PyArray_DescrFromType(type), fortran) - -#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ - PyArray_NBYTES(obj)) - -#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) -#define NPY_REFCOUNT PyArray_REFCOUNT -#define NPY_MAX_ELSIZE (2*SIZEOF_LONGDOUBLE) - -#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_DEFAULT, NULL) - -#define PyArray_EquivArrTypes(a1, a2) \ - PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) - -#define PyArray_EquivByteorders(b1, b2) \ - (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) - -#define PyArray_SimpleNew(nd, dims, typenum) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) - -#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ - data, 0, NPY_CARRAY, NULL) - -#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ - NULL, NULL, 0, NULL) - -#define PyArray_ToScalar(data, arr) \ - PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) - - -/* These might be faster without the dereferencing of obj - going on inside -- of course an optimizing compiler should - inline the constants inside a for loop making it a moot point -*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0])) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1])) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2])) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2] + \ - (l)*PyArray_STRIDES(obj)[3])) - -#define PyArray_XDECREF_ERR(obj) \ - if (obj && (PyArray_FLAGS(obj) & NPY_UPDATEIFCOPY)) { \ - PyArray_FLAGS(PyArray_BASE(obj)) |= NPY_WRITEABLE; \ - PyArray_FLAGS(obj) &= ~NPY_UPDATEIFCOPY; \ - } \ - Py_XDECREF(obj) - -#define PyArray_DESCR_REPLACE(descr) do { \ - PyArray_Descr *_new_; \ - _new_ = PyArray_DescrNew(descr); \ - Py_XDECREF(descr); \ - descr = _new_; \ - } while(0) - -/* Copy should always return contiguous array */ -#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) - -#define PyArray_FromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_BEHAVED | NPY_ENSUREARRAY, NULL) - -#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_DEFAULT | NPY_ENSUREARRAY, NULL) - -#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ENSURECOPY | NPY_DEFAULT | \ - NPY_ENSUREARRAY, NULL) - -#define PyArray_Cast(mp, type_num) \ - PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) - -#define PyArray_Take(ap, items, axis) \ - PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) - -#define PyArray_Put(ap, items, values) \ - PyArray_PutTo(ap, items, 
values, NPY_RAISE) - -/* Compatibility with old Numeric stuff -- don't use in new code */ - -#define PyArray_FromDimsAndData(nd, d, type, data) \ - PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ - data) - -#include "old_defines.h" - -#ifdef __cplusplus -} -#endif - -#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/numpy/core/include/numpy/noprefix.h b/numpy/core/include/numpy/noprefix.h deleted file mode 100644 index dc4f71c70..000000000 --- a/numpy/core/include/numpy/noprefix.h +++ /dev/null @@ -1,191 +0,0 @@ -#ifndef NPY_NOPREFIX_H -#define NPY_NOPREFIX_H - -/* You can directly include noprefix.h as a backward -compatibility measure*/ -#ifndef NPY_NO_PREFIX -#include "ndarrayobject.h" -#endif - -#define MAX_DIMS NPY_MAXDIMS - -#define longlong npy_longlong -#define ulonglong npy_ulonglong -#define Bool npy_bool -#define longdouble npy_longdouble -#define byte npy_byte - -#ifndef _BSD_SOURCE -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#endif - -#define ubyte npy_ubyte -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#define cfloat npy_cfloat -#define cdouble npy_cdouble -#define clongdouble npy_clongdouble -#define Int8 npy_int8 -#define UInt8 npy_uint8 -#define Int16 npy_int16 -#define UInt16 npy_uint16 -#define Int32 npy_int32 -#define UInt32 npy_uint32 -#define Int64 npy_int64 -#define UInt64 npy_uint64 -#define Int128 npy_int128 -#define UInt128 npy_uint128 -#define Int256 npy_int256 -#define UInt256 npy_uint256 -#define Float16 npy_float16 -#define Complex32 npy_complex32 -#define Float32 npy_float32 -#define Complex64 npy_complex64 -#define Float64 npy_float64 -#define Complex128 npy_complex128 -#define Float80 npy_float80 -#define Complex160 npy_complex160 -#define Float96 npy_float96 -#define Complex192 npy_complex192 -#define Float128 npy_float128 -#define Complex256 npy_complex256 -#define intp npy_intp -#define uintp npy_uintp - -#define SIZEOF_INTP NPY_SIZEOF_INTP -#define SIZEOF_UINTP NPY_SIZEOF_UINTP - -#define LONGLONG_FMT NPY_LONGLONG_FMT -#define ULONGLONG_FMT NPY_ULONGLONG_FMT -#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX -#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX(x) - -#define MAX_INT8 127 -#define MIN_INT8 -128 -#define MAX_UINT8 255 -#define MAX_INT16 32767 -#define MIN_INT16 -32768 -#define MAX_UINT16 65535 -#define MAX_INT32 2147483647 -#define MIN_INT32 (-MAX_INT32 - 1) -#define MAX_UINT32 4294967295U -#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) -#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) -#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) -#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) -#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) -#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) - -#define MAX_BYTE NPY_MAX_BYTE -#define MIN_BYTE NPY_MIN_BYTE -#define MAX_UBYTE NPY_MAX_UBYTE -#define MAX_SHORT NPY_MAX_SHORT -#define MIN_SHORT NPY_MIN_SHORT -#define MAX_USHORT NPY_MAX_USHORT -#define MAX_INT NPY_MAX_INT -#define MIN_INT NPY_MIN_INT -#define MAX_UINT NPY_MAX_UINT -#define MAX_LONG NPY_MAX_LONG -#define MIN_LONG NPY_MIN_LONG -#define MAX_ULONG NPY_MAX_ULONG -#define MAX_LONGLONG NPY_MAX_LONGLONG -#define 
MIN_LONGLONG NPY_MIN_LONGLONG -#define MAX_ULONGLONG NPY_MAX_ULONGLONG - -#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE -#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG -#define BITSOF_BOOL NPY_BITSOF_BOOL -#define BITSOF_CHAR NPY_BITSOF_CHAR -#define BITSOF_SHORT NPY_BITSOF_SHORT -#define BITSOF_INT NPY_BITSOF_INT -#define BITSOF_LONG NPY_BITSOF_LONG -#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG -#define BITSOF_FLOAT NPY_BITSOF_FLOAT -#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE -#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE - -#define PyArray_UCS4 npy_ucs4 -#define _pya_malloc PyArray_malloc -#define _pya_free PyArray_free -#define _pya_realloc PyArray_realloc - -#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF -#define BEGIN_THREADS NPY_BEGIN_THREADS -#define END_THREADS NPY_END_THREADS -#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF -#define ALLOW_C_API NPY_ALLOW_C_API -#define DISABLE_C_API NPY_DISABLE_C_API - -#define PY_FAIL NPY_FAIL -#define PY_SUCCEED NPY_SUCCEED - -#ifndef TRUE -#define TRUE NPY_TRUE -#endif - -#ifndef FALSE -#define FALSE NPY_FALSE -#endif - -#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT - -#define CONTIGUOUS NPY_CONTIGUOUS -#define C_CONTIGUOUS NPY_C_CONTIGUOUS -#define FORTRAN NPY_FORTRAN -#define F_CONTIGUOUS NPY_F_CONTIGUOUS -#define OWNDATA NPY_OWNDATA -#define FORCECAST NPY_FORCECAST -#define ENSURECOPY NPY_ENSURECOPY -#define ENSUREARRAY NPY_ENSUREARRAY -#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES -#define ALIGNED NPY_ALIGNED -#define NOTSWAPPED NPY_NOTSWAPPED -#define WRITEABLE NPY_WRITEABLE -#define UPDATEIFCOPY NPY_UPDATEIFCOPY -#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR -#define BEHAVED NPY_BEHAVED -#define BEHAVED_NS NPY_BEHAVED_NS -#define CARRAY NPY_CARRAY -#define CARRAY_RO NPY_CARRAY_RO -#define FARRAY NPY_FARRAY -#define FARRAY_RO NPY_FARRAY_RO -#define DEFAULT NPY_DEFAULT -#define IN_ARRAY NPY_IN_ARRAY -#define OUT_ARRAY NPY_OUT_ARRAY -#define INOUT_ARRAY NPY_INOUT_ARRAY -#define IN_FARRAY NPY_IN_FARRAY -#define OUT_FARRAY NPY_OUT_FARRAY -#define INOUT_FARRAY NPY_INOUT_FARRAY -#define UPDATE_ALL NPY_UPDATE_ALL - -#define OWN_DATA NPY_OWNDATA -#define BEHAVED_FLAGS NPY_BEHAVED -#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS -#define CARRAY_FLAGS_RO NPY_CARRAY_RO -#define CARRAY_FLAGS NPY_CARRAY -#define FARRAY_FLAGS NPY_FARRAY -#define FARRAY_FLAGS_RO NPY_FARRAY_RO -#define DEFAULT_FLAGS NPY_DEFAULT -#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS - -#ifndef MIN -#define MIN PyArray_MIN -#endif -#ifndef MAX -#define MAX PyArray_MAX -#endif -#define MAX_INTP NPY_MAX_INTP -#define MIN_INTP NPY_MIN_INTP -#define MAX_UINTP NPY_MAX_UINTP -#define INTP_FMT NPY_INTP_FMT - -#define REFCOUNT PyArray_REFCOUNT -#define MAX_ELSIZE NPY_MAX_ELSIZE - -#endif diff --git a/numpy/core/include/numpy/npy_interrupt.h b/numpy/core/include/numpy/npy_interrupt.h deleted file mode 100644 index eb72fbaf0..000000000 --- a/numpy/core/include/numpy/npy_interrupt.h +++ /dev/null @@ -1,117 +0,0 @@ - -/* Signal handling: - -This header file defines macros that allow your code to handle -interrupts received during processing. Interrupts that -could reasonably be handled: - -SIGINT, SIGABRT, SIGALRM, SIGSEGV - -****Warning*************** - -Do not allow code that creates temporary memory or increases reference -counts of Python objects to be interrupted unless you handle it -differently. - -************************** - -The mechanism for handling interrupts is conceptually simple: - - - replace the signal handler with our own home-grown version - and store the old one. 
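The NPY_SIGINT_ON / NPY_SIGINT_OFF pair defined further down in this header wraps exactly this store-run-restore sequence. A hedged sketch of how an extension might use it follows; the function name slow_fill and its loop are illustrative, the guarded region must not allocate memory or touch Python reference counts (as this header warns), and the multiarray C-API is assumed to have been imported already.

static void
slow_fill(double *buf, npy_intp n)
{
    npy_intp i;

    NPY_SIGINT_ON                    /* install handler, save the old one */
    for (i = 0; i < n; i++) {
        buf[i] = (double)i;          /* long-running, interrupt-safe work */
    }
    NPY_SIGINT_OFF                   /* restore the previous SIGINT handler */

    /* If SIGINT arrived, the loop was abandoned and execution resumes here
       with some computations simply missing, as this header explains. */
}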
- - run the code to be interrupted -- if an interrupt occurs - the handler should basically just cause a return to the - calling function for finish work. - - restore the old signal handler - -Of course, every code that allows interrupts must account for -returning via the interrupt and handle clean-up correctly. But, -even still, the simple paradigm is complicated by at least three -factors. - - 1) platform portability (i.e. Microsoft says not to use longjmp - to return from signal handling. They have a __try and __except - extension to C instead but what about mingw?). - - 2) how to handle threads: apparently whether signals are delivered to - every thread of the process or the "invoking" thread is platform - dependent. --- we don't handle threads for now. - - 3) do we need to worry about re-entrance. For now, assume the - code will not call-back into itself. - -Ideas: - - 1) Start by implementing an approach that works on platforms that - can use setjmp and longjmp functionality and does nothing - on other platforms. - - 2) Ignore threads --- i.e. do not mix interrupt handling and threads - - 3) Add a default signal_handler function to the C-API but have the rest - use macros. - - -Simple Interface: - - -In your C-extension: around a block of code you want to be interruptable -with a SIGINT - -NPY_SIGINT_ON -[code] -NPY_SIGINT_OFF - -In order for this to work correctly, the -[code] block must not allocate any memory or alter the reference count of any -Python objects. In other words [code] must be interruptible so that continuation -after NPY_SIGINT_OFF will only be "missing some computations" - -Interrupt handling does not work well with threads. - -*/ - -/* Add signal handling macros - Make the global variable and signal handler part of the C-API -*/ - -#ifndef NPY_INTERRUPT_H -#define NPY_INTERRUPT_H - -#ifndef NPY_NO_SIGNAL - -#include -#include - -#ifndef sigsetjmp - -#define SIGSETJMP(arg1, arg2) setjmp(arg1) -#define SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) -#define SIGJMP_BUF jmp_buf - -#else - -#define SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) -#define SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) -#define SIGJMP_BUF sigjmp_buf - -#endif - -# define NPY_SIGINT_ON { \ - PyOS_sighandler_t _npy_sig_save; \ - _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ - if (SIGSETJMP(*((SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ - 1) == 0) { \ - -# define NPY_SIGINT_OFF } \ - PyOS_setsig(SIGINT, _npy_sig_save); \ - } - -#else /* NPY_NO_SIGNAL */ - -# define NPY_SIGINT_ON -# define NPY_SIGINT_OFF - -#endif /* HAVE_SIGSETJMP */ - -#endif /* NPY_INTERRUPT_H */ diff --git a/numpy/core/include/numpy/old_defines.h b/numpy/core/include/numpy/old_defines.h deleted file mode 100644 index c21665268..000000000 --- a/numpy/core/include/numpy/old_defines.h +++ /dev/null @@ -1,169 +0,0 @@ -#define NDARRAY_VERSION NPY_VERSION - -#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE -#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE -#define PyArray_BUFSIZE NPY_BUFSIZE - -#define PyArray_PRIORITY NPY_PRIORITY -#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY -#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE - -#define NPY_MAX PyArray_MAX -#define NPY_MIN PyArray_MIN - -#define PyArray_TYPES NPY_TYPES -#define PyArray_BOOL NPY_BOOL -#define PyArray_BYTE NPY_BYTE -#define PyArray_UBYTE NPY_UBYTE -#define PyArray_SHORT NPY_SHORT -#define PyArray_USHORT NPY_USHORT -#define PyArray_INT NPY_INT -#define PyArray_UINT NPY_UINT -#define PyArray_LONG NPY_LONG -#define PyArray_ULONG NPY_ULONG -#define PyArray_LONGLONG 
NPY_LONGLONG -#define PyArray_ULONGLONG NPY_ULONGLONG -#define PyArray_FLOAT NPY_FLOAT -#define PyArray_DOUBLE NPY_DOUBLE -#define PyArray_LONGDOUBLE NPY_LONGDOUBLE -#define PyArray_CFLOAT NPY_CFLOAT -#define PyArray_CDOUBLE NPY_CDOUBLE -#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE -#define PyArray_OBJECT NPY_OBJECT -#define PyArray_STRING NPY_STRING -#define PyArray_UNICODE NPY_UNICODE -#define PyArray_VOID NPY_VOID -#define PyArray_NTYPES NPY_NTYPES -#define PyArray_NOTYPE NPY_NOTYPE -#define PyArray_CHAR NPY_CHAR -#define PyArray_USERDEF NPY_USERDEF -#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES - -#define PyArray_INTP NPY_INTP -#define PyArray_UINTP NPY_UINTP - -#define PyArray_INT8 NPY_INT8 -#define PyArray_UINT8 NPY_UINT8 -#define PyArray_INT16 NPY_INT16 -#define PyArray_UINT16 NPY_UINT16 -#define PyArray_INT32 NPY_INT32 -#define PyArray_UINT32 NPY_UINT32 - -#ifdef NPY_INT64 -#define PyArray_INT64 NPY_INT64 -#define PyArray_UINT64 NPY_UINT64 -#endif - -#ifdef NPY_INT128 -#define PyArray_INT128 NPY_INT128 -#define PyArray_UINT128 NPY_UINT128 -#endif - -#ifdef NPY_FLOAT16 -#define PyArray_FLOAT16 NPY_FLOAT16 -#define PyArray_COMPLEX32 NPY_COMPLEX32 -#endif - -#ifdef NPY_FLOAT80 -#define PyArray_FLOAT80 NPY_FLOAT80 -#define PyArray_COMPLEX160 NPY_COMPLEX160 -#endif - -#ifdef NPY_FLOAT96 -#define PyArray_FLOAT96 NPY_FLOAT96 -#define PyArray_COMPLEX192 NPY_COMPLEX192 -#endif - -#ifdef NPY_FLOAT128 -#define PyArray_FLOAT128 NPY_FLOAT128 -#define PyArray_COMPLEX256 NPY_COMPLEX256 -#endif - -#define PyArray_FLOAT32 NPY_FLOAT32 -#define PyArray_COMPLEX64 NPY_COMPLEX64 -#define PyArray_FLOAT64 NPY_FLOAT64 -#define PyArray_COMPLEX128 NPY_COMPLEX128 - - -#define PyArray_TYPECHAR NPY_TYPECHAR -#define PyArray_BOOLLTR NPY_BOOLLTR -#define PyArray_BYTELTR NPY_BYTELTR -#define PyArray_UBYTELTR NPY_UBYTELTR -#define PyArray_SHORTLTR NPY_SHORTLTR -#define PyArray_USHORTLTR NPY_USHORTLTR -#define PyArray_INTLTR NPY_INTLTR -#define PyArray_UINTLTR NPY_UINTLTR -#define PyArray_LONGLTR NPY_LONGLTR -#define PyArray_ULONGLTR NPY_ULONGLTR -#define PyArray_LONGLONGLTR NPY_LONGLONGLTR -#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR -#define PyArray_FLOATLTR NPY_FLOATLTR -#define PyArray_DOUBLELTR NPY_DOUBLELTR -#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR -#define PyArray_CFLOATLTR NPY_CFLOATLTR -#define PyArray_CDOUBLELTR NPY_CDOUBLELTR -#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR -#define PyArray_OBJECTLTR NPY_OBJECTLTR -#define PyArray_STRINGLTR NPY_STRINGLTR -#define PyArray_STRINGLTR2 NPY_STRINGLTR2 -#define PyArray_UNICODELTR NPY_UNICODELTR -#define PyArray_VOIDLTR NPY_VOIDLTR -#define PyArray_CHARLTR NPY_CHARLTR -#define PyArray_INTPLTR NPY_INTPLTR -#define PyArray_UINTPLTR NPY_UINTPLTR -#define PyArray_GENBOOLLTR NPY_GENBOOLLTR -#define PyArray_SIGNEDLTR NPY_SIGNEDLTR -#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR -#define PyArray_FLOATINGLTR NPY_FLOATINGLTR -#define PyArray_COMPLEXLTR NPY_COMPLEXLTR - -#define PyArray_QUICKSORT NPY_QUICKSORT -#define PyArray_HEAPSORT NPY_HEAPSORT -#define PyArray_MERGESORT NPY_MERGESORT -#define PyArray_SORTKIND NPY_SORTKIND -#define PyArray_NSORTS NPY_NSORTS - -#define PyArray_NOSCALAR NPY_NOSCALAR -#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR -#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR -#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR -#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR -#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR -#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR -#define PyArray_SCALARKIND NPY_SCALARKIND -#define PyArray_NSCALARKINDS 
NPY_NSCALARKINDS - -#define PyArray_ANYORDER NPY_ANYORDER -#define PyArray_CORDER NPY_CORDER -#define PyArray_FORTRANORDER NPY_FORTRANORDER -#define PyArray_ORDER NPY_ORDER - -#define PyDescr_ISBOOL PyDataType_ISBOOL -#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED -#define PyDescr_ISSIGNED PyDataType_ISSIGNED -#define PyDescr_ISINTEGER PyDataType_ISINTEGER -#define PyDescr_ISFLOAT PyDataType_ISFLOAT -#define PyDescr_ISNUMBER PyDataType_ISNUMBER -#define PyDescr_ISSTRING PyDataType_ISSTRING -#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX -#define PyDescr_ISPYTHON PyDataType_ISPYTHON -#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE -#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF -#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED -#define PyDescr_ISOBJECT PyDataType_ISOBJECT -#define PyDescr_HASFIELDS PyDataType_HASFIELDS - -#define PyArray_LITTLE NPY_LITTLE -#define PyArray_BIG NPY_BIG -#define PyArray_NATIVE NPY_NATIVE -#define PyArray_SWAP NPY_SWAP -#define PyArray_IGNORE NPY_IGNORE - -#define PyArray_NATBYTE NPY_NATBYTE -#define PyArray_OPPBYTE NPY_OPPBYTE - -#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE - -#define PyArray_USE_PYMEM NPY_USE_PYMEM - -#define PyArray_RemoveLargest PyArray_RemoveSmallest diff --git a/numpy/core/include/numpy/oldnumeric.h b/numpy/core/include/numpy/oldnumeric.h deleted file mode 100644 index 51dba29cd..000000000 --- a/numpy/core/include/numpy/oldnumeric.h +++ /dev/null @@ -1,23 +0,0 @@ -#include "arrayobject.h" - -#ifndef REFCOUNT -# define REFCOUNT NPY_REFCOUNT -# define MAX_ELSIZE 16 -#endif - -#define PyArray_UNSIGNED_TYPES -#define PyArray_SBYTE PyArray_BYTE -#define PyArray_CopyArray PyArray_CopyInto -#define _PyArray_multiply_list PyArray_MultiplyIntList -#define PyArray_ISSPACESAVER(m) NPY_FALSE -#define PyScalarArray_Check PyArray_CheckScalar - -#define CONTIGUOUS NPY_CONTIGUOUS -#define OWN_DIMENSIONS 0 -#define OWN_STRIDES 0 -#define OWN_DATA NPY_OWNDATA -#define SAVESPACE 0 -#define SAVESPACEBIT 0 - -#undef import_array -#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h deleted file mode 100644 index cf868446b..000000000 --- a/numpy/core/include/numpy/ufuncobject.h +++ /dev/null @@ -1,379 +0,0 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H -#ifdef __cplusplus -extern "C" { -#endif - -typedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *); - -typedef struct { - PyObject_HEAD - int nin, nout, nargs; - int identity; - PyUFuncGenericFunction *functions; - void **data; - int ntypes; - int check_return; - char *name, *types; - char *doc; - void *ptr; - PyObject *obj; - PyObject *userloops; -} PyUFuncObject; - -#include "arrayobject.h" - -#define UFUNC_ERR_IGNORE 0 -#define UFUNC_ERR_WARN 1 -#define UFUNC_ERR_RAISE 2 -#define UFUNC_ERR_CALL 3 -#define UFUNC_ERR_PRINT 4 -#define UFUNC_ERR_LOG 5 - - /* Python side integer mask */ - -#define UFUNC_MASK_DIVIDEBYZERO 0x07 -#define UFUNC_MASK_OVERFLOW 0x3f -#define UFUNC_MASK_UNDERFLOW 0x1ff -#define UFUNC_MASK_INVALID 0xfff - -#define UFUNC_SHIFT_DIVIDEBYZERO 0 -#define UFUNC_SHIFT_OVERFLOW 3 -#define UFUNC_SHIFT_UNDERFLOW 6 -#define UFUNC_SHIFT_INVALID 9 - - -/* platform-dependent code translates floating point - status to an integer sum of these values -*/ -#define UFUNC_FPE_DIVIDEBYZERO 1 -#define UFUNC_FPE_OVERFLOW 2 -#define UFUNC_FPE_UNDERFLOW 4 -#define UFUNC_FPE_INVALID 8 - -#define 
UFUNC_ERR_DEFAULT 0 /* Error mode that avoids look-up (no checking) */ - - /* Default user error mode */ -#define UFUNC_ERR_DEFAULT2 \ - (UFUNC_ERR_PRINT << UFUNC_SHIFT_DIVIDEBYZERO) + \ - (UFUNC_ERR_PRINT << UFUNC_SHIFT_OVERFLOW) + \ - (UFUNC_ERR_PRINT << UFUNC_SHIFT_INVALID) - - /* Only internal -- not exported, yet*/ -typedef struct { - /* Multi-iterator portion --- needs to be present in this order - to work with PyArray_Broadcast */ - PyObject_HEAD - int numiter; - npy_intp size; - npy_intp index; - int nd; - npy_intp dimensions[NPY_MAXDIMS]; - PyArrayIterObject *iters[NPY_MAXARGS]; - /* End of Multi-iterator portion */ - - /* The ufunc */ - PyUFuncObject *ufunc; - - /* The error handling */ - int errormask; /* Integer showing desired error handling */ - PyObject *errobj; /* currently a tuple with - (string, func or obj with write method or None) - */ - int first; - - /* Specific function and data to use */ - PyUFuncGenericFunction function; - void *funcdata; - - /* Loop method */ - int meth; - - /* Whether we need to copy to a buffer or not.*/ - int needbuffer[NPY_MAXARGS]; - int leftover; - int ninnerloops; - int lastdim; - - /* Whether or not to swap */ - int swap[NPY_MAXARGS]; - - /* Buffers for the loop */ - char *buffer[NPY_MAXARGS]; - int bufsize; - npy_intp bufcnt; - char *dptr[NPY_MAXARGS]; - - /* For casting */ - char *castbuf[NPY_MAXARGS]; - PyArray_VectorUnaryFunc *cast[NPY_MAXARGS]; - - /* usually points to buffer but when a cast is to be - done it switches for that argument to castbuf. - */ - char *bufptr[NPY_MAXARGS]; - - /* Steps filled in from iters or sizeof(item) - depending on loop method. - */ - npy_intp steps[NPY_MAXARGS]; - - int obj; /* This loop uses object arrays */ - int notimplemented; /* The loop caused notimplemented */ - int objfunc; /* This loop calls object functions - (an inner-loop function with argument types */ -} PyUFuncLoopObject; - -/* Could make this more clever someday */ -#define UFUNC_MAXIDENTITY 32 - -typedef struct { - PyObject_HEAD - PyArrayIterObject *it; - PyArrayObject *ret; - PyArrayIterObject *rit; /* Needed for Accumulate */ - int outsize; - npy_intp index; - npy_intp size; - char idptr[UFUNC_MAXIDENTITY]; - - /* The ufunc */ - PyUFuncObject *ufunc; - - /* The error handling */ - int errormask; - PyObject *errobj; - int first; - - PyUFuncGenericFunction function; - void *funcdata; - int meth; - int swap; - - char *buffer; - int bufsize; - - char *castbuf; - PyArray_VectorUnaryFunc *cast; - - char *bufptr[3]; - npy_intp steps[3]; - - npy_intp N; - int instrides; - int insize; - char *inptr; - - /* For copying small arrays */ - PyObject *decref; - - int obj; - int retbase; - -} PyUFuncReduceObject; - - -#if NPY_ALLOW_THREADS -#define NPY_LOOP_BEGIN_THREADS if (!(loop->obj)) {_save = PyEval_SaveThread();} -#define NPY_LOOP_END_THREADS if (!(loop->obj)) {PyEval_RestoreThread(_save);} -#else -#define NPY_LOOP_BEGIN_THREADS -#define NPY_LOOP_END_THREADS -#endif - -#define PyUFunc_One 1 -#define PyUFunc_Zero 0 -#define PyUFunc_None -1 - -#define UFUNC_REDUCE 0 -#define UFUNC_ACCUMULATE 1 -#define UFUNC_REDUCEAT 2 -#define UFUNC_OUTER 3 - - -typedef struct { - int nin; - int nout; - PyObject *callable; -} PyUFunc_PyFuncData; - -/* A linked-list of function information for - user-defined 1-d loops. 
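Each entry in that list carries an inner loop with the PyUFuncGenericFunction signature declared at the top of this header. Below is a minimal, purely illustrative sketch of such a 1-d loop; the name double_square_loop and the double-in/double-out operation are assumptions, not something this header registers. A loop of this shape would then be attached to a ufunc for a user-defined type through the ufunc C-API (for example PyUFunc_RegisterLoopForType).

static void
double_square_loop(char **args, npy_intp *dimensions, npy_intp *steps, void *data)
{
    npy_intp i, n = dimensions[0];        /* number of elements in this chunk */
    char *in = args[0], *out = args[1];   /* one input, one output argument */

    (void)data;                           /* extra per-loop data, unused here */
    for (i = 0; i < n; i++, in += steps[0], out += steps[1]) {
        double x = *(double *)in;
        *(double *)out = x * x;           /* the element-wise operation */
    }
}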
- */ -typedef struct _loop1d_info { - PyUFuncGenericFunction func; - void *data; - int *arg_types; - struct _loop1d_info *next; -} PyUFunc_Loop1d; - - -#include "__ufunc_api.h" - -#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" - -#define UFUNC_CHECK_ERROR(arg) \ - if (((arg)->obj && PyErr_Occurred()) || \ - ((arg)->errormask && \ - PyUFunc_checkfperr((arg)->errormask, \ - (arg)->errobj, \ - &(arg)->first))) \ - goto fail - -/* This code checks the IEEE status flags in a platform-dependent way */ -/* Adapted from Numarray */ - -#if (defined(__unix__) || defined(unix)) && !defined(USG) -#include -#endif - -/* OSF/Alpha (Tru64) ---------------------------------------------*/ -#if defined(__osf__) && defined(__alpha) - -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - unsigned long fpstatus; \ - \ - fpstatus = ieee_get_fp_control(); \ - /* clear status bits as well as disable exception mode if on */ \ - ieee_set_fp_control( 0 ); \ - ret = ((IEEE_STATUS_DZE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((IEEE_STATUS_OVF & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((IEEE_STATUS_UNF & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((IEEE_STATUS_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } - -/* MS Windows -----------------------------------------------------*/ -#elif defined(_MSC_VER) - -#include - - /* Clear the floating point exception default of Borland C++ */ -#if defined(__BORLANDC__) -#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); -#endif - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus = (int) _clearfp(); \ - \ - ret = ((SW_ZERODIVIDE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((SW_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((SW_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((SW_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } - -#define isnan(x) (_isnan((double)(x))) -#define isinf(x) ((_fpclass((double)(x)) == _FPCLASS_PINF) || \ - (_fpclass((double)(x)) == _FPCLASS_NINF)) -#define isfinite(x) (_finite((double) x)) - -/* Solaris --------------------------------------------------------*/ -/* --------ignoring SunOS ieee_flags approach, someone else can -** deal with that! */ -#elif defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || defined(__NetBSD__) -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus; \ - \ - fpstatus = (int) fpgetsticky(); \ - ret = ((FP_X_DZ & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FP_X_OFL & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FP_X_UFL & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FP_X_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - (void) fpsetsticky(0); \ - } - -#elif defined(__GLIBC__) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__MINGW32__) || (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) - -#if defined(__GLIBC__) || defined(__APPLE__) || defined(__MINGW32__) || defined(__FreeBSD__) -#include -#elif defined(__CYGWIN__) -#include "fenv/fenv.c" -#endif - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus = (int) fetestexcept(FE_DIVBYZERO | FE_OVERFLOW | \ - FE_UNDERFLOW | FE_INVALID); \ - ret = ((FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FE_INVALID & fpstatus) ? 
UFUNC_FPE_INVALID : 0); \ - (void) feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | \ - FE_UNDERFLOW | FE_INVALID); \ -} - -#define generate_divbyzero_error() feraiseexcept(FE_DIVBYZERO) -#define generate_overflow_error() feraiseexcept(FE_OVERFLOW) - -#elif defined(_AIX) - -#include -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - fpflag_t fpstatus; \ - \ - fpstatus = fp_read_flag(); \ - ret = ((FP_DIV_BY_ZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FP_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FP_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FP_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - fp_swap_flag(0); \ -} - -#define generate_divbyzero_error() fp_raise_xcp(FP_DIV_BY_ZERO) -#define generate_overflow_error() fp_raise_xcp(FP_OVERFLOW) - -#else - -#define NO_FLOATING_POINT_SUPPORT -#define UFUNC_CHECK_STATUS(ret) { \ - ret = 0; \ - } - -#endif - -/* These should really be altered to just set the corresponding bit - in the floating point status flag. Need to figure out how to do that - on all the platforms... -*/ - -#if !defined(generate_divbyzero_error) -static int numeric_zero2 = 0; -static void generate_divbyzero_error(void) { - double dummy; - dummy = 1./numeric_zero2; - if (dummy) /* to prevent optimizer from eliminating expression */ - return; - else /* should never be called */ - numeric_zero2 += 1; - return; -} -#endif - -#if !defined(generate_overflow_error) -static double numeric_two = 2.0; -static void generate_overflow_error(void) { - double dummy; - dummy = pow(numeric_two,1000); - if (dummy) - return; - else - numeric_two += 0.1; - return; - return; -} -#endif - - /* Make sure it gets defined if it isn't already */ -#ifndef UFUNC_NOFPE -#define UFUNC_NOFPE -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_UFUNCOBJECT_H */ diff --git a/numpy/core/info.py b/numpy/core/info.py deleted file mode 100644 index 561e171b0..000000000 --- a/numpy/core/info.py +++ /dev/null @@ -1,86 +0,0 @@ -__doc__ = """Defines a multi-dimensional array and useful procedures for Numerical computation. - -Functions - -- array - NumPy Array construction -- zeros - Return an array of all zeros -- empty - Return an unitialized array -- shape - Return shape of sequence or array -- rank - Return number of dimensions -- size - Return number of elements in entire array or a - certain dimension -- fromstring - Construct array from (byte) string -- take - Select sub-arrays using sequence of indices -- put - Set sub-arrays using sequence of 1-D indices -- putmask - Set portion of arrays using a mask -- reshape - Return array with new shape -- repeat - Repeat elements of array -- choose - Construct new array from indexed array tuple -- correlate - Correlate two 1-d arrays -- searchsorted - Search for element in 1-d array -- sum - Total sum over a specified dimension -- average - Average, possibly weighted, over axis or array. 
-- cumsum - Cumulative sum over a specified dimension -- product - Total product over a specified dimension -- cumproduct - Cumulative product over a specified dimension -- alltrue - Logical and over an entire axis -- sometrue - Logical or over an entire axis -- allclose - Tests if sequences are essentially equal - -More Functions: - -- arange - Return regularly spaced array -- asarray - Guarantee NumPy array -- convolve - Convolve two 1-d arrays -- swapaxes - Exchange axes -- concatenate - Join arrays together -- transpose - Permute axes -- sort - Sort elements of array -- argsort - Indices of sorted array -- argmax - Index of largest value -- argmin - Index of smallest value -- inner - Innerproduct of two arrays -- dot - Dot product (matrix multiplication) -- outer - Outerproduct of two arrays -- resize - Return array with arbitrary new shape -- indices - Tuple of indices -- fromfunction - Construct array from universal function -- diagonal - Return diagonal array -- trace - Trace of array -- dump - Dump array to file object (pickle) -- dumps - Return pickled string representing data -- load - Return array stored in file object -- loads - Return array from pickled string -- ravel - Return array as 1-D -- nonzero - Indices of nonzero elements for 1-D array -- shape - Shape of array -- where - Construct array from binary result -- compress - Elements of array where condition is true -- clip - Clip array between two values -- ones - Array of all ones -- identity - 2-D identity array (matrix) - -(Universal) Math Functions - - add logical_or exp - subtract logical_xor log - multiply logical_not log10 - divide maximum sin - divide_safe minimum sinh - conjugate bitwise_and sqrt - power bitwise_or tan - absolute bitwise_xor tanh - negative invert ceil - greater left_shift fabs - greater_equal right_shift floor - less arccos arctan2 - less_equal arcsin fmod - equal arctan hypot - not_equal cos around - logical_and cosh sign - arccosh arcsinh arctanh - -""" - -depends = ['testing'] -global_symbols = ['*'] diff --git a/numpy/core/ma.py b/numpy/core/ma.py deleted file mode 100644 index 79941b7f8..000000000 --- a/numpy/core/ma.py +++ /dev/null @@ -1,2255 +0,0 @@ -"""MA: a facility for dealing with missing observations -MA is generally used as a numpy.array look-alike. -by Paul F. Dubois. - -Copyright 1999, 2000, 2001 Regents of the University of California. -Released for unlimited redistribution. -Adapted for numpy_core 2005 by Travis Oliphant and -(mainly) Paul Dubois. -""" -import types, sys - -import umath -import fromnumeric -from numeric import newaxis, ndarray, inf -from fromnumeric import amax, amin -from numerictypes import bool_, typecodes -import numeric -import warnings - -# Ufunc domain lookup for __array_wrap__ -ufunc_domain = {} -# Ufunc fills lookup for __array__ -ufunc_fills = {} - -MaskType = bool_ -nomask = MaskType(0) -divide_tolerance = 1.e-35 - -class MAError (Exception): - def __init__ (self, args=None): - "Create an exception" - - # The .args attribute must be a tuple. - if not isinstance(args, tuple): - args = (args,) - self.args = args - def __str__(self): - "Calculate the string representation" - return str(self.args[0]) - __repr__ = __str__ - -class _MaskedPrintOption: - "One instance of this class, masked_print_option, is created." - def __init__ (self, display): - "Create the masked print option object." - self.set_display(display) - self._enabled = 1 - - def display (self): - "Show what prints for masked values." 
- return self._display - - def set_display (self, s): - "set_display(s) sets what prints for masked values." - self._display = s - - def enabled (self): - "Is the use of the display value enabled?" - return self._enabled - - def enable(self, flag=1): - "Set the enabling flag to flag." - self._enabled = flag - - def __str__ (self): - return str(self._display) - - __repr__ = __str__ - -#if you single index into a masked location you get this object. -masked_print_option = _MaskedPrintOption('--') - -# Use single element arrays or scalars. -default_real_fill_value = 1.e20 -default_complex_fill_value = 1.e20 + 0.0j -default_character_fill_value = '-' -default_integer_fill_value = 999999 -default_object_fill_value = '?' - -def default_fill_value (obj): - "Function to calculate default fill value for an object." - if isinstance(obj, types.FloatType): - return default_real_fill_value - elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType): - return default_integer_fill_value - elif isinstance(obj, types.StringType): - return default_character_fill_value - elif isinstance(obj, types.ComplexType): - return default_complex_fill_value - elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray): - x = obj.dtype.char - if x in typecodes['Float']: - return default_real_fill_value - if x in typecodes['Integer']: - return default_integer_fill_value - if x in typecodes['Complex']: - return default_complex_fill_value - if x in typecodes['Character']: - return default_character_fill_value - if x in typecodes['UnsignedInteger']: - return umath.absolute(default_integer_fill_value) - return default_object_fill_value - else: - return default_object_fill_value - -def minimum_fill_value (obj): - "Function to calculate default fill value suitable for taking minima." - if isinstance(obj, types.FloatType): - return numeric.inf - elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType): - return sys.maxint - elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray): - x = obj.dtype.char - if x in typecodes['Float']: - return numeric.inf - if x in typecodes['Integer']: - return sys.maxint - if x in typecodes['UnsignedInteger']: - return sys.maxint - else: - raise TypeError, 'Unsuitable type for calculating minimum.' - -def maximum_fill_value (obj): - "Function to calculate default fill value suitable for taking maxima." - if isinstance(obj, types.FloatType): - return -inf - elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType): - return -sys.maxint - elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray): - x = obj.dtype.char - if x in typecodes['Float']: - return -inf - if x in typecodes['Integer']: - return -sys.maxint - if x in typecodes['UnsignedInteger']: - return 0 - else: - raise TypeError, 'Unsuitable type for calculating maximum.' - -def set_fill_value (a, fill_value): - "Set fill value of a if it is a masked array." - if isMaskedArray(a): - a.set_fill_value (fill_value) - -def getmask (a): - """Mask of values in a; could be nomask. - Returns nomask if a is not a masked array. - To get an array for sure use getmaskarray.""" - if isinstance(a, MaskedArray): - return a.raw_mask() - else: - return nomask - -def getmaskarray (a): - """Mask of values in a; an array of zeros if mask is nomask - or not a masked array, and is a byte-sized integer. - Do not try to add up entries, for example. - """ - m = getmask(a) - if m is nomask: - return make_mask_none(shape(a)) - else: - return m - -def is_mask (m): - """Is m a legal mask? 
Does not check contents, only type. - """ - try: - return m.dtype.type is MaskType - except AttributeError: - return False - -def make_mask (m, copy=0, flag=0): - """make_mask(m, copy=0, flag=0) - return m as a mask, creating a copy if necessary or requested. - Can accept any sequence of integers or nomask. Does not check - that contents must be 0s and 1s. - if flag, return nomask if m contains no true elements. - """ - if m is nomask: - return nomask - elif isinstance(m, ndarray): - if m.dtype.type is MaskType: - if copy: - result = numeric.array(m, dtype=MaskType, copy=copy) - else: - result = m - else: - result = m.astype(MaskType) - else: - result = filled(m, True).astype(MaskType) - - if flag and not fromnumeric.sometrue(fromnumeric.ravel(result)): - return nomask - else: - return result - -def make_mask_none (s): - "Return a mask of all zeros of shape s." - result = numeric.zeros(s, dtype=MaskType) - result.shape = s - return result - -def mask_or (m1, m2): - """Logical or of the mask candidates m1 and m2, treating nomask as false. - Result may equal m1 or m2 if the other is nomask. - """ - if m1 is nomask: return make_mask(m2) - if m2 is nomask: return make_mask(m1) - if m1 is m2 and is_mask(m1): return m1 - return make_mask(umath.logical_or(m1, m2)) - -def filled (a, value = None): - """a as a contiguous numeric array with any masked areas replaced by value - if value is None or the special element "masked", get_fill_value(a) - is used instead. - - If a is already a contiguous numeric array, a itself is returned. - - filled(a) can be used to be sure that the result is numeric when - passing an object a to other software ignorant of MA, in particular to - numeric itself. - """ - if isinstance(a, MaskedArray): - return a.filled(value) - elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']: - return a - elif isinstance(a, types.DictType): - return numeric.array(a, 'O') - else: - return numeric.array(a) - -def get_fill_value (a): - """ - The fill value of a, if it has one; otherwise, the default fill value - for that type. - """ - if isMaskedArray(a): - result = a.fill_value() - else: - result = default_fill_value(a) - return result - -def common_fill_value (a, b): - "The common fill_value of a and b, if there is one, or None" - t1 = get_fill_value(a) - t2 = get_fill_value(b) - if t1 == t2: return t1 - return None - -# Domain functions return 1 where the argument(s) are not in the domain. -class domain_check_interval: - "domain_check_interval(a,b)(x) = true where x < a or y > b" - def __init__(self, y1, y2): - "domain_check_interval(a,b)(x) = true where x < a or y > b" - self.y1 = y1 - self.y2 = y2 - - def __call__ (self, x): - "Execute the call behavior." - return umath.logical_or(umath.greater (x, self.y2), - umath.less(x, self.y1) - ) - -class domain_tan: - "domain_tan(eps) = true where abs(cos(x)) < eps)" - def __init__(self, eps): - "domain_tan(eps) = true where abs(cos(x)) < eps)" - self.eps = eps - - def __call__ (self, x): - "Execute the call behavior." - return umath.less(umath.absolute(umath.cos(x)), self.eps) - -class domain_greater: - "domain_greater(v)(x) = true where x <= v" - def __init__(self, critical_value): - "domain_greater(v)(x) = true where x <= v" - self.critical_value = critical_value - - def __call__ (self, x): - "Execute the call behavior." 
- return umath.less_equal (x, self.critical_value) - -class domain_greater_equal: - "domain_greater_equal(v)(x) = true where x < v" - def __init__(self, critical_value): - "domain_greater_equal(v)(x) = true where x < v" - self.critical_value = critical_value - - def __call__ (self, x): - "Execute the call behavior." - return umath.less (x, self.critical_value) - -class masked_unary_operation: - def __init__ (self, aufunc, fill=0, domain=None): - """ masked_unary_operation(aufunc, fill=0, domain=None) - aufunc(fill) must be defined - self(x) returns aufunc(x) - with masked values where domain(x) is true or getmask(x) is true. - """ - self.f = aufunc - self.fill = fill - self.domain = domain - self.__doc__ = getattr(aufunc, "__doc__", str(aufunc)) - self.__name__ = getattr(aufunc, "__name__", str(aufunc)) - ufunc_domain[aufunc] = domain - ufunc_fills[aufunc] = fill, - - def __call__ (self, a, *args, **kwargs): - "Execute the call behavior." -# numeric tries to return scalars rather than arrays when given scalars. - m = getmask(a) - d1 = filled(a, self.fill) - if self.domain is not None: - m = mask_or(m, self.domain(d1)) - result = self.f(d1, *args, **kwargs) - return masked_array(result, m) - - def __str__ (self): - return "Masked version of " + str(self.f) - - -class domain_safe_divide: - def __init__ (self, tolerance=divide_tolerance): - self.tolerance = tolerance - def __call__ (self, a, b): - return umath.absolute(a) * self.tolerance >= umath.absolute(b) - -class domained_binary_operation: - """Binary operations that have a domain, like divide. These are complicated - so they are a separate class. They have no reduce, outer or accumulate. - """ - def __init__ (self, abfunc, domain, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = abfunc - self.domain = domain - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(abfunc, "__doc__", str(abfunc)) - self.__name__ = getattr(abfunc, "__name__", str(abfunc)) - ufunc_domain[abfunc] = domain - ufunc_fills[abfunc] = fillx, filly - - def __call__(self, a, b): - "Execute the call behavior." - ma = getmask(a) - mb = getmask(b) - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) - t = self.domain(d1, d2) - - if fromnumeric.sometrue(t, None): - d2 = where(t, self.filly, d2) - mb = mask_or(mb, t) - m = mask_or(ma, mb) - result = self.f(d1, d2) - return masked_array(result, m) - - def __str__ (self): - return "Masked version of " + str(self.f) - -class masked_binary_operation: - def __init__ (self, abfunc, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = abfunc - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(abfunc, "__doc__", str(abfunc)) - ufunc_domain[abfunc] = None - ufunc_fills[abfunc] = fillx, filly - - def __call__ (self, a, b, *args, **kwargs): - "Execute the call behavior." 
- m = mask_or(getmask(a), getmask(b)) - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) - result = self.f(d1, d2, *args, **kwargs) - if isinstance(result, ndarray) \ - and m.ndim != 0 \ - and m.shape != result.shape: - m = mask_or(getmaskarray(a), getmaskarray(b)) - return masked_array(result, m) - - def reduce (self, target, axis=0, dtype=None): - """Reduce target along the given axis with this function.""" - m = getmask(target) - t = filled(target, self.filly) - if t.shape == (): - t = t.reshape(1) - if m is not nomask: - m = make_mask(m, copy=1) - m.shape = (1,) - if m is nomask: - t = self.f.reduce(t, axis) - else: - t = masked_array (t, m) - # XXX: "or t.dtype" below is a workaround for what appears - # XXX: to be a bug in reduce. - t = self.f.reduce(filled(t, self.filly), axis, - dtype=dtype or t.dtype) - m = umath.logical_and.reduce(m, axis) - if isinstance(t, ndarray): - return masked_array(t, m, get_fill_value(target)) - elif m: - return masked - else: - return t - - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)) - return masked_array(d, m) - - def accumulate (self, target, axis=0): - """Accumulate target along axis after filling with y fill value.""" - t = filled(target, self.filly) - return masked_array (self.f.accumulate (t, axis)) - def __str__ (self): - return "Masked version of " + str(self.f) - -sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0)) -log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0)) -log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0)) -exp = masked_unary_operation(umath.exp) -conjugate = masked_unary_operation(umath.conjugate) -sin = masked_unary_operation(umath.sin) -cos = masked_unary_operation(umath.cos) -tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35)) -arcsin = masked_unary_operation(umath.arcsin, 0.0, domain_check_interval(-1.0, 1.0)) -arccos = masked_unary_operation(umath.arccos, 0.0, domain_check_interval(-1.0, 1.0)) -arctan = masked_unary_operation(umath.arctan) -# Missing from numeric -arcsinh = masked_unary_operation(umath.arcsinh) -arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0)) -arctanh = masked_unary_operation(umath.arctanh, 0.0, domain_check_interval(-1.0+1e-15, 1.0-1e-15)) -sinh = masked_unary_operation(umath.sinh) -cosh = masked_unary_operation(umath.cosh) -tanh = masked_unary_operation(umath.tanh) -absolute = masked_unary_operation(umath.absolute) -fabs = masked_unary_operation(umath.fabs) -negative = masked_unary_operation(umath.negative) - -def nonzero(a): - """returns the indices of the elements of a which are not zero - and not masked - """ - return numeric.asarray(filled(a, 0).nonzero()) - -around = masked_unary_operation(fromnumeric.round_) -floor = masked_unary_operation(umath.floor) -ceil = masked_unary_operation(umath.ceil) -logical_not = masked_unary_operation(umath.logical_not) - -add = masked_binary_operation(umath.add) -subtract = masked_binary_operation(umath.subtract) -subtract.reduce = None -multiply = masked_binary_operation(umath.multiply, 1, 1) -divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1) -true_divide = domained_binary_operation(umath.true_divide, domain_safe_divide(), 0, 1) -floor_divide = 
domained_binary_operation(umath.floor_divide, domain_safe_divide(), 0, 1) -remainder = domained_binary_operation(umath.remainder, domain_safe_divide(), 0, 1) -fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1) -hypot = masked_binary_operation(umath.hypot) -arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0) -arctan2.reduce = None -equal = masked_binary_operation(umath.equal) -equal.reduce = None -not_equal = masked_binary_operation(umath.not_equal) -not_equal.reduce = None -less_equal = masked_binary_operation(umath.less_equal) -less_equal.reduce = None -greater_equal = masked_binary_operation(umath.greater_equal) -greater_equal.reduce = None -less = masked_binary_operation(umath.less) -less.reduce = None -greater = masked_binary_operation(umath.greater) -greater.reduce = None -logical_and = masked_binary_operation(umath.logical_and) -alltrue = masked_binary_operation(umath.logical_and, 1, 1).reduce -logical_or = masked_binary_operation(umath.logical_or) -sometrue = logical_or.reduce -logical_xor = masked_binary_operation(umath.logical_xor) -bitwise_and = masked_binary_operation(umath.bitwise_and) -bitwise_or = masked_binary_operation(umath.bitwise_or) -bitwise_xor = masked_binary_operation(umath.bitwise_xor) - -def rank (object): - return fromnumeric.rank(filled(object)) - -def shape (object): - return fromnumeric.shape(filled(object)) - -def size (object, axis=None): - return fromnumeric.size(filled(object), axis) - -class MaskedArray (object): - """Arrays with possibly masked values. - Masked values of 1 exclude the corresponding element from - any computation. - - Construction: - x = array(data, dtype=None, copy=True, order=False, - mask = nomask, fill_value=None) - - If copy=False, every effort is made not to copy the data: - If data is a MaskedArray, and argument mask=nomask, - then the candidate data is data.data and the - mask used is data.mask. If data is a numeric array, - it is used as the candidate raw data. - If dtype is not None and - is != data.dtype.char then a data copy is required. - Otherwise, the candidate is used. - - If a data copy is required, raw data stored is the result of: - numeric.array(data, dtype=dtype.char, copy=copy) - - If mask is nomask there are no masked values. Otherwise mask must - be convertible to an array of booleans with the same shape as x. - - fill_value is used to fill in masked values when necessary, - such as when printing and in method/function filled(). - The fill_value is not used for computation within this module. - """ - __array_priority__ = 10.1 - def __init__(self, data, dtype=None, copy=True, order=False, - mask=nomask, fill_value=None): - """array(data, dtype=None, copy=True, order=False, mask=nomask, fill_value=None) - If data already a numeric array, its dtype becomes the default value of dtype. 
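A short usage sketch of the construction and filling behaviour this docstring describes, under the assumption that the module is imported as numpy.core.ma; the variable names and the fill value are illustrative.

import numpy.core.ma as ma

# Mask the second element; fill_value is used only when filling/printing.
x = ma.array([1.0, 2.0, 3.0], mask=[0, 1, 0], fill_value=-999.0)
print x.filled()        # masked slot replaced by the fill value
print ma.getmask(x)     # boolean mask array

# Domained functions mask values outside their domain as well: sqrt is
# wrapped with domain_greater_equal(0.0), so the -1.0 entry comes back
# masked rather than raising.
y = ma.sqrt(ma.array([4.0, -1.0]))
print ma.getmaskarray(y)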
- """ - if dtype is None: - tc = None - else: - tc = numeric.dtype(dtype) - need_data_copied = copy - if isinstance(data, MaskedArray): - c = data.data - if tc is None: - tc = c.dtype - elif tc != c.dtype: - need_data_copied = True - if mask is nomask: - mask = data.mask - elif mask is not nomask: #attempting to change the mask - need_data_copied = True - - elif isinstance(data, ndarray): - c = data - if tc is None: - tc = c.dtype - elif tc != c.dtype: - need_data_copied = True - else: - need_data_copied = False #because I'll do it now - c = numeric.array(data, dtype=tc, copy=True, order=order) - tc = c.dtype - - if need_data_copied: - if tc == c.dtype: - self._data = numeric.array(c, dtype=tc, copy=True, order=order) - else: - self._data = c.astype(tc) - else: - self._data = c - - if mask is nomask: - self._mask = nomask - self._shared_mask = 0 - else: - self._mask = make_mask (mask) - if self._mask is nomask: - self._shared_mask = 0 - else: - self._shared_mask = (self._mask is mask) - nm = size(self._mask) - nd = size(self._data) - if nm != nd: - if nm == 1: - self._mask = fromnumeric.resize(self._mask, self._data.shape) - self._shared_mask = 0 - elif nd == 1: - self._data = fromnumeric.resize(self._data, self._mask.shape) - self._data.shape = self._mask.shape - else: - raise MAError, "Mask and data not compatible." - elif nm == 1 and shape(self._mask) != shape(self._data): - self.unshare_mask() - self._mask.shape = self._data.shape - - self.set_fill_value(fill_value) - - def __array__ (self, t=None, context=None): - "Special hook for numeric. Converts to numeric if possible." - if self._mask is not nomask: - if fromnumeric.ravel(self._mask).any(): - if context is None: - warnings.warn("Cannot automatically convert masked array to "\ - "numeric because data\n is masked in one or "\ - "more locations."); - return self._data - #raise MAError, \ - # """Cannot automatically convert masked array to numeric because data - # is masked in one or more locations. - # """ - else: - func, args, i = context - fills = ufunc_fills.get(func) - if fills is None: - raise MAError, "%s not known to ma" % func - return self.filled(fills[i]) - else: # Mask is all false - # Optimize to avoid future invocations of this section. - self._mask = nomask - self._shared_mask = 0 - if t: - return self._data.astype(t) - else: - return self._data - - def __array_wrap__ (self, array, context=None): - """Special hook for ufuncs. - - Wraps the numpy array and sets the mask according to - context. - """ - if context is None: - return MaskedArray(array, copy=False, mask=nomask) - func, args = context[:2] - domain = ufunc_domain[func] - m = reduce(mask_or, [getmask(a) for a in args]) - if domain is not None: - m = mask_or(m, domain(*[getattr(a, '_data', a) - for a in args])) - if m is not nomask: - try: - shape = array.shape - except AttributeError: - pass - else: - if m.shape != shape: - m = reduce(mask_or, [getmaskarray(a) for a in args]) - - return MaskedArray(array, copy=False, mask=m) - - def _get_shape(self): - "Return the current shape." - return self._data.shape - - def _set_shape (self, newshape): - "Set the array's shape." - self._data.shape = newshape - if self._mask is not nomask: - self._mask = self._mask.copy() - self._mask.shape = newshape - - def _get_flat(self): - """Calculate the flat value. 
- """ - if self._mask is nomask: - return masked_array(self._data.ravel(), mask=nomask, - fill_value = self.fill_value()) - else: - return masked_array(self._data.ravel(), - mask=self._mask.ravel(), - fill_value = self.fill_value()) - - def _set_flat (self, value): - "x.flat = value" - y = self.ravel() - y[:] = value - - def _get_real(self): - "Get the real part of a complex array." - if self._mask is nomask: - return masked_array(self._data.real, mask=nomask, - fill_value = self.fill_value()) - else: - return masked_array(self._data.real, mask=self._mask, - fill_value = self.fill_value()) - - def _set_real (self, value): - "x.real = value" - y = self.real - y[...] = value - - def _get_imaginary(self): - "Get the imaginary part of a complex array." - if self._mask is nomask: - return masked_array(self._data.imag, mask=nomask, - fill_value = self.fill_value()) - else: - return masked_array(self._data.imag, mask=self._mask, - fill_value = self.fill_value()) - - def _set_imaginary (self, value): - "x.imaginary = value" - y = self.imaginary - y[...] = value - - def __str__(self): - """Calculate the str representation, using masked for fill if - it is enabled. Otherwise fill with fill value. - """ - if masked_print_option.enabled(): - f = masked_print_option - # XXX: Without the following special case masked - # XXX: would print as "[--]", not "--". Can we avoid - # XXX: checks for masked by choosing a different value - # XXX: for the masked singleton? 2005-01-05 -- sasha - if self is masked: - return str(f) - m = self._mask - if m is not nomask and m.shape == () and m: - return str(f) - # convert to object array to make filled work - self = self.astype(object) - else: - f = self.fill_value() - res = self.filled(f) - return str(res) - - def __repr__(self): - """Calculate the repr representation, using masked for fill if - it is enabled. Otherwise fill with fill value. - """ - with_mask = """\ -array(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s) -""" - with_mask1 = """\ -array(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s) -""" - without_mask = """array( - %(data)s)""" - without_mask1 = """array(%(data)s)""" - - n = len(self.shape) - if self._mask is nomask: - if n <= 1: - return without_mask1 % {'data':str(self.filled())} - return without_mask % {'data':str(self.filled())} - else: - if n <= 1: - return with_mask % { - 'data': str(self.filled()), - 'mask': str(self._mask), - 'fill': str(self.fill_value()) - } - return with_mask % { - 'data': str(self.filled()), - 'mask': str(self._mask), - 'fill': str(self.fill_value()) - } - without_mask1 = """array(%(data)s)""" - if self._mask is nomask: - return without_mask % {'data':str(self.filled())} - else: - return with_mask % { - 'data': str(self.filled()), - 'mask': str(self._mask), - 'fill': str(self.fill_value()) - } - - def __float__(self): - "Convert self to float." - self.unmask() - if self._mask is not nomask: - raise MAError, 'Cannot convert masked element to a Python float.' - return float(self.data.item()) - - def __int__(self): - "Convert self to int." - self.unmask() - if self._mask is not nomask: - raise MAError, 'Cannot convert masked element to a Python int.' - return int(self.data.item()) - - def __getitem__(self, i): - "Get item described by i. Not a copy as in previous versions." 
- self.unshare_mask() - m = self._mask - dout = self._data[i] - if m is nomask: - try: - if dout.size == 1: - return dout - else: - return masked_array(dout, fill_value=self._fill_value) - except AttributeError: - return dout - mi = m[i] - if mi.size == 1: - if mi: - return masked - else: - return dout - else: - return masked_array(dout, mi, fill_value=self._fill_value) - -# -------- -# setitem and setslice notes -# note that if value is masked, it means to mask those locations. -# setting a value changes the mask to match the value in those locations. - - def __setitem__(self, index, value): - "Set item described by index. If value is masked, mask those locations." - d = self._data - if self is masked: - raise MAError, 'Cannot alter masked elements.' - if value is masked: - if self._mask is nomask: - self._mask = make_mask_none(d.shape) - self._shared_mask = False - else: - self.unshare_mask() - self._mask[index] = True - return - m = getmask(value) - value = filled(value).astype(d.dtype) - d[index] = value - if m is nomask: - if self._mask is not nomask: - self.unshare_mask() - self._mask[index] = False - else: - if self._mask is nomask: - self._mask = make_mask_none(d.shape) - self._shared_mask = True - else: - self.unshare_mask() - self._mask[index] = m - - def __nonzero__(self): - """returns true if any element is non-zero or masked - - """ - # XXX: This changes bool conversion logic from MA. - # XXX: In MA bool(a) == len(a) != 0, but in numpy - # XXX: scalars do not have len - m = self._mask - d = self._data - return bool(m is not nomask and m.any() - or d is not nomask and d.any()) - - def __len__ (self): - """Return length of first dimension. This is weird but Python's - slicing behavior depends on it.""" - return len(self._data) - - def __and__(self, other): - "Return bitwise_and" - return bitwise_and(self, other) - - def __or__(self, other): - "Return bitwise_or" - return bitwise_or(self, other) - - def __xor__(self, other): - "Return bitwise_xor" - return bitwise_xor(self, other) - - __rand__ = __and__ - __ror__ = __or__ - __rxor__ = __xor__ - - def __abs__(self): - "Return absolute(self)" - return absolute(self) - - def __neg__(self): - "Return negative(self)" - return negative(self) - - def __pos__(self): - "Return array(self)" - return array(self) - - def __add__(self, other): - "Return add(self, other)" - return add(self, other) - - __radd__ = __add__ - - def __mod__ (self, other): - "Return remainder(self, other)" - return remainder(self, other) - - def __rmod__ (self, other): - "Return remainder(other, self)" - return remainder(other, self) - - def __lshift__ (self, n): - return left_shift(self, n) - - def __rshift__ (self, n): - return right_shift(self, n) - - def __sub__(self, other): - "Return subtract(self, other)" - return subtract(self, other) - - def __rsub__(self, other): - "Return subtract(other, self)" - return subtract(other, self) - - def __mul__(self, other): - "Return multiply(self, other)" - return multiply(self, other) - - __rmul__ = __mul__ - - def __div__(self, other): - "Return divide(self, other)" - return divide(self, other) - - def __rdiv__(self, other): - "Return divide(other, self)" - return divide(other, self) - - def __truediv__(self, other): - "Return divide(self, other)" - return true_divide(self, other) - - def __rtruediv__(self, other): - "Return divide(other, self)" - return true_divide(other, self) - - def __floordiv__(self, other): - "Return divide(self, other)" - return floor_divide(self, other) - - def __rfloordiv__(self, other): - 
"Return divide(other, self)" - return floor_divide(other, self) - - def __pow__(self, other, third=None): - "Return power(self, other, third)" - return power(self, other, third) - - def __sqrt__(self): - "Return sqrt(self)" - return sqrt(self) - - def __iadd__(self, other): - "Add other to self in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' - - if self._mask is nomask: - self._data += f - m = getmask(other) - self._mask = m - self._shared_mask = m is not nomask - else: - result = add(self, masked_array(f, mask=getmask(other))) - self._data = result.data - self._mask = result.mask - self._shared_mask = 1 - return self - - def __imul__(self, other): - "Add other to self in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' - - if self._mask is nomask: - self._data *= f - m = getmask(other) - self._mask = m - self._shared_mask = m is not nomask - else: - result = multiply(self, masked_array(f, mask=getmask(other))) - self._data = result.data - self._mask = result.mask - self._shared_mask = 1 - return self - - def __isub__(self, other): - "Subtract other from self in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' 
- - if self._mask is nomask: - self._data -= f - m = getmask(other) - self._mask = m - self._shared_mask = m is not nomask - else: - result = subtract(self, masked_array(f, mask=getmask(other))) - self._data = result.data - self._mask = result.mask - self._shared_mask = 1 - return self - - - - def __idiv__(self, other): - "Divide self by other in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' - mo = getmask(other) - result = divide(self, masked_array(f, mask=mo)) - self._data = result.data - dm = result.raw_mask() - if dm is not self._mask: - self._mask = dm - self._shared_mask = 1 - return self - - def __eq__(self, other): - return equal(self,other) - - def __ne__(self, other): - return not_equal(self,other) - - def __lt__(self, other): - return less(self,other) - - def __le__(self, other): - return less_equal(self,other) - - def __gt__(self, other): - return greater(self,other) - - def __ge__(self, other): - return greater_equal(self,other) - - def astype (self, tc): - "return self as array of given type." - d = self._data.astype(tc) - return array(d, mask=self._mask) - - def byte_swapped(self): - """Returns the raw data field, byte_swapped. Included for consistency - with numeric but doesn't make sense in this context. - """ - return self._data.byte_swapped() - - def compressed (self): - "A 1-D array of all the non-masked data." - d = fromnumeric.ravel(self._data) - if self._mask is nomask: - return array(d) - else: - m = 1 - fromnumeric.ravel(self._mask) - c = fromnumeric.compress(m, d) - return array(c, copy=0) - - def count (self, axis = None): - "Count of the non-masked elements in a, or along a certain axis." - m = self._mask - s = self._data.shape - ls = len(s) - if m is nomask: - if ls == 0: - return 1 - if ls == 1: - return s[0] - if axis is None: - return reduce(lambda x, y:x*y, s) - else: - n = s[axis] - t = list(s) - del t[axis] - return ones(t) * n - if axis is None: - w = fromnumeric.ravel(m).astype(int) - n1 = size(w) - if n1 == 1: - n2 = w[0] - else: - n2 = umath.add.reduce(w) - return n1 - n2 - else: - n1 = size(m, axis) - n2 = sum(m.astype(int), axis) - return n1 - n2 - - def dot (self, other): - "s.dot(other) = innerproduct(s, other)" - return innerproduct(self, other) - - def fill_value(self): - "Get the current fill value." - return self._fill_value - - def filled (self, fill_value=None): - """A numeric array with masked values filled. If fill_value is None, - use self.fill_value(). - - If mask is nomask, copy data only if not contiguous. - Result is always a contiguous, numeric array. -# Is contiguous really necessary now? 
- """ - d = self._data - m = self._mask - if m is nomask: - if d.flags['CONTIGUOUS']: - return d - else: - return d.copy() - else: - if fill_value is None: - value = self._fill_value - else: - value = fill_value - - if self is masked: - result = numeric.array(value) - else: - try: - result = numeric.array(d, dtype=d.dtype, copy=1) - result[m] = value - except (TypeError, AttributeError): - #ok, can't put that value in here - value = numeric.array(value, dtype=object) - d = d.astype(object) - result = fromnumeric.choose(m, (d, value)) - return result - - def ids (self): - """Return the ids of the data and mask areas""" - return (id(self._data), id(self._mask)) - - def iscontiguous (self): - "Is the data contiguous?" - return self._data.flags['CONTIGUOUS'] - - def itemsize(self): - "Item size of each data item." - return self._data.itemsize - - - def outer(self, other): - "s.outer(other) = outerproduct(s, other)" - return outerproduct(self, other) - - def put (self, values): - """Set the non-masked entries of self to filled(values). - No change to mask - """ - iota = numeric.arange(self.size) - d = self._data - if self._mask is nomask: - ind = iota - else: - ind = fromnumeric.compress(1 - self._mask, iota) - d[ind] = filled(values).astype(d.dtype) - - def putmask (self, values): - """Set the masked entries of self to filled(values). - Mask changed to nomask. - """ - d = self._data - if self._mask is not nomask: - d[self._mask] = filled(values).astype(d.dtype) - self._shared_mask = 0 - self._mask = nomask - - def ravel (self): - """Return a 1-D view of self.""" - if self._mask is nomask: - return masked_array(self._data.ravel()) - else: - return masked_array(self._data.ravel(), self._mask.ravel()) - - def raw_data (self): - """ Obsolete; use data property instead. - The raw data; portions may be meaningless. - May be noncontiguous. Expert use only.""" - return self._data - data = property(fget=raw_data, - doc="The data, but values at masked locations are meaningless.") - - def raw_mask (self): - """ Obsolete; use mask property instead. - May be noncontiguous. Expert use only. - """ - return self._mask - mask = property(fget=raw_mask, - doc="The mask, may be nomask. Values where mask true are meaningless.") - - def reshape (self, *s): - """This array reshaped to shape s""" - d = self._data.reshape(*s) - if self._mask is nomask: - return masked_array(d) - else: - m = self._mask.reshape(*s) - return masked_array(d, m) - - def set_fill_value (self, v=None): - "Set the fill value to v. Omit v to restore default." - if v is None: - v = default_fill_value (self.raw_data()) - self._fill_value = v - - def _get_ndim(self): - return self._data.ndim - ndim = property(_get_ndim, doc=numeric.ndarray.ndim.__doc__) - - def _get_size (self): - return self._data.size - size = property(fget=_get_size, doc="Number of elements in the array.") -## CHECK THIS: signature of numeric.array.size? 
- - def _get_dtype(self): - return self._data.dtype - dtype = property(fget=_get_dtype, doc="type of the array elements.") - - def item(self, *args): - "Return Python scalar if possible" - if self._mask is not nomask: - m = self._mask.item(*args) - try: - if m[0]: - return masked - except IndexError: - return masked - return self._data.item(*args) - - def itemset(self, *args): - "Set Python scalar into array" - item = args[-1] - args = args[:-1] - self[args] = item - - def tolist(self, fill_value=None): - "Convert to list" - return self.filled(fill_value).tolist() - - def tostring(self, fill_value=None): - "Convert to string" - return self.filled(fill_value).tostring() - - def unmask (self): - "Replace the mask by nomask if possible." - if self._mask is nomask: return - m = make_mask(self._mask, flag=1) - if m is nomask: - self._mask = nomask - self._shared_mask = 0 - - def unshare_mask (self): - "If currently sharing mask, make a copy." - if self._shared_mask: - self._mask = make_mask (self._mask, copy=1, flag=0) - self._shared_mask = 0 - - def _get_ctypes(self): - return self._data.ctypes - - def _get_T(self): - if (self.ndim < 2): - return self - return self.transpose() - - shape = property(_get_shape, _set_shape, - doc = 'tuple giving the shape of the array') - - flat = property(_get_flat, _set_flat, - doc = 'Access array in flat form.') - - real = property(_get_real, _set_real, - doc = 'Access the real part of the array') - - imaginary = property(_get_imaginary, _set_imaginary, - doc = 'Access the imaginary part of the array') - - imag = imaginary - - ctypes = property(_get_ctypes, None, doc="ctypes") - - T = property(_get_T, None, doc="get transpose") - -#end class MaskedArray - -array = MaskedArray - -def isMaskedArray (x): - "Is x a masked array, that is, an instance of MaskedArray?" - return isinstance(x, MaskedArray) - -isarray = isMaskedArray -isMA = isMaskedArray #backward compatibility - -def allclose (a, b, fill_value=1, rtol=1.e-5, atol=1.e-8): - """ Returns true if all components of a and b are equal - subject to given tolerances. - If fill_value is 1, masked values considered equal. - If fill_value is 0, masked values considered unequal. - The relative error rtol should be positive and << 1.0 - The absolute error atol comes into play for those elements - of b that are very small or zero; it says how small a must be also. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) - y = filled(array(d2, copy=0, mask=m), 1).astype(float) - d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) - return fromnumeric.alltrue(fromnumeric.ravel(d)) - -def allequal (a, b, fill_value=1): - """ - True if all entries of a and b are equal, using - fill_value as a truth value where either or both are masked. - """ - m = mask_or(getmask(a), getmask(b)) - if m is nomask: - x = filled(a) - y = filled(b) - d = umath.equal(x, y) - return fromnumeric.alltrue(fromnumeric.ravel(d)) - elif fill_value: - x = filled(a) - y = filled(b) - d = umath.equal(x, y) - dm = array(d, mask=m, copy=0) - return fromnumeric.alltrue(fromnumeric.ravel(filled(dm, 1))) - else: - return 0 - -def masked_values (data, value, rtol=1.e-5, atol=1.e-8, copy=1): - """ - masked_values(data, value, rtol=1.e-5, atol=1.e-8) - Create a masked array; mask is nomask if possible. - If copy==0, and otherwise possible, result - may share data values with original array. - Let d = filled(data, value). 
Returns d - masked where abs(data-value)<= atol + rtol * abs(value) - if d is of a floating point type. Otherwise returns - masked_object(d, value, copy) - """ - abs = umath.absolute - d = filled(data, value) - if issubclass(d.dtype.type, numeric.floating): - m = umath.less_equal(abs(d-value), atol+rtol*abs(value)) - m = make_mask(m, flag=1) - return array(d, mask = m, copy=copy, - fill_value=value) - else: - return masked_object(d, value, copy=copy) - -def masked_object (data, value, copy=1): - "Create array masked where exactly data equal to value" - d = filled(data, value) - dm = make_mask(umath.equal(d, value), flag=1) - return array(d, mask=dm, copy=copy, fill_value=value) - -def arange(start, stop=None, step=1, dtype=None): - """Just like range() except it returns a array whose type can be specified - by the keyword argument dtype. - """ - return array(numeric.arange(start, stop, step, dtype)) - -arrayrange = arange - -def fromstring (s, t): - "Construct a masked array from a string. Result will have no mask." - return masked_array(numeric.fromstring(s, t)) - -def left_shift (a, n): - "Left shift n bits" - m = getmask(a) - if m is nomask: - d = umath.left_shift(filled(a), n) - return masked_array(d) - else: - d = umath.left_shift(filled(a, 0), n) - return masked_array(d, m) - -def right_shift (a, n): - "Right shift n bits" - m = getmask(a) - if m is nomask: - d = umath.right_shift(filled(a), n) - return masked_array(d) - else: - d = umath.right_shift(filled(a, 0), n) - return masked_array(d, m) - -def resize (a, new_shape): - """resize(a, new_shape) returns a new array with the specified shape. - The original array's total size can be any size.""" - m = getmask(a) - if m is not nomask: - m = fromnumeric.resize(m, new_shape) - result = array(fromnumeric.resize(filled(a), new_shape), mask=m) - result.set_fill_value(get_fill_value(a)) - return result - -def repeat(a, repeats, axis=None): - """repeat elements of a repeats times along axis - repeats is a sequence of length a.shape[axis] - telling how many times to repeat each element. - """ - af = filled(a) - if isinstance(repeats, types.IntType): - if axis is None: - num = af.size - else: - num = af.shape[axis] - repeats = tuple([repeats]*num) - - m = getmask(a) - if m is not nomask: - m = fromnumeric.repeat(m, repeats, axis) - d = fromnumeric.repeat(af, repeats, axis) - result = masked_array(d, m) - result.set_fill_value(get_fill_value(a)) - return result - -def identity(n): - """identity(n) returns the identity matrix of shape n x n. - """ - return array(numeric.identity(n)) - -def indices (dimensions, dtype=None): - """indices(dimensions,dtype=None) returns an array representing a grid - of indices with row-only, and column-only variation. - """ - return array(numeric.indices(dimensions, dtype)) - -def zeros (shape, dtype=float): - """zeros(n, dtype=float) = - an array of all zeros of the given length or shape.""" - return array(numeric.zeros(shape, dtype)) - -def ones (shape, dtype=float): - """ones(n, dtype=float) = - an array of all ones of the given length or shape.""" - return array(numeric.ones(shape, dtype)) - -def count (a, axis = None): - "Count of the non-masked elements in a, or along a certain axis." - a = masked_array(a) - return a.count(axis) - -def power (a, b, third=None): - "a**b" - if third is not None: - raise MAError, "3-argument power not supported." 
- ma = getmask(a) - mb = getmask(b) - m = mask_or(ma, mb) - fa = filled(a, 1) - fb = filled(b, 1) - if fb.dtype.char in typecodes["Integer"]: - return masked_array(umath.power(fa, fb), m) - md = make_mask(umath.less(fa, 0), flag=1) - m = mask_or(m, md) - if m is nomask: - return masked_array(umath.power(fa, fb)) - else: - fa = numeric.where(m, 1, fa) - return masked_array(umath.power(fa, fb), m) - -def masked_array (a, mask=nomask, fill_value=None): - """masked_array(a, mask=nomask) = - array(a, mask=mask, copy=0, fill_value=fill_value) - """ - return array(a, mask=mask, copy=0, fill_value=fill_value) - -def sum (target, axis=None, dtype=None): - if axis is None: - target = ravel(target) - axis = 0 - return add.reduce(target, axis, dtype) - -def product (target, axis=None, dtype=None): - if axis is None: - target = ravel(target) - axis = 0 - return multiply.reduce(target, axis, dtype) - -def average (a, axis=None, weights=None, returned = 0): - """average(a, axis=None, weights=None) - Computes average along indicated axis. - If axis is None, average over the entire array - Inputs can be integer or floating types; result is of type float. - - If weights are given, result is sum(a*weights,axis=0)/(sum(weights,axis=0)*1.0) - weights must have a's shape or be the 1-d with length the size - of a in the given axis. - - If returned, return a tuple: the result and the sum of the weights - or count of values. Results will have the same shape. - - masked values in the weights will be set to 0.0 - """ - a = masked_array(a) - mask = a.mask - ash = a.shape - if ash == (): - ash = (1,) - if axis is None: - if mask is nomask: - if weights is None: - n = add.reduce(a.raw_data().ravel()) - d = reduce(lambda x, y: x * y, ash, 1.0) - else: - w = filled(weights, 0.0).ravel() - n = umath.add.reduce(a.raw_data().ravel() * w) - d = umath.add.reduce(w) - del w - else: - if weights is None: - n = add.reduce(a.ravel()) - w = fromnumeric.choose(mask, (1.0, 0.0)).ravel() - d = umath.add.reduce(w) - del w - else: - w = array(filled(weights, 0.0), float, mask=mask).ravel() - n = add.reduce(a.ravel() * w) - d = add.reduce(w) - del w - else: - if mask is nomask: - if weights is None: - d = ash[axis] * 1.0 - n = umath.add.reduce(a.raw_data(), axis) - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = numeric.array(w, float, copy=0) - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - del w - elif wsh == (ash[axis],): - r = [newaxis]*len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w["+ repr(tuple(r)) + "] * ones(ash, float)") - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - del w, r - else: - raise ValueError, 'average: weights wrong shape.' - else: - if weights is None: - n = add.reduce(a, axis) - w = numeric.choose(mask, (1.0, 0.0)) - d = umath.add.reduce(w, axis) - del w - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = array(w, float, mask=mask, copy=0) - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - elif wsh == (ash[axis],): - r = [newaxis]*len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w["+ repr(tuple(r)) + "] * masked_array(ones(ash, float), mask)") - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - else: - raise ValueError, 'average: weights wrong shape.' 
- del w - #print n, d, repr(mask), repr(weights) - if n is masked or d is masked: return masked - result = divide (n, d) - del n - - if isinstance(result, MaskedArray): - result.unmask() - if returned: - if not isinstance(d, MaskedArray): - d = masked_array(d) - if not d.shape == result.shape: - d = ones(result.shape, float) * d - d.unmask() - if returned: - return result, d - else: - return result - -def where (condition, x, y): - """where(condition, x, y) is x where condition is nonzero, y otherwise. - condition must be convertible to an integer array. - Answer is always the shape of condition. - The type depends on x and y. It is integer if both x and y are - the value masked. - """ - fc = filled(not_equal(condition, 0), 0) - xv = filled(x) - xm = getmask(x) - yv = filled(y) - ym = getmask(y) - d = numeric.choose(fc, (yv, xv)) - md = numeric.choose(fc, (ym, xm)) - m = getmask(condition) - m = make_mask(mask_or(m, md), copy=0, flag=1) - return masked_array(d, m) - -def choose (indices, t, out=None, mode='raise'): - "Returns array shaped like indices with elements chosen from t" - def fmask (x): - if x is masked: return 1 - return filled(x) - def nmask (x): - if x is masked: return 1 - m = getmask(x) - if m is nomask: return 0 - return m - c = filled(indices, 0) - masks = [nmask(x) for x in t] - a = [fmask(x) for x in t] - d = numeric.choose(c, a) - m = numeric.choose(c, masks) - m = make_mask(mask_or(m, getmask(indices)), copy=0, flag=1) - return masked_array(d, m) - -def masked_where(condition, x, copy=1): - """Return x as an array masked where condition is true. - Also masked where x or condition masked. - """ - cm = filled(condition,1) - m = mask_or(getmask(x), cm) - return array(filled(x), copy=copy, mask=m) - -def masked_greater(x, value, copy=1): - "masked_greater(x, value) = x masked where x > value" - return masked_where(greater(x, value), x, copy) - -def masked_greater_equal(x, value, copy=1): - "masked_greater_equal(x, value) = x masked where x >= value" - return masked_where(greater_equal(x, value), x, copy) - -def masked_less(x, value, copy=1): - "masked_less(x, value) = x masked where x < value" - return masked_where(less(x, value), x, copy) - -def masked_less_equal(x, value, copy=1): - "masked_less_equal(x, value) = x masked where x <= value" - return masked_where(less_equal(x, value), x, copy) - -def masked_not_equal(x, value, copy=1): - "masked_not_equal(x, value) = x masked where x != value" - d = filled(x, 0) - c = umath.not_equal(d, value) - m = mask_or(c, getmask(x)) - return array(d, mask=m, copy=copy) - -def masked_equal(x, value, copy=1): - """masked_equal(x, value) = x masked where x == value - For floating point consider masked_values(x, value) instead. - """ - d = filled(x, 0) - c = umath.equal(d, value) - m = mask_or(c, getmask(x)) - return array(d, mask=m, copy=copy) - -def masked_inside(x, v1, v2, copy=1): - """x with mask of all values of x that are inside [v1,v2] - v1 and v2 can be given in either order. - """ - if v2 < v1: - t = v2 - v2 = v1 - v1 = t - d = filled(x, 0) - c = umath.logical_and(umath.less_equal(d, v2), umath.greater_equal(d, v1)) - m = mask_or(c, getmask(x)) - return array(d, mask = m, copy=copy) - -def masked_outside(x, v1, v2, copy=1): - """x with mask of all values of x that are outside [v1,v2] - v1 and v2 can be given in either order. 
- """ - if v2 < v1: - t = v2 - v2 = v1 - v1 = t - d = filled(x, 0) - c = umath.logical_or(umath.less(d, v1), umath.greater(d, v2)) - m = mask_or(c, getmask(x)) - return array(d, mask = m, copy=copy) - -def reshape (a, *newshape): - "Copy of a with a new shape." - m = getmask(a) - d = filled(a).reshape(*newshape) - if m is nomask: - return masked_array(d) - else: - return masked_array(d, mask=numeric.reshape(m, *newshape)) - -def ravel (a): - "a as one-dimensional, may share data and mask" - m = getmask(a) - d = fromnumeric.ravel(filled(a)) - if m is nomask: - return masked_array(d) - else: - return masked_array(d, mask=numeric.ravel(m)) - -def concatenate (arrays, axis=0): - "Concatenate the arrays along the given axis" - d = [] - for x in arrays: - d.append(filled(x)) - d = numeric.concatenate(d, axis) - for x in arrays: - if getmask(x) is not nomask: break - else: - return masked_array(d) - dm = [] - for x in arrays: - dm.append(getmaskarray(x)) - dm = numeric.concatenate(dm, axis) - return masked_array(d, mask=dm) - -def swapaxes (a, axis1, axis2): - m = getmask(a) - d = masked_array(a).data - if m is nomask: - return masked_array(data=numeric.swapaxes(d, axis1, axis2)) - else: - return masked_array(data=numeric.swapaxes(d, axis1, axis2), - mask=numeric.swapaxes(m, axis1, axis2),) - - -def take (a, indices, axis=None, out=None, mode='raise'): - "returns selection of items from a." - m = getmask(a) - # d = masked_array(a).raw_data() - d = masked_array(a).data - if m is nomask: - return masked_array(numeric.take(d, indices, axis)) - else: - return masked_array(numeric.take(d, indices, axis), - mask = numeric.take(m, indices, axis)) - -def transpose(a, axes=None): - "reorder dimensions per tuple axes" - m = getmask(a) - d = filled(a) - if m is nomask: - return masked_array(numeric.transpose(d, axes)) - else: - return masked_array(numeric.transpose(d, axes), - mask = numeric.transpose(m, axes)) - - -def put(a, indices, values, mode='raise'): - """sets storage-indexed locations to corresponding values. - - Values and indices are filled if necessary. - - """ - d = a.raw_data() - ind = filled(indices) - v = filled(values) - numeric.put (d, ind, v) - m = getmask(a) - if m is not nomask: - a.unshare_mask() - numeric.put(a.raw_mask(), ind, 0) - -def putmask(a, mask, values): - "putmask(a, mask, values) sets a where mask is true." - if mask is nomask: - return - numeric.putmask(a.raw_data(), mask, values) - m = getmask(a) - if m is nomask: return - a.unshare_mask() - numeric.putmask(a.raw_mask(), mask, 0) - -def inner(a, b): - """inner(a,b) returns the dot product of two arrays, which has - shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the - product of the elements from the last dimensions of a and b. - Masked elements are replace by zeros. - """ - fa = filled(a, 0) - fb = filled(b, 0) - if len(fa.shape) == 0: fa.shape = (1,) - if len(fb.shape) == 0: fb.shape = (1,) - return masked_array(numeric.inner(fa, fb)) - -innerproduct = inner - -def outer(a, b): - """outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))""" - fa = filled(a, 0).ravel() - fb = filled(b, 0).ravel() - d = numeric.outer(fa, fb) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - return masked_array(d) - ma = getmaskarray(a) - mb = getmaskarray(b) - m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0) - return masked_array(d, m) - -outerproduct = outer - -def dot(a, b): - """dot(a,b) returns matrix-multiplication between a and b. 
The product-sum - is over the last dimension of a and the second-to-last dimension of b. - Masked values are replaced by zeros. See also innerproduct. - """ - return innerproduct(filled(a, 0), numeric.swapaxes(filled(b, 0), -1, -2)) - -def compress(condition, x, dimension=-1, out=None): - """Select those parts of x for which condition is true. - Masked values in condition are considered false. - """ - c = filled(condition, 0) - m = getmask(x) - if m is not nomask: - m = numeric.compress(c, m, dimension) - d = numeric.compress(c, filled(x), dimension) - return masked_array(d, m) - -class _minimum_operation: - "Object to calculate minima" - def __init__ (self): - """minimum(a, b) or minimum(a) - In one argument case returns the scalar minimum. - """ - pass - - def __call__ (self, a, b=None): - "Execute the call behavior." - if b is None: - m = getmask(a) - if m is nomask: - d = amin(filled(a).ravel()) - return d - ac = a.compressed() - if len(ac) == 0: - return masked - else: - return amin(ac.raw_data()) - else: - return where(less(a, b), a, b) - - def reduce (self, target, axis=0): - """Reduce target along the given axis.""" - m = getmask(target) - if m is nomask: - t = filled(target) - return masked_array (umath.minimum.reduce (t, axis)) - else: - t = umath.minimum.reduce(filled(target, minimum_fill_value(target)), axis) - m = umath.logical_and.reduce(m, axis) - return masked_array(t, m, get_fill_value(target)) - - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - d = umath.minimum.outer(filled(a), filled(b)) - return masked_array(d, m) - -minimum = _minimum_operation () - -class _maximum_operation: - "Object to calculate maxima" - def __init__ (self): - """maximum(a, b) or maximum(a) - In one argument case returns the scalar maximum. - """ - pass - - def __call__ (self, a, b=None): - "Execute the call behavior." - if b is None: - m = getmask(a) - if m is nomask: - d = amax(filled(a).ravel()) - return d - ac = a.compressed() - if len(ac) == 0: - return masked - else: - return amax(ac.raw_data()) - else: - return where(greater(a, b), a, b) - - def reduce (self, target, axis=0): - """Reduce target along the given axis.""" - m = getmask(target) - if m is nomask: - t = filled(target) - return masked_array (umath.maximum.reduce (t, axis)) - else: - t = umath.maximum.reduce(filled(target, maximum_fill_value(target)), axis) - m = umath.logical_and.reduce(m, axis) - return masked_array(t, m, get_fill_value(target)) - - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - d = umath.maximum.outer(filled(a), filled(b)) - return masked_array(d, m) - -maximum = _maximum_operation () - -def sort (x, axis = -1, fill_value=None): - """If x does not have a mask, return a masked array formed from the - result of numeric.sort(x, axis). - Otherwise, fill x with fill_value. Sort it. - Set a mask where the result is equal to fill_value. - Note that this may have unintended consequences if the data contains the - fill value at a non-masked site. - - If fill_value is not given the default fill value for x's type will be - used. 
- """ - if fill_value is None: - fill_value = default_fill_value (x) - d = filled(x, fill_value) - s = fromnumeric.sort(d, axis) - if getmask(x) is nomask: - return masked_array(s) - return masked_values(s, fill_value, copy=0) - -def diagonal(a, k = 0, axis1=0, axis2=1): - """diagonal(a,k=0,axis1=0, axis2=1) = the k'th diagonal of a""" - d = fromnumeric.diagonal(filled(a), k, axis1, axis2) - m = getmask(a) - if m is nomask: - return masked_array(d, m) - else: - return masked_array(d, fromnumeric.diagonal(m, k, axis1, axis2)) - -def trace (a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """trace(a,offset=0, axis1=0, axis2=1) returns the sum along diagonals - (defined by the last two dimenions) of the array. - """ - return diagonal(a, offset, axis1, axis2).sum(dtype=dtype) - -def argsort (x, axis = -1, out=None, fill_value=None): - """Treating masked values as if they have the value fill_value, - return sort indices for sorting along given axis. - if fill_value is None, use get_fill_value(x) - Returns a numpy array. - """ - d = filled(x, fill_value) - return fromnumeric.argsort(d, axis) - -def argmin (x, axis = -1, out=None, fill_value=None): - """Treating masked values as if they have the value fill_value, - return indices for minimum values along given axis. - if fill_value is None, use get_fill_value(x). - Returns a numpy array if x has more than one dimension. - Otherwise, returns a scalar index. - """ - d = filled(x, fill_value) - return fromnumeric.argmin(d, axis) - -def argmax (x, axis = -1, out=None, fill_value=None): - """Treating masked values as if they have the value fill_value, - return sort indices for maximum along given axis. - if fill_value is None, use -get_fill_value(x) if it exists. - Returns a numpy array if x has more than one dimension. - Otherwise, returns a scalar index. - """ - if fill_value is None: - fill_value = default_fill_value (x) - try: - fill_value = - fill_value - except: - pass - d = filled(x, fill_value) - return fromnumeric.argmax(d, axis) - -def fromfunction (f, s): - """apply f to s to create array as in umath.""" - return masked_array(numeric.fromfunction(f, s)) - -def asarray(data, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0) - """ - if isinstance(data, MaskedArray) and \ - (dtype is None or dtype == data.dtype): - return data - return array(data, dtype=dtype, copy=0) - -# Add methods to support ndarray interface -# XXX: I is better to to change the masked_*_operation adaptors -# XXX: to wrap ndarray methods directly to create ma.array methods. 
-from types import MethodType -def _m(f): - return MethodType(f, None, array) -def not_implemented(*args, **kwds): - raise NotImplementedError, "not yet implemented for numpy.ma arrays" -array.all = _m(alltrue) -array.any = _m(sometrue) -array.argmax = _m(argmax) -array.argmin = _m(argmin) -array.argsort = _m(argsort) -array.base = property(_m(not_implemented)) -array.byteswap = _m(not_implemented) - -def _choose(self, *args, **kwds): - return choose(self, args) -array.choose = _m(_choose) -del _choose - -def _clip(self,a_min,a_max,out=None): - return MaskedArray(data = self.data.clip(asarray(a_min).data, - asarray(a_max).data), - mask = mask_or(self.mask, - mask_or(getmask(a_min),getmask(a_max)))) -array.clip = _m(_clip) - -def _compress(self, cond, axis=None, out=None): - return compress(cond, self, axis) -array.compress = _m(_compress) -del _compress - -array.conj = array.conjugate = _m(conjugate) -array.copy = _m(not_implemented) - -def _cumprod(self, axis=None, dtype=None, out=None): - m = self.mask - if m is not nomask: - m = umath.logical_or.accumulate(self.mask, axis) - return MaskedArray(data = self.filled(1).cumprod(axis, dtype), mask=m) -array.cumprod = _m(_cumprod) - -def _cumsum(self, axis=None, dtype=None, out=None): - m = self.mask - if m is not nomask: - m = umath.logical_or.accumulate(self.mask, axis) - return MaskedArray(data=self.filled(0).cumsum(axis, dtype), mask=m) -array.cumsum = _m(_cumsum) - -array.diagonal = _m(diagonal) -array.dump = _m(not_implemented) -array.dumps = _m(not_implemented) -array.fill = _m(not_implemented) -array.flags = property(_m(not_implemented)) -array.flatten = _m(ravel) -array.getfield = _m(not_implemented) - -def _max(a, axis=None, out=None): - if out is not None: - raise TypeError("Output arrays Unsupported for masked arrays") - if axis is None: - return maximum(a) - else: - return maximum.reduce(a, axis) -array.max = _m(_max) -del _max -def _min(a, axis=None, out=None): - if out is not None: - raise TypeError("Output arrays Unsupported for masked arrays") - if axis is None: - return minimum(a) - else: - return minimum.reduce(a, axis) -array.min = _m(_min) -del _min -array.mean = _m(average) -array.nbytes = property(_m(not_implemented)) -array.newbyteorder = _m(not_implemented) -array.nonzero = _m(nonzero) -array.prod = _m(product) - -def _ptp(a,axis=None,out=None): - return a.max(axis,out)-a.min(axis) -array.ptp = _m(_ptp) -array.repeat = _m(repeat) -array.resize = _m(resize) -array.searchsorted = _m(not_implemented) -array.setfield = _m(not_implemented) -array.setflags = _m(not_implemented) -array.sort = _m(not_implemented) # NB: ndarray.sort is inplace - -def _squeeze(self): - try: - result = MaskedArray(data = self.data.squeeze(), - mask = self.mask.squeeze()) - except AttributeError: - result = _wrapit(self, 'squeeze') - return result -array.squeeze = _m(_squeeze) - -array.strides = property(_m(not_implemented)) -array.sum = _m(sum) -def _swapaxes(self,axis1,axis2): - return MaskedArray(data = self.data.swapaxes(axis1, axis2), - mask = self.mask.swapaxes(axis1, axis2)) -array.swapaxes = _m(_swapaxes) -array.take = _m(take) -array.tofile = _m(not_implemented) -array.trace = _m(trace) -array.transpose = _m(transpose) - -def _var(self,axis=None,dtype=None, out=None): - if axis is None: - return numeric.asarray(self.compressed()).var() - a = self.swapaxes(axis,0) - a = a - a.mean(axis=0) - a *= a - a /= a.count(axis=0) - return a.swapaxes(0,axis).sum(axis) -def _std(self,axis=None, dtype=None, out=None): - return 
(self.var(axis,dtype))**0.5 -array.var = _m(_var) -array.std = _m(_std) - -array.view = _m(not_implemented) -array.round = _m(around) -del _m, MethodType, not_implemented - - -masked = MaskedArray(0, int, mask=1) diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py deleted file mode 100644 index 56a44ba9c..000000000 --- a/numpy/core/memmap.py +++ /dev/null @@ -1,103 +0,0 @@ -__all__ = ['memmap'] - -import mmap -from numeric import uint8, ndarray, dtype - -dtypedescr = dtype -valid_filemodes = ["r", "c", "r+", "w+"] -writeable_filemodes = ["r+","w+"] - -mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" - } - -class memmap(ndarray): - __array_priority__ = -100.0 - def __new__(subtype, name, dtype=uint8, mode='r+', offset=0, - shape=None, order='C'): - try: - mode = mode_equivalents[mode] - except KeyError: - if mode not in valid_filemodes: - raise ValueError("mode must be one of %s" % \ - (valid_filemodes + mode_equivalents.keys())) - - fid = file(name, (mode == 'c' and 'r' or mode)+'b') - - if (mode == 'w+') and shape is None: - raise ValueError, "shape must be given" - - fid.seek(0,2) - flen = fid.tell() - descr = dtypedescr(dtype) - _dbytes = descr.itemsize - - if shape is None: - bytes = flen-offset - if (bytes % _dbytes): - fid.close() - raise ValueError, "Size of available data is not a "\ - "multiple of data-type size." - size = bytes // _dbytes - shape = (size,) - else: - if not isinstance(shape, tuple): - shape = (shape,) - size = 1 - for k in shape: - size *= k - - bytes = long(offset + size*_dbytes) - - if mode == 'w+' or (mode == 'r+' and flen < bytes): - fid.seek(bytes-1,0) - fid.write(chr(0)) - fid.flush() - - if mode == 'c': - acc = mmap.ACCESS_COPY - elif mode == 'r': - acc = mmap.ACCESS_READ - else: - acc = mmap.ACCESS_WRITE - - mm = mmap.mmap(fid.fileno(), bytes, access=acc) - - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, - offset=offset, order=order) - self._mmap = mm - self._offset = offset - self._mode = mode - self._size = size - self._name = name - fid.close() - return self - - def __array_finalize__(self, obj): - if obj is not None: - if not isinstance(obj, memmap): - raise ValueError, "Cannot create a memmap array that way" - self._mmap = obj._mmap - else: - self._mmap = None - - def sync(self): - if self._mmap is not None: - self._mmap.flush() - - def close(self): - if (self.base is self._mmap): - self._mmap.close() - elif self._mmap is not None: - raise ValueError, "Cannot close a memmap that is being used " \ - "by another object." 
- - def __del__(self): - self.sync() - try: - self.close() - except ValueError: - pass diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py deleted file mode 100644 index b2f141312..000000000 --- a/numpy/core/numeric.py +++ /dev/null @@ -1,1051 +0,0 @@ -__all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc', - 'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype', - 'fromstring', 'fromfile', 'frombuffer','newbuffer', - 'getbuffer', 'int_asbuffer', 'where', 'argwhere', - 'concatenate', 'fastCopyAndTranspose', 'lexsort', - 'set_numeric_ops', 'can_cast', - 'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray', - 'isfortran', 'empty_like', 'zeros_like', - 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', - 'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot', - 'array2string', 'get_printoptions', 'set_printoptions', - 'array_repr', 'array_str', 'set_string_function', - 'little_endian', 'require', - 'fromiter', 'array_equal', 'array_equiv', - 'indices', 'fromfunction', 'loadtxt', 'savetxt', - 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', - 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', - 'seterr', 'geterr', 'setbufsize', 'getbufsize', - 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero', - 'Inf', 'inf', 'infty', 'Infinity', - 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', - 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS'] - -import sys -import multiarray -import umath -from umath import * -import numerictypes -from numerictypes import * - -bitwise_not = invert - -CLIP = multiarray.CLIP -WRAP = multiarray.WRAP -RAISE = multiarray.RAISE -MAXDIMS = multiarray.MAXDIMS -ALLOW_THREADS = multiarray.ALLOW_THREADS -BUFSIZE = multiarray.BUFSIZE - - -# from Fernando Perez's IPython -def zeros_like(a): - """Return an array of zeros of the shape and typecode of a. - - If you don't explicitly need the array to be zeroed, you should instead - use empty_like(), which is faster as it only allocates memory.""" - try: - return zeros(a.shape, a.dtype, a.flags.fnc) - except AttributeError: - try: - wrap = a.__array_wrap__ - except AttributeError: - wrap = None - a = asarray(a) - res = zeros(a.shape, a.dtype) - if wrap: - res = wrap(res) - return res - -def empty_like(a): - """Return an empty (uninitialized) array of the shape and typecode of a. - - Note that this does NOT initialize the returned array. If you require - your array to be initialized, you should use zeros_like(). 
- - """ - try: - return empty(a.shape, a.dtype, a.flags.fnc) - except AttributeError: - try: - wrap = a.__array_wrap__ - except AttributeError: - wrap = None - a = asarray(a) - res = empty(a.shape, a.dtype) - if wrap: - res = wrap(res) - return res - -# end Fernando's utilities - - -def extend_all(module): - adict = {} - for a in __all__: - adict[a] = 1 - try: - mall = getattr(module, '__all__') - except AttributeError: - mall = [k for k in module.__dict__.keys() if not k.startswith('_')] - for a in mall: - if a not in adict: - __all__.append(a) - -extend_all(umath) -extend_all(numerictypes) - -newaxis = None - -ndarray = multiarray.ndarray -flatiter = multiarray.flatiter -broadcast = multiarray.broadcast -dtype = multiarray.dtype -ufunc = type(sin) - -arange = multiarray.arange -array = multiarray.array -zeros = multiarray.zeros -empty = multiarray.empty -fromstring = multiarray.fromstring -fromiter = multiarray.fromiter -fromfile = multiarray.fromfile -frombuffer = multiarray.frombuffer -newbuffer = multiarray.newbuffer -getbuffer = multiarray.getbuffer -int_asbuffer = multiarray.int_asbuffer -where = multiarray.where -concatenate = multiarray.concatenate -fastCopyAndTranspose = multiarray._fastCopyAndTranspose -set_numeric_ops = multiarray.set_numeric_ops -can_cast = multiarray.can_cast -lexsort = multiarray.lexsort -compare_chararrays = multiarray.compare_chararrays -putmask = multiarray.putmask - -def asarray(a, dtype=None, order=None): - """Returns a as an array. - - Unlike array(), no copy is performed if a is already an array. Subclasses - are converted to base class ndarray. - """ - return array(a, dtype, copy=False, order=order) - -def asanyarray(a, dtype=None, order=None): - """Returns a as an array, but will pass subclasses through. - """ - return array(a, dtype, copy=False, order=order, subok=1) - -def ascontiguousarray(a, dtype=None): - """Return 'a' as an array contiguous in memory (C order). - """ - return array(a, dtype, copy=False, order='C', ndmin=1) - -def asfortranarray(a, dtype=None): - """Return 'a' as an array laid out in Fortran-order in memory. - """ - return array(a, dtype, copy=False, order='F', ndmin=1) - -def require(a, dtype=None, requirements=None): - if requirements is None: - requirements = [] - else: - requirements = [x.upper() for x in requirements] - - if not requirements: - return asanyarray(a, dtype=dtype) - - if 'ENSUREARRAY' in requirements or 'E' in requirements: - subok = 0 - else: - subok = 1 - - arr = array(a, dtype=dtype, copy=False, subok=subok) - - copychar = 'A' - if 'FORTRAN' in requirements or \ - 'F_CONTIGUOUS' in requirements or \ - 'F' in requirements: - copychar = 'F' - elif 'CONTIGUOUS' in requirements or \ - 'C_CONTIGUOUS' in requirements or \ - 'C' in requirements: - copychar = 'C' - - for prop in requirements: - if not arr.flags[prop]: - arr = arr.copy(copychar) - break - return arr - -def isfortran(a): - """Returns True if 'a' is arranged in Fortran-order in memory with a.ndim > 1 - """ - return a.flags.fnc - -def argwhere(a): - """Return a 2-d array of shape N x a.ndim where each row - is a sequence of indices into a. This sequence must be - converted to a tuple in order to be used to index into a. 
- """ - return asarray(a.nonzero()).T - -def flatnonzero(a): - """Return indicies that are not-zero in flattened version of a - - Equivalent to a.ravel().nonzero()[0] - """ - return a.ravel().nonzero()[0] - -_mode_from_name_dict = {'v': 0, - 's' : 1, - 'f' : 2} - -def _mode_from_name(mode): - if isinstance(mode, type("")): - return _mode_from_name_dict[mode.lower()[0]] - return mode - -def correlate(a,v,mode='valid'): - """Return the discrete, linear correlation of 1-D sequences a and v; mode - can be 'valid', 'same', or 'full' to specify the size of the resulting - sequence - """ - mode = _mode_from_name(mode) - return multiarray.correlate(a,v,mode) - - -def convolve(a,v,mode='full'): - """Returns the discrete, linear convolution of 1-D sequences a and v; mode - can be 'valid', 'same', or 'full' to specify size of the resulting sequence. - """ - a,v = array(a,ndmin=1),array(v,ndmin=1) - if (len(v) > len(a)): - a, v = v, a - assert len(a) > 0, 'a cannot be empty' - assert len(v) > 0, 'v cannot be empty' - mode = _mode_from_name(mode) - return multiarray.correlate(a,asarray(v)[::-1],mode) - -inner = multiarray.inner -dot = multiarray.dot - -def outer(a,b): - """Returns the outer product of two vectors. - - result[i,j] = a[i]*b[j] when a and b are vectors. - Will accept any arguments that can be made into vectors. - """ - a = asarray(a) - b = asarray(b) - return a.ravel()[:,newaxis]*b.ravel()[newaxis,:] - -def vdot(a, b): - """Returns the dot product of 2 vectors (or anything that can be made into - a vector). - - Note: this is not the same as `dot`, as it takes the conjugate of its first - argument if complex and always returns a scalar.""" - return dot(asarray(a).ravel().conj(), asarray(b).ravel()) - -# try to import blas optimized dot if available -try: - # importing this changes the dot function for basic 4 types - # to blas-optimized versions. - from _dotblas import dot, vdot, inner, alterdot, restoredot -except ImportError: - def alterdot(): - pass - def restoredot(): - pass - - -def tensordot(a, b, axes=2): - """tensordot returns the product for any (ndim >= 1) arrays. - - r_{xxx, yyy} = \sum_k a_{xxx,k} b_{k,yyy} where - - the axes to be summed over are given by the axes argument. - the first element of the sequence determines the axis or axes - in arr1 to sum over, and the second element in axes argument sequence - determines the axis or axes in arr2 to sum over. - - When there is more than one axis to sum over, the corresponding - arguments to axes should be sequences of the same length with the first - axis to sum over given first in both sequences, the second axis second, - and so forth. - - If the axes argument is an integer, N, then the last N dimensions of a - and first N dimensions of b are summed over. 
- """ - try: - iter(axes) - except: - axes_a = range(-axes,0) - axes_b = range(0,axes) - else: - axes_a, axes_b = axes - try: - na = len(axes_a) - axes_a = list(axes_a) - except TypeError: - axes_a = [axes_a] - na = 1 - try: - nb = len(axes_b) - axes_b = list(axes_b) - except TypeError: - axes_b = [axes_b] - nb = 1 - - a, b = asarray(a), asarray(b) - as_ = a.shape - nda = len(a.shape) - bs = b.shape - ndb = len(b.shape) - equal = 1 - if (na != nb): equal = 0 - else: - for k in xrange(na): - if as_[axes_a[k]] != bs[axes_b[k]]: - equal = 0 - break - if axes_a[k] < 0: - axes_a[k] += nda - if axes_b[k] < 0: - axes_b[k] += ndb - if not equal: - raise ValueError, "shape-mismatch for sum" - - # Move the axes to sum over to the end of "a" - # and to the front of "b" - notin = [k for k in range(nda) if k not in axes_a] - newaxes_a = notin + axes_a - N2 = 1 - for axis in axes_a: - N2 *= as_[axis] - newshape_a = (-1, N2) - olda = [as_[axis] for axis in notin] - - notin = [k for k in range(ndb) if k not in axes_b] - newaxes_b = axes_b + notin - N2 = 1 - for axis in axes_b: - N2 *= bs[axis] - newshape_b = (N2, -1) - oldb = [bs[axis] for axis in notin] - - at = a.transpose(newaxes_a).reshape(newshape_a) - bt = b.transpose(newaxes_b).reshape(newshape_b) - res = dot(at, bt) - return res.reshape(olda + oldb) - -def roll(a, shift, axis=None): - """Roll the elements in the array by 'shift' positions along - the given axis. - """ - a = asanyarray(a) - if axis is None: - n = a.size - reshape=1 - else: - n = a.shape[axis] - reshape=0 - shift %= n - indexes = concatenate((arange(n-shift,n),arange(n-shift))) - res = a.take(indexes, axis) - if reshape: - return res.reshape(a.shape) - else: - return res - -def rollaxis(a, axis, start=0): - """Return transposed array so that axis is rolled before start. - - if a.shape is (3,4,5,6) - rollaxis(a, 3, 1).shape is (3,6,4,5) - rollaxis(a, 2, 0).shape is (5,3,4,6) - rollaxis(a, 1, 3).shape is (3,5,4,6) - rollaxis(a, 1, 4).shape is (3,5,6,4) - """ - n = a.ndim - if axis < 0: - axis += n - if start < 0: - start += n - msg = 'rollaxis: %s (%d) must be >=0 and < %d' - if not (0 <= axis < n): - raise ValueError, msg % ('axis', axis, n) - if not (0 <= start < n+1): - raise ValueError, msg % ('start', start, n+1) - if (axis < start): # it's been removed - start -= 1 - if axis==start: - return a - axes = range(0,n) - axes.remove(axis) - axes.insert(start, axis) - return a.transpose(axes) - -# fix hack in scipy which imports this function -def _move_axis_to_0(a, axis): - return rollaxis(a, axis, 0) - -def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): - """Return the cross product of two (arrays of) vectors. - - The cross product is performed over the last axis of a and b by default, - and can handle axes with dimensions 2 and 3. For a dimension of 2, - the z-component of the equivalent three-dimensional cross product is - returned. 
- """ - if axis is not None: - axisa,axisb,axisc=(axis,)*3 - a = asarray(a).swapaxes(axisa, 0) - b = asarray(b).swapaxes(axisb, 0) - msg = "incompatible dimensions for cross product\n"\ - "(dimension must be 2 or 3)" - if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]): - raise ValueError(msg) - if a.shape[0] == 2: - if (b.shape[0] == 2): - cp = a[0]*b[1] - a[1]*b[0] - if cp.ndim == 0: - return cp - else: - return cp.swapaxes(0, axisc) - else: - x = a[1]*b[2] - y = -a[0]*b[2] - z = a[0]*b[1] - a[1]*b[0] - elif a.shape[0] == 3: - if (b.shape[0] == 3): - x = a[1]*b[2] - a[2]*b[1] - y = a[2]*b[0] - a[0]*b[2] - z = a[0]*b[1] - a[1]*b[0] - else: - x = -a[2]*b[1] - y = a[2]*b[0] - z = a[0]*b[1] - a[1]*b[0] - cp = array([x,y,z]) - if cp.ndim == 1: - return cp - else: - return cp.swapaxes(0,axisc) - - -#Use numarray's printing function -from arrayprint import array2string, get_printoptions, set_printoptions - -_typelessdata = [int_, float_, complex_] -if issubclass(intc, int): - _typelessdata.append(intc) - -if issubclass(longlong, int): - _typelessdata.append(longlong) - -def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): - if arr.size > 0 or arr.shape==(0,): - lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', "array(") - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - typeless = arr.dtype.type in _typelessdata - - if arr.__class__ is not ndarray: - cName= arr.__class__.__name__ - else: - cName = "array" - if typeless and arr.size: - return cName + "(%s)" % lst - else: - typename=arr.dtype.name - lf = '' - if issubclass(arr.dtype.type, flexible): - if arr.dtype.names: - typename = "%s" % str(arr.dtype) - else: - typename = "'%s'" % str(arr.dtype) - lf = '\n'+' '*len("array(") - return cName + "(%s, %sdtype=%s)" % (lst, lf, typename) - -def array_str(a, max_line_width=None, precision=None, suppress_small=None): - return array2string(a, max_line_width, precision, suppress_small, ' ', "", str) - -set_string_function = multiarray.set_string_function -set_string_function(array_str, 0) -set_string_function(array_repr, 1) - -little_endian = (sys.byteorder == 'little') - - -def indices(dimensions, dtype=int): - """Returns an array representing a grid of indices with row-only, and - column-only variation. - """ - dimensions = tuple(dimensions) - N = len(dimensions) - if N == 0: - return array([],dtype=dtype) - res = empty((N,)+dimensions, dtype=dtype) - for i, dim in enumerate(dimensions): - tmp = arange(dim,dtype=dtype) - tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1) - newdim = dimensions[:i] + (1,)+ dimensions[i+1:] - val = zeros(newdim, dtype) - add(tmp, val, res[i]) - return res - -def fromfunction(function, shape, **kwargs): - """Returns an array constructed by calling a function on a tuple of number - grids. - - The function should accept as many arguments as the length of shape and - work on array inputs. The shape argument is a sequence of numbers - indicating the length of the desired output for each axis. - - The function can also accept keyword arguments (except dtype), which will - be passed through fromfunction to the function itself. The dtype argument - (default float) determines the data-type of the index grid passed to the - function. - """ - dtype = kwargs.pop('dtype', float) - args = indices(shape, dtype=dtype) - return function(*args,**kwargs) - -def isscalar(num): - """Returns True if the type of num is a scalar type. 
- """ - if isinstance(num, generic): - return True - else: - return type(num) in ScalarType - -_lkup = { - '0':'0000', - '1':'0001', - '2':'0010', - '3':'0011', - '4':'0100', - '5':'0101', - '6':'0110', - '7':'0111', - '8':'1000', - '9':'1001', - 'a':'1010', - 'b':'1011', - 'c':'1100', - 'd':'1101', - 'e':'1110', - 'f':'1111', - 'A':'1010', - 'B':'1011', - 'C':'1100', - 'D':'1101', - 'E':'1110', - 'F':'1111', - 'L':''} - -def binary_repr(num, width=None): - """Return the binary representation of the input number as a string. - - This is equivalent to using base_repr with base 2, but about 25x - faster. - - For negative numbers, if width is not given, a - sign is added to the - front. If width is given, the two's complement of the number is - returned, with respect to that width. - """ - sign = '' - if num < 0: - if width is None: - sign = '-' - num = -num - else: - # replace num with its 2-complement - num = 2**width + num - elif num == 0: - return '0'*(width or 1) - ostr = hex(num) - bin = ''.join([_lkup[ch] for ch in ostr[2:]]) - bin = bin.lstrip('0') - if width is not None: - bin = bin.zfill(width) - return sign + bin - -def base_repr (number, base=2, padding=0): - """Return the representation of a number in the given base. - - Base can't be larger than 36. - """ - if number < 0: - raise ValueError("negative numbers not handled in base_repr") - if base > 36: - raise ValueError("bases greater than 36 not handled in base_repr") - - chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' - import math - lnb = math.log(base) - res = padding*chars[0] - if number == 0: - return res + chars[0] - exponent = int (math.log (number)/lnb) - while(exponent >= 0): - term = long(base)**exponent - lead_digit = int(number / term) - res += chars[lead_digit] - number -= term*lead_digit - exponent -= 1 - return res - -from cPickle import load, loads -_cload = load -_file = file - -def load(file): - """Wrapper around cPickle.load which accepts either a file-like object or - a filename. - """ - if isinstance(file, type("")): - file = _file(file,"rb") - return _cload(file) - -# Adapted from matplotlib - -def _getconv(dtype): - typ = dtype.type - if issubclass(typ, bool_): - return lambda x: bool(int(x)) - if issubclass(typ, integer): - return int - elif issubclass(typ, floating): - return float - elif issubclass(typ, complex): - return complex - else: - return str - - -def _string_like(obj): - try: obj + '' - except (TypeError, ValueError): return 0 - return 1 - -def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, - skiprows=0, usecols=None, unpack=False): - """ - Load ASCII data from fname into an array and return the array. - - The data must be regular, same number of values in every row - - fname can be a filename or a file handle. Support for gzipped files is - automatic, if the filename ends in .gz - - See scipy.loadmat to read and write matfiles. - - Example usage: - - X = loadtxt('test.dat') # data in two columns - t = X[:,0] - y = X[:,1] - - Alternatively, you can do the same with "unpack"; see below - - X = loadtxt('test.dat') # a matrix of data - x = loadtxt('test.dat') # a single column of data - - - dtype - the data-type of the resulting array. If this is a - record data-type, the the resulting array will be 1-d and each row will - be interpreted as an element of the array. The number of columns - used must match the number of fields in the data-type in this case. 
- - comments - the character used to indicate the start of a comment - in the file - - delimiter is a string-like character used to seperate values in the - file. If delimiter is unspecified or none, any whitespace string is - a separator. - - converters, if not None, is a dictionary mapping column number to - a function that will convert that column to a float. Eg, if - column 0 is a date string: converters={0:datestr2num} - - skiprows is the number of rows from the top to skip - - usecols, if not None, is a sequence of integer column indexes to - extract where 0 is the first column, eg usecols=(1,4,5) to extract - just the 2nd, 5th and 6th columns - - unpack, if True, will transpose the matrix allowing you to unpack - into named arguments on the left hand side - - t,y = load('test.dat', unpack=True) # for two column data - x,y,z = load('somefile.dat', usecols=(3,5,7), unpack=True) - - """ - - if _string_like(fname): - if fname.endswith('.gz'): - import gzip - fh = gzip.open(fname) - else: - fh = file(fname) - elif hasattr(fname, 'seek'): - fh = fname - else: - raise ValueError('fname must be a string or file handle') - X = [] - - dtype = multiarray.dtype(dtype) - defconv = _getconv(dtype) - converterseq = None - if converters is None: - converters = {} - if dtype.names is not None: - converterseq = [_getconv(dtype.fields[name][0]) \ - for name in dtype.names] - - for i,line in enumerate(fh): - if i>> seterr(over='raise') # doctest: +SKIP - {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} - - >>> seterr(all='warn', over='raise') # doctest: +SKIP - {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} - - >>> int16(32000) * int16(3) # doctest: +SKIP - Traceback (most recent call last): - File "", line 1, in ? - FloatingPointError: overflow encountered in short_scalars - >>> seterr(all='ignore') # doctest: +SKIP - {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} - - """ - - pyvals = umath.geterrobj() - old = geterr() - - if divide is None: divide = all or old['divide'] - if over is None: over = all or old['over'] - if under is None: under = all or old['under'] - if invalid is None: invalid = all or old['invalid'] - - maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + - (_errdict[over] << SHIFT_OVERFLOW ) + - (_errdict[under] << SHIFT_UNDERFLOW) + - (_errdict[invalid] << SHIFT_INVALID)) - - pyvals[1] = maskvalue - umath.seterrobj(pyvals) - return old - - -def geterr(): - """Get the current way of handling floating-point errors. - - Returns a dictionary with entries "divide", "over", "under", and - "invalid", whose values are from the strings - "ignore", "print", "log", "warn", "raise", and "call". - """ - maskvalue = umath.geterrobj()[1] - mask = 7 - res = {} - val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask - res['divide'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_OVERFLOW) & mask - res['over'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_UNDERFLOW) & mask - res['under'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_INVALID) & mask - res['invalid'] = _errdict_rev[val] - return res - -def setbufsize(size): - """Set the size of the buffer used in ufuncs. - """ - if size > 10e6: - raise ValueError, "Buffer size, %s, is too big." % size - if size < 5: - raise ValueError, "Buffer size, %s, is too small." %size - if size % 16 != 0: - raise ValueError, "Buffer size, %s, is not a multiple of 16." 
%size - - pyvals = umath.geterrobj() - old = getbufsize() - pyvals[0] = size - umath.seterrobj(pyvals) - return old - -def getbufsize(): - """Return the size of the buffer used in ufuncs. - """ - return umath.geterrobj()[0] - -def seterrcall(func): - """Set the callback function used when a floating-point error handler - is set to 'call' or the object with a write method for use when - the floating-point error handler is set to 'log' - - 'func' should be a function that takes two arguments. The first is - type of error ("divide", "over", "under", or "invalid"), and the second - is the status flag (= divide + 2*over + 4*under + 8*invalid). - - Returns the old handler. - """ - if func is not None and not callable(func): - if not hasattr(func, 'write') or not callable(func.write): - raise ValueError, "Only callable can be used as callback" - pyvals = umath.geterrobj() - old = geterrcall() - pyvals[2] = func - umath.seterrobj(pyvals) - return old - -def geterrcall(): - """Return the current callback function used on floating-point errors. - """ - return umath.geterrobj()[2] - -class _unspecified(object): - pass -_Unspecified = _unspecified() - -class errstate(object): - """with errstate(**state): --> operations in following block use given state. - - # Set error handling to known state. - >>> _ = seterr(invalid='raise', divide='raise', over='raise', under='ignore') - - |>> a = -arange(3) - |>> with errstate(invalid='ignore'): - ... print sqrt(a) - [ 0. -1.#IND -1.#IND] - |>> print sqrt(a.astype(complex)) - [ 0. +0.00000000e+00j 0. +1.00000000e+00j 0. +1.41421356e+00j] - |>> print sqrt(a) - Traceback (most recent call last): - ... - FloatingPointError: invalid encountered in sqrt - |>> with errstate(divide='ignore'): - ... print a/0 - [0 0 0] - |>> print a/0 - Traceback (most recent call last): - ... - FloatingPointError: divide by zero encountered in divide - - """ - # Note that we don't want to run the above doctests because they will fail - # without a from __future__ import with_statement - def __init__(self, **kwargs): - self.call = kwargs.pop('call',_Unspecified) - self.kwargs = kwargs - def __enter__(self): - self.oldstate = seterr(**self.kwargs) - if self.call is not _Unspecified: - self.oldcall = seterrcall(self.call) - def __exit__(self, *exc_info): - seterr(**self.oldstate) - if self.call is not _Unspecified: - seterrcall(self.oldcall) - -def _setdef(): - defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None] - umath.seterrobj(defval) - -# set the default values -_setdef() - -Inf = inf = infty = Infinity = PINF -nan = NaN = NAN -False_ = bool_(False) -True_ = bool_(True) - -import fromnumeric -from fromnumeric import * -extend_all(fromnumeric) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py deleted file mode 100644 index 74906ac40..000000000 --- a/numpy/core/numerictypes.py +++ /dev/null @@ -1,488 +0,0 @@ -"""numerictypes: Define the numeric type objects - -This module is designed so 'from numerictypes import *' is safe. 
-Exported symbols include: - - Dictionary with all registered number types (including aliases): - typeDict - - Type objects (not all will be available, depends on platform): - see variable sctypes for which ones you have - - Bit-width names - - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 - - c-based names - - bool_ - - object_ - - void, str_, unicode_ - - byte, ubyte, - short, ushort - intc, uintc, - intp, uintp, - int_, uint, - longlong, ulonglong, - - single, csingle, - float_, complex_, - longfloat, clongfloat, - - As part of the type-hierarchy: xx -- is bit-width - - generic - +-> bool_ - +-> number - | integer - | signedinteger (intxx) - | byte - | short - | intc - | intp int0 - | int_ - | longlong - +-> unsignedinteger (uintxx) - | ubyte - | ushort - | uintc - | uintp uint0 - | uint_ - | ulonglong - +-> inexact - | +-> floating (floatxx) - | | single - | | float_ (double) - | | longfloat - | \-> complexfloating (complexxx) - | csingle (singlecomplex) - | complex_ (cfloat, cdouble) - | clongfloat (longcomplex) - +-> flexible - | character - | str_ (string_) - | unicode_ - | void - | - \-> object_ (not used much) - -$Id: numerictypes.py,v 1.17 2005/09/09 22:20:06 teoliphant Exp $ -""" - -# we add more at the bottom -__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', - 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', - 'maximum_sctype', 'issctype', 'typecodes'] - -from numpy.core.multiarray import typeinfo, ndarray, array, empty, dtype -import types as _types - -# we don't export these for import *, but we do want them accessible -# as numerictypes.bool, etc. -from __builtin__ import bool, int, long, float, complex, object, unicode, str - -sctypeDict = {} # Contains all leaf-node scalar types with aliases -sctypeNA = {} # Contails all leaf-node types -> numarray type equivalences -allTypes = {} # Collect the types we will add to the module here - -def _evalname(name): - k = 0 - for ch in name: - if ch in '0123456789': - break - k += 1 - try: - bits = int(name[k:]) - except ValueError: - bits = 0 - base = name[:k] - return base, bits - -def bitname(obj): - """Return a bit-width name for a given type object""" - name = obj.__name__ - base = '' - char = '' - try: - if name[-1] == '_': - newname = name[:-1] - else: - newname = name - info = typeinfo[newname.upper()] - assert(info[-1] == obj) # sanity check - bits = info[2] - - except KeyError: # bit-width name - base, bits = _evalname(name) - char = base[0] - - if name == 'bool_': - char = 'b' - base = 'bool' - elif name=='string_': - char = 'S' - base = 'string' - elif name=='unicode_': - char = 'U' - base = 'unicode' - elif name=='void': - char = 'V' - base = 'void' - elif name=='object_': - char = 'O' - base = 'object' - bits = 0 - - bytes = bits / 8 - - if char != '' and bytes != 0: - char = "%s%d" % (char, bytes) - - return base, bits, char - - -def _add_types(): - for a in typeinfo.keys(): - name = a.lower() - if isinstance(typeinfo[a], tuple): - typeobj = typeinfo[a][-1] - - # define C-name and insert typenum and typechar references also - allTypes[name] = typeobj - sctypeDict[name] = typeobj - sctypeDict[typeinfo[a][0]] = typeobj - sctypeDict[typeinfo[a][1]] = typeobj - - else: # generic class - allTypes[name] = typeinfo[a] -_add_types() - -def _add_aliases(): - for a in typeinfo.keys(): - name = a.lower() - if not isinstance(typeinfo[a], tuple): - continue - 
typeobj = typeinfo[a][-1] - # insert bit-width version for this class (if relevant) - base, bit, char = bitname(typeobj) - if base[-3:] == 'int' or char[0] in 'ui': continue - if base != '': - myname = "%s%d" % (base, bit) - if (name != 'longdouble' and name != 'clongdouble') or \ - myname not in allTypes.keys(): - allTypes[myname] = typeobj - sctypeDict[myname] = typeobj - if base == 'complex': - na_name = '%s%d' % (base.capitalize(), bit/2) - elif base == 'bool': - na_name = base.capitalize() - sctypeDict[na_name] = typeobj - else: - na_name = "%s%d" % (base.capitalize(), bit) - sctypeDict[na_name] = typeobj - sctypeNA[na_name] = typeobj - sctypeDict[na_name] = typeobj - sctypeNA[typeobj] = na_name - sctypeNA[typeinfo[a][0]] = na_name - if char != '': - sctypeDict[char] = typeobj - sctypeNA[char] = na_name -_add_aliases() - -# Integers handled so that -# The int32, int64 types should agree exactly with -# PyArray_INT32, PyArray_INT64 in C -# We need to enforce the same checking as is done -# in arrayobject.h where the order of getting a -# bit-width match is: -# long, longlong, int, short, char -# for int8, int16, int32, int64, int128 - -def _add_integer_aliases(): - _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE'] - for ctype in _ctypes: - val = typeinfo[ctype] - bits = val[2] - charname = 'i%d' % (bits/8,) - ucharname = 'u%d' % (bits/8,) - intname = 'int%d' % bits - UIntname = 'UInt%d' % bits - Intname = 'Int%d' % bits - uval = typeinfo['U'+ctype] - typeobj = val[-1] - utypeobj = uval[-1] - if intname not in allTypes.keys(): - uintname = 'uint%d' % bits - allTypes[intname] = typeobj - allTypes[uintname] = utypeobj - sctypeDict[intname] = typeobj - sctypeDict[uintname] = utypeobj - sctypeDict[Intname] = typeobj - sctypeDict[UIntname] = utypeobj - sctypeDict[charname] = typeobj - sctypeDict[ucharname] = utypeobj - sctypeNA[Intname] = typeobj - sctypeNA[UIntname] = utypeobj - sctypeNA[charname] = typeobj - sctypeNA[ucharname] = utypeobj - sctypeNA[typeobj] = Intname - sctypeNA[utypeobj] = UIntname - sctypeNA[val[0]] = Intname - sctypeNA[uval[0]] = UIntname -_add_integer_aliases() - -# We use these later -void = allTypes['void'] -generic = allTypes['generic'] - -# -# Rework the Python names (so that float and complex and int are consistent -# with Python usage) -# -def _set_up_aliases(): - type_pairs = [('complex_', 'cdouble'), - ('int0', 'intp'), - ('uint0', 'uintp'), - ('single', 'float'), - ('csingle', 'cfloat'), - ('singlecomplex', 'cfloat'), - ('float_', 'double'), - ('intc', 'int'), - ('uintc', 'uint'), - ('int_', 'long'), - ('uint', 'ulong'), - ('cfloat', 'cdouble'), - ('longfloat', 'longdouble'), - ('clongfloat', 'clongdouble'), - ('longcomplex', 'clongdouble'), - ('bool_', 'bool'), - ('unicode_', 'unicode'), - ('str_', 'string'), - ('string_', 'string'), - ('object_', 'object')] - for alias, t in type_pairs: - allTypes[alias] = allTypes[t] - sctypeDict[alias] = sctypeDict[t] - # Remove aliases overriding python types and modules - for t in ['ulong', 'object', 'unicode', 'int', 'long', 'float', - 'complex', 'bool', 'string']: - try: - del allTypes[t] - del sctypeDict[t] - except KeyError: - pass -_set_up_aliases() - -# Now, construct dictionary to lookup character codes from types -_sctype2char_dict = {} -def _construct_char_code_lookup(): - for name in typeinfo.keys(): - tup = typeinfo[name] - if isinstance(tup, tuple): - if tup[0] not in ['p','P']: - _sctype2char_dict[tup[-1]] = tup[0] -_construct_char_code_lookup() - - -sctypes = {'int': [], - 'uint':[], - 'float':[], - 
'complex':[], - 'others':[bool,object,str,unicode,void]} - -def _add_array_type(typename, bits): - try: - t = allTypes['%s%d' % (typename, bits)] - except KeyError: - pass - else: - sctypes[typename].append(t) - -def _set_array_types(): - ibytes = [1, 2, 4, 8, 16, 32, 64] - fbytes = [2, 4, 8, 10, 12, 16, 32, 64] - for bytes in ibytes: - bits = 8*bytes - _add_array_type('int', bits) - _add_array_type('uint', bits) - for bytes in fbytes: - bits = 8*bytes - _add_array_type('float', bits) - _add_array_type('complex', 2*bits) - _gi = dtype('p') - if _gi.type not in sctypes['int']: - indx = 0 - sz = _gi.itemsize - _lst = sctypes['int'] - while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): - indx += 1 - sctypes['int'].insert(indx, _gi.type) - sctypes['uint'].insert(indx, dtype('P').type) -_set_array_types() - - -genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] - -def maximum_sctype(t): - """returns the sctype of highest precision of the same general kind as 't'""" - g = obj2sctype(t) - if g is None: - return t - t = g - name = t.__name__ - base, bits = _evalname(name) - if bits == 0: - return t - else: - return sctypes[base][-1] - -_python_types = {int : 'int_', - float: 'float_', - complex: 'complex_', - bool: 'bool_', - str: 'string_', - unicode: 'unicode_', - _types.BufferType: 'void', - } -def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, _types.TypeType): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] - -def issctype(rep): - """Determines whether the given object represents - a numeric array type.""" - if not isinstance(rep, (type, dtype)): - return False - try: - res = obj2sctype(rep) - if res and res != object_: - return True - return False - except: - return False - -def obj2sctype(rep, default=None): - try: - if issubclass(rep, generic): - return rep - except TypeError: - pass - if isinstance(rep, dtype): - return rep.type - if isinstance(rep, type): - return _python_type(rep) - if isinstance(rep, ndarray): - return rep.dtype.type - try: - res = dtype(rep) - except: - return default - return res.type - - -# This dictionary allows look up based on any alias for an array data-type -class _typedict(dict): - def __getitem__(self, obj): - return dict.__getitem__(self, obj2sctype(obj)) - -nbytes = _typedict() -_alignment = _typedict() -_maxvals = _typedict() -_minvals = _typedict() -def _construct_lookups(): - for name, val in typeinfo.iteritems(): - if not isinstance(val, tuple): - continue - obj = val[-1] - nbytes[obj] = val[2] / 8 - _alignment[obj] = val[3] - if (len(val) > 5): - _maxvals[obj] = val[4] - _minvals[obj] = val[5] - else: - _maxvals[obj] = None - _minvals[obj] = None - -_construct_lookups() - -def sctype2char(sctype): - sctype = obj2sctype(sctype) - if sctype is None: - raise ValueError, "unrecognized type" - return _sctype2char_dict[sctype] - -# Create dictionary of casting functions that wrap sequences -# indexed by type or type character - - -cast = _typedict() -ScalarType = [_types.IntType, _types.FloatType, - _types.ComplexType, _types.LongType, _types.BooleanType, - _types.StringType, _types.UnicodeType, _types.BufferType] -ScalarType.extend(_sctype2char_dict.keys()) -ScalarType = tuple(ScalarType) -for key in 
_sctype2char_dict.keys(): - cast[key] = lambda x, k=key : array(x, copy=False).astype(k) - - -_unicodesize = array('u','U1').itemsize - -# Create the typestring lookup dictionary -_typestr = _typedict() -for key in _sctype2char_dict.keys(): - if issubclass(key, allTypes['flexible']): - _typestr[key] = _sctype2char_dict[key] - else: - _typestr[key] = empty((1,),key).dtype.str[1:] - -# Make sure all typestrings are in sctypeDict -for key, val in _typestr.items(): - if val not in sctypeDict: - sctypeDict[val] = key - -# Add additional strings to the sctypeDict - -_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string', ('str', allTypes['string_']), - 'unicode', 'object', ('a', allTypes['string_'])] - -for name in _toadd: - if isinstance(name, tuple): - sctypeDict[name[0]] = name[1] - else: - sctypeDict[name] = allTypes['%s_' % name] - -del _toadd, name - -# Now add the types we've determined to this module -for key in allTypes: - globals()[key] = allTypes[key] - __all__.append(key) - -del key - -typecodes = {'Character':'S1', - 'Integer':'bhilqp', - 'UnsignedInteger':'BHILQP', - 'Float':'fdg', - 'Complex':'FDG', - 'AllInteger':'bBhHiIlLqQpP', - 'AllFloat':'fdgFDG', - 'All':'?bhilqpBHILQPfdgFDGSUVO'} - -# backwards compatibility --- deprecated name -typeDict = sctypeDict -typeNA = sctypeNA diff --git a/numpy/core/records.py b/numpy/core/records.py deleted file mode 100644 index b86a71d7b..000000000 --- a/numpy/core/records.py +++ /dev/null @@ -1,588 +0,0 @@ -# All of the functions allow formats to be a dtype -__all__ = ['record', 'recarray', 'format_parser'] - -import numeric as sb -from defchararray import chararray -import numerictypes as nt -import types -import os -import sys - -ndarray = sb.ndarray - -_byteorderconv = {'b':'>', - 'l':'<', - 'n':'=', - 'B':'>', - 'L':'<', - 'N':'=', - 'S':'s', - 's':'s', - '>':'>', - '<':'<', - '=':'=', - '|':'|', - 'I':'|', - 'i':'|'} - -# formats regular expression -# allows multidimension spec with a tuple syntax in front -# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' -# are equally allowed - -numfmt = nt.typeDict -_typestr = nt._typestr - -def find_duplicate(list): - """Find duplication in a list, return a list of duplicated elements""" - dup = [] - for i in range(len(list)): - if (list[i] in list[i+1:]): - if (list[i] not in dup): - dup.append(list[i]) - return dup - -class format_parser: - def __init__(self, formats, names, titles, aligned=False, byteorder=None): - self._parseFormats(formats, aligned) - self._setfieldnames(names, titles) - self._createdescr(byteorder) - - def _parseFormats(self, formats, aligned=0): - """ Parse the field formats """ - - if formats is None: - raise ValueError, "Need formats argument" - if isinstance(formats, list): - if len(formats) < 2: - formats.append('') - formats = ','.join(formats) - dtype = sb.dtype(formats, aligned) - fields = dtype.fields - if fields is None: - dtype = sb.dtype([('f1', dtype)], aligned) - fields = dtype.fields - keys = dtype.names - self._f_formats = [fields[key][0] for key in keys] - self._offsets = [fields[key][1] for key in keys] - self._nfields = len(keys) - - def _setfieldnames(self, names, titles): - """convert input field names into a list and assign to the _names - attribute """ - - if (names): - if (type(names) in [types.ListType, types.TupleType]): - pass - elif (type(names) == types.StringType): - names = names.split(',') - else: - raise NameError, "illegal input names %s" % `names` - - self._names = [n.strip() for n in names[:self._nfields]] - else: - self._names = 
[] - - # if the names are not specified, they will be assigned as - # "f0, f1, f2,..." - # if not enough names are specified, they will be assigned as "f[n], - # f[n+1],..." etc. where n is the number of specified names..." - self._names += ['f%d' % i for i in range(len(self._names), - self._nfields)] - # check for redundant names - _dup = find_duplicate(self._names) - if _dup: - raise ValueError, "Duplicate field names: %s" % _dup - - if (titles): - self._titles = [n.strip() for n in titles[:self._nfields]] - else: - self._titles = [] - titles = [] - - if (self._nfields > len(titles)): - self._titles += [None]*(self._nfields-len(titles)) - - def _createdescr(self, byteorder): - descr = sb.dtype({'names':self._names, - 'formats':self._f_formats, - 'offsets':self._offsets, - 'titles':self._titles}) - if (byteorder is not None): - byteorder = _byteorderconv[byteorder[0]] - descr = descr.newbyteorder(byteorder) - - self._descr = descr - -class record(nt.void): - def __repr__(self): - return self.__str__() - - def __str__(self): - return str(self.item()) - - def __getattribute__(self, attr): - if attr in ['setfield', 'getfield', 'dtype']: - return nt.void.__getattribute__(self, attr) - try: - return nt.void.__getattribute__(self, attr) - except AttributeError: - pass - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - obj = self.getfield(*res[:2]) - # if it has fields return a recarray, - # if it's a string ('SU') return a chararray - # otherwise return the object - try: - dt = obj.dtype - except AttributeError: - return obj - if dt.fields: - return obj.view(obj.__class__) - if dt.char in 'SU': - return obj.view(chararray) - return obj - else: - raise AttributeError, "'record' object has no "\ - "attribute '%s'" % attr - - - def __setattr__(self, attr, val): - if attr in ['setfield', 'getfield', 'dtype']: - raise AttributeError, "Cannot set '%s' attribute" % attr - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - return self.setfield(val, *res[:2]) - else: - if getattr(self,attr,None): - return nt.void.__setattr__(self, attr, val) - else: - raise AttributeError, "'record' object has no "\ - "attribute '%s'" % attr - - def pprint(self): - # pretty-print all fields - names = self.dtype.names - maxlen = max([len(name) for name in names]) - rows = [] - fmt = '%% %ds: %%s' %maxlen - for name in names: - rows.append(fmt%(name, getattr(self, name))) - return "\n".join(rows) - -# The recarray is almost identical to a standard array (which supports -# named fields already) The biggest difference is that it can use -# attribute-lookup to find the fields and it is constructed using -# a record. 
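# As a concrete sketch of that attribute lookup, using the numpy.rec namespace
# of released NumPy (which exposes the fromrecords helper defined later in this
# module); the field names and values are arbitrary:
#
#     import numpy as np
#
#     r = np.rec.fromrecords([(1, 2.5, 'abc'), (2, 3.5, 'de')],
#                            names='count,value,label')
#     print(r.value)       # field pulled out by attribute lookup
#     print(r[0])          # indexing yields a single record
#     print(r[0].label)    # records support the same attribute access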
- -# If byteorder is given it forces a particular byteorder on all -# the fields (and any subfields) - -class recarray(ndarray): - def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, - formats=None, names=None, titles=None, - byteorder=None, aligned=False): - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - if buf is None: - self = ndarray.__new__(subtype, shape, (record, descr)) - else: - self = ndarray.__new__(subtype, shape, (record, descr), - buffer=buf, offset=offset, - strides=strides) - return self - - def __getattribute__(self, attr): - try: - return object.__getattribute__(self, attr) - except AttributeError: # attr must be a fieldname - pass - fielddict = ndarray.__getattribute__(self,'dtype').fields - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError, "record array has no attribute %s" % attr - obj = self.getfield(*res) - # if it has fields return a recarray, otherwise return - # normal array - if obj.dtype.fields: - return obj - if obj.dtype.char in 'SU': - return obj.view(chararray) - return obj.view(ndarray) - -# Save the dictionary -# If the attr is a field name and not in the saved dictionary -# Undo any "setting" of the attribute and do a setfield -# Thus, you can't create attributes on-the-fly that are field names. - - def __setattr__(self, attr, val): - newattr = attr not in self.__dict__ - try: - ret = object.__setattr__(self, attr, val) - except: - fielddict = ndarray.__getattribute__(self,'dtype').fields or {} - if attr not in fielddict: - exctype, value = sys.exc_info()[:2] - raise exctype, value - else: - fielddict = ndarray.__getattribute__(self,'dtype').fields or {} - if attr not in fielddict: - return ret - if newattr: # We just added this one - try: # or this setattr worked on an internal - # attribute. 
- object.__delattr__(self, attr) - except: - return ret - try: - res = fielddict[attr][:2] - except (TypeError,KeyError): - raise AttributeError, "record array has no attribute %s" % attr - return self.setfield(val, *res) - - def __getitem__(self, indx): - obj = ndarray.__getitem__(self, indx) - if (isinstance(obj, ndarray) and obj.dtype.isbuiltin): - return obj.view(ndarray) - return obj - - def field(self, attr, val=None): - if isinstance(attr, int): - names = ndarray.__getattribute__(self,'dtype').names - attr = names[attr] - - fielddict = ndarray.__getattribute__(self,'dtype').fields - - res = fielddict[attr][:2] - - if val is None: - obj = self.getfield(*res) - if obj.dtype.fields: - return obj - if obj.dtype.char in 'SU': - return obj.view(chararray) - return obj.view(ndarray) - else: - return self.setfield(val, *res) - - def view(self, obj): - try: - if issubclass(obj, ndarray): - return ndarray.view(self, obj) - except TypeError: - pass - dtype = sb.dtype(obj) - if dtype.fields is None: - return self.__array__().view(dtype) - return ndarray.view(self, obj) - -def fromarrays(arrayList, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a record array from a (flat) list of arrays - - >>> x1=N.array([1,2,3,4]) - >>> x2=N.array(['a','dd','xyz','12']) - >>> x3=N.array([1.1,2,3,4]) - >>> r = fromarrays([x1,x2,x3],names='a,b,c') - >>> print r[1] - (2, 'dd', 2.0) - >>> x1[1]=34 - >>> r.a - array([1, 2, 3, 4]) - """ - - arrayList = [sb.asarray(x) for x in arrayList] - - if shape is None or shape == 0: - shape = arrayList[0].shape - - if isinstance(shape, int): - shape = (shape,) - - if formats is None and dtype is None: - # go through each object in the list to see if it is an ndarray - # and determine the formats. - formats = '' - for obj in arrayList: - if not isinstance(obj, ndarray): - raise ValueError, "item in the array list must be an ndarray." - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, nt.flexible): - formats += `obj.itemsize` - formats += ',' - formats = formats[:-1] - - if dtype is not None: - descr = sb.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - - # Determine shape from data-type. - if len(descr) != len(arrayList): - raise ValueError, "mismatch between the number of fields "\ - "and the number of arrays" - - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - - for k, obj in enumerate(arrayList): - nn = len(descr[k].shape) - testshape = obj.shape[:len(obj.shape)-nn] - if testshape != shape: - raise ValueError, "array-shape mismatch in array %d" % k - - _array = recarray(shape, descr) - - # populate the record array (makes a copy) - for i in range(len(arrayList)): - _array[_names[i]] = arrayList[i] - - return _array - -# shape must be 1-d if you use list of lists... -def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """ create a recarray from a list of records in text form - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined - - r=fromrecords([(2,3.,'abc')]*100000) - - it can be slow. - - If formats is None, then this will auto-detect formats. Use list of - tuples rather than list of lists for faster processing. 
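    For example, supplying formats up front skips the object-array pass used
    for auto-detection (a sketch assuming the numpy.rec namespace of released
    NumPy; the field names are arbitrary):

        import numpy as np

        rows = [(2, 3.0, 'abc')] * 100000
        r = np.rec.fromrecords(rows, formats='i4,f8,S3', names='n,x,s')
        print(r.n.sum())     # fields behave as ordinary arrays once constructed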
- - >>> r=fromrecords([(456,'dbe',1.2),(2,'de',1.3)],names='col1,col2,col3') - >>> print r[0] - (456, 'dbe', 1.2) - >>> r.col1 - array([456, 2]) - >>> r.col2 - chararray(['dbe', 'de'], - dtype='|S3') - >>> import cPickle - >>> print cPickle.loads(cPickle.dumps(r)) - [(456, 'dbe', 1.2) (2, 'de', 1.3)] - """ - - nfields = len(recList[0]) - if formats is None and dtype is None: # slower - obj = sb.array(recList, dtype=object) - arrlist = [sb.array(obj[...,i].tolist()) for i in xrange(nfields)] - return fromarrays(arrlist, formats=formats, shape=shape, names=names, - titles=titles, aligned=aligned, byteorder=byteorder) - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - try: - retval = sb.array(recList, dtype = descr) - except TypeError: # list of lists instead of list of tuples - if (shape is None or shape == 0): - shape = len(recList) - if isinstance(shape, (int, long)): - shape = (shape,) - if len(shape) > 1: - raise ValueError, "Can only deal with 1-d array." - _array = recarray(shape, descr) - for k in xrange(_array.size): - _array[k] = tuple(recList[k]) - return _array - else: - if shape is not None and retval.shape != shape: - retval.shape = shape - - res = retval.view(recarray) - - res.dtype = sb.dtype((record, res.dtype)) - return res - - -def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a (read-only) record array from binary data contained in - a string""" - - - if dtype is None and formats is None: - raise ValueError, "Must have dtype= or formats=" - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - if (shape is None or shape == 0 or shape == -1): - shape = (len(datastring)-offset) / itemsize - - _array = recarray(shape, descr, buf=datastring, offset=offset) - return _array - -def get_remaining_size(fd): - try: - fn = fd.fileno() - except AttributeError: - return os.path.getsize(fd.name) - fd.tell() - st = os.fstat(fn) - size = st.st_size - fd.tell() - return size - -def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Create an array from binary file data - - If file is a string then that file is opened, else it is assumed - to be a file object. 
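    (The doctest below exercises the file-object path of fromfile. For the
    in-memory variant, the fromstring constructor above follows the same
    keyword conventions; a self-contained sketch, with the buffer built on the
    spot purely for illustration:

        import numpy as np

        a = np.zeros(4, dtype='i4,f8')            # four packed records in memory
        buf = a.tobytes()
        r = np.rec.fromstring(buf, formats='i4,f8', shape=4)
        print(r.shape)                            # (4,)
    )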
- - >>> from tempfile import TemporaryFile - >>> a = N.empty(10,dtype='f8,i4,a5') - >>> a[5] = (0.5,10,'abcde') - >>> - >>> fd=TemporaryFile() - >>> a = a.newbyteorder('<') - >>> a.tofile(fd) - >>> - >>> fd.seek(0) - >>> r=fromfile(fd, formats='f8,i4,a5', shape=10, byteorder='<') - >>> print r[5] - (0.5, 10, 'abcde') - >>> r.shape - (10,) - """ - - if (shape is None or shape == 0): - shape = (-1,) - elif isinstance(shape, (int, long)): - shape = (shape,) - - name = 0 - if isinstance(fd, str): - name = 1 - fd = open(fd, 'rb') - if (offset > 0): - fd.seek(offset, 1) - size = get_remaining_size(fd) - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - - shapeprod = sb.array(shape).prod() - shapesize = shapeprod*itemsize - if shapesize < 0: - shape = list(shape) - shape[ shape.index(-1) ] = size / -shapesize - shape = tuple(shape) - shapeprod = sb.array(shape).prod() - - nbytes = shapeprod*itemsize - - if nbytes > size: - raise ValueError( - "Not enough bytes left in file for specified shape and type") - - # create the array - _array = recarray(shape, descr) - nbytesread = fd.readinto(_array.data) - if nbytesread != nbytes: - raise IOError("Didn't read as many bytes as expected") - if name: - fd.close() - - return _array - -def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, copy=True): - """Construct a record array from a wide-variety of objects. - """ - - if isinstance(obj, (type(None), str, file)) and (formats is None) \ - and (dtype is None): - raise ValueError("Must define formats (or dtype) if object is "\ - "None, string, or an open file") - - kwds = {} - if dtype is not None: - dtype = sb.dtype(dtype) - elif formats is not None: - dtype = format_parser(formats, names, titles, - aligned, byteorder)._descr - else: - kwds = {'formats': formats, - 'names' : names, - 'titles' : titles, - 'aligned' : aligned, - 'byteorder' : byteorder - } - - if obj is None: - if shape is None: - raise ValueError("Must define a shape if obj is None") - return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) - elif isinstance(obj, str): - return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) - - elif isinstance(obj, (list, tuple)): - if isinstance(obj[0], (tuple, list)): - return fromrecords(obj, dtype=dtype, shape=shape, **kwds) - else: - return fromarrays(obj, dtype=dtype, shape=shape, **kwds) - - elif isinstance(obj, recarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new - - elif isinstance(obj, file): - return fromfile(obj, dtype=dtype, shape=shape, offset=offset) - - elif isinstance(obj, ndarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - res = new.view(recarray) - if issubclass(res.dtype.type, nt.void): - res.dtype = sb.dtype((record, res.dtype)) - return res - - else: - interface = getattr(obj, "__array_interface__", None) - if interface is None or not isinstance(interface, dict): - raise ValueError("Unknown input type") - obj = sb.array(obj) - if dtype is not None and (obj.dtype != dtype): - obj = obj.view(dtype) - res = obj.view(recarray) - if issubclass(res.dtype.type, nt.void): - res.dtype = sb.dtype((record, res.dtype)) - return res diff --git a/numpy/core/setup.py b/numpy/core/setup.py deleted file mode 
100644 index a8fd46911..000000000 --- a/numpy/core/setup.py +++ /dev/null @@ -1,347 +0,0 @@ -import imp -import os -import sys -from os.path import join -from numpy.distutils import log -from distutils.dep_util import newer - -FUNCTIONS_TO_CHECK = [ - ('expl', 'HAVE_LONGDOUBLE_FUNCS'), - ('expf', 'HAVE_FLOAT_FUNCS'), - ('log1p', 'HAVE_LOG1P'), - ('expm1', 'HAVE_EXPM1'), - ('asinh', 'HAVE_INVERSE_HYPERBOLIC'), - ('atanhf', 'HAVE_INVERSE_HYPERBOLIC_FLOAT'), - ('atanhl', 'HAVE_INVERSE_HYPERBOLIC_LONGDOUBLE'), - ('isnan', 'HAVE_ISNAN'), - ('isinf', 'HAVE_ISINF'), - ('rint', 'HAVE_RINT'), - ] - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration,dot_join - from numpy.distutils.system_info import get_info, default_lib_dirs - - config = Configuration('core',parent_package,top_path) - local_dir = config.local_path - codegen_dir = join(local_dir,'code_generators') - - generate_umath_py = join(codegen_dir,'generate_umath.py') - n = dot_join(config.name,'generate_umath') - generate_umath = imp.load_module('_'.join(n.split('.')), - open(generate_umath_py,'U'),generate_umath_py, - ('.py','U',1)) - - header_dir = 'include/numpy' # this is relative to config.path_in_package - - def generate_config_h(ext, build_dir): - target = join(build_dir,'config.h') - if newer(__file__,target): - config_cmd = config.get_config_cmd() - log.info('Generating %s',target) - tc = generate_testcode(target) - from distutils import sysconfig - python_include = sysconfig.get_python_inc() - python_h = join(python_include, 'Python.h') - if not os.path.isfile(python_h): - raise SystemError,\ - "Non-existing %s. Perhaps you need to install"\ - " python-dev|python-devel." % (python_h) - result = config_cmd.try_run(tc,include_dirs=[python_include], - library_dirs = default_lib_dirs) - if not result: - raise SystemError,"Failed to test configuration. "\ - "See previous error messages for more information." - - # Python 2.3 causes a segfault when - # trying to re-acquire the thread-state - # which is done in error-handling - # ufunc code. NPY_ALLOW_C_API and friends - # cause the segfault. So, we disable threading - # for now. - if sys.version[:5] < '2.4.2': - nosmp = 1 - else: - # Perhaps a fancier check is in order here. - # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. 
- try: - nosmp = os.environ['NPY_NOSMP'] - nosmp = 1 - except KeyError: - nosmp = 0 - if nosmp: moredefs = [('NPY_ALLOW_THREADS', '0')] - else: moredefs = [] - # - mathlibs = [] - tc = testcode_mathlib() - mathlibs_choices = [[],['m'],['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0,mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.try_run(tc,libraries=libs): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - ext.libraries.extend(mathlibs) - moredefs.append(('MATHLIB',','.join(mathlibs))) - - def check_func(func_name): - return config_cmd.check_func(func_name, - libraries=mathlibs, decl=False, - headers=['math.h']) - - for func_name, defsymbol in FUNCTIONS_TO_CHECK: - if check_func(func_name): - moredefs.append(defsymbol) - - if sys.platform == 'win32': - moredefs.append('NPY_NO_SIGNAL') - - if sys.platform=='win32' or os.name=='nt': - from distutils.msvccompiler import get_build_architecture - a = get_build_architecture() - print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % (a, os.name, sys.platform) - if a == 'AMD64': - moredefs.append('DISTUTILS_USE_SDK') - - if sys.version[:3] < '2.4': - if config_cmd.check_func('strtod', decl=False, - headers=['stdlib.h']): - moredefs.append(('PyOS_ascii_strtod', 'strtod')) - - target_f = open(target,'a') - for d in moredefs: - if isinstance(d,str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0],d[1])) - if not nosmp: # default is to use WITH_THREAD - target_f.write('#ifdef WITH_THREAD\n#define NPY_ALLOW_THREADS 1\n#else\n#define NPY_ALLOW_THREADS 0\n#endif\n') - target_f.close() - print 'File:',target - target_f = open(target) - print target_f.read() - target_f.close() - print 'EOF' - else: - mathlibs = [] - target_f = open(target) - for line in target_f.readlines(): - s = '#define MATHLIB' - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - target_f.close() - - ext.libraries.extend(mathlibs) - - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - - config.add_data_files((header_dir,target)) - return target - - def generate_api_func(module_name): - def generate_api(ext, build_dir): - script = join(codegen_dir, module_name + '.py') - sys.path.insert(0, codegen_dir) - try: - m = __import__(module_name) - log.info('executing %s', script) - h_file, c_file, doc_file = m.generate_api(build_dir) - finally: - del sys.path[0] - config.add_data_files((header_dir, h_file), - (header_dir, doc_file)) - return (h_file,) - return generate_api - - generate_array_api = generate_api_func('generate_array_api') - generate_ufunc_api = generate_api_func('generate_ufunc_api') - - def generate_umath_c(ext,build_dir): - target = join(build_dir,'__umath_generated.c') - script = generate_umath_py - if newer(script,target): - f = open(target,'w') - f.write(generate_umath.make_code(generate_umath.defdict, - generate_umath.__file__)) - f.close() - return [] - - config.add_data_files('include/numpy/*.h') - config.add_include_dirs('src') - - config.numpy_include_dirs.extend(config.paths('include')) - - deps = [join('src','arrayobject.c'), - join('src','arraymethods.c'), - join('src','scalartypes.inc.src'), - join('src','arraytypes.inc.src'), - join('src','_signbit.c'), - join('src','_isnan.c'), - join('src','ucsnarrow.c'), - 
join('include','numpy','*object.h'), - 'include/numpy/fenv/fenv.c', - 'include/numpy/fenv/fenv.h', - join(codegen_dir,'genapi.py'), - join(codegen_dir,'*.txt') - ] - - # Don't install fenv unless we need them. - if sys.platform == 'cygwin': - config.add_data_dir('include/numpy/fenv') - - config.add_extension('multiarray', - sources = [join('src','multiarraymodule.c'), - generate_config_h, - generate_array_api, - join('src','scalartypes.inc.src'), - join('src','arraytypes.inc.src'), - join(codegen_dir,'generate_array_api.py'), - join('*.py') - ], - depends = deps, - ) - - config.add_extension('umath', - sources = [generate_config_h, - join('src','umathmodule.c.src'), - generate_umath_c, - generate_ufunc_api, - join('src','scalartypes.inc.src'), - join('src','arraytypes.inc.src'), - ], - depends = [join('src','ufuncobject.c'), - generate_umath_py, - join(codegen_dir,'generate_ufunc_api.py'), - ]+deps, - ) - - config.add_extension('_sort', - sources=[join('src','_sortmodule.c.src'), - generate_config_h, - generate_array_api, - ], - ) - - config.add_extension('scalarmath', - sources=[join('src','scalarmathmodule.c.src'), - generate_config_h, - generate_array_api, - generate_ufunc_api], - ) - - # Configure blasdot - blas_info = get_info('blas_opt',0) - #blas_info = {} - def get_dotblas_sources(ext, build_dir): - if blas_info: - if ('NO_ATLAS_INFO',1) in blas_info.get('define_macros',[]): - return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient. - return ext.depends[:1] - return None # no extension module will be built - - config.add_extension('_dotblas', - sources = [get_dotblas_sources], - depends=[join('blasdot','_dotblas.c'), - join('blasdot','cblas.h'), - ], - include_dirs = ['blasdot'], - extra_info = blas_info - ) - - - config.add_data_dir('tests') - config.make_svn_version_py() - - return config - -def testcode_mathlib(): - return """\ -/* check whether libm is broken */ -#include -int main(int argc, char *argv[]) -{ - return exp(-720.) 
> 1.0; /* typically an IEEE denormal */ -} -""" - -import sys -def generate_testcode(target): - if sys.platform == 'win32': - target = target.replace('\\','\\\\') - testcode = [r''' -#include -#include -#include - -int main(int argc, char **argv) -{ - - FILE *fp; - - fp = fopen("'''+target+'''","w"); - '''] - - c_size_test = r''' -#ifndef %(sz)s - fprintf(fp,"#define %(sz)s %%d\n", sizeof(%(type)s)); -#else - fprintf(fp,"/* #define %(sz)s %%d */\n", %(sz)s); -#endif -''' - for sz, t in [('SIZEOF_SHORT', 'short'), - ('SIZEOF_INT', 'int'), - ('SIZEOF_LONG', 'long'), - ('SIZEOF_FLOAT', 'float'), - ('SIZEOF_DOUBLE', 'double'), - ('SIZEOF_LONG_DOUBLE', 'long double'), - ('SIZEOF_PY_INTPTR_T', 'Py_intptr_t'), - ]: - testcode.append(c_size_test % {'sz' : sz, 'type' : t}) - - testcode.append('#ifdef PY_LONG_LONG') - testcode.append(c_size_test % {'sz' : 'SIZEOF_LONG_LONG', - 'type' : 'PY_LONG_LONG'}) - testcode.append(c_size_test % {'sz' : 'SIZEOF_PY_LONG_LONG', - 'type' : 'PY_LONG_LONG'}) - - - testcode.append(r''' -#else - fprintf(fp, "/* PY_LONG_LONG not defined */\n"); -#endif -#ifndef CHAR_BIT - { - unsigned char var = 2; - int i=0; - while (var >= 2) { - var = var << 1; - i++; - } - fprintf(fp,"#define CHAR_BIT %d\n", i+1); - } -#else - fprintf(fp, "/* #define CHAR_BIT %d */\n", CHAR_BIT); -#endif - fclose(fp); - return 0; -} -''') - testcode = '\n'.join(testcode) - return testcode - -if __name__=='__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/core/src/_isnan.c b/numpy/core/src/_isnan.c deleted file mode 100644 index bff6e0a49..000000000 --- a/numpy/core/src/_isnan.c +++ /dev/null @@ -1,46 +0,0 @@ -/* Adapted from cephes */ - -static int -isnan(double x) -{ - union - { - double d; - unsigned short s[4]; - unsigned int i[2]; - } u; - - u.d = x; - -#if SIZEOF_INT == 4 - -#ifdef WORDS_BIGENDIAN /* defined in pyconfig.h */ - if( ((u.i[0] & 0x7ff00000) == 0x7ff00000) - && (((u.i[0] & 0x000fffff) != 0) || (u.i[1] != 0))) - return 1; -#else - if( ((u.i[1] & 0x7ff00000) == 0x7ff00000) - && (((u.i[1] & 0x000fffff) != 0) || (u.i[0] != 0))) - return 1; -#endif - -#else /* SIZEOF_INT != 4 */ - -#ifdef WORDS_BIGENDIAN - if( (u.s[0] & 0x7ff0) == 0x7ff0) - { - if( ((u.s[0] & 0x000f) | u.s[1] | u.s[2] | u.s[3]) != 0 ) - return 1; - } -#else - if( (u.s[3] & 0x7ff0) == 0x7ff0) - { - if( ((u.s[3] & 0x000f) | u.s[2] | u.s[1] | u.s[0]) != 0 ) - return 1; - } -#endif - -#endif /* SIZEOF_INT */ - - return 0; -} diff --git a/numpy/core/src/_signbit.c b/numpy/core/src/_signbit.c deleted file mode 100644 index 2be3649fd..000000000 --- a/numpy/core/src/_signbit.c +++ /dev/null @@ -1,32 +0,0 @@ -/* Adapted from cephes */ - -static int -signbit(double x) -{ - union - { - double d; - short s[4]; - int i[2]; - } u; - - u.d = x; - -#if SIZEOF_INT == 4 - -#ifdef WORDS_BIGENDIAN /* defined in pyconfig.h */ - return u.i[0] < 0; -#else - return u.i[1] < 0; -#endif - -#else /* SIZEOF_INT != 4 */ - -#ifdef WORDS_BIGENDIAN - return u.s[0] < 0; -#else - return u.s[3] < 0; -#endif - -#endif /* SIZEOF_INT */ -} diff --git a/numpy/core/src/_sortmodule.c.src b/numpy/core/src/_sortmodule.c.src deleted file mode 100644 index 52cfe6219..000000000 --- a/numpy/core/src/_sortmodule.c.src +++ /dev/null @@ -1,490 +0,0 @@ -/* -*- c -*- */ - -/* The purpose of this module is to add faster sort functions - that are type-specific. This is done by altering the - function table for the builtin descriptors. 
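   From Python, the routines registered here surface as the kind= argument of
   sort and argsort; a minimal sketch of that public-API view (ordinary NumPy
   usage, not a direct call into this C module):

       import numpy as np

       a = np.array([3, 1, 2, 1], dtype=np.int32)
       print(np.sort(a, kind='mergesort'))    # stable merge sort -> [1 1 2 3]
       print(np.argsort(a, kind='heapsort'))  # indices that would sort a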
- - These sorting functions are copied almost directly from numarray - with a few modifications (complex comparisons compare the imaginary - part if the real parts are equal, for example), and the names - are changed. - - The original sorting code is due to Charles R. Harris who wrote - it for numarray. -*/ - -/* Quick sort is usually the fastest, but the worst case scenario can - be slower than the merge and heap sorts. The merge sort requires - extra memory and so for large arrays may not be useful. - - The merge sort is *stable*, meaning that equal components - are unmoved from their entry versions, so it can be used to - implement lexigraphic sorting on multiple keys. - - The heap sort is included for completeness. -*/ - - -#include "Python.h" -#include "numpy/noprefix.h" - -#define PYA_QS_STACK 100 -#define SMALL_QUICKSORT 15 -#define SMALL_MERGESORT 20 -#define SWAP(a,b) {SWAP_temp = (b); (b)=(a); (a) = SWAP_temp;} -#define STDC_LT(a,b) ((a) < (b)) -#define STDC_LE(a,b) ((a) <= (b)) -#define STDC_EQ(a,b) ((a) == (b)) -#define NUMC_LT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag): ((p).real < (q).real))) -#define NUMC_LE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag): ((p).real <= (q).real))) -#define NUMC_EQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define STRING_LT(pa, pb, len) (strncmp(pa, pb, len) < 0) -#define STRING_LE(pa, pb, len) (strncmp(pa, pb, len) <= 0) -#define STRING_EQ(pa, pb, len) (strncmp(pa, pb, len) == 0) -#define UNICODE_LT(pa, pb, len) (PyArray_CompareUCS4(pa, pb, len) < 0) -#define UNICODE_LE(pa, pb, len) (PyArray_CompareUCS4(pa, pb, len) <= 0) -#define UNICODE_EQ(pa, pb, len) (PyArray_CompareUCS4(pa, pb, len) == 0) - - -/**begin repeat - #TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE# - #type=Bool,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #lessthan=STDC_LT*14,NUMC_LT*3# - #lessequal=STDC_LE*14,NUMC_LE*3# -**/ -static int -@TYPE@_quicksort(@type@ *start, intp num, void *unused) -{ - @type@ *pl = start; - @type@ *pr = start + num - 1; - @type@ vp, SWAP_temp; - @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pt; - - for(;;) { - while ((pr - pl) > SMALL_QUICKSORT) { - /* quicksort partition */ - pm = pl + ((pr - pl) >> 1); - if (@lessthan@(*pm,*pl)) SWAP(*pm,*pl); - if (@lessthan@(*pr,*pm)) SWAP(*pr,*pm); - if (@lessthan@(*pm,*pl)) SWAP(*pm,*pl); - vp = *pm; - pi = pl; - pj = pr - 1; - SWAP(*pm,*pj); - for(;;) { - do ++pi; while (@lessthan@(*pi,vp)); - do --pj; while (@lessthan@(vp,*pj)); - if (pi >= pj) break; - SWAP(*pi,*pj); - } - SWAP(*pi,*(pr-1)); - /* push largest partition on stack */ - if (pi - pl < pr - pi) { - *sptr++ = pi + 1; - *sptr++ = pr; - pr = pi - 1; - } - else { - *sptr++ = pl; - *sptr++ = pi - 1; - pl = pi + 1; - } - } - - /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { - vp = *pi; - for(pj = pi, pt = pi - 1; pj > pl && @lessthan@(vp, *pt);) { - *pj-- = *pt--; - } - *pj = vp; - } - if (sptr == stack) break; - pr = *(--sptr); - pl = *(--sptr); - } - - return 0; -} - -static int -@TYPE@_aquicksort(@type@ *v, intp* tosort, intp num, void *unused) -{ - @type@ vp; - intp *pl, *pr, SWAP_temp; - intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pt, vi; - - pl = tosort; - pr = tosort + num - 1; - - for(;;) { - while ((pr - pl) > SMALL_QUICKSORT) { - /* quicksort partition */ - pm = pl + ((pr - pl) >> 1); - if (@lessthan@(v[*pm],v[*pl])) 
SWAP(*pm,*pl); - if (@lessthan@(v[*pr],v[*pm])) SWAP(*pr,*pm); - if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl); - vp = v[*pm]; - pi = pl; - pj = pr - 1; - SWAP(*pm,*pj); - for(;;) { - do ++pi; while (@lessthan@(v[*pi],vp)); - do --pj; while (@lessthan@(vp,v[*pj])); - if (pi >= pj) break; - SWAP(*pi,*pj); - } - SWAP(*pi,*(pr-1)); - /* push largest partition on stack */ - if (pi - pl < pr - pi) { - *sptr++ = pi + 1; - *sptr++ = pr; - pr = pi - 1; - } - else { - *sptr++ = pl; - *sptr++ = pi - 1; - pl = pi + 1; - } - } - - /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { - vi = *pi; - vp = v[vi]; - for(pj = pi, pt = pi - 1; pj > pl && @lessthan@(vp, v[*pt]);) { - *pj-- = *pt--; - } - *pj = vi; - } - if (sptr == stack) break; - pr = *(--sptr); - pl = *(--sptr); - } - - return 0; -} - - -static int -@TYPE@_heapsort(@type@ *start, intp n, void *unused) -{ - @type@ tmp, *a; - intp i,j,l; - - /* The array needs to be offset by one for heapsort indexing */ - a = start - 1; - - for (l = n>>1; l > 0; --l) { - tmp = a[l]; - for (i = l, j = l<<1; j <= n;) { - if (j < n && @lessthan@(a[j], a[j+1])) - j += 1; - if (@lessthan@(tmp, a[j])) { - a[i] = a[j]; - i = j; - j += j; - } - else - break; - } - a[i] = tmp; - } - - for (; n > 1;) { - tmp = a[n]; - a[n] = a[1]; - n -= 1; - for (i = 1, j = 2; j <= n;) { - if (j < n && @lessthan@(a[j], a[j+1])) - j++; - if (@lessthan@(tmp, a[j])) { - a[i] = a[j]; - i = j; - j += j; - } - else - break; - } - a[i] = tmp; - } - - return 0; -} - -static int -@TYPE@_aheapsort(@type@ *v, intp *tosort, intp n, void *unused) -{ - intp *a, i,j,l, tmp; - /* The arrays need to be offset by one for heapsort indexing */ - a = tosort - 1; - - for (l = n>>1; l > 0; --l) { - tmp = a[l]; - for (i = l, j = l<<1; j <= n;) { - if (j < n && @lessthan@(v[a[j]], v[a[j+1]])) - j += 1; - if (@lessthan@(v[tmp], v[a[j]])) { - a[i] = a[j]; - i = j; - j += j; - } - else - break; - } - a[i] = tmp; - } - - for (; n > 1;) { - tmp = a[n]; - a[n] = a[1]; - n -= 1; - for (i = 1, j = 2; j <= n;) { - if (j < n && @lessthan@(v[a[j]], v[a[j+1]])) - j++; - if (@lessthan@(v[tmp], v[a[j]])) { - a[i] = a[j]; - i = j; - j += j; - } - else - break; - } - a[i] = tmp; - } - - return 0; -} - -static void -@TYPE@_mergesort0(@type@ *pl, @type@ *pr, @type@ *pw) -{ - @type@ vp, *pi, *pj, *pk, *pm; - - if (pr - pl > SMALL_MERGESORT) { - /* merge sort */ - pm = pl + ((pr - pl + 1)>>1); - @TYPE@_mergesort0(pl,pm-1,pw); - @TYPE@_mergesort0(pm,pr,pw); - for(pi = pw, pj = pl; pj < pm; ++pi, ++pj) { - *pi = *pj; - } - for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) { - if (@lessequal@(*pk,*pj)) { - *pm = *pk; - ++pk; - } - else { - *pm = *pj; - ++pj; - } - } - for(; pk < pi; ++pm, ++pk) { - *pm = *pk; - } - } - else { - /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { - vp = *pi; - for(pj = pi, pk = pi - 1; pj > pl && @lessthan@(vp, *pk); --pj, --pk) { - *pj = *pk; - } - *pj = vp; - } - } -} - -static int -@TYPE@_mergesort(@type@ *start, intp num, void *unused) -{ - @type@ *pl, *pr, *pw; - - pl = start; pr = pl + num - 1; - pw = (@type@ *) PyDataMem_NEW(((1+num/2))*sizeof(@type@)); - - if (!pw) { - PyErr_NoMemory(); - return -1; - } - - @TYPE@_mergesort0(pl, pr, pw); - PyDataMem_FREE(pw); - - return 0; -} - -static void -@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw) -{ - @type@ vp; - intp vi, *pi, *pj, *pk, *pm; - - if (pr - pl > SMALL_MERGESORT) { - /* merge sort */ - pm = pl + ((pr - pl + 1)>>1); - @TYPE@_amergesort0(pl,pm-1,v,pw); - @TYPE@_amergesort0(pm,pr,v,pw); - for(pi = pw, pj = pl; pj < 
pm; ++pi, ++pj) { - *pi = *pj; - } - for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) { - if (@lessequal@(v[*pk],v[*pj])) { - *pm = *pk; - ++pk; - } - else { - *pm = *pj; - ++pj; - } - } - for(; pk < pi; ++pm, ++pk) { - *pm = *pk; - } - } - else { - /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { - vi = *pi; - vp = v[vi]; - for(pj = pi, pk = pi - 1; pj > pl && @lessthan@(vp, v[*pk]); --pj, --pk) { - *pj = *pk; - } - *pj = vi; - } - } -} - -static int -@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, void *unused) -{ - intp *pl, *pr, *pw; - - pl = tosort; pr = pl + num - 1; - pw = PyDimMem_NEW((1+num/2)); - - if (!pw) { - PyErr_NoMemory(); - return -1; - } - - @TYPE@_amergesort0(pl, pr, v, pw); - PyDimMem_FREE(pw); - - return 0; -} -/**end repeat**/ - -/**begin repeat - #TYPE=STRING, UNICODE# - #type=char, PyArray_UCS4# - #lessthan=STRING_LT, UNICODE_LT# - #lessequal=STRING_LE, UNICODE_LE# -**/ -static void -@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw, int len) -{ - @type@ *vp; - intp vi, *pi, *pj, *pk, *pm; - - if (pr - pl > SMALL_MERGESORT) { - /* merge sort */ - pm = pl + ((pr - pl + 1)>>1); - @TYPE@_amergesort0(pl,pm-1,v,pw,len); - @TYPE@_amergesort0(pm,pr,v,pw,len); - for(pi = pw, pj = pl; pj < pm;) { - *pi++ = *pj++; - } - for(pk = pw, pm = pl; pk < pi && pj <= pr;) { - if (@lessequal@(v + (*pk)*len, v + (*pj)*len, len)) { - *pm++ = *pk++; - } else { - *pm++ = *pj++; - } - } - while(pk < pi) { - *pm++ = *pk++; - } - } else { - /* insertion sort */ - for(pi = pl + 1; pi <= pr; ++pi) { - vi = *pi; - vp = v + vi*len; - pj = pi; - pk = pi -1; - for(; pj > pl && @lessthan@(vp, v + (*pk)*len, len);) { - *pj-- = *pk--; - } - *pj = vi; - } - } -} - -static int -@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, PyArrayObject *arr) -{ - intp *pl, *pr, *pw; - int elsize, chars; - - elsize = arr->descr->elsize; - - chars = elsize / sizeof(@type@); - - pl = tosort; pr = pl + num - 1; - pw = PyDimMem_NEW((1+num/2)); - - if (!pw) { - PyErr_NoMemory(); - return -1; - } - - @TYPE@_amergesort0(pl, pr, v, pw, chars); - PyDimMem_FREE(pw); - - return 0; -} -/**end repeat**/ - -static void -add_sortfuncs(void) -{ - PyArray_Descr *descr; - - /**begin repeat - #TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE# - **/ - descr = PyArray_DescrFromType(PyArray_@TYPE@); - descr->f->sort[PyArray_QUICKSORT] = \ - (PyArray_SortFunc *)@TYPE@_quicksort; - descr->f->argsort[PyArray_QUICKSORT] = \ - (PyArray_ArgSortFunc *)@TYPE@_aquicksort; - descr->f->sort[PyArray_HEAPSORT] = \ - (PyArray_SortFunc *)@TYPE@_heapsort; - descr->f->argsort[PyArray_HEAPSORT] = \ - (PyArray_ArgSortFunc *)@TYPE@_aheapsort; - descr->f->sort[PyArray_MERGESORT] = \ - (PyArray_SortFunc *)@TYPE@_mergesort; - descr->f->argsort[PyArray_MERGESORT] = \ - (PyArray_ArgSortFunc *)@TYPE@_amergesort; - /**end repeat**/ - - descr = PyArray_DescrFromType(PyArray_STRING); - descr->f->argsort[PyArray_MERGESORT] = \ - (PyArray_ArgSortFunc *)STRING_amergesort; - descr = PyArray_DescrFromType(PyArray_UNICODE); - descr->f->argsort[PyArray_MERGESORT] = \ - (PyArray_ArgSortFunc *)UNICODE_amergesort; -} - -static struct PyMethodDef methods[] = { - {NULL, NULL, 0} -}; - -PyMODINIT_FUNC -init_sort(void) { - - Py_InitModule("_sort", methods); - - import_array(); - add_sortfuncs(); -} diff --git a/numpy/core/src/arraymethods.c b/numpy/core/src/arraymethods.c deleted file mode 100644 index 57c879426..000000000 --- a/numpy/core/src/arraymethods.c +++ /dev/null @@ 
-1,1942 +0,0 @@ -/* Should only be used if x is known to be an nd-array */ -#define _ARET(x) PyArray_Return((PyArrayObject *)(x)) - -static PyObject * -array_take(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int dimension=MAX_DIMS; - PyObject *indices; - PyArrayObject *out=NULL; - NPY_CLIPMODE mode=NPY_RAISE; - static char *kwlist[] = {"indices", "axis", "out", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O&", kwlist, - &indices, PyArray_AxisConverter, - &dimension, - PyArray_OutputConverter, - &out, - PyArray_ClipmodeConverter, - &mode)) - return NULL; - - return _ARET(PyArray_TakeFrom(self, indices, dimension, out, mode)); -} - -static PyObject * -array_fill(PyArrayObject *self, PyObject *args) -{ - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) - return NULL; - if (PyArray_FillWithScalar(self, obj) < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_put(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *indices, *values; - NPY_CLIPMODE mode=NPY_RAISE; - static char *kwlist[] = {"indices", "values", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&", kwlist, - &indices, &values, - PyArray_ClipmodeConverter, - &mode)) - return NULL; - return PyArray_PutTo(self, values, indices, mode); -} - -static PyObject * -array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_Dims newshape; - PyObject *ret; - PyArray_ORDER order=PyArray_CORDER; - int n; - - if (kwds != NULL) { - PyObject *ref; - ref = PyDict_GetItemString(kwds, "order"); - if (ref == NULL) { - PyErr_SetString(PyExc_TypeError, - "invalid keyword argument"); - return NULL; - } - if ((PyArray_OrderConverter(ref, &order) == PY_FAIL)) - return NULL; - } - - n = PyTuple_Size(args); - if (n <= 1) { - if (PyTuple_GET_ITEM(args, 0) == Py_None) - return PyArray_View(self, NULL, NULL); - if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) return NULL; - } - else { - if (!PyArray_IntpConverter(args, &newshape)) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "invalid shape"); - } - goto fail; - } - } - ret = PyArray_Newshape(self, &newshape, order); - PyDimMem_FREE(newshape.ptr); - return ret; - - fail: - PyDimMem_FREE(newshape.ptr); - return NULL; -} - -static PyObject * -array_squeeze(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) return NULL; - return PyArray_Squeeze(self); -} - -static PyObject * -array_view(PyArrayObject *self, PyObject *args) -{ - PyObject *otype=NULL; - PyArray_Descr *type=NULL; - - if (!PyArg_ParseTuple(args, "|O", &otype)) return NULL; - - if (otype) { - if (PyType_Check(otype) && \ - PyType_IsSubtype((PyTypeObject *)otype, - &PyArray_Type)) { - return PyArray_View(self, NULL, - (PyTypeObject *)otype); - } - else { - if (PyArray_DescrConverter(otype, &type) == PY_FAIL) - return NULL; - } - } - return PyArray_View(self, type, NULL); -} - -static PyObject * -array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return _ARET(PyArray_ArgMax(self, axis, out)); -} - -static PyObject * -array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if 
(!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return _ARET(PyArray_ArgMin(self, axis, out)); -} - -static PyObject * -array_max(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return PyArray_Max(self, axis, out); -} - -static PyObject * -array_ptp(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return PyArray_Ptp(self, axis, out); -} - - -static PyObject * -array_min(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return PyArray_Min(self, axis, out); -} - -static PyObject * -array_swapaxes(PyArrayObject *self, PyObject *args) -{ - int axis1, axis2; - - if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) return NULL; - - return PyArray_SwapAxes(self, axis1, axis2); -} - - -/* steals typed reference */ -/*OBJECT_API - Get a subset of bytes from each element of the array -*/ -static PyObject * -PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) -{ - PyObject *ret=NULL; - - if (offset < 0 || (offset + typed->elsize) > self->descr->elsize) { - PyErr_Format(PyExc_ValueError, - "Need 0 <= offset <= %d for requested type " \ - "but received offset = %d", - self->descr->elsize-typed->elsize, offset); - Py_DECREF(typed); - return NULL; - } - ret = PyArray_NewFromDescr(self->ob_type, - typed, - self->nd, self->dimensions, - self->strides, - self->data + offset, - self->flags, (PyObject *)self); - if (ret == NULL) return NULL; - Py_INCREF(self); - ((PyArrayObject *)ret)->base = (PyObject *)self; - - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - return ret; -} - -static PyObject * -array_getfield(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - - PyArray_Descr *dtype; - int offset = 0; - static char *kwlist[] = {"dtype", "offset", 0}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|i", kwlist, - PyArray_DescrConverter, - &dtype, &offset)) return NULL; - - return PyArray_GetField(self, dtype, offset); -} - - -/*OBJECT_API - Set a subset of bytes from each element of the array -*/ -static int -PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, - int offset, PyObject *val) -{ - PyObject *ret=NULL; - int retval = 0; - - if (offset < 0 || (offset + dtype->elsize) > self->descr->elsize) { - PyErr_Format(PyExc_ValueError, - "Need 0 <= offset <= %d for requested type " \ - "but received offset = %d", - self->descr->elsize-dtype->elsize, offset); - Py_DECREF(dtype); - return -1; - } - ret = PyArray_NewFromDescr(self->ob_type, - dtype, self->nd, self->dimensions, - self->strides, self->data + offset, - self->flags, (PyObject *)self); - if (ret == NULL) return -1; - Py_INCREF(self); - ((PyArrayObject *)ret)->base = (PyObject *)self; - - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - 
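/*
 * Editor's sketch, not part of the patch: getfield/setfield only validate
 * that the requested field lies entirely inside each parent element, i.e.
 * 0 <= offset and offset + field_elsize <= parent_elsize. For example, a
 * 4-byte field inside a 12-byte record admits offsets 0 through 8 only.
 */
static int
field_offset_ok_sketch(int parent_elsize, int field_elsize, int offset)
{
    return offset >= 0 && offset + field_elsize <= parent_elsize;
}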
retval = PyArray_CopyObject((PyArrayObject *)ret, val); - Py_DECREF(ret); - return retval; -} - -static PyObject * -array_setfield(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_Descr *dtype; - int offset = 0; - PyObject *value; - static char *kwlist[] = {"value", "dtype", "offset", 0}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, - &value, PyArray_DescrConverter, - &dtype, &offset)) return NULL; - - if (PyArray_SetField(self, dtype, offset, value) < 0) - return NULL; - Py_INCREF(Py_None); - return Py_None; -} - -/* This doesn't change the descriptor just the actual data... - */ - -/*OBJECT_API*/ -static PyObject * -PyArray_Byteswap(PyArrayObject *self, Bool inplace) -{ - PyArrayObject *ret; - intp size; - PyArray_CopySwapNFunc *copyswapn; - PyArrayIterObject *it; - - copyswapn = self->descr->f->copyswapn; - if (inplace) { - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "Cannot byte-swap in-place on a " \ - "read-only array"); - return NULL; - } - size = PyArray_SIZE(self); - if (PyArray_ISONESEGMENT(self)) { - copyswapn(self->data, self->descr->elsize, NULL, -1, size, 1, self); - } - else { /* Use iterator */ - int axis = -1; - intp stride; - it = (PyArrayIterObject *) \ - PyArray_IterAllButAxis((PyObject *)self, &axis); - stride = self->strides[axis]; - size = self->dimensions[axis]; - while (it->index < it->size) { - copyswapn(it->dataptr, stride, NULL, -1, size, 1, self); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - - Py_INCREF(self); - return (PyObject *)self; - } - else { - PyObject *new; - if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL) - return NULL; - new = PyArray_Byteswap(ret, TRUE); - Py_DECREF(new); - return (PyObject *)ret; - } -} - - -static PyObject * -array_byteswap(PyArrayObject *self, PyObject *args) -{ - Bool inplace=FALSE; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) - return NULL; - - return PyArray_Byteswap(self, inplace); -} - -static PyObject * -array_tolist(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) return NULL; - return PyArray_ToList(self); -} - - -static PyObject * -array_tostring(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - NPY_ORDER order=NPY_CORDER; - static char *kwlist[] = {"order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, - PyArray_OrderConverter, - &order)) return NULL; - return PyArray_ToString(self, order); -} - - -/* This should grow an order= keyword to be consistent - */ - -static PyObject * -array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int ret; - PyObject *file; - FILE *fd; - char *sep=""; - char *format=""; - static char *kwlist[] = {"file", "sep", "format", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|ss", kwlist, - &file, &sep, &format)) return NULL; - - if (PyString_Check(file) || PyUnicode_Check(file)) { - file = PyObject_CallFunction((PyObject *)&PyFile_Type, - "Os", file, "wb"); - if (file==NULL) return NULL; - } - else { - Py_INCREF(file); - } - fd = PyFile_AsFile(file); - if (fd == NULL) { - PyErr_SetString(PyExc_IOError, "first argument must be a " \ - "string or open file"); - Py_DECREF(file); - return NULL; - } - ret = PyArray_ToFile(self, fd, sep, format); - Py_DECREF(file); - if (ret < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_toscalar(PyArrayObject *self, PyObject *args) { - int n, nd; - n = PyTuple_GET_SIZE(args); - - if (n==1) { - PyObject *obj; - obj = 
PyTuple_GET_ITEM(args, 0); - if (PyTuple_Check(obj)) { - args = obj; - n = PyTuple_GET_SIZE(args); - } - } - - if (n==0) { - if (self->nd == 0 || PyArray_SIZE(self) == 1) - return self->descr->f->getitem(self->data, self); - else { - PyErr_SetString(PyExc_ValueError, - "can only convert an array " \ - " of size 1 to a Python scalar"); - return NULL; - } - } - else if (n != self->nd && (n > 1 || self->nd==0)) { - PyErr_SetString(PyExc_ValueError, - "incorrect number of indices for " \ - "array"); - return NULL; - } - else if (n==1) { /* allows for flat getting as well as 1-d case */ - intp value, loc, index, factor; - intp factors[MAX_DIMS]; - value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0)); - if (error_converting(value)) { - PyErr_SetString(PyExc_ValueError, "invalid integer"); - return NULL; - } - factor = PyArray_SIZE(self); - if (value < 0) value += factor; - if ((value >= factor) || (value < 0)) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - if (self->nd == 1) { - value *= self->strides[0]; - return self->descr->f->getitem(self->data + value, - self); - } - nd = self->nd; - factor = 1; - while (nd--) { - factors[nd] = factor; - factor *= self->dimensions[nd]; - } - loc = 0; - for (nd=0; nd < self->nd; nd++) { - index = value / factors[nd]; - value = value % factors[nd]; - loc += self->strides[nd]*index; - } - - return self->descr->f->getitem(self->data + loc, - self); - - } - else { - intp loc, index[MAX_DIMS]; - nd = PyArray_IntpFromSequence(args, index, MAX_DIMS); - if (nd < n) return NULL; - loc = 0; - while (nd--) { - if (index[nd] < 0) - index[nd] += self->dimensions[nd]; - if (index[nd] < 0 || - index[nd] >= self->dimensions[nd]) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - loc += self->strides[nd]*index[nd]; - } - return self->descr->f->getitem(self->data + loc, self); - } -} - -static PyObject * -array_setscalar(PyArrayObject *self, PyObject *args) { - int n, nd; - int ret = -1; - PyObject *obj; - n = PyTuple_GET_SIZE(args)-1; - - if (n < 0) { - PyErr_SetString(PyExc_ValueError, - "itemset must have at least one argument"); - return NULL; - } - obj = PyTuple_GET_ITEM(args, n); - if (n==0) { - if (self->nd == 0 || PyArray_SIZE(self) == 1) { - ret = self->descr->f->setitem(obj, self->data, self); - } - else { - PyErr_SetString(PyExc_ValueError, - "can only place a scalar for an " - " array of size 1"); - return NULL; - } - } - else if (n != self->nd && (n > 1 || self->nd==0)) { - PyErr_SetString(PyExc_ValueError, - "incorrect number of indices for " \ - "array"); - return NULL; - } - else if (n==1) { /* allows for flat setting as well as 1-d case */ - intp value, loc, index, factor; - intp factors[MAX_DIMS]; - PyObject *indobj; - - indobj = PyTuple_GET_ITEM(args, 0); - if (PyTuple_Check(indobj)) { - PyObject *res; - PyObject *newargs; - PyObject *tmp; - int i, nn; - nn = PyTuple_GET_SIZE(indobj); - newargs = PyTuple_New(nn+1); - Py_INCREF(obj); - for (i=0; i= PyArray_SIZE(self)) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - if (self->nd == 1) { - value *= self->strides[0]; - ret = self->descr->f->setitem(obj, self->data + value, - self); - goto finish; - } - nd = self->nd; - factor = 1; - while (nd--) { - factors[nd] = factor; - factor *= self->dimensions[nd]; - } - loc = 0; - for (nd=0; nd < self->nd; nd++) { - index = value / factors[nd]; - value = value % factors[nd]; - loc += self->strides[nd]*index; - } - - ret = self->descr->f->setitem(obj, self->data 
+ loc, self); - } - else { - intp loc, index[MAX_DIMS]; - PyObject *tupargs; - tupargs = PyTuple_GetSlice(args, 0, n); - nd = PyArray_IntpFromSequence(tupargs, index, MAX_DIMS); - Py_DECREF(tupargs); - if (nd < n) return NULL; - loc = 0; - while (nd--) { - if (index[nd] < 0) - index[nd] += self->dimensions[nd]; - if (index[nd] < 0 || - index[nd] >= self->dimensions[nd]) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - loc += self->strides[nd]*index[nd]; - } - ret = self->descr->f->setitem(obj, self->data + loc, self); - } - - finish: - if (ret < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_cast(PyArrayObject *self, PyObject *args) -{ - PyArray_Descr *descr=NULL; - PyObject *obj; - - if (!PyArg_ParseTuple(args, "O&", PyArray_DescrConverter, - &descr)) return NULL; - - if (descr == self->descr) { - obj = _ARET(PyArray_NewCopy(self,NPY_ANYORDER)); - Py_XDECREF(descr); - return obj; - } - if (descr->names != NULL) { - int flags; - flags = NPY_FORCECAST; - if (PyArray_ISFORTRAN(self)) { - flags |= NPY_FORTRAN; - } - return PyArray_FromArray(self, descr, flags); - } - return PyArray_CastToType(self, descr, PyArray_ISFORTRAN(self)); -} - -/* default sub-type implementation */ - - -static PyObject * -array_wraparray(PyArrayObject *self, PyObject *args) -{ - PyObject *arr; - PyObject *ret; - - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument"); - return NULL; - } - arr = PyTuple_GET_ITEM(args, 0); - if (!PyArray_Check(arr)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; - } - - Py_INCREF(PyArray_DESCR(arr)); - ret = PyArray_NewFromDescr(self->ob_type, - PyArray_DESCR(arr), - PyArray_NDIM(arr), - PyArray_DIMS(arr), - PyArray_STRIDES(arr), PyArray_DATA(arr), - PyArray_FLAGS(arr), (PyObject *)self); - if (ret == NULL) return NULL; - Py_INCREF(arr); - PyArray_BASE(ret) = arr; - return ret; -} - - -static PyObject * -array_getarray(PyArrayObject *self, PyObject *args) -{ - PyArray_Descr *newtype=NULL; - PyObject *ret; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_DescrConverter, - &newtype)) return NULL; - - /* convert to PyArray_Type */ - if (!PyArray_CheckExact(self)) { - PyObject *new; - PyTypeObject *subtype = &PyArray_Type; - - if (!PyType_IsSubtype(self->ob_type, &PyArray_Type)) { - subtype = &PyArray_Type; - } - - Py_INCREF(PyArray_DESCR(self)); - new = PyArray_NewFromDescr(subtype, - PyArray_DESCR(self), - PyArray_NDIM(self), - PyArray_DIMS(self), - PyArray_STRIDES(self), - PyArray_DATA(self), - PyArray_FLAGS(self), NULL); - if (new == NULL) return NULL; - Py_INCREF(self); - PyArray_BASE(new) = (PyObject *)self; - self = (PyArrayObject *)new; - } - else { - Py_INCREF(self); - } - - if ((newtype == NULL) || \ - PyArray_EquivTypes(self->descr, newtype)) { - return (PyObject *)self; - } - else { - ret = PyArray_CastToType(self, newtype, 0); - Py_DECREF(self); - return ret; - } -} - - -static PyObject * -array_copy(PyArrayObject *self, PyObject *args) -{ - PyArray_ORDER fortran=PyArray_CORDER; - if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - - return PyArray_NewCopy(self, fortran); -} - - -static PyObject * -array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_Dims newshape; - PyObject *ret; - int n; - int refcheck = 1; - PyArray_ORDER fortran=PyArray_ANYORDER; - - if (kwds != NULL) { - PyObject *ref; - ref = PyDict_GetItemString(kwds, "refcheck"); - if (ref) { - 
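/*
 * Editor's sketch, not part of the patch: the item()/itemset() handlers
 * earlier in this hunk turn a flat C-order index into a byte offset by
 * building row-major factors and then peeling off one index per dimension.
 * The same arithmetic standalone, assuming nd stays within a small fixed
 * bound (MAX_DIMS plays that role in the real code) and that strides are
 * given in bytes:
 */
static long
flat_index_to_offset_sketch(long value, int nd,
                            const long *dims, const long *strides)
{
    long factors[32];                 /* assumed bound on nd for this sketch */
    long factor = 1, loc = 0, index;
    int k;

    for (k = nd - 1; k >= 0; k--) {   /* factors[k] = product of dims after k */
        factors[k] = factor;
        factor *= dims[k];
    }
    for (k = 0; k < nd; k++) {
        index = value / factors[k];
        value = value % factors[k];
        loc += strides[k] * index;
    }
    return loc;
}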
refcheck = PyInt_AsLong(ref); - if (refcheck==-1 && PyErr_Occurred()) { - return NULL; - } - } - ref = PyDict_GetItemString(kwds, "order"); - if (ref != NULL || - (PyArray_OrderConverter(ref, &fortran) == PY_FAIL)) - return NULL; - } - n = PyTuple_Size(args); - if (n <= 1) { - if (PyTuple_GET_ITEM(args, 0) == Py_None) { - Py_INCREF(Py_None); - return Py_None; - } - if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) return NULL; - } - else { - if (!PyArray_IntpConverter(args, &newshape)) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "invalid shape"); - } - return NULL; - } - } - ret = PyArray_Resize(self, &newshape, refcheck, fortran); - PyDimMem_FREE(newshape.ptr); - if (ret == NULL) return NULL; - Py_DECREF(ret); - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_repeat(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *repeats; - int axis=MAX_DIMS; - static char *kwlist[] = {"repeats", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, - &repeats, PyArray_AxisConverter, - &axis)) return NULL; - - return _ARET(PyArray_Repeat(self, repeats, axis)); -} - -static PyObject * -array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *choices; - int n; - PyArrayObject *out=NULL; - NPY_CLIPMODE clipmode=NPY_RAISE; - - n = PyTuple_Size(args); - if (n <= 1) { - if (!PyArg_ParseTuple(args, "O", &choices)) - return NULL; - } - else { - choices = args; - } - if (kwds && PyDict_Check(kwds)) { - if (PyArray_OutputConverter(PyDict_GetItemString(kwds, - "out"), - &out) == PY_FAIL) - return NULL; - if (PyArray_ClipmodeConverter(PyDict_GetItemString(kwds, - "mode"), - &clipmode) == PY_FAIL) - return NULL; - } - - return _ARET(PyArray_Choose(self, choices, out, clipmode)); -} - -static PyObject * -array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=-1; - int val; - PyArray_SORTKIND which=PyArray_QUICKSORT; - PyObject *order=NULL; - PyArray_Descr *saved=NULL; - PyArray_Descr *newd; - static char *kwlist[] = {"axis", "kind", "order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&O", kwlist, &axis, - PyArray_SortkindConverter, &which, - &order)) - return NULL; - - if (order == Py_None) order = NULL; - if (order != NULL) { - PyObject *new_name; - PyObject *_numpy_internal; - saved = self->descr; - if (saved->names == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ - "order when the array has no fields."); - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - new_name = PyObject_CallMethod(_numpy_internal, "_newnames", - "OO", saved, order); - Py_DECREF(_numpy_internal); - if (new_name == NULL) return NULL; - newd = PyArray_DescrNew(saved); - newd->names = new_name; - self->descr = newd; - } - - val = PyArray_Sort(self, axis, which); - if (order != NULL) { - Py_XDECREF(self->descr); - self->descr = saved; - } - if (val < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=-1; - PyArray_SORTKIND which=PyArray_QUICKSORT; - PyObject *order=NULL, *res; - PyArray_Descr *newd, *saved=NULL; - static char *kwlist[] = {"axis", "kind", "order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O", kwlist, - PyArray_AxisConverter, &axis, - PyArray_SortkindConverter, &which, - &order)) - return NULL; - - if (order == Py_None) order = NULL; - if 
(order != NULL) { - PyObject *new_name; - PyObject *_numpy_internal; - saved = self->descr; - if (saved->names == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ - "order when the array has no fields."); - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - new_name = PyObject_CallMethod(_numpy_internal, "_newnames", - "OO", saved, order); - Py_DECREF(_numpy_internal); - if (new_name == NULL) return NULL; - newd = PyArray_DescrNew(saved); - newd->names = new_name; - self->descr = newd; - } - - res = PyArray_ArgSort(self, axis, which); - if (order != NULL) { - Py_XDECREF(self->descr); - self->descr = saved; - } - return _ARET(res); -} - -static PyObject * -array_searchsorted(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"keys", "side", NULL}; - PyObject *keys; - NPY_SEARCHSIDE side = NPY_SEARCHLEFT; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:searchsorted", - kwlist, &keys, - PyArray_SearchsideConverter, &side)) - return NULL; - - return _ARET(PyArray_SearchSorted(self, keys, side)); -} - -static void -_deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, - PyObject *deepcopy, PyObject *visit) -{ - if (!PyDataType_REFCHK(dtype)) return; - else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; - _deepcopy_call(iptr + offset, optr + offset, new, - deepcopy, visit); - } - } - else { - PyObject **itemp, **otemp; - PyObject *res; - itemp = (PyObject **)iptr; - otemp = (PyObject **)optr; - Py_XINCREF(*itemp); - /* call deepcopy on this argument */ - res = PyObject_CallFunctionObjArgs(deepcopy, - *itemp, visit, NULL); - Py_XDECREF(*itemp); - Py_XDECREF(*otemp); - *otemp = res; - } - -} - - -static PyObject * -array_deepcopy(PyArrayObject *self, PyObject *args) -{ - PyObject* visit; - char *optr; - PyArrayIterObject *it; - PyObject *copy, *ret, *deepcopy; - - if (!PyArg_ParseTuple(args, "O", &visit)) return NULL; - ret = PyArray_Copy(self); - if (PyDataType_REFCHK(self->descr)) { - copy = PyImport_ImportModule("copy"); - if (copy == NULL) return NULL; - deepcopy = PyObject_GetAttrString(copy, "deepcopy"); - Py_DECREF(copy); - if (deepcopy == NULL) return NULL; - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(deepcopy); return NULL;} - optr = PyArray_DATA(ret); - while(it->index < it->size) { - _deepcopy_call(it->dataptr, optr, self->descr, - deepcopy, visit); - optr += self->descr->elsize; - PyArray_ITER_NEXT(it); - } - Py_DECREF(deepcopy); - Py_DECREF(it); - } - return _ARET(ret); -} - -/* Convert Array to flat list (using getitem) */ -static PyObject * -_getlist_pkl(PyArrayObject *self) -{ - PyObject *theobject; - PyArrayIterObject *iter=NULL; - PyObject *list; - PyArray_GetItemFunc *getitem; - - getitem = self->descr->f->getitem; - iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) return NULL; - list = PyList_New(iter->size); - if (list == NULL) {Py_DECREF(iter); return NULL;} - while (iter->index < iter->size) { - theobject = getitem(iter->dataptr, self); - PyList_SET_ITEM(list, (int) iter->index, theobject); - PyArray_ITER_NEXT(iter); - } - Py_DECREF(iter); - return list; -} - -static int -_setlist_pkl(PyArrayObject *self, PyObject *list) -{ - PyObject *theobject; - 
PyArrayIterObject *iter=NULL; - PyArray_SetItemFunc *setitem; - - setitem = self->descr->f->setitem; - iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) return -1; - while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, (int) iter->index); - setitem(theobject, iter->dataptr, self); - PyArray_ITER_NEXT(iter); - } - Py_XDECREF(iter); - return 0; -} - - -static PyObject * -array_reduce(PyArrayObject *self, PyObject *args) -{ - /* version number of this pickle type. Increment if we need to - change the format. Be sure to handle the old versions in - array_setstate. */ - const int version = 1; - PyObject *ret=NULL, *state=NULL, *obj=NULL, *mod=NULL; - PyObject *mybool, *thestr=NULL; - PyArray_Descr *descr; - - /* Return a tuple of (callable object, arguments, object's state) */ - /* We will put everything in the object's state, so that on UnPickle - it can use the string object as memory without a copy */ - - ret = PyTuple_New(3); - if (ret == NULL) return NULL; - mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} - obj = PyObject_GetAttrString(mod, "_reconstruct"); - Py_DECREF(mod); - PyTuple_SET_ITEM(ret, 0, obj); - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("ONc", - (PyObject *)self->ob_type, - Py_BuildValue("(N)", - PyInt_FromLong(0)), - /* dummy data-type */ - 'b')); - - /* Now fill in object's state. This is a tuple with - 5 arguments - - 1) an integer with the pickle version. - 2) a Tuple giving the shape - 3) a PyArray_Descr Object (with correct bytorder set) - 4) a Bool stating if Fortran or not - 5) a Python object representing the data (a string, or - a list or any user-defined object). - - Notice because Python does not describe a mechanism to write - raw data to the pickle, this performs a copy to a string first - */ - - state = PyTuple_New(5); - if (state == NULL) { - Py_DECREF(ret); return NULL; - } - PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); - PyTuple_SET_ITEM(state, 1, PyObject_GetAttrString((PyObject *)self, - "shape")); - descr = self->descr; - Py_INCREF(descr); - PyTuple_SET_ITEM(state, 2, (PyObject *)descr); - mybool = (PyArray_ISFORTRAN(self) ? Py_True : Py_False); - Py_INCREF(mybool); - PyTuple_SET_ITEM(state, 3, mybool); - if (PyDataType_FLAGCHK(self->descr, NPY_LIST_PICKLE)) { - thestr = _getlist_pkl(self); - } - else { - thestr = PyArray_ToString(self, NPY_ANYORDER); - } - if (thestr == NULL) { - Py_DECREF(ret); - Py_DECREF(state); - return NULL; - } - PyTuple_SET_ITEM(state, 4, thestr); - PyTuple_SET_ITEM(ret, 2, state); - return ret; -} - - - -static size_t _array_fill_strides(intp *, intp *, int, size_t, int, int *); - -static int _IsAligned(PyArrayObject *); - -static PyArray_Descr * _array_typedescr_fromstr(char *); - -static PyObject * -array_setstate(PyArrayObject *self, PyObject *args) -{ - PyObject *shape; - PyArray_Descr *typecode; - int version = 1; - int fortran; - PyObject *rawdata; - char *datastr; - Py_ssize_t len; - intp size, dimensions[MAX_DIMS]; - int nd; - - /* This will free any memory associated with a and - use the string in setstate as the (writeable) memory. 
- */ - if (!PyArg_ParseTuple(args, "(iO!O!iO)", &version, &PyTuple_Type, - &shape, &PyArrayDescr_Type, &typecode, - &fortran, &rawdata)) { - PyErr_Clear(); - version = 0; - if (!PyArg_ParseTuple(args, "(O!O!iO)", &PyTuple_Type, - &shape, &PyArrayDescr_Type, &typecode, - &fortran, &rawdata)) { - return NULL; - } - } - - /* If we ever need another pickle format, increment the version - number. But we should still be able to handle the old versions. - We've only got one right now. */ - if (version != 1 && version != 0) { - PyErr_Format(PyExc_ValueError, - "can't handle version %d of numpy.ndarray pickle", - version); - return NULL; - } - - Py_XDECREF(self->descr); - self->descr = typecode; - Py_INCREF(typecode); - nd = PyArray_IntpFromSequence(shape, dimensions, MAX_DIMS); - if (nd < 0) return NULL; - size = PyArray_MultiplyList(dimensions, nd); - if (self->descr->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "Invalid data-type size."); - return NULL; - } - if (size < 0 || size > MAX_INTP / self->descr->elsize) { - PyErr_NoMemory(); - return NULL; - } - - if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { - if (!PyList_Check(rawdata)) { - PyErr_SetString(PyExc_TypeError, - "object pickle not returning list"); - return NULL; - } - } - else { - if (!PyString_Check(rawdata)) { - PyErr_SetString(PyExc_TypeError, - "pickle not returning string"); - return NULL; - } - - if (PyString_AsStringAndSize(rawdata, &datastr, &len)) - return NULL; - - if ((len != (self->descr->elsize * size))) { - PyErr_SetString(PyExc_ValueError, - "buffer size does not" \ - " match array size"); - return NULL; - } - } - - if ((self->flags & OWNDATA)) { - if (self->data != NULL) - PyDataMem_FREE(self->data); - self->flags &= ~OWNDATA; - } - Py_XDECREF(self->base); - - self->flags &= ~UPDATEIFCOPY; - - if (self->dimensions != NULL) { - PyDimMem_FREE(self->dimensions); - self->dimensions = NULL; - } - - self->flags = DEFAULT; - - self->nd = nd; - - if (nd > 0) { - self->dimensions = PyDimMem_NEW(nd * 2); - self->strides = self->dimensions + nd; - memcpy(self->dimensions, dimensions, sizeof(intp)*nd); - (void) _array_fill_strides(self->strides, dimensions, nd, - (size_t) self->descr->elsize, - (fortran ? 
FORTRAN : CONTIGUOUS), - &(self->flags)); - } - - if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { - int swap=!PyArray_ISNOTSWAPPED(self); - self->data = datastr; - if (!_IsAligned(self) || swap) { - intp num = PyArray_NBYTES(self); - self->data = PyDataMem_NEW(num); - if (self->data == NULL) { - self->nd = 0; - PyDimMem_FREE(self->dimensions); - return PyErr_NoMemory(); - } - if (swap) { /* byte-swap on pickle-read */ - intp numels = num / self->descr->elsize; - self->descr->f->copyswapn(self->data, self->descr->elsize, - datastr, self->descr->elsize, - numels, 1, self); - if (!PyArray_ISEXTENDED(self)) { - self->descr = PyArray_DescrFromType(self->descr->type_num); - } - else { - self->descr = PyArray_DescrNew(typecode); - if (self->descr->byteorder == PyArray_BIG) - self->descr->byteorder = PyArray_LITTLE; - else if (self->descr->byteorder == PyArray_LITTLE) - self->descr->byteorder = PyArray_BIG; - } - Py_DECREF(typecode); - } - else { - memcpy(self->data, datastr, num); - } - self->flags |= OWNDATA; - self->base = NULL; - } - else { - self->base = rawdata; - Py_INCREF(self->base); - } - } - else { - self->data = PyDataMem_NEW(PyArray_NBYTES(self)); - if (self->data == NULL) { - self->nd = 0; - self->data = PyDataMem_NEW(self->descr->elsize); - if (self->dimensions) PyDimMem_FREE(self->dimensions); - return PyErr_NoMemory(); - } - if (PyDataType_FLAGCHK(self->descr, NPY_NEEDS_INIT)) - memset(self->data, 0, PyArray_NBYTES(self)); - self->flags |= OWNDATA; - self->base = NULL; - if (_setlist_pkl(self, rawdata) < 0) - return NULL; - } - - PyArray_UpdateFlags(self, UPDATE_ALL); - - Py_INCREF(Py_None); - return Py_None; -} - -/*OBJECT_API*/ -static int -PyArray_Dump(PyObject *self, PyObject *file, int protocol) -{ - PyObject *cpick=NULL; - PyObject *ret; - if (protocol < 0) protocol = 2; - - cpick = PyImport_ImportModule("cPickle"); - if (cpick==NULL) return -1; - - if PyString_Check(file) { - file = PyFile_FromString(PyString_AS_STRING(file), "wb"); - if (file==NULL) return -1; - } - else Py_INCREF(file); - ret = PyObject_CallMethod(cpick, "dump", "OOi", self, - file, protocol); - Py_XDECREF(ret); - Py_DECREF(file); - Py_DECREF(cpick); - if (PyErr_Occurred()) return -1; - return 0; -} - -/*OBJECT_API*/ -static PyObject * -PyArray_Dumps(PyObject *self, int protocol) -{ - PyObject *cpick=NULL; - PyObject *ret; - if (protocol < 0) protocol = 2; - - cpick = PyImport_ImportModule("cPickle"); - if (cpick==NULL) return NULL; - ret = PyObject_CallMethod(cpick, "dumps", "Oi", self, protocol); - Py_DECREF(cpick); - return ret; -} - - -static PyObject * -array_dump(PyArrayObject *self, PyObject *args) -{ - PyObject *file=NULL; - int ret; - - if (!PyArg_ParseTuple(args, "O", &file)) - return NULL; - ret = PyArray_Dump((PyObject *)self, file, 2); - if (ret < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_dumps(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) - return NULL; - return PyArray_Dumps((PyObject *)self, 2); -} - - -static PyObject * -array_transpose(PyArrayObject *self, PyObject *args) -{ - PyObject *shape=Py_None; - int n; - PyArray_Dims permute; - PyObject *ret; - - n = PyTuple_Size(args); - if (n > 1) shape = args; - else if (n == 1) shape = PyTuple_GET_ITEM(args, 0); - - if (shape == Py_None) - ret = PyArray_Transpose(self, NULL); - else { - if (!PyArray_IntpConverter(shape, &permute)) return NULL; - ret = PyArray_Transpose(self, &permute); - PyDimMem_FREE(permute.ptr); - } - - return ret; -} - -/* Return typenumber 
from dtype2 unless it is NULL, then return - NPY_DOUBLE if dtype1->type_num is integer or bool - and dtype1->type_num otherwise. -*/ -static int -_get_type_num_double(PyArray_Descr *dtype1, PyArray_Descr *dtype2) -{ - if (dtype2 != NULL) - return dtype2->type_num; - - /* For integer or bool data-types */ - if (dtype1->type_num < NPY_FLOAT) { - return NPY_DOUBLE; - } - else { - return dtype1->type_num; - } -} - -#define _CHKTYPENUM(typ) ((typ) ? (typ)->type_num : PyArray_NOTYPE) - -static PyObject * -array_mean(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - int num; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - num = _get_type_num_double(self->descr, dtype); - return PyArray_Mean(self, axis, num, out); -} - -static PyObject * -array_sum(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - return PyArray_Sum(self, axis, _CHKTYPENUM(dtype), out); -} - - -static PyObject * -array_cumsum(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - return PyArray_CumSum(self, axis, _CHKTYPENUM(dtype), out); -} - -static PyObject * -array_prod(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - return PyArray_Prod(self, axis, _CHKTYPENUM(dtype), out); -} - -static PyObject * -array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - return PyArray_CumProd(self, axis, _CHKTYPENUM(dtype), out); -} - - -static PyObject * -array_any(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return PyArray_Any(self, axis, out); -} - - -static PyObject * -array_all(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArrayObject *out=NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - 
PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) - return NULL; - - return PyArray_All(self, axis, out); -} - - -static PyObject * -array_stddev(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - int num; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - num = _get_type_num_double(self->descr, dtype); - return PyArray_Std(self, axis, num, out, 0); -} - - -static PyObject * -array_variance(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - int num; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, - &axis, PyArray_DescrConverter2, - &dtype, - PyArray_OutputConverter, - &out)) return NULL; - - num = _get_type_num_double(self->descr, dtype); - return PyArray_Std(self, axis, num, out, 1); -} - - -static PyObject * -array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=MAX_DIMS; - PyObject *condition; - PyArrayObject *out=NULL; - static char *kwlist[] = {"condition", "axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&", kwlist, - &condition, PyArray_AxisConverter, - &axis, - PyArray_OutputConverter, - &out)) return NULL; - - return _ARET(PyArray_Compress(self, condition, axis, out)); -} - - -static PyObject * -array_nonzero(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) return NULL; - - return PyArray_Nonzero(self); -} - - -static PyObject * -array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis1=0, axis2=1, offset=0; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; - static char *kwlist[] = {"offset", "axis1", "axis2", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiO&O&", kwlist, - &offset, &axis1, &axis2, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) - return NULL; - - return _ARET(PyArray_Trace(self, offset, axis1, axis2, - _CHKTYPENUM(dtype), out)); -} - -#undef _CHKTYPENUM - - -static PyObject * -array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *min=NULL, *max=NULL; - PyArrayObject *out=NULL; - static char *kwlist[] = {"min", "max", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO&", kwlist, - &min, &max, - PyArray_OutputConverter, - &out)) - return NULL; - - if (max == NULL && min == NULL) { - PyErr_SetString(PyExc_ValueError, "One of max or min must be given."); - return NULL; - } - return _ARET(PyArray_Clip(self, min, max, out)); -} - - -static PyObject * -array_conjugate(PyArrayObject *self, PyObject *args) -{ - - PyArrayObject *out=NULL; - if (!PyArg_ParseTuple(args, "|O&", - PyArray_OutputConverter, - &out)) return NULL; - - return PyArray_Conjugate(self, out); -} - - -static PyObject * -array_diagonal(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis1=0, axis2=1, offset=0; - static char *kwlist[] = {"offset", "axis1", "axis2", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwlist, - &offset, &axis1, &axis2)) - return NULL; - - return _ARET(PyArray_Diagonal(self, offset, axis1, axis2)); -} - - -static PyObject * -array_flatten(PyArrayObject *self, PyObject 
*args) -{ - PyArray_ORDER fortran=PyArray_CORDER; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - - return PyArray_Flatten(self, fortran); -} - - -static PyObject * -array_ravel(PyArrayObject *self, PyObject *args) -{ - PyArray_ORDER fortran=PyArray_CORDER; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - - return PyArray_Ravel(self, fortran); -} - - -static PyObject * -array_round(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int decimals = 0; - PyArrayObject *out=NULL; - static char *kwlist[] = {"decimals", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&", kwlist, - &decimals, PyArray_OutputConverter, - &out)) - return NULL; - - return _ARET(PyArray_Round(self, decimals, out)); -} - - - -static int _IsAligned(PyArrayObject *); -static Bool _IsWriteable(PyArrayObject *); - -static PyObject * -array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"write", "align", "uic", NULL}; - PyObject *write=Py_None; - PyObject *align=Py_None; - PyObject *uic=Py_None; - int flagback = self->flags; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist, - &write, &align, &uic)) - return NULL; - - if (align != Py_None) { - if (PyObject_Not(align)) self->flags &= ~ALIGNED; - else if (_IsAligned(self)) self->flags |= ALIGNED; - else { - PyErr_SetString(PyExc_ValueError, - "cannot set aligned flag of mis-"\ - "aligned array to True"); - return NULL; - } - } - - if (uic != Py_None) { - if (PyObject_IsTrue(uic)) { - self->flags = flagback; - PyErr_SetString(PyExc_ValueError, - "cannot set UPDATEIFCOPY " \ - "flag to True"); - return NULL; - } - else { - self->flags &= ~UPDATEIFCOPY; - Py_XDECREF(self->base); - self->base = NULL; - } - } - - if (write != Py_None) { - if (PyObject_IsTrue(write)) - if (_IsWriteable(self)) { - self->flags |= WRITEABLE; - } - else { - self->flags = flagback; - PyErr_SetString(PyExc_ValueError, - "cannot set WRITEABLE " \ - "flag to True of this " \ - "array"); \ - return NULL; - } - else - self->flags &= ~WRITEABLE; - } - - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_newbyteorder(PyArrayObject *self, PyObject *args) -{ - char endian = PyArray_SWAP; - PyArray_Descr *new; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - - new = PyArray_DescrNewByteorder(self->descr, endian); - if (!new) return NULL; - return PyArray_View(self, new, NULL); - -} - -static PyMethodDef array_methods[] = { - - /* for subtypes */ - {"__array__", (PyCFunction)array_getarray, - METH_VARARGS, NULL}, - {"__array_wrap__", (PyCFunction)array_wraparray, - METH_VARARGS, NULL}, - - /* for the copy module */ - {"__copy__", (PyCFunction)array_copy, - METH_VARARGS, NULL}, - {"__deepcopy__", (PyCFunction)array_deepcopy, - METH_VARARGS, NULL}, - - /* for Pickling */ - {"__reduce__", (PyCFunction) array_reduce, - METH_VARARGS, NULL}, - {"__setstate__", (PyCFunction) array_setstate, - METH_VARARGS, NULL}, - {"dumps", (PyCFunction) array_dumps, - METH_VARARGS, NULL}, - {"dump", (PyCFunction) array_dump, - METH_VARARGS, NULL}, - - /* Original and Extended methods added 2005 */ - {"all", (PyCFunction)array_all, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"any", (PyCFunction)array_any, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"argmax", (PyCFunction)array_argmax, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"argmin", (PyCFunction)array_argmin, - METH_VARARGS | METH_KEYWORDS, NULL}, - 
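/*
 * Editor's sketch, not part of the patch: nearly every entry in this table
 * points at a METH_VARARGS | METH_KEYWORDS function that parses its
 * arguments with "O&" converters returning non-zero on success. A minimal
 * hypothetical method and converter built only from generic CPython pieces
 * (the names and the "count" keyword are invented):
 */
static int
nonneg_int_converter_sketch(PyObject *obj, void *addr)
{
    long v = PyInt_AsLong(obj);

    if (v == -1 && PyErr_Occurred())
        return 0;                     /* converters report failure with 0 */
    if (v < 0) {
        PyErr_SetString(PyExc_ValueError, "expected a non-negative integer");
        return 0;
    }
    *(int *)addr = (int)v;
    return 1;                         /* and success with non-zero */
}

static PyObject *
example_method_sketch(PyObject *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"count", NULL};
    int count = 0;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist,
                                     nonneg_int_converter_sketch, &count))
        return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
/* and its table entry would look like:
 *   {"example", (PyCFunction)example_method_sketch,
 *    METH_VARARGS | METH_KEYWORDS, NULL},
 */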
{"argsort", (PyCFunction)array_argsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"astype", (PyCFunction)array_cast, - METH_VARARGS, NULL}, - {"byteswap", (PyCFunction)array_byteswap, - METH_VARARGS, NULL}, - {"choose", (PyCFunction)array_choose, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"clip", (PyCFunction)array_clip, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compress", (PyCFunction)array_compress, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"conj", (PyCFunction)array_conjugate, - METH_VARARGS, NULL}, - {"conjugate", (PyCFunction)array_conjugate, - METH_VARARGS, NULL}, - {"copy", (PyCFunction)array_copy, - METH_VARARGS, NULL}, - {"cumprod", (PyCFunction)array_cumprod, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"cumsum", (PyCFunction)array_cumsum, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"diagonal", (PyCFunction)array_diagonal, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fill", (PyCFunction)array_fill, - METH_VARARGS, NULL}, - {"flatten", (PyCFunction)array_flatten, - METH_VARARGS, NULL}, - {"getfield", (PyCFunction)array_getfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"item", (PyCFunction)array_toscalar, - METH_VARARGS, NULL}, - {"itemset", (PyCFunction) array_setscalar, - METH_VARARGS, NULL}, - {"max", (PyCFunction)array_max, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"mean", (PyCFunction)array_mean, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"min", (PyCFunction)array_min, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"newbyteorder", (PyCFunction)array_newbyteorder, - METH_VARARGS, NULL}, - {"nonzero", (PyCFunction)array_nonzero, - METH_VARARGS, NULL}, - {"prod", (PyCFunction)array_prod, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"ptp", (PyCFunction)array_ptp, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"put", (PyCFunction)array_put, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"ravel", (PyCFunction)array_ravel, - METH_VARARGS, NULL}, - {"repeat", (PyCFunction)array_repeat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"reshape", (PyCFunction)array_reshape, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"resize", (PyCFunction)array_resize, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"round", (PyCFunction)array_round, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"searchsorted", (PyCFunction)array_searchsorted, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setfield", (PyCFunction)array_setfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setflags", (PyCFunction)array_setflags, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"sort", (PyCFunction)array_sort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"squeeze", (PyCFunction)array_squeeze, - METH_VARARGS, NULL}, - {"std", (PyCFunction)array_stddev, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"sum", (PyCFunction)array_sum, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"swapaxes", (PyCFunction)array_swapaxes, - METH_VARARGS, NULL}, - {"take", (PyCFunction)array_take, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"tofile", (PyCFunction)array_tofile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"tolist", (PyCFunction)array_tolist, - METH_VARARGS, NULL}, - {"tostring", (PyCFunction)array_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"trace", (PyCFunction)array_trace, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"transpose", (PyCFunction)array_transpose, - METH_VARARGS, NULL}, - {"var", (PyCFunction)array_variance, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"view", (PyCFunction)array_view, - METH_VARARGS, NULL}, - {NULL, NULL} /* sentinel */ -}; - -#undef _ARET diff --git a/numpy/core/src/arrayobject.c b/numpy/core/src/arrayobject.c deleted file mode 100644 index bed5ce6cb..000000000 --- 
a/numpy/core/src/arrayobject.c +++ /dev/null @@ -1,12081 +0,0 @@ -/* - Provide multidimensional arrays as a basic object type in python. - - Based on Original Numeric implementation - Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - - with contributions from many Numeric Python developers 1995-2004 - - Heavily modified in 2005 with inspiration from Numarray - - by - - Travis Oliphant, oliphant@ee.byu.edu - Brigham Young Univeristy - - maintainer email: oliphant.travis@ieee.org - - Numarray design (which provided guidance) by - Space Science Telescope Institute - (J. Todd Miller, Perry Greenfield, Rick White) -*/ - -/*OBJECT_API - Get Priority from object -*/ -static double -PyArray_GetPriority(PyObject *obj, double default_) -{ - PyObject *ret; - double priority=PyArray_PRIORITY; - - if (PyArray_CheckExact(obj)) - return priority; - - ret = PyObject_GetAttrString(obj, "__array_priority__"); - if (ret != NULL) priority = PyFloat_AsDouble(ret); - if (PyErr_Occurred()) { - PyErr_Clear(); - priority = default_; - } - Py_XDECREF(ret); - return priority; -} - -static int -_check_object_rec(PyArray_Descr *descr) -{ - if (PyDataType_HASFIELDS(descr) && PyDataType_REFCHK(descr)) { - PyErr_SetString(PyExc_TypeError, "Not supported for this data-type."); - return -1; - } - return 0; -} - -/* Backward compatibility only */ -/* In both Zero and One - -***You must free the memory once you are done with it -using PyDataMem_FREE(ptr) or you create a memory leak*** - -If arr is an Object array you are getting a -BORROWED reference to Zero or One. -Do not DECREF. -Please INCREF if you will be hanging on to it. - -The memory for the ptr still must be freed in any case; -*/ - - -/*OBJECT_API - Get pointer to zero of correct type for array. -*/ -static char * -PyArray_Zero(PyArrayObject *arr) -{ - char *zeroval; - int ret, storeflags; - PyObject *obj; - - if (_check_object_rec(arr->descr) < 0) return NULL; - zeroval = PyDataMem_NEW(arr->descr->elsize); - if (zeroval == NULL) { - PyErr_SetNone(PyExc_MemoryError); - return NULL; - } - - obj=PyInt_FromLong((long) 0); - if (PyArray_ISOBJECT(arr)) { - memcpy(zeroval, &obj, sizeof(PyObject *)); - Py_DECREF(obj); - return zeroval; - } - storeflags = arr->flags; - arr->flags |= BEHAVED; - ret = arr->descr->f->setitem(obj, zeroval, arr); - arr->flags = storeflags; - Py_DECREF(obj); - if (ret < 0) { - PyDataMem_FREE(zeroval); - return NULL; - } - return zeroval; -} - -/*OBJECT_API - Get pointer to one of correct type for array -*/ -static char * -PyArray_One(PyArrayObject *arr) -{ - char *oneval; - int ret, storeflags; - PyObject *obj; - - if (_check_object_rec(arr->descr) < 0) return NULL; - oneval = PyDataMem_NEW(arr->descr->elsize); - if (oneval == NULL) { - PyErr_SetNone(PyExc_MemoryError); - return NULL; - } - - obj = PyInt_FromLong((long) 1); - if (PyArray_ISOBJECT(arr)) { - memcpy(oneval, &obj, sizeof(PyObject *)); - Py_DECREF(obj); - return oneval; - } - - storeflags = arr->flags; - arr->flags |= BEHAVED; - ret = arr->descr->f->setitem(obj, oneval, arr); - arr->flags = storeflags; - Py_DECREF(obj); - if (ret < 0) { - PyDataMem_FREE(oneval); - return NULL; - } - return oneval; -} - -/* End deprecated */ - - -static PyObject *PyArray_New(PyTypeObject *, int nd, intp *, - int, intp *, void *, int, int, PyObject *); - - -/* Incref all objects found at this record */ -/*OBJECT_API - */ -static void -PyArray_Item_INCREF(char *data, PyArray_Descr *descr) -{ - PyObject **temp; - - if (!PyDataType_REFCHK(descr)) return; - - if (descr->type_num == 
PyArray_OBJECT) { - temp = (PyObject **)data; - Py_XINCREF(*temp); - } - else if (PyDescr_HASFIELDS(descr)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; - PyArray_Item_INCREF(data + offset, new); - } - } - return; -} - -/* XDECREF all objects found at this record */ -/*OBJECT_API - */ -static void -PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) -{ - PyObject **temp; - - if (!PyDataType_REFCHK(descr)) return; - - if (descr->type_num == PyArray_OBJECT) { - temp = (PyObject **)data; - Py_XDECREF(*temp); - } - else if PyDescr_HASFIELDS(descr) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; - PyArray_Item_XDECREF(data + offset, new); - } - } - return; -} - -/* C-API functions */ - -/* Used for arrays of python objects to increment the reference count of */ -/* every python object in the array. */ -/*OBJECT_API - For object arrays, increment all internal references. -*/ -static int -PyArray_INCREF(PyArrayObject *mp) -{ - intp i, n; - PyObject **data, **temp; - PyArrayIterObject *it; - - if (!PyDataType_REFCHK(mp->descr)) return 0; - - if (mp->descr->type_num != PyArray_OBJECT) { - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) return -1; - while(it->index < it->size) { - PyArray_Item_INCREF(it->dataptr, mp->descr); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - return 0; - } - - if (PyArray_ISONESEGMENT(mp)) { - data = (PyObject **)mp->data; - n = PyArray_SIZE(mp); - if (PyArray_ISALIGNED(mp)) { - for(i=0; iindex < it->size) { - temp = (PyObject **)it->dataptr; - Py_XINCREF(*temp); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return 0; -} - -/*OBJECT_API - Decrement all internal references for object arrays. 
- (or arrays with object fields) -*/ -static int -PyArray_XDECREF(PyArrayObject *mp) -{ - intp i, n; - PyObject **data; - PyObject **temp; - PyArrayIterObject *it; - - if (!PyDataType_REFCHK(mp->descr)) return 0; - - if (mp->descr->type_num != PyArray_OBJECT) { - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) return -1; - while(it->index < it->size) { - PyArray_Item_XDECREF(it->dataptr, mp->descr); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - return 0; - } - - if (PyArray_ISONESEGMENT(mp)) { - data = (PyObject **)mp->data; - n = PyArray_SIZE(mp); - if (PyArray_ISALIGNED(mp)) { - for(i=0; iindex < it->size) { - temp = (PyObject **)it->dataptr; - Py_XDECREF(*temp); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return 0; -} - -static void -_strided_byte_copy(char *dst, intp outstrides, char *src, intp instrides, - intp N, int elsize) -{ - intp i, j; - char *tout = dst; - char *tin = src; - -#define _FAST_MOVE(_type_) \ - for (i=0; i 0; n--, a += stride-1) { - b = a + 3; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a = *b; *b = c; - } - break; - case 8: - for (a = (char*)p ; n > 0; n--, a += stride-3) { - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a = *b; *b = c; - } - break; - case 2: - for (a = (char*)p ; n > 0; n--, a += stride) { - b = a + 1; - c = *a; *a = *b; *b = c; - } - break; - default: - m = size / 2; - for (a = (char *)p ; n > 0; n--, a += stride-m) { - b = a + (size-1); - for (j=0; j 1, then dst must be contiguous */ -static void -copy_and_swap(void *dst, void *src, int itemsize, intp numitems, - intp srcstrides, int swap) -{ - int i; - char *s1 = (char *)src; - char *d1 = (char *)dst; - - - if ((numitems == 1) || (itemsize == srcstrides)) - memcpy(d1, s1, itemsize*numitems); - else { - for (i = 0; i < numitems; i++) { - memcpy(d1, s1, itemsize); - d1 += itemsize; - s1 += srcstrides; - } - } - - if (swap) - byte_swap_vector(d1, numitems, itemsize); -} - - -#ifndef Py_UNICODE_WIDE -#include "ucsnarrow.c" -#endif - - -static PyArray_Descr **userdescrs=NULL; -#define error_converting(x) (((x) == -1) && PyErr_Occurred()) - - -/* Computer-generated arraytype and scalartype code */ -#include "scalartypes.inc" -#include "arraytypes.inc" - - -/* Helper functions */ - -/*OBJECT_API*/ -static intp -PyArray_PyIntAsIntp(PyObject *o) -{ - longlong long_value = -1; - PyObject *obj; - static char *msg = "an integer is required"; - PyObject *arr; - PyArray_Descr *descr; - intp ret; - - if (!o) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - - if (PyInt_Check(o)) { - long_value = (longlong) PyInt_AS_LONG(o); - goto finish; - } else if (PyLong_Check(o)) { - long_value = (longlong) PyLong_AsLongLong(o); - goto finish; - } - -#if SIZEOF_INTP == SIZEOF_LONG - descr = &LONG_Descr; -#elif SIZEOF_INTP == SIZEOF_INT - descr = &INT_Descr; -#else - descr = &LONGLONG_Descr; -#endif - arr = NULL; - - if (PyArray_Check(o)) { - if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - Py_INCREF(descr); - arr = PyArray_CastToType((PyArrayObject *)o, descr, 0); - } - else if (PyArray_IsScalar(o, Integer)) { - Py_INCREF(descr); - arr = PyArray_FromScalar(o, descr); - } - if (arr != NULL) { - ret = *((intp *)PyArray_DATA(arr)); - Py_DECREF(arr); - return ret; - } - -#if (PY_VERSION_HEX >= 0x02050000) - if (PyIndex_Check(o)) { - PyObject* value = PyNumber_Index(o); - if (value==NULL) { - return -1; - } - long_value = (longlong) 
PyInt_AsSsize_t(value); - goto finish; - } -#endif - if (o->ob_type->tp_as_number != NULL && \ - o->ob_type->tp_as_number->nb_long != NULL) { - obj = o->ob_type->tp_as_number->nb_long(o); - if (obj != NULL) { - long_value = (longlong) PyLong_AsLongLong(obj); - Py_DECREF(obj); - } - } - else if (o->ob_type->tp_as_number != NULL && \ - o->ob_type->tp_as_number->nb_int != NULL) { - obj = o->ob_type->tp_as_number->nb_int(o); - if (obj != NULL) { - long_value = (longlong) PyLong_AsLongLong(obj); - Py_DECREF(obj); - } - } - else { - PyErr_SetString(PyExc_NotImplementedError,""); - } - - finish: - if error_converting(long_value) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - -#if (SIZEOF_LONGLONG > SIZEOF_INTP) - if ((long_value < MIN_INTP) || (long_value > MAX_INTP)) { - PyErr_SetString(PyExc_ValueError, - "integer won't fit into a C intp"); - return -1; - } -#endif - return (intp) long_value; -} - - -static PyObject *array_int(PyArrayObject *v); - -/*OBJECT_API*/ -static int -PyArray_PyIntAsInt(PyObject *o) -{ - long long_value = -1; - PyObject *obj; - static char *msg = "an integer is required"; - PyObject *arr; - PyArray_Descr *descr; - int ret; - - - if (!o) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - - if (PyInt_Check(o)) { - long_value = (long) PyInt_AS_LONG(o); - goto finish; - } else if (PyLong_Check(o)) { - long_value = (long) PyLong_AsLong(o); - goto finish; - } - - descr = &INT_Descr; - arr=NULL; - if (PyArray_Check(o)) { - if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - Py_INCREF(descr); - arr = PyArray_CastToType((PyArrayObject *)o, descr, 0); - } - if (PyArray_IsScalar(o, Integer)) { - Py_INCREF(descr); - arr = PyArray_FromScalar(o, descr); - } - if (arr != NULL) { - ret = *((int *)PyArray_DATA(arr)); - Py_DECREF(arr); - return ret; - } -#if (PY_VERSION_HEX >= 0x02050000) - if (PyIndex_Check(o)) { - PyObject* value = PyNumber_Index(o); - long_value = (longlong) PyInt_AsSsize_t(value); - goto finish; - } -#endif - if (o->ob_type->tp_as_number != NULL && \ - o->ob_type->tp_as_number->nb_int != NULL) { - obj = o->ob_type->tp_as_number->nb_int(o); - if (obj == NULL) return -1; - long_value = (long) PyLong_AsLong(obj); - Py_DECREF(obj); - } - else if (o->ob_type->tp_as_number != NULL && \ - o->ob_type->tp_as_number->nb_long != NULL) { - obj = o->ob_type->tp_as_number->nb_long(o); - if (obj == NULL) return -1; - long_value = (long) PyLong_AsLong(obj); - Py_DECREF(obj); - } - else { - PyErr_SetString(PyExc_NotImplementedError,""); - } - - finish: - if error_converting(long_value) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - -#if (SIZEOF_LONG > SIZEOF_INT) - if ((long_value < INT_MIN) || (long_value > INT_MAX)) { - PyErr_SetString(PyExc_ValueError, - "integer won't fit into a C int"); - return -1; - } -#endif - return (int) long_value; -} - -static char * -index2ptr(PyArrayObject *mp, intp i) -{ - intp dim0; - if(mp->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed"); - return NULL; - } - dim0 = mp->dimensions[0]; - if (i<0) i += dim0; - if (i==0 && dim0 > 0) - return mp->data; - - if (i>0 && i < dim0) { - return mp->data+i*mp->strides[0]; - } - PyErr_SetString(PyExc_IndexError,"index out of bounds"); - return NULL; -} - -/*OBJECT_API - Compute the size of an array (in number of items) -*/ -static intp -PyArray_Size(PyObject *op) -{ - if (PyArray_Check(op)) { - return PyArray_SIZE((PyArrayObject *)op); - } - else { - return 0; - } -} - -static int 
-_copy_from0d(PyArrayObject *dest, PyArrayObject *src, int usecopy, int swap) -{ - char *aligned=NULL; - char *sptr; - int numcopies, nbytes; - void (*myfunc)(char *, intp, char *, intp, intp, int); - int retval=-1; - - NPY_BEGIN_THREADS_DEF - - numcopies = PyArray_SIZE(dest); - if (numcopies < 1) return 0; - nbytes = PyArray_ITEMSIZE(src); - - if (!PyArray_ISALIGNED(src)) { - aligned = malloc((size_t)nbytes); - if (aligned == NULL) { - PyErr_NoMemory(); - return -1; - } - memcpy(aligned, src->data, (size_t) nbytes); - usecopy = 1; - sptr = aligned; - } - else sptr = src->data; - if (PyArray_ISALIGNED(dest)) { - myfunc = _strided_byte_copy; - } - else if (usecopy) { - myfunc = _unaligned_strided_byte_copy; - } - else { - myfunc = _unaligned_strided_byte_move; - } - - if ((dest->nd < 2) || PyArray_ISONESEGMENT(dest)) { - char *dptr; - intp dstride; - - dptr = dest->data; - if (dest->nd == 1) - dstride = dest->strides[0]; - else - dstride = nbytes; - - PyArray_INCREF(src); - PyArray_XDECREF(dest); - - NPY_BEGIN_THREADS - - myfunc(dptr, dstride, sptr, 0, numcopies, (int) nbytes); - if (swap) - _strided_byte_swap(dptr, dstride, numcopies, (int) nbytes); - - NPY_END_THREADS - - } - else { - PyArrayIterObject *dit; - int axis=-1; - dit = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)dest, &axis); - if (dit == NULL) goto finish; - PyArray_INCREF(src); - PyArray_XDECREF(dest); - NPY_BEGIN_THREADS - while(dit->index < dit->size) { - myfunc(dit->dataptr, PyArray_STRIDE(dest, axis), - sptr, 0, - PyArray_DIM(dest, axis), nbytes); - if (swap) - _strided_byte_swap(dit->dataptr, - PyArray_STRIDE(dest, axis), - PyArray_DIM(dest, axis), nbytes); - PyArray_ITER_NEXT(dit); - } - NPY_END_THREADS - Py_DECREF(dit); - } - retval = 0; - finish: - if (aligned != NULL) free(aligned); - return retval; -} - -/* Special-case of PyArray_CopyInto when dst is 1-d - and contiguous (and aligned). - PyArray_CopyInto requires broadcastable arrays while - this one is a flattening operation... 
-*/ -int _flat_copyinto(PyObject *dst, PyObject *src, NPY_ORDER order) { - PyArrayIterObject *it; - void (*myfunc)(char *, intp, char *, intp, intp, int); - char *dptr; - int axis; - int elsize; - intp nbytes; - NPY_BEGIN_THREADS_DEF - - - if (PyArray_NDIM(src) == 0) { - PyArray_INCREF((PyArrayObject *)src); - PyArray_XDECREF((PyArrayObject *)dst); - NPY_BEGIN_THREADS - memcpy(PyArray_BYTES(dst), PyArray_BYTES(src), - PyArray_ITEMSIZE(src)); - NPY_END_THREADS - return 0; - } - - if (order == PyArray_FORTRANORDER) { - axis = 0; - } - else { - axis = PyArray_NDIM(src)-1; - } - - it = (PyArrayIterObject *)PyArray_IterAllButAxis(src, &axis); - if (it == NULL) return -1; - - if (PyArray_ISALIGNED(src)) { - myfunc = _strided_byte_copy; - } - else { - myfunc = _unaligned_strided_byte_copy; - } - - dptr = PyArray_BYTES(dst); - elsize = PyArray_ITEMSIZE(dst); - nbytes = elsize * PyArray_DIM(src, axis); - PyArray_INCREF((PyArrayObject *)src); - PyArray_XDECREF((PyArrayObject *)dst); - NPY_BEGIN_THREADS - while(it->index < it->size) { - myfunc(dptr, elsize, it->dataptr, - PyArray_STRIDE(src,axis), - PyArray_DIM(src,axis), elsize); - dptr += nbytes; - PyArray_ITER_NEXT(it); - } - NPY_END_THREADS - - Py_DECREF(it); - return 0; -} - - -static int -_copy_from_same_shape(PyArrayObject *dest, PyArrayObject *src, - void (*myfunc)(char *, intp, char *, intp, intp, int), - int swap) -{ - int maxaxis=-1, elsize; - intp maxdim; - PyArrayIterObject *dit, *sit; - NPY_BEGIN_THREADS_DEF - - dit = (PyArrayIterObject *) \ - PyArray_IterAllButAxis((PyObject *)dest, &maxaxis); - sit = (PyArrayIterObject *) \ - PyArray_IterAllButAxis((PyObject *)src, &maxaxis); - - maxdim = dest->dimensions[maxaxis]; - - if ((dit == NULL) || (sit == NULL)) { - Py_XDECREF(dit); - Py_XDECREF(sit); - return -1; - } - elsize = PyArray_ITEMSIZE(dest); - - PyArray_INCREF(src); - PyArray_XDECREF(dest); - - NPY_BEGIN_THREADS - while(dit->index < dit->size) { - /* strided copy of elsize bytes */ - myfunc(dit->dataptr, dest->strides[maxaxis], - sit->dataptr, src->strides[maxaxis], - maxdim, elsize); - if (swap) { - _strided_byte_swap(dit->dataptr, - dest->strides[maxaxis], - dest->dimensions[maxaxis], - elsize); - } - PyArray_ITER_NEXT(dit); - PyArray_ITER_NEXT(sit); - } - NPY_END_THREADS - - Py_DECREF(sit); - Py_DECREF(dit); - return 0; -} - -static int -_broadcast_copy(PyArrayObject *dest, PyArrayObject *src, - void (*myfunc)(char *, intp, char *, intp, intp, int), - int swap) -{ - int elsize; - PyArrayMultiIterObject *multi; - int maxaxis; intp maxdim; - NPY_BEGIN_THREADS_DEF - - elsize = PyArray_ITEMSIZE(dest); - multi = (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, dest, src); - if (multi == NULL) return -1; - - if (multi->size != PyArray_SIZE(dest)) { - PyErr_SetString(PyExc_ValueError, - "array dimensions are not "\ - "compatible for copy"); - Py_DECREF(multi); - return -1; - } - - maxaxis = PyArray_RemoveSmallest(multi); - if (maxaxis < 0) { /* copy 1 0-d array to another */ - PyArray_INCREF(src); - PyArray_XDECREF(dest); - memcpy(dest->data, src->data, elsize); - if (swap) byte_swap_vector(dest->data, 1, elsize); - return 0; - } - maxdim = multi->dimensions[maxaxis]; - - /* Increment the source and decrement the destination - reference counts - */ - PyArray_INCREF(src); - PyArray_XDECREF(dest); - - NPY_BEGIN_THREADS - while(multi->index < multi->size) { - myfunc(multi->iters[0]->dataptr, - multi->iters[0]->strides[maxaxis], - multi->iters[1]->dataptr, - multi->iters[1]->strides[maxaxis], - maxdim, elsize); - if (swap) { - 
_strided_byte_swap(multi->iters[0]->dataptr, - multi->iters[0]->strides[maxaxis], - maxdim, elsize); - } - PyArray_MultiIter_NEXT(multi); - } - NPY_END_THREADS - - Py_DECREF(multi); - return 0; -} - -/* If destination is not the right type, then src - will be cast to destination -- this requires - src and dest to have the same shape -*/ - -/* Requires arrays to have broadcastable shapes - - The arrays are assumed to have the same number of elements - They can be different sizes and have different types however. -*/ - -static int -_array_copy_into(PyArrayObject *dest, PyArrayObject *src, int usecopy) -{ - int swap; - void (*myfunc)(char *, intp, char *, intp, intp, int); - int simple; - int same; - NPY_BEGIN_THREADS_DEF - - - if (!PyArray_EquivArrTypes(dest, src)) { - return PyArray_CastTo(dest, src); - } - - if (!PyArray_ISWRITEABLE(dest)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array"); - return -1; - } - - same = PyArray_SAMESHAPE(dest, src); - simple = same && ((PyArray_ISCARRAY_RO(src) && PyArray_ISCARRAY(dest)) || - (PyArray_ISFARRAY_RO(src) && PyArray_ISFARRAY(dest))); - - if (simple) { - PyArray_INCREF(src); - PyArray_XDECREF(dest); - NPY_BEGIN_THREADS - if (usecopy) - memcpy(dest->data, src->data, PyArray_NBYTES(dest)); - else - memmove(dest->data, src->data, PyArray_NBYTES(dest)); - NPY_END_THREADS - return 0; - } - - swap = PyArray_ISNOTSWAPPED(dest) != PyArray_ISNOTSWAPPED(src); - - if (src->nd == 0) { - return _copy_from0d(dest, src, usecopy, swap); - } - - if (PyArray_ISALIGNED(dest) && PyArray_ISALIGNED(src)) { - myfunc = _strided_byte_copy; - } - else if (usecopy) { - myfunc = _unaligned_strided_byte_copy; - } - else { - myfunc = _unaligned_strided_byte_move; - } - - /* Could combine these because _broadcasted_copy would work as well. - But, same-shape copying is so common we want to speed it up. - */ - if (same) { - return _copy_from_same_shape(dest, src, myfunc, swap); - } - else { - return _broadcast_copy(dest, src, myfunc, swap); - } -} - -/*OBJECT_API - Copy an Array into another array -- memory must not overlap - Does not require src and dest to have "broadcastable" shapes - (only the same number of elements). 
-*/ -static int -PyArray_CopyAnyInto(PyArrayObject *dest, PyArrayObject *src) -{ - int elsize, simple; - PyArrayIterObject *idest, *isrc; - void (*myfunc)(char *, intp, char *, intp, intp, int); - NPY_BEGIN_THREADS_DEF - - if (!PyArray_EquivArrTypes(dest, src)) { - return PyArray_CastAnyTo(dest, src); - } - - if (!PyArray_ISWRITEABLE(dest)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array"); - return -1; - } - - if (PyArray_SIZE(dest) != PyArray_SIZE(src)) { - PyErr_SetString(PyExc_ValueError, - "arrays must have the same number of elements" - " for copy"); - return -1; - } - - simple = ((PyArray_ISCARRAY_RO(src) && PyArray_ISCARRAY(dest)) || - (PyArray_ISFARRAY_RO(src) && PyArray_ISFARRAY(dest))); - - if (simple) { - PyArray_INCREF(src); - PyArray_XDECREF(dest); - NPY_BEGIN_THREADS - memcpy(dest->data, src->data, PyArray_NBYTES(dest)); - NPY_END_THREADS - return 0; - } - - if (PyArray_SAMESHAPE(dest, src)) { - int swap; - if (PyArray_ISALIGNED(dest) && PyArray_ISALIGNED(src)) { - myfunc = _strided_byte_copy; - } - else { - myfunc = _unaligned_strided_byte_copy; - } - swap = PyArray_ISNOTSWAPPED(dest) != PyArray_ISNOTSWAPPED(src); - return _copy_from_same_shape(dest, src, myfunc, swap); - } - - /* Otherwise we have to do an iterator-based copy */ - idest = (PyArrayIterObject *)PyArray_IterNew((PyObject *)dest); - if (idest == NULL) return -1; - isrc = (PyArrayIterObject *)PyArray_IterNew((PyObject *)src); - if (isrc == NULL) {Py_DECREF(idest); return -1;} - elsize = dest->descr->elsize; - PyArray_INCREF(src); - PyArray_XDECREF(dest); - NPY_BEGIN_THREADS - while(idest->index < idest->size) { - memcpy(idest->dataptr, isrc->dataptr, elsize); - PyArray_ITER_NEXT(idest); - PyArray_ITER_NEXT(isrc); - } - NPY_END_THREADS - Py_DECREF(idest); - Py_DECREF(isrc); - return 0; -} - -/*OBJECT_API - Copy an Array into another array -- memory must not overlap. -*/ -static int -PyArray_CopyInto(PyArrayObject *dest, PyArrayObject *src) -{ - return _array_copy_into(dest, src, 1); -} - - -/*OBJECT_API - Move the memory of one array into another. -*/ -static int -PyArray_MoveInto(PyArrayObject *dest, PyArrayObject *src) -{ - return _array_copy_into(dest, src, 0); -} - - -/*OBJECT_API*/ -static int -PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) -{ - PyArrayObject *src; - PyObject *r; - int ret; - - /* Special code to mimic Numeric behavior for - character arrays. 
- */ - if (dest->descr->type == PyArray_CHARLTR && dest->nd > 0 \ - && PyString_Check(src_object)) { - int n_new, n_old; - char *new_string; - PyObject *tmp; - n_new = dest->dimensions[dest->nd-1]; - n_old = PyString_Size(src_object); - if (n_new > n_old) { - new_string = (char *)malloc(n_new); - memmove(new_string, - PyString_AS_STRING(src_object), - n_old); - memset(new_string+n_old, ' ', n_new-n_old); - tmp = PyString_FromStringAndSize(new_string, n_new); - free(new_string); - src_object = tmp; - } - } - - if (PyArray_Check(src_object)) { - src = (PyArrayObject *)src_object; - Py_INCREF(src); - } - else if (!PyArray_IsScalar(src_object, Generic) && - PyArray_HasArrayInterface(src_object, r)) { - src = (PyArrayObject *)r; - } - else { - PyArray_Descr* dtype; - dtype = dest->descr; - Py_INCREF(dtype); - src = (PyArrayObject *)PyArray_FromAny(src_object, dtype, 0, - dest->nd, - FORTRAN_IF(dest), - NULL); - } - if (src == NULL) return -1; - - ret = PyArray_MoveInto(dest, src); - Py_DECREF(src); - return ret; -} - - -/* These are also old calls (should use PyArray_NewFromDescr) */ - -/* They all zero-out the memory as previously done */ - -/* steals reference to descr -- and enforces native byteorder on it.*/ -/*OBJECT_API - Like FromDimsAndData but uses the Descr structure instead of typecode - as input. -*/ -static PyObject * -PyArray_FromDimsAndDataAndDescr(int nd, int *d, - PyArray_Descr *descr, - char *data) -{ - PyObject *ret; -#if SIZEOF_INTP != SIZEOF_INT - int i; - intp newd[MAX_DIMS]; -#endif - - if (!PyArray_ISNBO(descr->byteorder)) - descr->byteorder = '='; - -#if SIZEOF_INTP != SIZEOF_INT - for (i=0; itype_num != PyArray_OBJECT)) { - memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); - } - return ret; -} - -/* end old calls */ - - -/*OBJECT_API - Copy an array. -*/ -static PyObject * -PyArray_NewCopy(PyArrayObject *m1, NPY_ORDER fortran) -{ - PyArrayObject *ret; - if (fortran == PyArray_ANYORDER) - fortran = PyArray_ISFORTRAN(m1); - - Py_INCREF(m1->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(m1->ob_type, - m1->descr, - m1->nd, - m1->dimensions, - NULL, NULL, - fortran, - (PyObject *)m1); - if (ret == NULL) return NULL; - if (PyArray_CopyInto(ret, m1) == -1) { - Py_DECREF(ret); - return NULL; - } - - return (PyObject *)ret; -} - -static PyObject *array_big_item(PyArrayObject *, intp); - -/* Does nothing with descr (cannot be NULL) */ -/*OBJECT_API - Get scalar-equivalent to a region of memory described by a descriptor. 
-*/ -static PyObject * -PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) -{ - PyTypeObject *type; - PyObject *obj; - void *destptr; - PyArray_CopySwapFunc *copyswap; - int type_num; - int itemsize; - int swap; - - type_num = descr->type_num; - if (type_num == PyArray_BOOL) - PyArrayScalar_RETURN_BOOL_FROM_LONG(*(Bool*)data); - else if (PyDataType_FLAGCHK(descr, NPY_USE_GETITEM)) { - return descr->f->getitem(data, base); - } - itemsize = descr->elsize; - copyswap = descr->f->copyswap; - type = descr->typeobj; - swap = !PyArray_ISNBO(descr->byteorder); - if PyTypeNum_ISSTRING(type_num) { /* Eliminate NULL bytes */ - char *dptr = data; - dptr += itemsize-1; - while(itemsize && *dptr-- == 0) itemsize--; - if (type_num == PyArray_UNICODE && itemsize) { - /* make sure itemsize is a multiple of 4 */ - /* so round up to nearest multiple */ - itemsize = (((itemsize-1) >> 2) + 1) << 2; - } - } - if (type->tp_itemsize != 0) /* String type */ - obj = type->tp_alloc(type, itemsize); - else - obj = type->tp_alloc(type, 0); - if (obj == NULL) return NULL; - if PyTypeNum_ISFLEXIBLE(type_num) { - if (type_num == PyArray_STRING) { - destptr = PyString_AS_STRING(obj); - ((PyStringObject *)obj)->ob_shash = -1; - ((PyStringObject *)obj)->ob_sstate = \ - SSTATE_NOT_INTERNED; - memcpy(destptr, data, itemsize); - return obj; - } - else if (type_num == PyArray_UNICODE) { - PyUnicodeObject *uni = (PyUnicodeObject*)obj; - int length = itemsize >> 2; -#ifndef Py_UNICODE_WIDE - char *buffer; - int alloc=0; - length *= 2; -#endif - /* Need an extra slot and need to use - Python memory manager */ - uni->str = NULL; - destptr = PyMem_NEW(Py_UNICODE,length+1); - if (destptr == NULL) { - Py_DECREF(obj); - return PyErr_NoMemory(); - } - uni->str = (Py_UNICODE *)destptr; - uni->str[0] = 0; - uni->str[length] = 0; - uni->length = length; - uni->hash = -1; - uni->defenc = NULL; -#ifdef Py_UNICODE_WIDE - memcpy(destptr, data, itemsize); - if (swap) - byte_swap_vector(destptr, length, 4); -#else - /* need aligned data buffer */ - if ((swap) || ((((intp)data) % descr->alignment) != 0)) { - buffer = _pya_malloc(itemsize); - if (buffer == NULL) - return PyErr_NoMemory(); - alloc = 1; - memcpy(buffer, data, itemsize); - if (swap) { - byte_swap_vector(buffer, - itemsize >> 2, 4); - } - } - else buffer = data; - - /* Allocated enough for 2-characters per itemsize. - Now convert from the data-buffer - */ - length = PyUCS2Buffer_FromUCS4(uni->str, - (PyArray_UCS4 *)buffer, - itemsize >> 2); - if (alloc) _pya_free(buffer); - /* Resize the unicode result */ - if (MyPyUnicode_Resize(uni, length) < 0) { - Py_DECREF(obj); - return NULL; - } -#endif - return obj; - } - else { - PyVoidScalarObject *vobj = (PyVoidScalarObject *)obj; - vobj->base = NULL; - vobj->descr = descr; - Py_INCREF(descr); - vobj->obval = NULL; - vobj->ob_size = itemsize; - vobj->flags = BEHAVED | OWNDATA; - swap = 0; - if (descr->names) { - if (base) { - Py_INCREF(base); - vobj->base = base; - vobj->flags = PyArray_FLAGS(base); - vobj->flags &= ~OWNDATA; - vobj->obval = data; - return obj; - } - } - destptr = PyDataMem_NEW(itemsize); - if (destptr == NULL) { - Py_DECREF(obj); - return PyErr_NoMemory(); - } - vobj->obval = destptr; - } - } - else { - destptr = scalar_value(obj, descr); - } - /* copyswap for OBJECT increments the reference count */ - copyswap(destptr, data, swap, base); - return obj; -} - -/* returns an Array-Scalar Object of the type of arr - from the given pointer to memory -- main Scalar creation function - default new method calls this. 
-*/ - -/* Ideally, here the descriptor would contain all the information needed. - So, that we simply need the data and the descriptor, and perhaps - a flag -*/ - - -/* Return Array Scalar if 0-d array object is encountered */ - -/*OBJECT_API - Return either an array or the appropriate Python object if the array - is 0d and matches a Python type. -*/ -static PyObject * -PyArray_Return(PyArrayObject *mp) -{ - - if (mp == NULL) return NULL; - - if (PyErr_Occurred()) { - Py_XDECREF(mp); - return NULL; - } - - if (!PyArray_Check(mp)) return (PyObject *)mp; - - if (mp->nd == 0) { - PyObject *ret; - ret = PyArray_ToScalar(mp->data, mp); - Py_DECREF(mp); - return ret; - } - else { - return (PyObject *)mp; - } -} - - -/*MULTIARRAY_API - Initialize arrfuncs to NULL -*/ -static void -PyArray_InitArrFuncs(PyArray_ArrFuncs *f) -{ - int i; - for (i=0; i<PyArray_NTYPES; i++) { - f->cast[i] = NULL; - } - f->getitem = NULL; - f->setitem = NULL; - f->copyswapn = NULL; - f->copyswap = NULL; - f->compare = NULL; - f->argmax = NULL; - f->dotfunc = NULL; - f->scanfunc = NULL; - f->fromstr = NULL; - f->nonzero = NULL; - f->fill = NULL; - f->fillwithscalar = NULL; - for (i=0; i<PyArray_NSORTS; i++) { - f->sort[i] = NULL; - f->argsort[i] = NULL; - } - f->castdict = NULL; - f->scalarkind = NULL; - f->cancastscalarkindto = NULL; - f->cancastto = NULL; -} - -static Bool -_default_nonzero(void *ip, void *arr) -{ - int elsize = PyArray_ITEMSIZE(arr); - char *ptr = ip; - while (elsize--) { - if (*ptr++ != 0) return TRUE; - } - return FALSE; -} - -/* - Given a string return the type-number for - the data-type with that string as the type-object name. - Returns PyArray_NOTYPE without setting an error if no type can be - found. Only works for user-defined data-types. -*/ - -/*MULTIARRAY_API - */ -static int -PyArray_TypeNumFromName(char *str) -{ - int i; - PyArray_Descr *descr; - - for (i=0; i<NPY_NUMUSERTYPES; i++) { - descr = userdescrs[i]; - if (strcmp(descr->typeobj->tp_name, str) == 0) - return descr->type_num; - } - - return PyArray_NOTYPE; -} - -/* - returns typenum to associate with this type >=PyArray_USERDEF. - needs the userdecrs table and PyArray_NUMUSER variables - defined in arraytypes.inc -*/ -/*MULTIARRAY_API - Register Data type - Does not change the reference count of descr -*/ -static int -PyArray_RegisterDataType(PyArray_Descr *descr) -{ - PyArray_Descr *descr2; - int typenum; - int i; - PyArray_ArrFuncs *f; - - /* See if this type is already registered */ - for (i=0; i<NPY_NUMUSERTYPES; i++) { - descr2 = userdescrs[i]; - if (descr2 == descr) - return descr->type_num; - } - typenum = PyArray_USERDEF + NPY_NUMUSERTYPES; - descr->type_num = typenum; - if (descr->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "cannot register a" \ - "flexible data-type"); - return -1; - } - f = descr->f; - if (f->nonzero == NULL) { - f->nonzero = _default_nonzero; - } - if (f->copyswap == NULL || f->getitem == NULL || - f->copyswapn == NULL || f->setitem == NULL) { - PyErr_SetString(PyExc_ValueError, "a required array function" \ - " is missing."); - return -1; - } - if (descr->typeobj == NULL) { - PyErr_SetString(PyExc_ValueError, "missing typeobject"); - return -1; - } - userdescrs = realloc(userdescrs, - (NPY_NUMUSERTYPES+1)*sizeof(void *)); - if (userdescrs == NULL) { - PyErr_SetString(PyExc_MemoryError, "RegisterDataType"); - return -1; - } - userdescrs[NPY_NUMUSERTYPES++] = descr; - return typenum; -} - -/*MULTIARRAY_API - Register Casting Function - Replaces any function currently stored.
-*/ -static int -PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype, - PyArray_VectorUnaryFunc *castfunc) -{ - PyObject *cobj, *key; - int ret; - if (totype < PyArray_NTYPES) { - descr->f->cast[totype] = castfunc; - return 0; - } - if (!PyTypeNum_ISUSERDEF(totype)) { - PyErr_SetString(PyExc_TypeError, "invalid type number."); - return -1; - } - if (descr->f->castdict == NULL) { - descr->f->castdict = PyDict_New(); - if (descr->f->castdict == NULL) return -1; - } - key = PyInt_FromLong(totype); - if (PyErr_Occurred()) return -1; - cobj = PyCObject_FromVoidPtr((void *)castfunc, NULL); - if (cobj == NULL) {Py_DECREF(key); return -1;} - ret = PyDict_SetItem(descr->f->castdict, key, cobj); - Py_DECREF(key); - Py_DECREF(cobj); - return ret; -} - -static int * -_append_new(int *types, int insert) -{ - int n=0; - int *newtypes; - - while (types[n] != PyArray_NOTYPE) n++; - newtypes = (int *)realloc(types, (n+2)*sizeof(int)); - newtypes[n] = insert; - newtypes[n+1] = PyArray_NOTYPE; - return newtypes; -} - -/*MULTIARRAY_API - Register a type number indicating that a descriptor can be cast - to it safely -*/ -static int -PyArray_RegisterCanCast(PyArray_Descr *descr, int totype, - NPY_SCALARKIND scalar) -{ - if (scalar == PyArray_NOSCALAR) { - /* register with cancastto */ - /* These lists won't be freed once created - -- they become part of the data-type */ - if (descr->f->cancastto == NULL) { - descr->f->cancastto = (int *)malloc(1*sizeof(int)); - descr->f->cancastto[0] = PyArray_NOTYPE; - } - descr->f->cancastto = _append_new(descr->f->cancastto, - totype); - } - else { - /* register with cancastscalarkindto */ - if (descr->f->cancastscalarkindto == NULL) { - int i; - descr->f->cancastscalarkindto = \ - (int **)malloc(PyArray_NSCALARKINDS* \ - sizeof(int*)); - for (i=0; i<PyArray_NSCALARKINDS; i++) { - descr->f->cancastscalarkindto[i] = NULL; - } - } - if (descr->f->cancastscalarkindto[scalar] == NULL) { - descr->f->cancastscalarkindto[scalar] = \ - (int *)malloc(1*sizeof(int)); - descr->f->cancastscalarkindto[scalar][0] = \ - PyArray_NOTYPE; - } - descr->f->cancastscalarkindto[scalar] = \ - _append_new(descr->f->cancastscalarkindto[scalar], - totype); - } - return 0; -} - -/*OBJECT_API - To File -*/ -static int -PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) -{ - intp size; - intp n, n2; - size_t n3, n4; - PyArrayIterObject *it; - PyObject *obj, *strobj, *tupobj; - - n3 = (sep ?
strlen((const char *)sep) : 0); - if (n3 == 0) { /* binary data */ - if (PyDataType_FLAGCHK(self->descr, NPY_LIST_PICKLE)) { - PyErr_SetString(PyExc_ValueError, "cannot write " \ - "object arrays to a file in " \ - "binary mode"); - return -1; - } - - if (PyArray_ISCONTIGUOUS(self)) { - size = PyArray_SIZE(self); - NPY_BEGIN_ALLOW_THREADS - n=fwrite((const void *)self->data, - (size_t) self->descr->elsize, - (size_t) size, fp); - NPY_END_ALLOW_THREADS - if (n < size) { - PyErr_Format(PyExc_ValueError, - "%ld requested and %ld written", - (long) size, (long) n); - return -1; - } - } - else { - NPY_BEGIN_THREADS_DEF - - it=(PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)self); - NPY_BEGIN_THREADS - while(it->index < it->size) { - if (fwrite((const void *)it->dataptr, - (size_t) self->descr->elsize, - 1, fp) < 1) { - NPY_END_THREADS - PyErr_Format(PyExc_IOError, - "problem writing element"\ - " %d to file", - (int)it->index); - Py_DECREF(it); - return -1; - } - PyArray_ITER_NEXT(it); - } - NPY_END_THREADS - Py_DECREF(it); - } - } - else { /* text data */ - - it=(PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)self); - n4 = (format ? strlen((const char *)format) : 0); - while(it->index < it->size) { - obj = self->descr->f->getitem(it->dataptr, self); - if (obj == NULL) {Py_DECREF(it); return -1;} - if (n4 == 0) { /* standard writing */ - strobj = PyObject_Str(obj); - Py_DECREF(obj); - if (strobj == NULL) {Py_DECREF(it); return -1;} - } - else { /* use format string */ - tupobj = PyTuple_New(1); - if (tupobj == NULL) {Py_DECREF(it); return -1;} - PyTuple_SET_ITEM(tupobj,0,obj); - obj = PyString_FromString((const char *)format); - if (obj == NULL) {Py_DECREF(tupobj); - Py_DECREF(it); return -1;} - strobj = PyString_Format(obj, tupobj); - Py_DECREF(obj); - Py_DECREF(tupobj); - if (strobj == NULL) {Py_DECREF(it); return -1;} - } - NPY_BEGIN_ALLOW_THREADS - n=fwrite(PyString_AS_STRING(strobj), 1, - n2=PyString_GET_SIZE(strobj), fp); - NPY_END_ALLOW_THREADS - if (n < n2) { - PyErr_Format(PyExc_IOError, - "problem writing element %d"\ - " to file", - (int) it->index); - Py_DECREF(strobj); - Py_DECREF(it); - return -1; - } - /* write separator for all but last one */ - if (it->index != it->size-1) - if (fwrite(sep, 1, n3, fp) < n3) { - PyErr_Format(PyExc_IOError, - "problem writing "\ - "separator to file"); - Py_DECREF(strobj); - Py_DECREF(it); - return -1; - } - Py_DECREF(strobj); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return 0; -} - -/*OBJECT_API - To List -*/ -static PyObject * -PyArray_ToList(PyArrayObject *self) -{ - PyObject *lp; - PyArrayObject *v; - intp sz, i; - - if (!PyArray_Check(self)) return (PyObject *)self; - - if (self->nd == 0) - return self->descr->f->getitem(self->data,self); - - sz = self->dimensions[0]; - lp = PyList_New(sz); - - for (i=0; ind >= self->nd) { - PyErr_SetString(PyExc_RuntimeError, - "array_item not returning smaller-" \ - "dimensional array"); - Py_DECREF(v); - Py_DECREF(lp); - return NULL; - } - PyList_SetItem(lp, i, PyArray_ToList(v)); - Py_DECREF(v); - } - - return lp; -} - -/*OBJECT_API*/ -static PyObject * -PyArray_ToString(PyArrayObject *self, NPY_ORDER order) -{ - intp numbytes; - intp index; - char *dptr; - int elsize; - PyObject *ret; - PyArrayIterObject *it; - - if (order == NPY_ANYORDER) - order = PyArray_ISFORTRAN(self); - - /* if (PyArray_TYPE(self) == PyArray_OBJECT) { - PyErr_SetString(PyExc_ValueError, "a string for the data" \ - "in an object array is not appropriate"); - return NULL; - } - */ - - numbytes = 
PyArray_NBYTES(self); - if ((PyArray_ISCONTIGUOUS(self) && (order == NPY_CORDER)) || \ - (PyArray_ISFORTRAN(self) && (order == NPY_FORTRANORDER))) { - ret = PyString_FromStringAndSize(self->data, (int) numbytes); - } - else { - PyObject *new; - if (order == NPY_FORTRANORDER) { - /* iterators are always in C-order */ - new = PyArray_Transpose(self, NULL); - if (new == NULL) return NULL; - } - else { - Py_INCREF(self); - new = (PyObject *)self; - } - it = (PyArrayIterObject *)PyArray_IterNew(new); - Py_DECREF(new); - if (it==NULL) return NULL; - ret = PyString_FromStringAndSize(NULL, (int) numbytes); - if (ret == NULL) {Py_DECREF(it); return NULL;} - dptr = PyString_AS_STRING(ret); - index = it->size; - elsize = self->descr->elsize; - while(index--) { - memcpy(dptr, it->dataptr, elsize); - dptr += elsize; - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return ret; -} - - -/*********************** end C-API functions **********************/ - - -/* array object functions */ - -static void -array_dealloc(PyArrayObject *self) { - - if (self->weakreflist != NULL) - PyObject_ClearWeakRefs((PyObject *)self); - - if(self->base) { - /* UPDATEIFCOPY means that base points to an - array that should be updated with the contents - of this array upon destruction. - self->base->flags must have been WRITEABLE - (checked previously) and it was locked here - thus, unlock it. - */ - if (self->flags & UPDATEIFCOPY) { - ((PyArrayObject *)self->base)->flags |= WRITEABLE; - Py_INCREF(self); /* hold on to self in next call */ - if (PyArray_CopyAnyInto((PyArrayObject *)self->base, - self) < 0) { - PyErr_Print(); - PyErr_Clear(); - } - /* Don't need to DECREF -- because we are deleting - self already... */ - } - /* In any case base is pointing to something that we need - to DECREF -- either a view or a buffer object */ - Py_DECREF(self->base); - } - - if ((self->flags & OWNDATA) && self->data) { - /* Free internal references if an Object array */ - if (PyDataType_FLAGCHK(self->descr, NPY_ITEM_REFCOUNT)) { - Py_INCREF(self); /*hold on to self */ - PyArray_XDECREF(self); - /* Don't need to DECREF -- because we are deleting - self already... 
*/ - } - PyDataMem_FREE(self->data); - } - - PyDimMem_FREE(self->dimensions); - - Py_DECREF(self->descr); - - self->ob_type->tp_free((PyObject *)self); -} - -/************************************************************************* - **************** Implement Mapping Protocol *************************** - *************************************************************************/ - -static Py_ssize_t -array_length(PyArrayObject *self) -{ - if (self->nd != 0) { - return self->dimensions[0]; - } else { - PyErr_SetString(PyExc_TypeError, "len() of unsized object"); - return -1; - } -} - -static PyObject * -array_big_item(PyArrayObject *self, intp i) -{ - char *item; - PyArrayObject *r; - - if(self->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed"); - return NULL; - } - if ((item = index2ptr(self, i)) == NULL) return NULL; - - Py_INCREF(self->descr); - r = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, - self->descr, - self->nd-1, - self->dimensions+1, - self->strides+1, item, - self->flags, - (PyObject *)self); - if (r == NULL) return NULL; - Py_INCREF(self); - r->base = (PyObject *)self; - PyArray_UpdateFlags(r, CONTIGUOUS | FORTRAN); - return (PyObject *)r; -} - -/* contains optimization for 1-d arrays */ -static PyObject * -array_item_nice(PyArrayObject *self, Py_ssize_t i) -{ - if (self->nd == 1) { - char *item; - if ((item = index2ptr(self, i)) == NULL) return NULL; - return PyArray_Scalar(item, self->descr, (PyObject *)self); - } - else { - return PyArray_Return((PyArrayObject *)\ - array_big_item(self, (intp) i)); - } -} - -static int -array_ass_big_item(PyArrayObject *self, intp i, PyObject *v) -{ - PyArrayObject *tmp; - char *item; - int ret; - - if (v == NULL) { - PyErr_SetString(PyExc_ValueError, - "can't delete array elements"); - return -1; - } - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "array is not writeable"); - return -1; - } - if (self->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); - return -1; - } - - - if (self->nd > 1) { - if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL) - return -1; - ret = PyArray_CopyObject(tmp, v); - Py_DECREF(tmp); - return ret; - } - - if ((item = index2ptr(self, i)) == NULL) return -1; - if (self->descr->f->setitem(v, item, self) == -1) return -1; - return 0; -} - -#if PY_VERSION_HEX < 0x02050000 -#if SIZEOF_INT == SIZEOF_INTP -#define array_ass_item array_ass_big_item -#endif -#else -#if SIZEOF_SIZE_T == SIZEOF_INTP -#define array_ass_item array_ass_big_item -#endif -#endif -#ifndef array_ass_item -static int -array_ass_item(PyArrayObject *self, Py_ssize_t i, PyObject *v) -{ - return array_ass_big_item(self, (intp) i, v); -} -#endif - - -/* -------------------------------------------------------------- */ -static int -slice_coerce_index(PyObject *o, intp *v) -{ - *v = PyArray_PyIntAsIntp(o); - if (error_converting(*v)) { - PyErr_Clear(); - return 0; - } - return 1; -} - - -/* This is basically PySlice_GetIndicesEx, but with our coercion - * of indices to integers (plus, that function is new in Python 2.3) */ -static int -slice_GetIndices(PySliceObject *r, intp length, - intp *start, intp *stop, intp *step, - intp *slicelength) -{ - intp defstop; - - if (r->step == Py_None) { - *step = 1; - } else { - if (!slice_coerce_index(r->step, step)) return -1; - if (*step == 0) { - PyErr_SetString(PyExc_ValueError, - "slice step cannot be zero"); - return -1; - } - } - /* defstart = *step < 0 ? length - 1 : 0; */ - - defstop = *step < 0 ? 
-1 : length; - - if (r->start == Py_None) { - *start = *step < 0 ? length-1 : 0; - } else { - if (!slice_coerce_index(r->start, start)) return -1; - if (*start < 0) *start += length; - if (*start < 0) *start = (*step < 0) ? -1 : 0; - if (*start >= length) { - *start = (*step < 0) ? length - 1 : length; - } - } - - if (r->stop == Py_None) { - *stop = defstop; - } else { - if (!slice_coerce_index(r->stop, stop)) return -1; - if (*stop < 0) *stop += length; - if (*stop < 0) *stop = -1; - if (*stop > length) *stop = length; - } - - if ((*step < 0 && *stop >= *start) || \ - (*step > 0 && *start >= *stop)) { - *slicelength = 0; - } else if (*step < 0) { - *slicelength = (*stop - *start + 1) / (*step) + 1; - } else { - *slicelength = (*stop - *start - 1) / (*step) + 1; - } - - return 0; -} - -#define PseudoIndex -1 -#define RubberIndex -2 -#define SingleIndex -3 - -static intp -parse_subindex(PyObject *op, intp *step_size, intp *n_steps, intp max) -{ - intp index; - - if (op == Py_None) { - *n_steps = PseudoIndex; - index = 0; - } else if (op == Py_Ellipsis) { - *n_steps = RubberIndex; - index = 0; - } else if (PySlice_Check(op)) { - intp stop; - if (slice_GetIndices((PySliceObject *)op, max, - &index, &stop, step_size, n_steps) < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, - "invalid slice"); - } - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - index = 0; - } - } else { - index = PyArray_PyIntAsIntp(op); - if (error_converting(index)) { - PyErr_SetString(PyExc_IndexError, - "each subindex must be either a "\ - "slice, an integer, Ellipsis, or "\ - "newaxis"); - goto fail; - } - *n_steps = SingleIndex; - *step_size = 0; - if (index < 0) index += max; - if (index >= max || index < 0) { - PyErr_SetString(PyExc_IndexError, "invalid index"); - goto fail; - } - } - return index; - fail: - return -1; -} - - -static int -parse_index(PyArrayObject *self, PyObject *op, - intp *dimensions, intp *strides, intp *offset_ptr) -{ - int i, j, n; - int nd_old, nd_new, n_add, n_pseudo; - intp n_steps, start, offset, step_size; - PyObject *op1=NULL; - int is_slice; - - if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) { - n = 1; - op1 = op; - Py_INCREF(op); - /* this relies on the fact that n==1 for loop below */ - is_slice = 1; - } - else { - if (!PySequence_Check(op)) { - PyErr_SetString(PyExc_IndexError, - "index must be either an int "\ - "or a sequence"); - return -1; - } - n = PySequence_Length(op); - is_slice = 0; - } - - nd_old = nd_new = 0; - - offset = 0; - for(i=0; ind ? 
\ - self->dimensions[nd_old] : 0); - Py_DECREF(op1); - if (start == -1) break; - - if (n_steps == PseudoIndex) { - dimensions[nd_new] = 1; strides[nd_new] = 0; - nd_new++; - } else { - if (n_steps == RubberIndex) { - for(j=i+1, n_pseudo=0; jnd-(n-i-n_pseudo-1+nd_old); - if (n_add < 0) { - PyErr_SetString(PyExc_IndexError, - "too many indices"); - return -1; - } - for(j=0; jdimensions[nd_old]; - strides[nd_new] = \ - self->strides[nd_old]; - nd_new++; nd_old++; - } - } else { - if (nd_old >= self->nd) { - PyErr_SetString(PyExc_IndexError, - "too many indices"); - return -1; - } - offset += self->strides[nd_old]*start; - nd_old++; - if (n_steps != SingleIndex) { - dimensions[nd_new] = n_steps; - strides[nd_new] = step_size * \ - self->strides[nd_old-1]; - nd_new++; - } - } - } - } - if (i < n) return -1; - n_add = self->nd-nd_old; - for(j=0; jdimensions[nd_old]; - strides[nd_new] = self->strides[nd_old]; - nd_new++; nd_old++; - } - *offset_ptr = offset; - return nd_new; -} - -static void -_swap_axes(PyArrayMapIterObject *mit, PyArrayObject **ret, int getmap) -{ - PyObject *new; - int n1, n2, n3, val, bnd; - int i; - PyArray_Dims permute; - intp d[MAX_DIMS]; - PyArrayObject *arr; - - permute.ptr = d; - permute.len = mit->nd; - - /* arr might not have the right number of dimensions - and need to be reshaped first by pre-pending ones */ - arr = *ret; - if (arr->nd != mit->nd) { - for (i=1; i<=arr->nd; i++) { - permute.ptr[mit->nd-i] = arr->dimensions[arr->nd-i]; - } - for (i=0; ind-arr->nd; i++) { - permute.ptr[i] = 1; - } - new = PyArray_Newshape(arr, &permute, PyArray_ANYORDER); - Py_DECREF(arr); - *ret = (PyArrayObject *)new; - if (new == NULL) return; - } - - /* Setting and getting need to have different permutations. - On the get we are permuting the returned object, but on - setting we are permuting the object-to-be-set. - The set permutation is the inverse of the get permutation. - */ - - /* For getting the array the tuple for transpose is - (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1) - n1 is the number of dimensions of - the broadcasted index array - n2 is the number of dimensions skipped at the - start - n3 is the number of dimensions of the - result - */ - - /* For setting the array the tuple for transpose is - (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1) - */ - n1 = mit->iters[0]->nd_m1 + 1; - n2 = mit->iteraxes[0]; - n3 = mit->nd; - - bnd = (getmap ? n1 : n2); /* use n1 as the boundary if getting - but n2 if setting */ - - val = bnd; - i = 0; - while(val < n1+n2) - permute.ptr[i++] = val++; - val = 0; - while(val < bnd) - permute.ptr[i++] = val++; - val = n1+n2; - while(val < n3) - permute.ptr[i++] = val++; - - new = PyArray_Transpose(*ret, &permute); - Py_DECREF(*ret); - *ret = (PyArrayObject *)new; -} - -/* Prototypes for Mapping calls --- not part of the C-API - because only useful as part of a getitem call. -*/ - -static void PyArray_MapIterReset(PyArrayMapIterObject *); -static void PyArray_MapIterNext(PyArrayMapIterObject *); -static void PyArray_MapIterBind(PyArrayMapIterObject *, PyArrayObject *); -static PyObject* PyArray_MapIterNew(PyObject *, int, int); - -static PyObject * -PyArray_GetMap(PyArrayMapIterObject *mit) -{ - - PyArrayObject *ret, *temp; - PyArrayIterObject *it; - int index; - int swap; - PyArray_CopySwapFunc *copyswap; - - /* Unbound map iterator --- Bind should have been called */ - if (mit->ait == NULL) return NULL; - - /* This relies on the map iterator object telling us the shape - of the new array in nd and dimensions. 
- */ - temp = mit->ait->ao; - Py_INCREF(temp->descr); - ret = (PyArrayObject *)\ - PyArray_NewFromDescr(temp->ob_type, - temp->descr, - mit->nd, mit->dimensions, - NULL, NULL, - PyArray_ISFORTRAN(temp), - (PyObject *)temp); - if (ret == NULL) return NULL; - - /* Now just iterate through the new array filling it in - with the next object from the original array as - defined by the mapping iterator */ - - if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret)) - == NULL) { - Py_DECREF(ret); - return NULL; - } - index = it->size; - swap = (PyArray_ISNOTSWAPPED(temp) != PyArray_ISNOTSWAPPED(ret)); - copyswap = ret->descr->f->copyswap; - PyArray_MapIterReset(mit); - while (index--) { - copyswap(it->dataptr, mit->dataptr, swap, ret); - PyArray_MapIterNext(mit); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - - /* check for consecutive axes */ - if ((mit->subspace != NULL) && (mit->consec)) { - if (mit->iteraxes[0] > 0) { /* then we need to swap */ - _swap_axes(mit, &ret, 1); - } - } - return (PyObject *)ret; -} - -static int -PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op) -{ - PyObject *arr=NULL; - PyArrayIterObject *it; - int index; - int swap; - PyArray_CopySwapFunc *copyswap; - PyArray_Descr *descr; - - /* Unbound Map Iterator */ - if (mit->ait == NULL) return -1; - - descr = mit->ait->ao->descr; - Py_INCREF(descr); - arr = PyArray_FromAny(op, descr, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; - - if ((mit->subspace != NULL) && (mit->consec)) { - if (mit->iteraxes[0] > 0) { /* then we need to swap */ - _swap_axes(mit, (PyArrayObject **)&arr, 0); - if (arr == NULL) return -1; - } - } - - /* Be sure values array is "broadcastable" - to shape of mit->dimensions, mit->nd */ - - if ((it = (PyArrayIterObject *)\ - PyArray_BroadcastToShape(arr, mit->dimensions, mit->nd))==NULL) { - Py_DECREF(arr); - return -1; - } - - index = mit->size; - swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != \ - (PyArray_ISNOTSWAPPED(arr))); - copyswap = PyArray_DESCR(arr)->f->copyswap; - PyArray_MapIterReset(mit); - /* Need to decref hasobject arrays */ - if (PyDataType_FLAGCHK(descr, NPY_ITEM_REFCOUNT)) { - while (index--) { - PyArray_Item_XDECREF(mit->dataptr, PyArray_DESCR(arr)); - PyArray_Item_INCREF(it->dataptr, PyArray_DESCR(arr)); - memmove(mit->dataptr, it->dataptr, sizeof(PyObject *)); - /* ignored unless VOID array with object's */ - if (swap) - copyswap(mit->dataptr, NULL, swap, arr); - PyArray_MapIterNext(mit); - PyArray_ITER_NEXT(it); - } - Py_DECREF(arr); - Py_DECREF(it); - return 0; - } - while(index--) { - memmove(mit->dataptr, it->dataptr, PyArray_ITEMSIZE(arr)); - if (swap) - copyswap(mit->dataptr, NULL, swap, arr); - PyArray_MapIterNext(mit); - PyArray_ITER_NEXT(it); - } - Py_DECREF(arr); - Py_DECREF(it); - return 0; -} - -int -count_new_axes_0d(PyObject *tuple) -{ - int i, argument_count; - int ellipsis_count = 0; - int newaxis_count = 0; - - argument_count = PyTuple_GET_SIZE(tuple); - - for (i = 0; i < argument_count; ++i) { - PyObject *arg = PyTuple_GET_ITEM(tuple, i); - if (arg == Py_Ellipsis && !ellipsis_count) ellipsis_count++; - else if (arg == Py_None) newaxis_count++; - else break; - } - if (i < argument_count) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can only use a single ()" - " or a list of newaxes (and a single ...)" - " as an index"); - return -1; - } - if (newaxis_count > MAX_DIMS) { - PyErr_SetString(PyExc_IndexError, - "too many dimensions"); - return -1; - } - return newaxis_count; -} - -static PyObject * -add_new_axes_0d(PyArrayObject *arr, int 
newaxis_count) -{ - PyArrayObject *other; - intp dimensions[MAX_DIMS]; - int i; - for (i = 0; i < newaxis_count; ++i) { - dimensions[i] = 1; - } - Py_INCREF(arr->descr); - if ((other = (PyArrayObject *) - PyArray_NewFromDescr(arr->ob_type, arr->descr, - newaxis_count, dimensions, - NULL, arr->data, - arr->flags, - (PyObject *)arr)) == NULL) - return NULL; - other->base = (PyObject *)arr; - Py_INCREF(arr); - return (PyObject *)other; -} - - -/* This checks the args for any fancy indexing objects */ - -#define SOBJ_NOTFANCY 0 -#define SOBJ_ISFANCY 1 -#define SOBJ_BADARRAY 2 -#define SOBJ_TOOMANY 3 -#define SOBJ_LISTTUP 4 - -static int -fancy_indexing_check(PyObject *args) -{ - int i, n; - PyObject *obj; - int retval = SOBJ_NOTFANCY; - - if (PyTuple_Check(args)) { - n = PyTuple_GET_SIZE(args); - if (n >= MAX_DIMS) return SOBJ_TOOMANY; - for (i=0; i=MAX_DIMS) return SOBJ_ISFANCY; - for (i=0; i SOBJ_ISFANCY) return retval; - } - } - return retval; -} - -/* Called when treating array object like a mapping -- called first from - Python when using a[object] unless object is a standard slice object - (not an extended one). - -*/ - -/* There are two situations: - - 1 - the subscript is a standard view and a reference to the - array can be returned - - 2 - the subscript uses Boolean masks or integer indexing and - therefore a new array is created and returned. - -*/ - -/* Always returns arrays */ - -static PyObject *iter_subscript(PyArrayIterObject *, PyObject *); - - -static PyObject * -array_subscript_simple(PyArrayObject *self, PyObject *op) -{ - intp dimensions[MAX_DIMS], strides[MAX_DIMS]; - intp offset; - int nd; - PyArrayObject *other; - intp value; - - value = PyArray_PyIntAsIntp(op); - if (!PyErr_Occurred()) { - return array_big_item(self, value); - } - PyErr_Clear(); - - /* Standard (view-based) Indexing */ - if ((nd = parse_index(self, op, dimensions, strides, &offset)) - == -1) return NULL; - - /* This will only work if new array will be a view */ - Py_INCREF(self->descr); - if ((other = (PyArrayObject *) \ - PyArray_NewFromDescr(self->ob_type, self->descr, - nd, dimensions, - strides, self->data+offset, - self->flags, - (PyObject *)self)) == NULL) - return NULL; - - other->base = (PyObject *)self; - Py_INCREF(self); - - PyArray_UpdateFlags(other, UPDATE_ALL); - - return (PyObject *)other; -} - -static PyObject * -array_subscript(PyArrayObject *self, PyObject *op) -{ - int nd, fancy; - PyArrayObject *other; - PyArrayMapIterObject *mit; - - if (PyString_Check(op) || PyUnicode_Check(op)) { - if (self->descr->names) { - PyObject *obj; - obj = PyDict_GetItem(self->descr->fields, op); - if (obj != NULL) { - PyArray_Descr *descr; - int offset; - PyObject *title; - - if (PyArg_ParseTuple(obj, "Oi|O", - &descr, &offset, &title)) { - Py_INCREF(descr); - return PyArray_GetField(self, descr, - offset); - } - } - } - - PyErr_Format(PyExc_ValueError, - "field named %s not found.", - PyString_AsString(op)); - return NULL; - } - - if (self->nd == 0) { - if (op == Py_Ellipsis) { - /* XXX: This leads to a small inconsistency - XXX: with the nd>0 case where (x[...] is x) - XXX: is false for nd>0 case. 
*/ - Py_INCREF(self); - return (PyObject *)self; - } - if (op == Py_None) - return add_new_axes_0d(self, 1); - if (PyTuple_Check(op)) { - if (0 == PyTuple_GET_SIZE(op)) { - Py_INCREF(self); - return (PyObject *)self; - } - if ((nd = count_new_axes_0d(op)) == -1) - return NULL; - return add_new_axes_0d(self, nd); - } - /* Allow Boolean mask selection also */ - if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) || - (PyArray_Check(op) && (PyArray_DIMS(op)==0) && - PyArray_ISBOOL(op))) { - if (PyObject_IsTrue(op)) { - Py_INCREF(self); - return (PyObject *)self; - } - else { - intp oned = 0; - Py_INCREF(self->descr); - return PyArray_NewFromDescr(self->ob_type, - self->descr, - 1, &oned, - NULL, NULL, - NPY_DEFAULT, - NULL); - } - } - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); - return NULL; - } - - fancy = fancy_indexing_check(op); - - if (fancy != SOBJ_NOTFANCY) { - int oned; - oned = ((self->nd == 1) && - !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1)); - - /* wrap arguments into a mapiter object */ - mit = (PyArrayMapIterObject *)\ - PyArray_MapIterNew(op, oned, fancy); - if (mit == NULL) return NULL; - if (oned) { - PyArrayIterObject *it; - PyObject *rval; - it = (PyArrayIterObject *)\ - PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(mit); return NULL;} - rval = iter_subscript(it, mit->indexobj); - Py_DECREF(it); - Py_DECREF(mit); - return rval; - } - PyArray_MapIterBind(mit, self); - other = (PyArrayObject *)PyArray_GetMap(mit); - Py_DECREF(mit); - return (PyObject *)other; - } - - return array_subscript_simple(self, op); -} - - -/* Another assignment hacked by using CopyObject. */ - -/* This only works if subscript returns a standard view. */ - -/* Again there are two cases. In the first case, PyArray_CopyObject - can be used. In the second case, a new indexing function has to be - used. -*/ - -static int iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *); - -static int -array_ass_sub_simple(PyArrayObject *self, PyObject *index, PyObject *op) -{ - int ret; - PyArrayObject *tmp; - intp value; - - value = PyArray_PyIntAsIntp(index); - if (!error_converting(value)) { - return array_ass_big_item(self, value, op); - } - PyErr_Clear(); - - /* Rest of standard (view-based) indexing */ - - if (PyArray_CheckExact(self)) { - tmp = (PyArrayObject *)array_subscript_simple(self, index); - if (tmp == NULL) return -1; - } - else { - PyObject *tmp0; - tmp0 = PyObject_GetItem((PyObject *)self, index); - if (tmp0 == NULL) return -1; - if (!PyArray_Check(tmp0)) { - PyErr_SetString(PyExc_RuntimeError, - "Getitem not returning array."); - Py_DECREF(tmp0); - return -1; - } - tmp = (PyArrayObject *)tmp0; - } - - if (PyArray_ISOBJECT(self) && (tmp->nd == 0)) { - ret = tmp->descr->f->setitem(op, tmp->data, tmp); - } - else { - ret = PyArray_CopyObject(tmp, op); - } - Py_DECREF(tmp); - return ret; -} - - -/* return -1 if tuple-object seq is not a tuple of integers. 
- otherwise fill vals with converted integers -*/ -static int -_tuple_of_integers(PyObject *seq, intp *vals, int maxvals) -{ - int i; - PyObject *obj; - intp temp; - - for (i=0; i 0) || - PyList_Check(obj)) return -1; - temp = PyArray_PyIntAsIntp(obj); - if (error_converting(temp)) return -1; - vals[i] = temp; - } - return 0; -} - - -static int -array_ass_sub(PyArrayObject *self, PyObject *index, PyObject *op) -{ - int ret, oned, fancy; - PyArrayMapIterObject *mit; - intp vals[MAX_DIMS]; - - if (op == NULL) { - PyErr_SetString(PyExc_ValueError, - "cannot delete array elements"); - return -1; - } - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "array is not writeable"); - return -1; - } - - if (PyInt_Check(index) || PyArray_IsScalar(index, Integer) || - PyLong_Check(index) || (PyIndex_Check(index) && - !PySequence_Check(index))) { - intp value; - value = PyArray_PyIntAsIntp(index); - if (PyErr_Occurred()) - PyErr_Clear(); - else - return array_ass_big_item(self, value, op); - } - - if (PyString_Check(index) || PyUnicode_Check(index)) { - if (self->descr->names) { - PyObject *obj; - obj = PyDict_GetItem(self->descr->fields, index); - if (obj != NULL) { - PyArray_Descr *descr; - int offset; - PyObject *title; - - if (PyArg_ParseTuple(obj, "Oi|O", - &descr, &offset, &title)) { - Py_INCREF(descr); - return PyArray_SetField(self, descr, - offset, op); - } - } - } - - PyErr_Format(PyExc_ValueError, - "field named %s not found.", - PyString_AsString(index)); - return -1; - } - - if (self->nd == 0) { - /* Several different exceptions to the 0-d no-indexing rule - - 1) ellipses - 2) empty tuple - 3) Using newaxis (None) - 4) Boolean mask indexing - */ - if (index == Py_Ellipsis || index == Py_None || \ - (PyTuple_Check(index) && (0 == PyTuple_GET_SIZE(index) || \ - count_new_axes_0d(index) > 0))) - return self->descr->f->setitem(op, self->data, self); - if (PyBool_Check(index) || PyArray_IsScalar(index, Bool) || - (PyArray_Check(index) && (PyArray_DIMS(index)==0) && - PyArray_ISBOOL(index))) { - if (PyObject_IsTrue(index)) { - return self->descr->f->setitem(op, self->data, self); - } - else { /* don't do anything */ - return 0; - } - } - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); - return -1; - } - - /* optimization for integer-tuple */ - if (self->nd > 1 && - (PyTuple_Check(index) && (PyTuple_GET_SIZE(index) == self->nd)) - && (_tuple_of_integers(index, vals, self->nd) >= 0)) { - int i; - char *item; - for (i=0; ind; i++) { - if (vals[i] < 0) vals[i] += self->dimensions[i]; - if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { - PyErr_Format(PyExc_IndexError, - "index (%"INTP_FMT") out of range "\ - "(0<=index<%"INTP_FMT") in dimension %d", - vals[i], self->dimensions[i], i); - return -1; - } - } - item = PyArray_GetPtr(self, vals); - /* fprintf(stderr, "Here I am...\n");*/ - return self->descr->f->setitem(op, item, self); - } - PyErr_Clear(); - - fancy = fancy_indexing_check(index); - - if (fancy != SOBJ_NOTFANCY) { - oned = ((self->nd == 1) && - !(PyTuple_Check(index) && PyTuple_GET_SIZE(index) > 1)); - - mit = (PyArrayMapIterObject *) \ - PyArray_MapIterNew(index, oned, fancy); - if (mit == NULL) return -1; - if (oned) { - PyArrayIterObject *it; - int rval; - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(mit); return -1;} - rval = iter_ass_subscript(it, mit->indexobj, op); - Py_DECREF(it); - Py_DECREF(mit); - return rval; - } - PyArray_MapIterBind(mit, self); - ret = PyArray_SetMap(mit, op); - 
Py_DECREF(mit); - return ret; - } - - return array_ass_sub_simple(self, index, op); -} - - -/* There are places that require that array_subscript return a PyArrayObject - and not possibly a scalar. Thus, this is the function exposed to - Python so that 0-dim arrays are passed as scalars -*/ - - -static PyObject * -array_subscript_nice(PyArrayObject *self, PyObject *op) -{ - - PyArrayObject *mp; - intp vals[MAX_DIMS]; - - if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) || \ - PyLong_Check(op) || (PyIndex_Check(op) && - !PySequence_Check(op))) { - intp value; - value = PyArray_PyIntAsIntp(op); - if (PyErr_Occurred()) - PyErr_Clear(); - else { - return array_item_nice(self, (Py_ssize_t) value); - } - } - /* optimization for a tuple of integers */ - if (self->nd > 1 && PyTuple_Check(op) && - (PyTuple_GET_SIZE(op) == self->nd) - && (_tuple_of_integers(op, vals, self->nd) >= 0)) { - int i; - char *item; - for (i=0; ind; i++) { - if (vals[i] < 0) vals[i] += self->dimensions[i]; - if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { - PyErr_Format(PyExc_IndexError, - "index (%"INTP_FMT") out of range "\ - "(0<=index<=%"INTP_FMT") in dimension %d", - vals[i], self->dimensions[i], i); - return NULL; - } - } - item = PyArray_GetPtr(self, vals); - return PyArray_Scalar(item, self->descr, (PyObject *)self); - } - PyErr_Clear(); - - mp = (PyArrayObject *)array_subscript(self, op); - - /* The following is just a copy of PyArray_Return with an - additional logic in the nd == 0 case. - */ - - if (mp == NULL) return NULL; - - if (PyErr_Occurred()) { - Py_XDECREF(mp); - return NULL; - } - - if (mp->nd == 0) { - Bool noellipses = TRUE; - if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) - noellipses = FALSE; - else if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) || - (PyArray_Check(op) && (PyArray_DIMS(op)==0))) - noellipses = FALSE; - else if (PySequence_Check(op)) { - int n, i; - PyObject *temp; - n = PySequence_Size(op); - i=0; - while (idata, mp); - Py_DECREF(mp); - return ret; - } - } - return (PyObject *)mp; -} - - -static PyMappingMethods array_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)array_length, /*mp_length*/ -#else - (inquiry)array_length, /*mp_length*/ -#endif - (binaryfunc)array_subscript_nice, /*mp_subscript*/ - (objobjargproc)array_ass_sub, /*mp_ass_subscript*/ -}; - -/****************** End of Mapping Protocol ******************************/ - - -/************************************************************************* - **************** Implement Buffer Protocol **************************** - *************************************************************************/ - -/* removed multiple segment interface */ - -static Py_ssize_t -array_getsegcount(PyArrayObject *self, Py_ssize_t *lenp) -{ - if (lenp) - *lenp = PyArray_NBYTES(self); - - if (PyArray_ISONESEGMENT(self)) { - return 1; - } - - if (lenp) - *lenp = 0; - return 0; -} - -static Py_ssize_t -array_getreadbuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr) -{ - if (segment != 0) { - PyErr_SetString(PyExc_ValueError, - "accessing non-existing array segment"); - return -1; - } - - if (PyArray_ISONESEGMENT(self)) { - *ptrptr = self->data; - return PyArray_NBYTES(self); - } - PyErr_SetString(PyExc_ValueError, "array is not a single segment"); - *ptrptr = NULL; - return -1; -} - - -static Py_ssize_t -array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr) -{ - if (PyArray_CHKFLAGS(self, WRITEABLE)) - return array_getreadbuf(self, segment, (void **) ptrptr); - else 
{ - PyErr_SetString(PyExc_ValueError, "array cannot be "\ - "accessed as a writeable buffer"); - return -1; - } -} - -static Py_ssize_t -array_getcharbuf(PyArrayObject *self, Py_ssize_t segment, constchar **ptrptr) -{ - if (self->descr->type_num == PyArray_STRING || \ - self->descr->type_num == PyArray_UNICODE || \ - self->descr->elsize == 1) - return array_getreadbuf(self, segment, (void **) ptrptr); - else { - PyErr_SetString(PyExc_TypeError, - "non-character (or 8-bit) array cannot be "\ - "interpreted as character buffer"); - return -1; - } -} - -static PyBufferProcs array_as_buffer = { -#if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ -#else - (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ - (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ -#endif -}; - -/****************** End of Buffer Protocol *******************************/ - - -/************************************************************************* - **************** Implement Number Protocol **************************** - *************************************************************************/ - - -typedef struct { - PyObject *add, - *subtract, - *multiply, - *divide, - *remainder, - *power, - *square, - *reciprocal, - *ones_like, - *sqrt, - *negative, - *absolute, - *invert, - *left_shift, - *right_shift, - *bitwise_and, - *bitwise_xor, - *bitwise_or, - *less, - *less_equal, - *equal, - *not_equal, - *greater, - *greater_equal, - *floor_divide, - *true_divide, - *logical_or, - *logical_and, - *floor, - *ceil, - *maximum, - *minimum, - *rint, - *conjugate; -} NumericOps; - -static NumericOps n_ops; /* NB: static objects inlitialized to zero */ - -/* Dictionary can contain any of the numeric operations, by name. 
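[Editorial sketch, not part of the patch: the buffer-protocol entries above report a single segment of PyArray_NBYTES for one-segment arrays and refuse write access unless the WRITEABLE flag is set. The plain-C stand-in below (buf_owner, ONESEGMENT, WRITEABLE are illustrative names, not NumPy definitions) shows only that flag-gated shape of the old single-segment protocol.]

#include <stdio.h>
#include <stddef.h>

enum { ONESEGMENT = 1, WRITEABLE = 2 };

typedef struct { char *data; size_t nbytes; int flags; } buf_owner;

static long get_segcount(const buf_owner *b, size_t *lenp)
{
    if (lenp) *lenp = b->nbytes;
    if (b->flags & ONESEGMENT) return 1;   /* exactly one segment */
    if (lenp) *lenp = 0;
    return 0;
}

static long get_read_buf(buf_owner *b, void **ptr)
{
    if (!(b->flags & ONESEGMENT)) { *ptr = NULL; return -1; }
    *ptr = b->data;
    return (long)b->nbytes;
}

static long get_write_buf(buf_owner *b, void **ptr)
{
    if (!(b->flags & WRITEABLE)) { *ptr = NULL; return -1; }  /* refuse */
    return get_read_buf(b, ptr);
}

int main(void)
{
    char mem[16];
    buf_owner ro = { mem, sizeof(mem), ONESEGMENT };  /* read-only owner */
    void *p;
    size_t len;
    printf("segments=%ld len=%zu\n", get_segcount(&ro, &len), len);
    printf("write on read-only: %ld\n", get_write_buf(&ro, &p));
    return 0;
}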
- Those not present will not be changed -*/ - -#define SET(op) temp=PyDict_GetItemString(dict, #op); \ - if (temp != NULL) { \ - if (!(PyCallable_Check(temp))) return -1; \ - Py_XDECREF(n_ops.op); \ - n_ops.op = temp; \ - } - - -/*OBJECT_API - Set internal structure with number functions that all arrays will use -*/ -int -PyArray_SetNumericOps(PyObject *dict) -{ - PyObject *temp = NULL; - SET(add); - SET(subtract); - SET(multiply); - SET(divide); - SET(remainder); - SET(power); - SET(square); - SET(reciprocal); - SET(ones_like); - SET(sqrt); - SET(negative); - SET(absolute); - SET(invert); - SET(left_shift); - SET(right_shift); - SET(bitwise_and); - SET(bitwise_or); - SET(bitwise_xor); - SET(less); - SET(less_equal); - SET(equal); - SET(not_equal); - SET(greater); - SET(greater_equal); - SET(floor_divide); - SET(true_divide); - SET(logical_or); - SET(logical_and); - SET(floor); - SET(ceil); - SET(maximum); - SET(minimum); - SET(rint); - SET(conjugate); - return 0; -} - -#define GET(op) if (n_ops.op && \ - (PyDict_SetItemString(dict, #op, n_ops.op)==-1)) \ - goto fail; - -/*OBJECT_API - Get dictionary showing number functions that all arrays will use -*/ -static PyObject * -PyArray_GetNumericOps(void) -{ - PyObject *dict; - if ((dict = PyDict_New())==NULL) - return NULL; - GET(add); - GET(subtract); - GET(multiply); - GET(divide); - GET(remainder); - GET(power); - GET(square); - GET(reciprocal); - GET(ones_like); - GET(sqrt); - GET(negative); - GET(absolute); - GET(invert); - GET(left_shift); - GET(right_shift); - GET(bitwise_and); - GET(bitwise_or); - GET(bitwise_xor); - GET(less); - GET(less_equal); - GET(equal); - GET(not_equal); - GET(greater); - GET(greater_equal); - GET(floor_divide); - GET(true_divide); - GET(logical_or); - GET(logical_and); - GET(floor); - GET(ceil); - GET(maximum); - GET(minimum); - GET(rint); - GET(conjugate); - return dict; - - fail: - Py_DECREF(dict); - return NULL; -} - -static PyObject * -_get_keywords(int rtype, PyArrayObject *out) -{ - PyObject *kwds=NULL; - if (rtype != PyArray_NOTYPE || out != NULL) { - kwds = PyDict_New(); - if (rtype != PyArray_NOTYPE) { - PyArray_Descr *descr; - descr = PyArray_DescrFromType(rtype); - if (descr) { - PyDict_SetItemString(kwds, "dtype", - (PyObject *)descr); - Py_DECREF(descr); - } - } - if (out != NULL) { - PyDict_SetItemString(kwds, "out", - (PyObject *)out); - } - } - return kwds; -} - -static PyObject * -PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, - int rtype, PyArrayObject *out) -{ - PyObject *args, *ret=NULL, *meth; - PyObject *kwds; - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - args = Py_BuildValue("(Oi)", m1, axis); - kwds = _get_keywords(rtype, out); - meth = PyObject_GetAttrString(op, "reduce"); - if (meth && PyCallable_Check(meth)) { - ret = PyObject_Call(meth, args, kwds); - } - Py_DECREF(args); - Py_DECREF(meth); - Py_XDECREF(kwds); - return ret; -} - - -static PyObject * -PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, - int rtype, PyArrayObject *out) -{ - PyObject *args, *ret=NULL, *meth; - PyObject *kwds; - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - args = Py_BuildValue("(Oi)", m1, axis); - kwds = _get_keywords(rtype, out); - meth = PyObject_GetAttrString(op, "accumulate"); - if (meth && PyCallable_Check(meth)) { - ret = PyObject_Call(meth, args, kwds); - } - Py_DECREF(args); - Py_DECREF(meth); - Py_XDECREF(kwds); - return ret; -} - - -static PyObject * 
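[Editorial sketch, not part of the patch: the SET(op) macro above relies on the preprocessor stringizing operator so that one token both names the NumericOps member and produces the dictionary key. The standalone C below reproduces that idiom with a flat (name, value) table standing in for the Python dict; the names entry, lookup, set_numeric_ops are illustrative.]

#include <stdio.h>
#include <string.h>

typedef struct { const char *name; int value; } entry;

static int lookup(const entry *tbl, int n, const char *name, int *out)
{
    for (int i = 0; i < n; i++)
        if (strcmp(tbl[i].name, name) == 0) { *out = tbl[i].value; return 1; }
    return 0;
}

struct ops { int add, subtract, multiply; } n_ops;   /* cf. NumericOps */

/* #op turns the member name into the lookup key, exactly as SET() turns
   it into the PyDict_GetItemString key; absent entries are left unchanged. */
#define SET(op) do { int v; if (lookup(tbl, n, #op, &v)) n_ops.op = v; } while (0)

static void set_numeric_ops(const entry *tbl, int n)
{
    SET(add);
    SET(subtract);
    SET(multiply);
}

int main(void)
{
    entry tbl[] = { {"add", 1}, {"multiply", 3} };
    set_numeric_ops(tbl, 2);
    printf("add=%d subtract=%d multiply=%d\n",
           n_ops.add, n_ops.subtract, n_ops.multiply);   /* add=1 subtract=0 multiply=3 */
    return 0;
}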
-PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "OO", m1, m2); -} - -static PyObject * -PyArray_GenericUnaryFunction(PyArrayObject *m1, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "(O)", m1); -} - -static PyObject * -PyArray_GenericInplaceBinaryFunction(PyArrayObject *m1, - PyObject *m2, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "OOO", m1, m2, m1); -} - -static PyObject * -PyArray_GenericInplaceUnaryFunction(PyArrayObject *m1, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "OO", m1, m1); -} - -static PyObject * -array_add(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.add); -} - -static PyObject * -array_subtract(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.subtract); -} - -static PyObject * -array_multiply(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.multiply); -} - -static PyObject * -array_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.divide); -} - -static PyObject * -array_remainder(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.remainder); -} - -static int -array_power_is_scalar(PyObject *o2, double* exp) -{ - PyObject *temp; - const int optimize_fpexps = 1; - - if (PyInt_Check(o2)) { - *exp = (double)PyInt_AsLong(o2); - return 1; - } - if (optimize_fpexps && PyFloat_Check(o2)) { - *exp = PyFloat_AsDouble(o2); - return 1; - } - if ((PyArray_IsZeroDim(o2) && - ((PyArray_ISINTEGER(o2) || - (optimize_fpexps && PyArray_ISFLOAT(o2))))) || - PyArray_IsScalar(o2, Integer) || - (optimize_fpexps && PyArray_IsScalar(o2, Floating))) { - temp = o2->ob_type->tp_as_number->nb_float(o2); - if (temp != NULL) { - *exp = PyFloat_AsDouble(o2); - Py_DECREF(temp); - return 1; - } - } -#if (PY_VERSION_HEX >= 0x02050000) - if (PyIndex_Check(o2)) { - PyObject* value = PyNumber_Index(o2); - Py_ssize_t val; - if (value==NULL) { - if (PyErr_Occurred()) - PyErr_Clear(); - return 0; - } - val = PyInt_AsSsize_t(value); - if (val == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return 0; - } - *exp = (double) val; - return 1; - } -#endif - return 0; -} - -/* optimize float array or complex array to a scalar power */ -static PyObject * -fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace) { - double exp; - if (PyArray_Check(a1) && array_power_is_scalar(o2, &exp)) { - PyObject *fastop = NULL; - if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { - if (exp == 1.0) { - /* we have to do this one special, as the - "copy" method of array objects isn't set - up early enough to be added - by PyArray_SetNumericOps. 
- */ - if (inplace) { - Py_INCREF(a1); - return (PyObject *)a1; - } else { - return PyArray_Copy(a1); - } - } else if (exp == -1.0) { - fastop = n_ops.reciprocal; - } else if (exp == 0.0) { - fastop = n_ops.ones_like; - } else if (exp == 0.5) { - fastop = n_ops.sqrt; - } else if (exp == 2.0) { - fastop = n_ops.square; - } else { - return NULL; - } - if (inplace) { - return PyArray_GenericInplaceUnaryFunction(a1, - fastop); - } else { - return PyArray_GenericUnaryFunction(a1, - fastop); - } - } - else if (exp==2.0) { - fastop = n_ops.multiply; - if (inplace) { - return PyArray_GenericInplaceBinaryFunction \ - (a1, (PyObject *)a1, fastop); - } - else { - return PyArray_GenericBinaryFunction \ - (a1, (PyObject *)a1, fastop); - } - } - } - return NULL; -} - -static PyObject * -array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo) -{ - /* modulo is ignored! */ - PyObject *value; - value = fast_scalar_power(a1, o2, 0); - if (!value) { - value = PyArray_GenericBinaryFunction(a1, o2, n_ops.power); - } - return value; -} - - -static PyObject * -array_negative(PyArrayObject *m1) -{ - return PyArray_GenericUnaryFunction(m1, n_ops.negative); -} - -static PyObject * -array_absolute(PyArrayObject *m1) -{ - return PyArray_GenericUnaryFunction(m1, n_ops.absolute); -} - -static PyObject * -array_invert(PyArrayObject *m1) -{ - return PyArray_GenericUnaryFunction(m1, n_ops.invert); -} - -static PyObject * -array_left_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.left_shift); -} - -static PyObject * -array_right_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.right_shift); -} - -static PyObject * -array_bitwise_and(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_and); -} - -static PyObject * -array_bitwise_or(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_or); -} - -static PyObject * -array_bitwise_xor(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_xor); -} - -static PyObject * -array_inplace_add(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.add); -} - -static PyObject * -array_inplace_subtract(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.subtract); -} - -static PyObject * -array_inplace_multiply(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply); -} - -static PyObject * -array_inplace_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.divide); -} - -static PyObject * -array_inplace_remainder(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.remainder); -} - -static PyObject * -array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo) -{ - /* modulo is ignored! 
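[Editorial sketch, not part of the patch: fast_scalar_power above rewrites a few scalar exponents as cheaper elementwise operations (copy, reciprocal, ones_like, sqrt, square/multiply) and falls back to the general power ufunc otherwise. The plain-C version below maps the same exponent ladder onto doubles; fast_pow is an illustrative name, not NumPy machinery.]

#include <stdio.h>
#include <math.h>

static double fast_pow(double x, double exp, int *handled)
{
    *handled = 1;
    if (exp ==  1.0) return x;            /* copy                */
    if (exp == -1.0) return 1.0 / x;      /* reciprocal          */
    if (exp ==  0.0) return 1.0;          /* ones_like           */
    if (exp ==  0.5) return sqrt(x);      /* sqrt                */
    if (exp ==  2.0) return x * x;        /* square / multiply   */
    *handled = 0;                         /* fall back to the general case */
    return 0.0;
}

int main(void)
{
    double exps[] = {1.0, -1.0, 0.0, 0.5, 2.0, 3.7};
    for (int i = 0; i < 6; i++) {
        int handled;
        double r = fast_pow(9.0, exps[i], &handled);
        printf("9.0 ** %.1f -> %s%.4f\n", exps[i],
               handled ? "fast path " : "general pow ",
               handled ? r : pow(9.0, exps[i]));
    }
    return 0;
}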
*/ - PyObject *value; - value = fast_scalar_power(a1, o2, 1); - if (!value) { - value = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power); - } - return value; -} - -static PyObject * -array_inplace_left_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.left_shift); -} - -static PyObject * -array_inplace_right_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.right_shift); -} - -static PyObject * -array_inplace_bitwise_and(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_and); -} - -static PyObject * -array_inplace_bitwise_or(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_or); -} - -static PyObject * -array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_xor); -} - -static PyObject * -array_floor_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.floor_divide); -} - -static PyObject * -array_true_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.true_divide); -} - -static PyObject * -array_inplace_floor_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, - n_ops.floor_divide); -} - -static PyObject * -array_inplace_true_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, - n_ops.true_divide); -} - -/* Array evaluates as "TRUE" if any of the elements are non-zero*/ -static int -array_any_nonzero(PyArrayObject *mp) -{ - intp index; - PyArrayIterObject *it; - Bool anyTRUE = FALSE; - - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it==NULL) return anyTRUE; - index = it->size; - while(index--) { - if (mp->descr->f->nonzero(it->dataptr, mp)) { - anyTRUE = TRUE; - break; - } - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - return anyTRUE; -} - -static int -_array_nonzero(PyArrayObject *mp) -{ - intp n; - n = PyArray_SIZE(mp); - if (n == 1) { - return mp->descr->f->nonzero(mp->data, mp); - } - else if (n == 0) { - return 0; - } - else { - PyErr_SetString(PyExc_ValueError, - "The truth value of an array " \ - "with more than one element is ambiguous. 
" \ - "Use a.any() or a.all()"); - return -1; - } -} - - - -static PyObject * -array_divmod(PyArrayObject *op1, PyObject *op2) -{ - PyObject *divp, *modp, *result; - - divp = array_floor_divide(op1, op2); - if (divp == NULL) return NULL; - modp = array_remainder(op1, op2); - if (modp == NULL) { - Py_DECREF(divp); - return NULL; - } - result = Py_BuildValue("OO", divp, modp); - Py_DECREF(divp); - Py_DECREF(modp); - return result; -} - - -static PyObject * -array_int(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be"\ - " converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) return NULL; - if (pv->ob_type->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (pv->ob_type->tp_as_number->nb_int == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to int"); - Py_DECREF(pv); - return NULL; - } - - pv2 = pv->ob_type->tp_as_number->nb_int(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_float(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) return NULL; - if (pv->ob_type->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to a "\ - "float; scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (pv->ob_type->tp_as_number->nb_float == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to float"); - Py_DECREF(pv); - return NULL; - } - pv2 = pv->ob_type->tp_as_number->nb_float(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_long(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv->ob_type->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - return NULL; - } - if (pv->ob_type->tp_as_number->nb_long == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to long"); - return NULL; - } - pv2 = pv->ob_type->tp_as_number->nb_long(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_oct(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv->ob_type->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - return NULL; - } - if (pv->ob_type->tp_as_number->nb_oct == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to oct"); - return NULL; - } - pv2 = pv->ob_type->tp_as_number->nb_oct(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_hex(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv->ob_type->tp_as_number == 0) { - 
PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - return NULL; - } - if (pv->ob_type->tp_as_number->nb_hex == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to hex"); - return NULL; - } - pv2 = pv->ob_type->tp_as_number->nb_hex(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -_array_copy_nice(PyArrayObject *self) -{ - return PyArray_Return((PyArrayObject *) \ - PyArray_Copy(self)); -} - -#if PY_VERSION_HEX >= 0x02050000 -static PyObject * -array_index(PyArrayObject *v) -{ - if (!PyArray_ISINTEGER(v) || PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only integer arrays with " \ - "one element can be converted to an index"); - return NULL; - } - return v->descr->f->getitem(v->data, v); -} -#endif - - -static PyNumberMethods array_as_number = { - (binaryfunc)array_add, /*nb_add*/ - (binaryfunc)array_subtract, /*nb_subtract*/ - (binaryfunc)array_multiply, /*nb_multiply*/ - (binaryfunc)array_divide, /*nb_divide*/ - (binaryfunc)array_remainder, /*nb_remainder*/ - (binaryfunc)array_divmod, /*nb_divmod*/ - (ternaryfunc)array_power, /*nb_power*/ - (unaryfunc)array_negative, /*nb_neg*/ - (unaryfunc)_array_copy_nice, /*nb_pos*/ - (unaryfunc)array_absolute, /*(unaryfunc)array_abs,*/ - (inquiry)_array_nonzero, /*nb_nonzero*/ - (unaryfunc)array_invert, /*nb_invert*/ - (binaryfunc)array_left_shift, /*nb_lshift*/ - (binaryfunc)array_right_shift, /*nb_rshift*/ - (binaryfunc)array_bitwise_and, /*nb_and*/ - (binaryfunc)array_bitwise_xor, /*nb_xor*/ - (binaryfunc)array_bitwise_or, /*nb_or*/ - 0, /*nb_coerce*/ - (unaryfunc)array_int, /*nb_int*/ - (unaryfunc)array_long, /*nb_long*/ - (unaryfunc)array_float, /*nb_float*/ - (unaryfunc)array_oct, /*nb_oct*/ - (unaryfunc)array_hex, /*nb_hex*/ - - /*This code adds augmented assignment functionality*/ - /*that was made available in Python 2.0*/ - (binaryfunc)array_inplace_add, /*inplace_add*/ - (binaryfunc)array_inplace_subtract, /*inplace_subtract*/ - (binaryfunc)array_inplace_multiply, /*inplace_multiply*/ - (binaryfunc)array_inplace_divide, /*inplace_divide*/ - (binaryfunc)array_inplace_remainder, /*inplace_remainder*/ - (ternaryfunc)array_inplace_power, /*inplace_power*/ - (binaryfunc)array_inplace_left_shift, /*inplace_lshift*/ - (binaryfunc)array_inplace_right_shift, /*inplace_rshift*/ - (binaryfunc)array_inplace_bitwise_and, /*inplace_and*/ - (binaryfunc)array_inplace_bitwise_xor, /*inplace_xor*/ - (binaryfunc)array_inplace_bitwise_or, /*inplace_or*/ - - (binaryfunc)array_floor_divide, /*nb_floor_divide*/ - (binaryfunc)array_true_divide, /*nb_true_divide*/ - (binaryfunc)array_inplace_floor_divide, /*nb_inplace_floor_divide*/ - (binaryfunc)array_inplace_true_divide, /*nb_inplace_true_divide*/ - -#if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)array_index, /* nb_index */ -#endif - -}; - -/****************** End of Buffer Protocol *******************************/ - - -/************************************************************************* - **************** Implement Sequence Protocol ************************** - *************************************************************************/ - -/* Some of this is repeated in the array_as_mapping protocol. 
But - we fill it in here so that PySequence_XXXX calls work as expected -*/ - - -static PyObject * -array_slice(PyArrayObject *self, Py_ssize_t ilow, - Py_ssize_t ihigh) -{ - PyArrayObject *r; - Py_ssize_t l; - char *data; - - if (self->nd == 0) { - PyErr_SetString(PyExc_ValueError, "cannot slice a 0-d array"); - return NULL; - } - - l=self->dimensions[0]; - if (ilow < 0) ilow = 0; - else if (ilow > l) ilow = l; - if (ihigh < ilow) ihigh = ilow; - else if (ihigh > l) ihigh = l; - - if (ihigh != ilow) { - data = index2ptr(self, ilow); - if (data == NULL) return NULL; - } else { - data = self->data; - } - - self->dimensions[0] = ihigh-ilow; - Py_INCREF(self->descr); - r = (PyArrayObject *) \ - PyArray_NewFromDescr(self->ob_type, self->descr, - self->nd, self->dimensions, - self->strides, data, - self->flags, (PyObject *)self); - self->dimensions[0] = l; - if (r == NULL) return NULL; - r->base = (PyObject *)self; - Py_INCREF(self); - PyArray_UpdateFlags(r, UPDATE_ALL); - return (PyObject *)r; -} - - -static int -array_ass_slice(PyArrayObject *self, Py_ssize_t ilow, - Py_ssize_t ihigh, PyObject *v) { - int ret; - PyArrayObject *tmp; - - if (v == NULL) { - PyErr_SetString(PyExc_ValueError, - "cannot delete array elements"); - return -1; - } - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "array is not writeable"); - return -1; - } - if ((tmp = (PyArrayObject *)array_slice(self, ilow, ihigh)) \ - == NULL) - return -1; - ret = PyArray_CopyObject(tmp, v); - Py_DECREF(tmp); - - return ret; -} - -static int -array_contains(PyArrayObject *self, PyObject *el) -{ - /* equivalent to (self == el).any() */ - - PyObject *res; - int ret; - - res = PyArray_EnsureAnyArray(PyObject_RichCompare((PyObject *)self, - el, Py_EQ)); - if (res == NULL) return -1; - ret = array_any_nonzero((PyArrayObject *)res); - Py_DECREF(res); - return ret; -} - -static PySequenceMethods array_as_sequence = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)array_length, /*sq_length*/ - (binaryfunc)NULL, /* sq_concat is handled by nb_add*/ - (ssizeargfunc)NULL, - (ssizeargfunc)array_item_nice, - (ssizessizeargfunc)array_slice, - (ssizeobjargproc)array_ass_item, /*sq_ass_item*/ - (ssizessizeobjargproc)array_ass_slice, /*sq_ass_slice*/ - (objobjproc) array_contains, /* sq_contains */ - (binaryfunc) NULL, /* sg_inplace_concat */ - (ssizeargfunc)NULL, -#else - (inquiry)array_length, /*sq_length*/ - (binaryfunc)NULL, /* sq_concat is handled by nb_add*/ - (intargfunc)NULL, /* sq_repeat is handled nb_multiply*/ - (intargfunc)array_item_nice, /*sq_item*/ - (intintargfunc)array_slice, /*sq_slice*/ - (intobjargproc)array_ass_item, /*sq_ass_item*/ - (intintobjargproc)array_ass_slice, /*sq_ass_slice*/ - (objobjproc) array_contains, /* sq_contains */ - (binaryfunc) NULL, /* sg_inplace_concat */ - (intargfunc) NULL /* sg_inplace_repeat */ -#endif -}; - - -/****************** End of Sequence Protocol ****************************/ - - -static int -dump_data(char **string, int *n, int *max_n, char *data, int nd, - intp *dimensions, intp *strides, PyArrayObject* self) -{ - PyArray_Descr *descr=self->descr; - PyObject *op, *sp; - char *ostring; - int i, N; - -#define CHECK_MEMORY if (*n >= *max_n-16) { *max_n *= 2; \ - *string = (char *)_pya_realloc(*string, *max_n); } - - if (nd == 0) { - - if ((op = descr->f->getitem(data, self)) == NULL) return -1; - sp = PyObject_Repr(op); - if (sp == NULL) {Py_DECREF(op); return -1;} - ostring = PyString_AsString(sp); - N = PyString_Size(sp)*sizeof(char); - *n += N; - CHECK_MEMORY - 
memmove(*string+(*n-N), ostring, N); - Py_DECREF(sp); - Py_DECREF(op); - return 0; - } else { - CHECK_MEMORY - (*string)[*n] = '['; - *n += 1; - for(i=0; idata, - self->nd, self->dimensions, - self->strides, self) < 0) { - _pya_free(string); return NULL; - } - - if (repr) { - if (PyArray_ISEXTENDED(self)) { - char buf[100]; - snprintf(buf, sizeof(buf), "%d", self->descr->elsize); - sprintf(string+n, ", '%c%s')", self->descr->type, buf); - ret = PyString_FromStringAndSize(string, n+6+strlen(buf)); - } - else { - sprintf(string+n, ", '%c')", self->descr->type); - ret = PyString_FromStringAndSize(string, n+6); - } - } - else { - ret = PyString_FromStringAndSize(string, n); - } - - _pya_free(string); - return ret; -} - -static PyObject *PyArray_StrFunction=NULL; -static PyObject *PyArray_ReprFunction=NULL; - -/*OBJECT_API - Set the array print function to be a Python function. -*/ -static void -PyArray_SetStringFunction(PyObject *op, int repr) -{ - if (repr) { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_ReprFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_ReprFunction = op; - } else { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_StrFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_StrFunction = op; - } -} - -static PyObject * -array_repr(PyArrayObject *self) -{ - PyObject *s, *arglist; - - if (PyArray_ReprFunction == NULL) { - s = array_repr_builtin(self, 1); - } else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_ReprFunction, arglist); - Py_DECREF(arglist); - } - return s; -} - -static PyObject * -array_str(PyArrayObject *self) -{ - PyObject *s, *arglist; - - if (PyArray_StrFunction == NULL) { - s = array_repr_builtin(self, 0); - } else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_StrFunction, arglist); - Py_DECREF(arglist); - } - return s; -} - - - -/*OBJECT_API - */ -static int -PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, register size_t len) -{ - register PyArray_UCS4 c1, c2; - while(len-- > 0) { - c1 = *s1++; - c2 = *s2++; - if (c1 != c2) { - return (c1 < c2) ? -1 : 1; - } - } - return 0; -} - -/* This also handles possibly mis-aligned data */ -/* Compare s1 and s2 which are not necessarily NULL-terminated. - s1 is of length len1 - s2 is of length len2 - If they are NULL terminated, then stop comparison. -*/ -static int -_myunincmp(PyArray_UCS4 *s1, PyArray_UCS4 *s2, int len1, int len2) -{ - PyArray_UCS4 *sptr; - PyArray_UCS4 *s1t=s1, *s2t=s2; - int val; - intp size; - int diff; - - if ((intp)s1 % sizeof(PyArray_UCS4) != 0) { - size = len1*sizeof(PyArray_UCS4); - s1t = malloc(size); - memcpy(s1t, s1, size); - } - if ((intp)s2 % sizeof(PyArray_UCS4) != 0) { - size = len2*sizeof(PyArray_UCS4); - s2t = malloc(size); - memcpy(s2t, s2, size); - } - val = PyArray_CompareUCS4(s1t, s2t, MIN(len1,len2)); - if ((val != 0) || (len1 == len2)) goto finish; - if (len2 > len1) {sptr = s2t+len1; val = -1; diff=len2-len1;} - else {sptr = s1t+len2; val = 1; diff=len1-len2;} - while (diff--) { - if (*sptr != 0) goto finish; - sptr++; - } - val = 0; - - finish: - if (s1t != s1) free(s1t); - if (s2t != s2) free(s2t); - return val; -} - - - - -/* Compare s1 and s2 which are not necessarily NULL-terminated. - s1 is of length len1 - s2 is of length len2 - If they are NULL terminated, then stop comparison. 
-*/ -static int -_mystrncmp(char *s1, char *s2, int len1, int len2) -{ - char *sptr; - int val; - int diff; - - val = memcmp(s1, s2, MIN(len1, len2)); - if ((val != 0) || (len1 == len2)) return val; - if (len2 > len1) {sptr = s2+len1; val = -1; diff=len2-len1;} - else {sptr = s1+len2; val = 1; diff=len1-len2;} - while (diff--) { - if (*sptr != 0) return val; - sptr++; - } - return 0; /* Only happens if NULLs are everywhere */ -} - -/* Borrowed from Numarray */ - -#define SMALL_STRING 2048 - -#if defined(isspace) -#undef isspace -#define isspace(c) ((c==' ')||(c=='\t')||(c=='\n')||(c=='\r')||(c=='\v')||(c=='\f')) -#endif - -static void _rstripw(char *s, int n) -{ - int i; - for(i=n-1; i>=1; i--) /* Never strip to length 0. */ - { - int c = s[i]; - if (!c || isspace(c)) - s[i] = 0; - else - break; - } -} - -static void _unistripw(PyArray_UCS4 *s, int n) -{ - int i; - for(i=n-1; i>=1; i--) /* Never strip to length 0. */ - { - PyArray_UCS4 c = s[i]; - if (!c || isspace(c)) - s[i] = 0; - else - break; - } -} - - -static char * -_char_copy_n_strip(char *original, char *temp, int nc) -{ - if (nc > SMALL_STRING) { - temp = malloc(nc); - if (!temp) { - PyErr_NoMemory(); - return NULL; - } - } - memcpy(temp, original, nc); - _rstripw(temp, nc); - return temp; -} - -static void -_char_release(char *ptr, int nc) -{ - if (nc > SMALL_STRING) { - free(ptr); - } -} - -static char * -_uni_copy_n_strip(char *original, char *temp, int nc) -{ - if (nc*sizeof(PyArray_UCS4) > SMALL_STRING) { - temp = malloc(nc*sizeof(PyArray_UCS4)); - if (!temp) { - PyErr_NoMemory(); - return NULL; - } - } - memcpy(temp, original, nc*sizeof(PyArray_UCS4)); - _unistripw((PyArray_UCS4 *)temp, nc); - return temp; -} - -static void -_uni_release(char *ptr, int nc) -{ - if (nc*sizeof(PyArray_UCS4) > SMALL_STRING) { - free(ptr); - } -} - - -/* End borrowed from numarray */ - -#define _rstrip_loop(CMP) { \ - void *aptr, *bptr; \ - char atemp[SMALL_STRING], btemp[SMALL_STRING]; \ - while(size--) { \ - aptr = stripfunc(iself->dataptr, atemp, N1); \ - if (!aptr) return -1; \ - bptr = stripfunc(iother->dataptr, btemp, N2); \ - if (!bptr) { \ - relfunc(aptr, N1); \ - return -1; \ - } \ - val = cmpfunc(aptr, bptr, N1, N2); \ - *dptr = (val CMP 0); \ - PyArray_ITER_NEXT(iself); \ - PyArray_ITER_NEXT(iother); \ - dptr += 1; \ - relfunc(aptr, N1); \ - relfunc(bptr, N2); \ - } \ - } - -#define _reg_loop(CMP) { \ - while(size--) { \ - val = cmpfunc((void *)iself->dataptr, \ - (void *)iother->dataptr, \ - N1, N2); \ - *dptr = (val CMP 0); \ - PyArray_ITER_NEXT(iself); \ - PyArray_ITER_NEXT(iother); \ - dptr += 1; \ - } \ - } - -#define _loop(CMP) if (rstrip) _rstrip_loop(CMP) \ - else _reg_loop(CMP) - -static int -_compare_strings(PyObject *result, PyArrayMultiIterObject *multi, - int cmp_op, void *func, int rstrip) -{ - PyArrayIterObject *iself, *iother; - Bool *dptr; - intp size; - int val; - int N1, N2; - int (*cmpfunc)(void *, void *, int, int); - void (*relfunc)(char *, int); - char* (*stripfunc)(char *, char *, int); - - cmpfunc = func; - dptr = (Bool *)PyArray_DATA(result); - iself = multi->iters[0]; - iother = multi->iters[1]; - size = multi->size; - N1 = iself->ao->descr->elsize; - N2 = iother->ao->descr->elsize; - if ((void *)cmpfunc == (void *)_myunincmp) { - N1 >>= 2; - N2 >>= 2; - stripfunc = _uni_copy_n_strip; - relfunc = _uni_release; - } - else { - stripfunc = _char_copy_n_strip; - relfunc = _char_release; - } - switch (cmp_op) { - case Py_EQ: - _loop(==) - break; - case Py_NE: - _loop(!=) - break; - case Py_LT: - _loop(<) - 
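[Editorial sketch, not part of the patch: _mystrncmp above compares fixed-width string fields that are not necessarily NUL-terminated, treating a shorter field as equal to a longer one when the longer field's tail is all NUL padding. The standalone C below restates that rule; fixed_width_cmp is an illustrative name.]

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int fixed_width_cmp(const char *s1, const char *s2, int len1, int len2)
{
    int val = memcmp(s1, s2, MIN(len1, len2));
    if (val != 0 || len1 == len2)
        return val;
    /* Same prefix: the longer field decides, unless its tail is all NULs. */
    const char *tail = (len2 > len1) ? s2 + len1 : s1 + len2;
    int diff = (len2 > len1) ? len2 - len1 : len1 - len2;
    val = (len2 > len1) ? -1 : 1;
    while (diff--) {
        if (*tail != '\0') return val;
        tail++;
    }
    return 0;   /* only padding after the common prefix */
}

int main(void)
{
    char a[4] = {'a', 'b', 0, 0};   /* "ab" in a 4-byte field  */
    char b[2] = {'a', 'b'};         /* "ab" in a 2-byte field  */
    char c[4] = {'a', 'b', 'c', 0};
    printf("a vs b: %d\n", fixed_width_cmp(a, b, 4, 2));  /* 0: equal   */
    printf("c vs b: %d\n", fixed_width_cmp(c, b, 4, 2));  /* 1: greater */
    return 0;
}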
break; - case Py_LE: - _loop(<=) - break; - case Py_GT: - _loop(>) - break; - case Py_GE: - _loop(>=) - break; - default: - PyErr_SetString(PyExc_RuntimeError, - "bad comparison operator"); - return -1; - } - return 0; -} - -#undef _loop -#undef _reg_loop -#undef _rstrip_loop -#undef SMALL_STRING - -static PyObject * -_strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, - int rstrip) -{ - PyObject *result; - PyArrayMultiIterObject *mit; - int val; - - /* Cast arrays to a common type */ - if (self->descr->type_num != other->descr->type_num) { - PyObject *new; - if (self->descr->type_num == PyArray_STRING && \ - other->descr->type_num == PyArray_UNICODE) { - Py_INCREF(other); - Py_INCREF(other->descr); - new = PyArray_FromAny((PyObject *)self, other->descr, - 0, 0, 0, NULL); - if (new == NULL) return NULL; - self = (PyArrayObject *)new; - } - else if (self->descr->type_num == PyArray_UNICODE && \ - other->descr->type_num == PyArray_STRING) { - Py_INCREF(self); - Py_INCREF(self->descr); - new = PyArray_FromAny((PyObject *)other, self->descr, - 0, 0, 0, NULL); - if (new == NULL) return NULL; - other = (PyArrayObject *)new; - } - else { - PyErr_SetString(PyExc_TypeError, - "invalid string data-types " - "in comparison"); - return NULL; - } - } - else { - Py_INCREF(self); - Py_INCREF(other); - } - - /* Broad-cast the arrays to a common shape */ - mit = (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, self, other); - Py_DECREF(self); - Py_DECREF(other); - if (mit == NULL) return NULL; - - result = PyArray_NewFromDescr(&PyArray_Type, - PyArray_DescrFromType(PyArray_BOOL), - mit->nd, - mit->dimensions, - NULL, NULL, 0, - NULL); - if (result == NULL) goto finish; - - if (self->descr->type_num == PyArray_UNICODE) { - val = _compare_strings(result, mit, cmp_op, _myunincmp, - rstrip); - } - else { - val = _compare_strings(result, mit, cmp_op, _mystrncmp, - rstrip); - } - - if (val < 0) {Py_DECREF(result); result = NULL;} - - finish: - Py_DECREF(mit); - return result; -} - -/* VOID-type arrays can only be compared equal and not-equal - in which case the fields are all compared by extracting the fields - and testing one at a time... - equality testing is performed using logical_ands on all the fields. - in-equality testing is performed using logical_ors on all the fields. - - VOID-type arrays without fields are compared for equality by comparing their - memory at each location directly (using string-code). -*/ - -static PyObject *array_richcompare(PyArrayObject *, PyObject *, int); - - -static PyObject * -_void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op) -{ - if (!(cmp_op == Py_EQ || cmp_op == Py_NE)) { - PyErr_SetString(PyExc_ValueError, "Void-arrays can only" \ - "be compared for equality."); - return NULL; - } - if (PyArray_HASFIELDS(self)) { - PyObject *res=NULL, *temp, *a, *b; - PyObject *key, *value, *temp2; - PyObject *op; - Py_ssize_t pos=0; - op = (cmp_op == Py_EQ ? 
n_ops.logical_and : n_ops.logical_or); - while (PyDict_Next(self->descr->fields, &pos, &key, &value)) { - a = PyArray_EnsureAnyArray(array_subscript(self, key)); - if (a==NULL) {Py_XDECREF(res); return NULL;} - b = array_subscript(other, key); - if (b==NULL) {Py_XDECREF(res); Py_DECREF(a); return NULL;} - temp = array_richcompare((PyArrayObject *)a,b,cmp_op); - Py_DECREF(a); - Py_DECREF(b); - if (temp == NULL) {Py_XDECREF(res); return NULL;} - if (res == NULL) { - res = temp; - } - else { - temp2 = PyObject_CallFunction(op, "OO", res, temp); - Py_DECREF(temp); - Py_DECREF(res); - if (temp2 == NULL) return NULL; - res = temp2; - } - } - if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "No fields found."); - } - return res; - } - else { /* compare as a string */ - /* assumes self and other have same descr->type */ - return _strings_richcompare(self, other, cmp_op, 0); - } -} - -static PyObject * -array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) -{ - PyObject *array_other, *result = NULL; - int typenum; - - switch (cmp_op) - { - case Py_LT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less); - break; - case Py_LE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less_equal); - break; - case Py_EQ: - if (other == Py_None) { - Py_INCREF(Py_False); - return Py_False; - } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* If not successful, then return False - This fixes code that used to - allow equality comparisons between arrays - and other objects which would give a result - of False - */ - if ((array_other == NULL) || \ - (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_False); - return Py_False; - } - } - else { - Py_INCREF(other); - array_other = other; - } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; - _res = PyObject_RichCompareBool \ - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare\ - (self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; - } - /* If the comparison results in NULL, then the - two array objects can not be compared together so - return zero - */ - Py_DECREF(array_other); - if (result == NULL) { - PyErr_Clear(); - Py_INCREF(Py_False); - return Py_False; - } - break; - case Py_NE: - if (other == Py_None) { - Py_INCREF(Py_True); - return Py_True; - } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* If not successful, then objects cannot be - compared and cannot be equal, therefore, - return True; - */ - if ((array_other == NULL) || \ - (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_True); - return Py_True; - } - } - else { - Py_INCREF(other); - array_other = other; - } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.not_equal); - if ((result == Py_NotImplemented) && - 
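[Editorial sketch, not part of the patch: _void_compare above only supports == and !=, combining the per-field comparisons with logical_and for equality and logical_or for inequality. The plain-C stand-in below uses a fixed two-field struct (record, record_compare are illustrative names) in place of the dtype's field dictionary.]

#include <stdio.h>

typedef struct { int id; double x; } record;

/* op_eq: 1 for ==, 0 for != ; other operators are rejected in the original. */
static int record_compare(const record *a, const record *b, int op_eq)
{
    int field_eq[2] = { a->id == b->id, a->x == b->x };
    if (op_eq)
        return field_eq[0] && field_eq[1];    /* logical_and over fields      */
    else
        return !field_eq[0] || !field_eq[1];  /* logical_or of per-field !=   */
}

int main(void)
{
    record a = {1, 2.5}, b = {1, 3.0};
    printf("a == b: %d\n", record_compare(&a, &b, 1));   /* 0 */
    printf("a != b: %d\n", record_compare(&a, &b, 0));   /* 1 */
    return 0;
}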
(self->descr->type_num == PyArray_VOID)) { - int _res; - _res = PyObject_RichCompareBool\ - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare\ - (self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; - } - - Py_DECREF(array_other); - if (result == NULL) { - PyErr_Clear(); - Py_INCREF(Py_True); - return Py_True; - } - break; - case Py_GT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); - break; - case Py_GE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); - break; - default: - result = Py_NotImplemented; - Py_INCREF(result); - } - if (result == Py_NotImplemented) { - /* Try to handle string comparisons */ - if (self->descr->type_num == PyArray_OBJECT) return result; - array_other = PyArray_FromObject(other,PyArray_NOTYPE, 0, 0); - if (PyArray_ISSTRING(self) && PyArray_ISSTRING(array_other)) { - Py_DECREF(result); - result = _strings_richcompare(self, (PyArrayObject *) - array_other, cmp_op, 0); - } - Py_DECREF(array_other); - } - return result; -} - -static PyObject * -_check_axis(PyArrayObject *arr, int *axis, int flags) -{ - PyObject *temp1, *temp2; - int n = arr->nd; - - if ((*axis >= MAX_DIMS) || (n==0)) { - if (n != 1) { - temp1 = PyArray_Ravel(arr,0); - if (temp1 == NULL) {*axis=0; return NULL;} - *axis = PyArray_NDIM(temp1)-1; - } - else { - temp1 = (PyObject *)arr; - Py_INCREF(temp1); - *axis = 0; - } - if (!flags) return temp1; - } - else { - temp1 = (PyObject *)arr; - Py_INCREF(temp1); - } - if (flags) { - temp2 = PyArray_CheckFromAny((PyObject *)temp1, NULL, - 0, 0, flags, NULL); - Py_DECREF(temp1); - if (temp2 == NULL) return NULL; - } - else { - temp2 = (PyObject *)temp1; - } - n = PyArray_NDIM(temp2); - if (*axis < 0) *axis += n; - if ((*axis < 0) || (*axis >= n)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", *axis); - Py_DECREF(temp2); - return NULL; - } - return temp2; -} - -#include "arraymethods.c" - -/* Lifted from numarray */ -/*MULTIARRAY_API - PyArray_IntTupleFromIntp -*/ -static PyObject * -PyArray_IntTupleFromIntp(int len, intp *vals) -{ - int i; - PyObject *intTuple = PyTuple_New(len); - if (!intTuple) goto fail; - for(i=0; i= SIZEOF_INTP - if (!(op = PyNumber_Int(seq))) return -1; -#else - if (!(op = PyNumber_Long(seq))) return -1; -#endif - nd = 1; -#if SIZEOF_LONG >= SIZEOF_INTP - vals[0] = (intp ) PyInt_AsLong(op); -#else - vals[0] = (intp ) PyLong_AsLongLong(op); -#endif - Py_DECREF(op); - } else { - for(i=0; i < MIN(nd,maxvals); i++) { - op = PySequence_GetItem(seq, i); - if (op == NULL) return -1; -#if SIZEOF_LONG >= SIZEOF_INTP - vals[i]=(intp )PyInt_AsLong(op); -#else - vals[i]=(intp )PyLong_AsLongLong(op); -#endif - Py_DECREF(op); - if(PyErr_Occurred()) return -1; - } - } - return nd; -} - - - -/* Check whether the given array is stored contiguously (row-wise) in - memory. 
*/ - -/* 0-strided arrays are not contiguous (even if dimension == 1) */ -static int -_IsContiguous(PyArrayObject *ap) -{ - register intp sd; - register intp dim; - register int i; - - if (ap->nd == 0) return 1; - sd = ap->descr->elsize; - if (ap->nd == 1) return (ap->dimensions[0] == 1 || \ - sd == ap->strides[0]); - for (i = ap->nd-1; i >= 0; --i) { - dim = ap->dimensions[i]; - /* contiguous by definition */ - if (dim == 0) return 1; - if (ap->strides[i] != sd) return 0; - sd *= dim; - } - return 1; -} - - -/* 0-strided arrays are not contiguous (even if dimension == 1) */ -static int -_IsFortranContiguous(PyArrayObject *ap) -{ - register intp sd; - register intp dim; - register int i; - - if (ap->nd == 0) return 1; - sd = ap->descr->elsize; - if (ap->nd == 1) return (ap->dimensions[0] == 1 || \ - sd == ap->strides[0]); - for (i=0; i< ap->nd; ++i) { - dim = ap->dimensions[i]; - /* fortran contiguous by definition */ - if (dim == 0) return 1; - if (ap->strides[i] != sd) return 0; - sd *= dim; - } - return 1; -} - -static int -_IsAligned(PyArrayObject *ap) -{ - int i, alignment, aligned=1; - intp ptr; - int type = ap->descr->type_num; - - if ((type == PyArray_STRING) || (type == PyArray_VOID)) - return 1; - - alignment = ap->descr->alignment; - if (alignment == 1) return 1; - - ptr = (intp) ap->data; - aligned = (ptr % alignment) == 0; - for (i=0; i nd; i++) - aligned &= ((ap->strides[i] % alignment) == 0); - return aligned != 0; -} - -static Bool -_IsWriteable(PyArrayObject *ap) -{ - PyObject *base=ap->base; - void *dummy; - Py_ssize_t n; - - /* If we own our own data, then no-problem */ - if ((base == NULL) || (ap->flags & OWNDATA)) return TRUE; - - /* Get to the final base object - If it is a writeable array, then return TRUE - If we can find an array object - or a writeable buffer object as the final base object - or a string object (for pickling support memory savings). - - this last could be removed if a proper pickleable - buffer was added to Python. - */ - - while(PyArray_Check(base)) { - if (PyArray_CHKFLAGS(base, OWNDATA)) - return (Bool) (PyArray_ISWRITEABLE(base)); - base = PyArray_BASE(base); - } - - /* here so pickle support works seamlessly - and unpickled array can be set and reset writeable - -- could be abused -- */ - if PyString_Check(base) return TRUE; - - if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) - return FALSE; - - return TRUE; -} - - -/*OBJECT_API - */ -static int -PyArray_ElementStrides(PyObject *arr) -{ - register int itemsize = PyArray_ITEMSIZE(arr); - register int i, N=PyArray_NDIM(arr); - register intp *strides = PyArray_STRIDES(arr); - - for (i=0; iflags |= FORTRAN; - if (ret->nd > 1) ret->flags &= ~CONTIGUOUS; - } - else ret->flags &= ~FORTRAN; - } - if (flagmask & CONTIGUOUS) { - if (_IsContiguous(ret)) { - ret->flags |= CONTIGUOUS; - if (ret->nd > 1) ret->flags &= ~FORTRAN; - } - else ret->flags &= ~CONTIGUOUS; - } - if (flagmask & ALIGNED) { - if (_IsAligned(ret)) ret->flags |= ALIGNED; - else ret->flags &= ~ALIGNED; - } - /* This is not checked by default WRITEABLE is not - part of UPDATE_ALL */ - if (flagmask & WRITEABLE) { - if (_IsWriteable(ret)) ret->flags |= WRITEABLE; - else ret->flags &= ~WRITEABLE; - } - return; -} - -/* This routine checks to see if newstrides (of length nd) will not - ever be able to walk outside of the memory implied numbytes and offset. - - The available memory is assumed to start at -offset and proceed - to numbytes-offset. 
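[Editorial sketch, not part of the patch: _IsContiguous and _IsFortranContiguous above walk the strides against a running product of the element size and the dimensions, from the last axis for C order and from the first for Fortran order. The standalone C below restates that walk in simplified form (it omits the original's special case for single length-1 axes); is_c_contiguous and is_f_contiguous are illustrative names.]

#include <stdio.h>

typedef long intp_t;

static int is_c_contiguous(int nd, const intp_t *dims, const intp_t *strides,
                           intp_t elsize)
{
    intp_t sd = elsize;                 /* expected stride for current axis */
    for (int i = nd - 1; i >= 0; --i) {
        if (dims[i] == 0) return 1;     /* empty arrays count as contiguous */
        if (strides[i] != sd) return 0;
        sd *= dims[i];
    }
    return 1;
}

static int is_f_contiguous(int nd, const intp_t *dims, const intp_t *strides,
                           intp_t elsize)
{
    intp_t sd = elsize;
    for (int i = 0; i < nd; ++i) {      /* same walk, axis order reversed */
        if (dims[i] == 0) return 1;
        if (strides[i] != sd) return 0;
        sd *= dims[i];
    }
    return 1;
}

int main(void)
{
    intp_t dims[2] = {3, 4};
    intp_t c_strides[2] = {32, 8};      /* row-major, 8-byte elements */
    intp_t f_strides[2] = {8, 24};      /* column-major */
    printf("C order:  C=%d F=%d\n",
           is_c_contiguous(2, dims, c_strides, 8),
           is_f_contiguous(2, dims, c_strides, 8));
    printf("F order:  C=%d F=%d\n",
           is_c_contiguous(2, dims, f_strides, 8),
           is_f_contiguous(2, dims, f_strides, 8));
    return 0;
}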
The strides are checked to ensure - that accessing memory using striding will not try to reach beyond - this memory for any of the axes. - - If numbytes is 0 it will be calculated using the dimensions and - element-size. - - This function checks for walking beyond the beginning and right-end - of the buffer and therefore works for any integer stride (positive - or negative). -*/ - -/*OBJECT_API*/ -static Bool -PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset, - intp *dims, intp *newstrides) -{ - int i; - intp byte_begin; - intp begin; - intp end; - - if (numbytes == 0) - numbytes = PyArray_MultiplyList(dims, nd) * elsize; - - begin = -offset; - end = numbytes - offset - elsize; - for (i=0; i end)) - return FALSE; - } - return TRUE; - -} - - -/* This is the main array creation routine. */ - -/* Flags argument has multiple related meanings - depending on data and strides: - - If data is given, then flags is flags associated with data. - If strides is not given, then a contiguous strides array will be created - and the CONTIGUOUS bit will be set. If the flags argument - has the FORTRAN bit set, then a FORTRAN-style strides array will be - created (and of course the FORTRAN flag bit will be set). - - If data is not given but created here, then flags will be DEFAULT - and a non-zero flags argument can be used to indicate a FORTRAN style - array is desired. -*/ - -static size_t -_array_fill_strides(intp *strides, intp *dims, int nd, size_t itemsize, - int inflag, int *objflags) -{ - int i; - /* Only make Fortran strides if not contiguous as well */ - if ((inflag & FORTRAN) && !(inflag & CONTIGUOUS)) { - for (i=0; i 1) *objflags &= ~CONTIGUOUS; - else *objflags |= CONTIGUOUS; - } - else { - for (i=nd-1;i>=0;i--) { - strides[i] = itemsize; - itemsize *= dims[i] ? dims[i] : 1; - } - *objflags |= CONTIGUOUS; - if (nd > 1) *objflags &= ~FORTRAN; - else *objflags |= FORTRAN; - } - return itemsize; -} - -/*OBJECT_API - Generic new array creation routine. -*/ -static PyObject * -PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num, - intp *strides, void *data, int itemsize, int flags, - PyObject *obj) -{ - PyArray_Descr *descr; - PyObject *new; - - descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; - if (descr->elsize == 0) { - if (itemsize < 1) { - PyErr_SetString(PyExc_ValueError, - "data type must provide an itemsize"); - Py_DECREF(descr); - return NULL; - } - PyArray_DESCR_REPLACE(descr); - descr->elsize = itemsize; - } - new = PyArray_NewFromDescr(subtype, descr, nd, dims, strides, - data, flags, obj); - return new; -} - -/* Change a sub-array field to the base descriptor */ -/* and update the dimensions and strides - appropriately. Dimensions and strides are added - to the end unless we have a FORTRAN array - and then they are added to the beginning - - Strides are only added if given (because data is given). 
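[Editorial sketch, not part of the patch: _array_fill_strides above builds row-major (C) or column-major (Fortran) strides from the dimensions and element size, treating zero-length axes as length 1 so the strides stay well formed. The standalone C below restates that computation; fill_strides is an illustrative name.]

#include <stdio.h>

typedef long intp_t;

static size_t fill_strides(intp_t *strides, const intp_t *dims, int nd,
                           size_t itemsize, int fortran)
{
    if (fortran) {
        for (int i = 0; i < nd; i++) {          /* first axis varies fastest */
            strides[i] = (intp_t)itemsize;
            itemsize *= dims[i] ? (size_t)dims[i] : 1;
        }
    }
    else {
        for (int i = nd - 1; i >= 0; i--) {     /* last axis varies fastest */
            strides[i] = (intp_t)itemsize;
            itemsize *= dims[i] ? (size_t)dims[i] : 1;
        }
    }
    return itemsize;                            /* total bytes needed */
}

int main(void)
{
    intp_t dims[3] = {2, 3, 4}, strides[3];
    size_t nbytes = fill_strides(strides, dims, 3, 8, 0);
    printf("C strides: %ld %ld %ld (total %zu bytes)\n",
           (long)strides[0], (long)strides[1], (long)strides[2], nbytes);
    fill_strides(strides, dims, 3, 8, 1);
    printf("F strides: %ld %ld %ld\n",
           (long)strides[0], (long)strides[1], (long)strides[2]);
    return 0;
}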
-*/ -static int -_update_descr_and_dimensions(PyArray_Descr **des, intp *newdims, - intp *newstrides, int oldnd, int isfortran) -{ - PyArray_Descr *old; - int newnd; - int numnew; - intp *mydim; - int i; - int tuple; - - old = *des; - *des = old->subarray->base; - - - mydim = newdims + oldnd; - tuple = PyTuple_Check(old->subarray->shape); - if (tuple) { - numnew = PyTuple_GET_SIZE(old->subarray->shape); - } - else { - numnew = 1; - } - - - newnd = oldnd + numnew; - if (newnd > MAX_DIMS) goto finish; - if (isfortran) { - memmove(newdims+numnew, newdims, oldnd*sizeof(intp)); - mydim = newdims; - } - - if (tuple) { - for (i=0; isubarray->shape, i)); - } - } - else { - mydim[0] = (intp) PyInt_AsLong(old->subarray->shape); - } - - if (newstrides) { - intp tempsize; - intp *mystrides; - mystrides = newstrides + oldnd; - if (isfortran) { - memmove(newstrides+numnew, newstrides, - oldnd*sizeof(intp)); - mystrides = newstrides; - } - /* Make new strides -- alwasy C-contiguous */ - tempsize = (*des)->elsize; - for (i=numnew-1; i>=0; i--) { - mystrides[i] = tempsize; - tempsize *= mydim[i] ? mydim[i] : 1; - } - } - - finish: - Py_INCREF(*des); - Py_DECREF(old); - return newnd; -} - - -/* steals a reference to descr (even on failure) */ -/*OBJECT_API - Generic new array creation routine. -*/ -static PyObject * -PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd, - intp *dims, intp *strides, void *data, - int flags, PyObject *obj) -{ - PyArrayObject *self; - register int i; - size_t sd; - intp largest; - intp size; - - if (descr->subarray) { - PyObject *ret; - intp newdims[2*MAX_DIMS]; - intp *newstrides=NULL; - int isfortran=0; - isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || \ - (!data && flags); - memcpy(newdims, dims, nd*sizeof(intp)); - if (strides) { - newstrides = newdims + MAX_DIMS; - memcpy(newstrides, strides, nd*sizeof(intp)); - } - nd =_update_descr_and_dimensions(&descr, newdims, - newstrides, nd, isfortran); - ret = PyArray_NewFromDescr(subtype, descr, nd, newdims, - newstrides, - data, flags, obj); - return ret; - } - - if (nd < 0) { - PyErr_SetString(PyExc_ValueError, - "number of dimensions must be >=0"); - Py_DECREF(descr); - return NULL; - } - if (nd > MAX_DIMS) { - PyErr_Format(PyExc_ValueError, - "maximum number of dimensions is %d", MAX_DIMS); - Py_DECREF(descr); - return NULL; - } - - /* Check dimensions */ - size = 1; - sd = (size_t) descr->elsize; - if (sd == 0) { - if (!PyDataType_ISSTRING(descr)) { - PyErr_SetString(PyExc_ValueError, "Empty data-type"); - Py_DECREF(descr); - return NULL; - } - PyArray_DESCR_REPLACE(descr); - if (descr->type_num == NPY_STRING) descr->elsize = 1; - else descr->elsize = sizeof(PyArray_UCS4); - sd = (size_t) descr->elsize; - } - largest = MAX_INTP / sd; - for (i=0;i largest) { - PyErr_SetString(PyExc_ValueError, - "dimensions too large."); - Py_DECREF(descr); - return NULL; - } - } - - self = (PyArrayObject *) subtype->tp_alloc(subtype, 0); - if (self == NULL) { - Py_DECREF(descr); - return NULL; - } - self->nd = nd; - self->dimensions = NULL; - self->data = NULL; - if (data == NULL) { - self->flags = DEFAULT; - if (flags) { - self->flags |= FORTRAN; - if (nd > 1) self->flags &= ~CONTIGUOUS; - flags = FORTRAN; - } - } - else self->flags = (flags & ~UPDATEIFCOPY); - - self->descr = descr; - self->base = (PyObject *)NULL; - self->weakreflist = (PyObject *)NULL; - - if (nd > 0) { - self->dimensions = PyDimMem_NEW(2*nd); - if (self->dimensions == NULL) { - PyErr_NoMemory(); - goto fail; - } - self->strides = 
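[Editorial sketch, not part of the patch: PyArray_NewFromDescr above rejects shapes whose element count times element size would overflow the address range (the largest = MAX_INTP / sd check). The standalone C below shows an overflow-safe variant of that guard, with SIZE_MAX playing the role of MAX_INTP; total_bytes is an illustrative name and the division-before-multiplication form is slightly stronger than the original running-product check.]

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int total_bytes(const size_t *dims, int nd, size_t elsize, size_t *out)
{
    size_t size = 1;
    size_t largest = SIZE_MAX / elsize;   /* ceiling on the element count */
    for (int i = 0; i < nd; i++) {
        if (dims[i] == 0) { *out = 0; return 0; }   /* empty array is fine */
        if (size > largest / dims[i])
            return -1;                    /* "dimensions too large" */
        size *= dims[i];
    }
    *out = size * elsize;
    return 0;
}

int main(void)
{
    size_t ok[2] = {1000, 1000}, huge[2] = {SIZE_MAX / 2, 4}, n = 0;
    printf("ok:   %d (bytes=%zu)\n", total_bytes(ok, 2, 8, &n), n);
    printf("huge: %d\n", total_bytes(huge, 2, 8, &n));
    return 0;
}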
self->dimensions + nd; - memcpy(self->dimensions, dims, sizeof(intp)*nd); - if (strides == NULL) { /* fill it in */ - sd = _array_fill_strides(self->strides, dims, nd, sd, - flags, &(self->flags)); - } - else { /* we allow strides even when we create - the memory, but be careful with this... - */ - memcpy(self->strides, strides, sizeof(intp)*nd); - sd *= size; - } - } - else { self->dimensions = self->strides = NULL; } - - if (data == NULL) { - - /* Allocate something even for zero-space arrays - e.g. shape=(0,) -- otherwise buffer exposure - (a.data) doesn't work as it should. */ - - if (sd==0) sd = descr->elsize; - - if ((data = PyDataMem_NEW(sd))==NULL) { - PyErr_NoMemory(); - goto fail; - } - self->flags |= OWNDATA; - - /* It is bad to have unitialized OBJECT pointers */ - /* which could also be sub-fields of a VOID array */ - if (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { - memset(data, 0, sd); - } - } - else { - self->flags &= ~OWNDATA; /* If data is passed in, - this object won't own it - by default. - Caller must arrange for - this to be reset if truly - desired */ - } - self->data = data; - - /* call the __array_finalize__ - method if a subtype. - If obj is NULL, then call method with Py_None - */ - if ((subtype != &PyArray_Type)) { - PyObject *res, *func, *args; - static PyObject *str=NULL; - - if (str == NULL) { - str = PyString_InternFromString("__array_finalize__"); - } - func = PyObject_GetAttr((PyObject *)self, str); - if (func && func != Py_None) { - if (strides != NULL) { /* did not allocate own data - or funny strides */ - /* update flags before finalize function */ - PyArray_UpdateFlags(self, UPDATE_ALL); - } - if PyCObject_Check(func) { /* A C-function is stored here */ - PyArray_FinalizeFunc *cfunc; - cfunc = PyCObject_AsVoidPtr(func); - Py_DECREF(func); - if (cfunc(self, obj) < 0) goto fail; - } - else { - args = PyTuple_New(1); - if (obj == NULL) obj=Py_None; - Py_INCREF(obj); - PyTuple_SET_ITEM(args, 0, obj); - res = PyObject_Call(func, args, NULL); - Py_DECREF(args); - Py_DECREF(func); - if (res == NULL) goto fail; - else Py_DECREF(res); - } - } - else Py_XDECREF(func); - } - - return (PyObject *)self; - - fail: - Py_DECREF(self); - return NULL; -} - -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) -{ - if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - memset(optr, 0, dtype->elsize); - } - else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; - _putzero(optr + offset, zero, new); - } - } - else { - PyObject **temp; - Py_INCREF(zero); - temp = (PyObject **)optr; - *temp = zero; - } - return; -} - - -/*OBJECT_API - Resize (reallocate data). Only works if nothing else is referencing - this array and it is contiguous. - If refcheck is 0, then the reference count is not checked - and assumed to be 1. - You still must own this data and have no weak-references and no base - object. 
-*/ -static PyObject * -PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER fortran) -{ - intp oldsize, newsize; - int new_nd=newshape->len, k, n, elsize; - int refcnt; - intp* new_dimensions=newshape->ptr; - intp new_strides[MAX_DIMS]; - size_t sd; - intp *dimptr; - char *new_data; - intp largest; - - if (!PyArray_ISONESEGMENT(self)) { - PyErr_SetString(PyExc_ValueError, - "resize only works on single-segment arrays"); - return NULL; - } - - if (fortran == PyArray_ANYORDER) - fortran = PyArray_CORDER; - - if (self->descr->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "Bad data-type size."); - return NULL; - } - newsize = 1; - largest = MAX_INTP / self->descr->elsize; - for (k=0; k largest) { - return PyErr_NoMemory(); - } - } - oldsize = PyArray_SIZE(self); - - if (oldsize != newsize) { - if (!(self->flags & OWNDATA)) { - PyErr_SetString(PyExc_ValueError, - "cannot resize this array: " \ - "it does not own its data"); - return NULL; - } - - if (refcheck) refcnt = REFCOUNT(self); - else refcnt = 1; - if ((refcnt > 2) || (self->base != NULL) || \ - (self->weakreflist != NULL)) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that has "\ - "been referenced or is referencing\n"\ - "another array in this way. Use the "\ - "resize function"); - return NULL; - } - - if (newsize == 0) sd = self->descr->elsize; - else sd = newsize * self->descr->elsize; - /* Reallocate space if needed */ - new_data = PyDataMem_RENEW(self->data, sd); - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate memory for array"); - return NULL; - } - self->data = new_data; - } - - if ((newsize > oldsize) && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros */ - elsize = self->descr->elsize; - if (PyDataType_FLAGCHK(self->descr, NPY_ITEM_REFCOUNT)) { - PyObject *zero = PyInt_FromLong(0); - char *optr; - optr = self->data + oldsize*elsize; - n = newsize - oldsize; - for (k=0; kdescr); - optr += elsize; - } - Py_DECREF(zero); - } - else{ - memset(self->data+oldsize*elsize, 0, - (newsize-oldsize)*elsize); - } - } - - if (self->nd != new_nd) { /* Different number of dimensions. 
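[Editorial sketch, not part of the patch: the grow path of PyArray_Resize above reallocates the single data segment and zero-fills the newly exposed tail (or places zeros object-wise for reference-counted dtypes) so the enlarged array never hands back uninitialized bytes. The standalone C below shows that realloc-then-memset pattern on a plain double buffer; resize_buffer is an illustrative name.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static double *resize_buffer(double *data, size_t oldn, size_t newn)
{
    /* allocate at least one element so zero-size resizes still have storage */
    double *p = realloc(data, (newn ? newn : 1) * sizeof(double));
    if (p == NULL)
        return NULL;                       /* caller keeps the old buffer */
    if (newn > oldn)                       /* zero-fill the grown tail */
        memset(p + oldn, 0, (newn - oldn) * sizeof(double));
    return p;
}

int main(void)
{
    double *a = malloc(2 * sizeof(double));
    if (a == NULL) return 1;
    a[0] = 1.0; a[1] = 2.0;
    a = resize_buffer(a, 2, 5);
    if (a == NULL) return 1;
    for (int i = 0; i < 5; i++)
        printf("%g ", a[i]);               /* 1 2 0 0 0 */
    printf("\n");
    free(a);
    return 0;
}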
*/ - self->nd = new_nd; - - /* Need new dimensions and strides arrays */ - dimptr = PyDimMem_RENEW(self->dimensions, 2*new_nd); - if (dimptr == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate memory for array " \ - "(array may be corrupted)"); - return NULL; - } - self->dimensions = dimptr; - self->strides = dimptr + new_nd; - } - - /* make new_strides variable */ - sd = (size_t) self->descr->elsize; - sd = (size_t) _array_fill_strides(new_strides, new_dimensions, new_nd, sd, - self->flags, &(self->flags)); - - memmove(self->dimensions, new_dimensions, new_nd*sizeof(intp)); - memmove(self->strides, new_strides, new_nd*sizeof(intp)); - - Py_INCREF(Py_None); - return Py_None; - -} - -static void -_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) -{ - if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - if ((obj == Py_None) || - (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) - return; - else { - PyObject *arr; - Py_INCREF(dtype); - arr = PyArray_NewFromDescr(&PyArray_Type, dtype, - 0, NULL, NULL, NULL, - 0, NULL); - if (arr!=NULL) - dtype->f->setitem(obj, optr, arr); - Py_XDECREF(arr); - } - } - else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; - _fillobject(optr + offset, obj, new); - } - } - else { - PyObject **temp; - Py_XINCREF(obj); - temp = (PyObject **)optr; - *temp = obj; - return; - } -} - -/* Assumes contiguous */ -/*OBJECT_API*/ -static void -PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) -{ - intp i,n; - n = PyArray_SIZE(arr); - if (arr->descr->type_num == PyArray_OBJECT) { - PyObject **optr; - optr = (PyObject **)(arr->data); - n = PyArray_SIZE(arr); - if (obj == NULL) { - for (i=0; idata; - for (i=0; idescr); - optr += arr->descr->elsize; - } - } -} - -/*OBJECT_API*/ -static int -PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) -{ - PyObject *newarr; - int itemsize, swap; - void *fromptr; - PyArray_Descr *descr; - intp size; - PyArray_CopySwapFunc *copyswap; - - itemsize = arr->descr->elsize; - if (PyArray_ISOBJECT(arr)) { - fromptr = &obj; - swap = 0; - newarr = NULL; - } - else { - descr = PyArray_DESCR(arr); - Py_INCREF(descr); - newarr = PyArray_FromAny(obj, descr, 0,0, ALIGNED, NULL); - if (newarr == NULL) return -1; - fromptr = PyArray_DATA(newarr); - swap=!PyArray_ISNOTSWAPPED(arr); - } - size=PyArray_SIZE(arr); - copyswap = arr->descr->f->copyswap; - if (PyArray_ISONESEGMENT(arr)) { - char *toptr=PyArray_DATA(arr); - PyArray_FillWithScalarFunc* fillwithscalar = - arr->descr->f->fillwithscalar; - if (fillwithscalar && PyArray_ISALIGNED(arr)) { - copyswap(fromptr, NULL, swap, newarr); - fillwithscalar(toptr, size, fromptr, arr); - } - else { - while (size--) { - copyswap(toptr, fromptr, swap, arr); - toptr += itemsize; - } - } - } - else { - PyArrayIterObject *iter; - - iter = (PyArrayIterObject *)\ - PyArray_IterNew((PyObject *)arr); - if (iter == NULL) { - Py_XDECREF(newarr); - return -1; - } - while(size--) { - copyswap(iter->dataptr, fromptr, swap, arr); - PyArray_ITER_NEXT(iter); - } - Py_DECREF(iter); - } - Py_XDECREF(newarr); - return 0; -} - -static PyObject * -array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"shape", "dtype", "buffer", - "offset", "strides", - "order", NULL}; - PyArray_Descr *descr=NULL; - int itemsize; - PyArray_Dims dims = {NULL, 0}; - 
PyArray_Dims strides = {NULL, 0}; - PyArray_Chunk buffer; - longlong offset=0; - NPY_ORDER order=PyArray_CORDER; - int fortran = 0; - PyArrayObject *ret; - - buffer.ptr = NULL; - /* Usually called with shape and type - but can also be called with buffer, strides, and swapped info - */ - - /* For now, let's just use this to create an empty, contiguous - array of a specific type and shape. - */ - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&LO&O&", - kwlist, PyArray_IntpConverter, - &dims, - PyArray_DescrConverter, - &descr, - PyArray_BufferConverter, - &buffer, - &offset, - &PyArray_IntpConverter, - &strides, - &PyArray_OrderConverter, - &order)) - goto fail; - - if (order == PyArray_FORTRANORDER) fortran = 1; - - if (descr == NULL) - descr = PyArray_DescrFromType(PyArray_DEFAULT); - - itemsize = descr->elsize; - - if (itemsize == 0) { - PyErr_SetString(PyExc_ValueError, - "data-type with unspecified variable length"); - goto fail; - } - - if (strides.ptr != NULL) { - intp nb, off; - if (strides.len != dims.len) { - PyErr_SetString(PyExc_ValueError, - "strides, if given, must be " \ - "the same length as shape"); - goto fail; - } - - if (buffer.ptr == NULL) { - nb = 0; - off = 0; - } - else { - nb = buffer.len; - off = (intp) offset; - } - - - if (!PyArray_CheckStrides(itemsize, dims.len, - nb, off, - dims.ptr, strides.ptr)) { - PyErr_SetString(PyExc_ValueError, - "strides is incompatible " \ - "with shape of requested " \ - "array and size of buffer"); - goto fail; - } - } - - if (buffer.ptr == NULL) { - ret = (PyArrayObject *) \ - PyArray_NewFromDescr(subtype, descr, - (int)dims.len, - dims.ptr, - strides.ptr, NULL, fortran, NULL); - if (ret == NULL) {descr=NULL;goto fail;} - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)) { - /* place Py_None in object positions */ - PyArray_FillObjectArray(ret, Py_None); - if (PyErr_Occurred()) { - descr=NULL; - goto fail; - } - } - } - else { /* buffer given -- use it */ - if (dims.len == 1 && dims.ptr[0] == -1) { - dims.ptr[0] = (buffer.len-(intp)offset) / itemsize; - } - else if ((strides.ptr == NULL) && \ - (buffer.len < ((intp)itemsize)* \ - PyArray_MultiplyList(dims.ptr, dims.len))) { - PyErr_SetString(PyExc_TypeError, - "buffer is too small for " \ - "requested array"); - goto fail; - } - /* get writeable and aligned */ - if (fortran) buffer.flags |= FORTRAN; - ret = (PyArrayObject *)\ - PyArray_NewFromDescr(subtype, descr, - dims.len, dims.ptr, - strides.ptr, - offset + (char *)buffer.ptr, - buffer.flags, NULL); - if (ret == NULL) {descr=NULL; goto fail;} - PyArray_UpdateFlags(ret, UPDATE_ALL); - ret->base = buffer.base; - Py_INCREF(buffer.base); - } - - PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); - return (PyObject *)ret; - - fail: - Py_XDECREF(descr); - if (dims.ptr) PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); - return NULL; -} - - -static PyObject * -array_iter(PyArrayObject *arr) -{ - if (arr->nd == 0) { - PyErr_SetString(PyExc_TypeError, - "iteration over a 0-d array"); - return NULL; - } - return PySeqIter_New((PyObject *)arr); -} - - -/******************* array attribute get and set routines ******************/ - -static PyObject * -array_ndim_get(PyArrayObject *self) -{ - return PyInt_FromLong(self->nd); -} - -static PyObject * -array_flags_get(PyArrayObject *self) -{ - return PyArray_NewFlagsObject((PyObject *)self); -} - -static PyObject * -array_shape_get(PyArrayObject *self) -{ - return PyArray_IntTupleFromIntp(self->nd, self->dimensions); -} - - -static int 
-array_shape_set(PyArrayObject *self, PyObject *val) -{ - int nd; - PyObject *ret; - - /* Assumes C-order */ - ret = PyArray_Reshape(self, val); - if (ret == NULL) return -1; - if (PyArray_DATA(ret) != PyArray_DATA(self)) { - Py_DECREF(ret); - PyErr_SetString(PyExc_AttributeError, - "incompatible shape for a non-contiguous "\ - "array"); - return -1; - } - - /* Free old dimensions and strides */ - PyDimMem_FREE(self->dimensions); - nd = PyArray_NDIM(ret); - self->nd = nd; - if (nd > 0) { /* create new dimensions and strides */ - self->dimensions = PyDimMem_NEW(2*nd); - if (self->dimensions == NULL) { - Py_DECREF(ret); - PyErr_SetString(PyExc_MemoryError,""); - return -1; - } - self->strides = self->dimensions + nd; - memcpy(self->dimensions, PyArray_DIMS(ret), - nd*sizeof(intp)); - memcpy(self->strides, PyArray_STRIDES(ret), - nd*sizeof(intp)); - } - else {self->dimensions=NULL; self->strides=NULL;} - Py_DECREF(ret); - PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); - return 0; -} - - -static PyObject * -array_strides_get(PyArrayObject *self) -{ - return PyArray_IntTupleFromIntp(self->nd, self->strides); -} - -static int -array_strides_set(PyArrayObject *self, PyObject *obj) -{ - PyArray_Dims newstrides = {NULL, 0}; - PyArrayObject *new; - intp numbytes=0; - intp offset=0; - Py_ssize_t buf_len; - char *buf; - - if (!PyArray_IntpConverter(obj, &newstrides) || \ - newstrides.ptr == NULL) { - PyErr_SetString(PyExc_TypeError, "invalid strides"); - return -1; - } - if (newstrides.len != self->nd) { - PyErr_Format(PyExc_ValueError, "strides must be " \ - " same length as shape (%d)", self->nd); - goto fail; - } - new = self; - while(new->base && PyArray_Check(new->base)) { - new = (PyArrayObject *)(new->base); - } - /* Get the available memory through the buffer - interface on new->base or if that fails - from the current new */ - if (new->base && PyObject_AsReadBuffer(new->base, - (const void **)&buf, - &buf_len) >= 0) { - offset = self->data - buf; - numbytes = buf_len + offset; - } - else { - PyErr_Clear(); - numbytes = PyArray_MultiplyList(new->dimensions, - new->nd)*new->descr->elsize; - offset = self->data - new->data; - } - - if (!PyArray_CheckStrides(self->descr->elsize, self->nd, numbytes, - offset, - self->dimensions, newstrides.ptr)) { - PyErr_SetString(PyExc_ValueError, "strides is not "\ - "compatible with available memory"); - goto fail; - } - memcpy(self->strides, newstrides.ptr, sizeof(intp)*newstrides.len); - PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); - PyDimMem_FREE(newstrides.ptr); - return 0; - - fail: - PyDimMem_FREE(newstrides.ptr); - return -1; -} - - - -static PyObject * -array_priority_get(PyArrayObject *self) -{ - if (PyArray_CheckExact(self)) - return PyFloat_FromDouble(PyArray_PRIORITY); - else - return PyFloat_FromDouble(PyArray_SUBTYPE_PRIORITY); -} - -static PyObject *arraydescr_protocol_typestr_get(PyArray_Descr *); - -static PyObject * -array_typestr_get(PyArrayObject *self) -{ - return arraydescr_protocol_typestr_get(self->descr); -} - -static PyObject * -array_descr_get(PyArrayObject *self) -{ - Py_INCREF(self->descr); - return (PyObject *)self->descr; -} - -static PyObject *arraydescr_protocol_descr_get(PyArray_Descr *self); - -static PyObject * -array_protocol_descr_get(PyArrayObject *self) -{ - PyObject *res; - PyObject *dobj; - - res = arraydescr_protocol_descr_get(self->descr); - if (res) return res; - PyErr_Clear(); - - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; - PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); 
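/*
 * Sketch (editor's annotation, not from the original arrayobject.c): the
 * same PyArray_CheckStrides validation used by array_strides_set above,
 * applied before overwriting an array's strides from C.  set_strides_checked
 * is a hypothetical helper and only covers the simple case of an array that
 * owns its whole, zero-offset buffer; <string.h> for memcpy is assumed.
 *
 *   static int
 *   set_strides_checked(PyArrayObject *self, intp *newstrides)
 *   {
 *       intp numbytes = PyArray_MultiplyList(self->dimensions, self->nd)
 *                       * self->descr->elsize;
 *
 *       if (!PyArray_CheckStrides(self->descr->elsize, self->nd,
 *                                 numbytes, 0,
 *                                 self->dimensions, newstrides)) {
 *           PyErr_SetString(PyExc_ValueError,
 *                           "strides are not compatible with "
 *                           "available memory");
 *           return -1;
 *       }
 *       memcpy(self->strides, newstrides, self->nd * sizeof(intp));
 *       PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN);
 *       return 0;
 *   }
 */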
- PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} - PyList_SET_ITEM(res, 0, dobj); - return res; -} - -static PyObject * -array_protocol_strides_get(PyArrayObject *self) -{ - if PyArray_ISCONTIGUOUS(self) { - Py_INCREF(Py_None); - return Py_None; - } - return PyArray_IntTupleFromIntp(self->nd, self->strides); -} - - - -static PyObject * -array_dataptr_get(PyArrayObject *self) -{ - return Py_BuildValue("NO", - PyLong_FromVoidPtr(self->data), - (self->flags & WRITEABLE ? Py_False : - Py_True)); -} - -static PyObject * -array_ctypes_get(PyArrayObject *self) -{ - PyObject *_numpy_internal; - PyObject *ret; - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - ret = PyObject_CallMethod(_numpy_internal, "_ctypes", - "ON", self, - PyLong_FromVoidPtr(self->data)); - Py_DECREF(_numpy_internal); - return ret; -} - -static PyObject * -array_interface_get(PyArrayObject *self) -{ - PyObject *dict; - PyObject *obj; - dict = PyDict_New(); - if (dict == NULL) return NULL; - - /* dataptr */ - obj = array_dataptr_get(self); - PyDict_SetItemString(dict, "data", obj); - Py_DECREF(obj); - - obj = array_protocol_strides_get(self); - PyDict_SetItemString(dict, "strides", obj); - Py_DECREF(obj); - - obj = array_protocol_descr_get(self); - PyDict_SetItemString(dict, "descr", obj); - Py_DECREF(obj); - - obj = arraydescr_protocol_typestr_get(self->descr); - PyDict_SetItemString(dict, "typestr", obj); - Py_DECREF(obj); - - obj = array_shape_get(self); - PyDict_SetItemString(dict, "shape", obj); - Py_DECREF(obj); - - obj = PyInt_FromLong(3); - PyDict_SetItemString(dict, "version", obj); - Py_DECREF(obj); - - return dict; -} - -static PyObject * -array_data_get(PyArrayObject *self) -{ - intp nbytes; - if (!(PyArray_ISONESEGMENT(self))) { - PyErr_SetString(PyExc_AttributeError, "cannot get single-"\ - "segment buffer for discontiguous array"); - return NULL; - } - nbytes = PyArray_NBYTES(self); - if PyArray_ISWRITEABLE(self) - return PyBuffer_FromReadWriteObject((PyObject *)self, 0, - (int) nbytes); - else - return PyBuffer_FromObject((PyObject *)self, 0, (int) nbytes); -} - -static int -array_data_set(PyArrayObject *self, PyObject *op) -{ - void *buf; - Py_ssize_t buf_len; - int writeable=1; - - if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) { - writeable = 0; - if (PyObject_AsReadBuffer(op, (const void **)&buf, - &buf_len) < 0) { - PyErr_SetString(PyExc_AttributeError, - "object does not have single-segment " \ - "buffer interface"); - return -1; - } - } - if (!PyArray_ISONESEGMENT(self)) { - PyErr_SetString(PyExc_AttributeError, "cannot set single-" \ - "segment buffer for discontiguous array"); - return -1; - } - if (PyArray_NBYTES(self) > buf_len) { - PyErr_SetString(PyExc_AttributeError, - "not enough data for array"); - return -1; - } - if (self->flags & OWNDATA) { - PyArray_XDECREF(self); - PyDataMem_FREE(self->data); - } - if (self->base) { - if (self->flags & UPDATEIFCOPY) { - ((PyArrayObject *)self->base)->flags |= WRITEABLE; - self->flags &= ~UPDATEIFCOPY; - } - Py_DECREF(self->base); - } - Py_INCREF(op); - self->base = op; - self->data = buf; - self->flags = CARRAY; - if (!writeable) - self->flags &= ~WRITEABLE; - return 0; -} - - -static PyObject * -array_itemsize_get(PyArrayObject *self) -{ - return PyInt_FromLong((long) self->descr->elsize); -} - -static PyObject * -array_size_get(PyArrayObject *self) -{ - intp size=PyArray_SIZE(self); -#if SIZEOF_INTP <= 
SIZEOF_LONG - return PyInt_FromLong((long) size); -#else - if (size > MAX_LONG || size < MIN_LONG) - return PyLong_FromLongLong(size); - else - return PyInt_FromLong((long) size); -#endif -} - -static PyObject * -array_nbytes_get(PyArrayObject *self) -{ - intp nbytes = PyArray_NBYTES(self); -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) nbytes); -#else - if (nbytes > MAX_LONG || nbytes < MIN_LONG) - return PyLong_FromLongLong(nbytes); - else - return PyInt_FromLong((long) nbytes); -#endif -} - - -/* If the type is changed. - Also needing change: strides, itemsize - - Either itemsize is exactly the same - or the array is single-segment (contiguous or fortran) with - compatibile dimensions - - The shape and strides will be adjusted in that case as well. -*/ - -static int -array_descr_set(PyArrayObject *self, PyObject *arg) -{ - PyArray_Descr *newtype=NULL; - intp newdim; - int index; - char *msg = "new type not compatible with array."; - - if (!(PyArray_DescrConverter(arg, &newtype)) || - newtype == NULL) { - PyErr_SetString(PyExc_TypeError, "invalid data-type for array"); - return -1; - } - if (PyDataType_FLAGCHK(newtype, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(newtype, NPY_ITEM_IS_POINTER) || - PyDataType_FLAGCHK(self->descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(self->descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_TypeError, \ - "Cannot change data-type for object " \ - "array."); - Py_DECREF(newtype); - return -1; - } - - if (newtype->elsize == 0) { - PyErr_SetString(PyExc_TypeError, - "data-type must not be 0-sized"); - Py_DECREF(newtype); - return -1; - } - - - if ((newtype->elsize != self->descr->elsize) && \ - (self->nd == 0 || !PyArray_ISONESEGMENT(self) || \ - newtype->subarray)) goto fail; - - if (PyArray_ISCONTIGUOUS(self)) index = self->nd - 1; - else index = 0; - - if (newtype->elsize < self->descr->elsize) { - /* if it is compatible increase the size of the - dimension at end (or at the front for FORTRAN) - */ - if (self->descr->elsize % newtype->elsize != 0) - goto fail; - newdim = self->descr->elsize / newtype->elsize; - self->dimensions[index] *= newdim; - self->strides[index] = newtype->elsize; - } - - else if (newtype->elsize > self->descr->elsize) { - - /* Determine if last (or first if FORTRAN) dimension - is compatible */ - - newdim = self->dimensions[index] * self->descr->elsize; - if ((newdim % newtype->elsize) != 0) goto fail; - - self->dimensions[index] = newdim / newtype->elsize; - self->strides[index] = newtype->elsize; - } - - /* fall through -- adjust type*/ - - Py_DECREF(self->descr); - if (newtype->subarray) { - /* create new array object from data and update - dimensions, strides and descr from it */ - PyArrayObject *temp; - - /* We would decref newtype here --- temp will - steal a reference to it */ - temp = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, newtype, self->nd, - self->dimensions, self->strides, - self->data, self->flags, NULL); - if (temp == NULL) return -1; - PyDimMem_FREE(self->dimensions); - self->dimensions = temp->dimensions; - self->nd = temp->nd; - self->strides = temp->strides; - newtype = temp->descr; - Py_INCREF(temp->descr); - /* Fool deallocator not to delete these*/ - temp->nd = 0; - temp->dimensions = NULL; - Py_DECREF(temp); - } - - self->descr = newtype; - PyArray_UpdateFlags(self, UPDATE_ALL); - - return 0; - - fail: - PyErr_SetString(PyExc_ValueError, msg); - Py_DECREF(newtype); - return -1; -} - -static PyObject * -array_struct_get(PyArrayObject *self) -{ - PyArrayInterface *inter; 
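/*
 * Sketch (editor's annotation, not from the original arrayobject.c):
 * array_descr_set above is what runs when Python code assigns to
 * ndarray.dtype, so the same in-place reinterpretation can be triggered from
 * C with a plain attribute assignment.  reinterpret_as_bytes is a
 * hypothetical helper; per the rules above, a contiguous int32 array of
 * shape (n,) comes out with shape (n*4,) because the last dimension grows by
 * the ratio of the old to the new itemsize when the new itemsize is smaller.
 *
 *   static int
 *   reinterpret_as_bytes(PyArrayObject *arr)
 *   {
 *       PyArray_Descr *bytetype = PyArray_DescrFromType(PyArray_UBYTE);
 *       int ret;
 *
 *       if (bytetype == NULL) {
 *           return -1;
 *       }
 *       ret = PyObject_SetAttrString((PyObject *)arr, "dtype",
 *                                    (PyObject *)bytetype);
 *       Py_DECREF(bytetype);
 *       return ret;
 *   }
 */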
- - inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - if (inter==NULL) return PyErr_NoMemory(); - inter->two = 2; - inter->nd = self->nd; - inter->typekind = self->descr->kind; - inter->itemsize = self->descr->elsize; - inter->flags = self->flags; - /* reset unused flags */ - inter->flags &= ~(UPDATEIFCOPY | OWNDATA); - if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NOTSWAPPED; - /* Copy shape and strides over since these can be reset - when the array is "reshaped". - */ - if (self->nd > 0) { - inter->shape = (intp *)_pya_malloc(2*sizeof(intp)*self->nd); - if (inter->shape == NULL) { - _pya_free(inter); - return PyErr_NoMemory(); - } - inter->strides = inter->shape + self->nd; - memcpy(inter->shape, self->dimensions, sizeof(intp)*self->nd); - memcpy(inter->strides, self->strides, sizeof(intp)*self->nd); - } - else { - inter->shape = NULL; - inter->strides = NULL; - } - inter->data = self->data; - if (self->descr->names) { - inter->descr = arraydescr_protocol_descr_get(self->descr); - if (inter->descr == NULL) PyErr_Clear(); - else inter->flags &= ARR_HAS_DESCR; - } - else inter->descr = NULL; - Py_INCREF(self); - return PyCObject_FromVoidPtrAndDesc(inter, self, gentype_struct_free); -} - -static PyObject * -array_base_get(PyArrayObject *self) -{ - if (self->base == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - else { - Py_INCREF(self->base); - return self->base; - } -} - -/* Create a view of a complex array with an equivalent data-type - except it is real instead of complex. -*/ - -static PyArrayObject * -_get_part(PyArrayObject *self, int imag) -{ - PyArray_Descr *type; - PyArrayObject *ret; - int offset; - - type = PyArray_DescrFromType(self->descr->type_num - - PyArray_NUM_FLOATTYPE); - offset = (imag ? type->elsize : 0); - - if (!PyArray_ISNBO(self->descr->byteorder)) { - PyArray_Descr *new; - new = PyArray_DescrNew(type); - new->byteorder = self->descr->byteorder; - Py_DECREF(type); - type = new; - } - ret = (PyArrayObject *) \ - PyArray_NewFromDescr(self->ob_type, - type, - self->nd, - self->dimensions, - self->strides, - self->data + offset, - self->flags, (PyObject *)self); - if (ret == NULL) return NULL; - ret->flags &= ~CONTIGUOUS; - ret->flags &= ~FORTRAN; - Py_INCREF(self); - ret->base = (PyObject *)self; - return ret; -} - -static PyObject * -array_real_get(PyArrayObject *self) -{ - PyArrayObject *ret; - - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - return (PyObject *)ret; - } - else { - Py_INCREF(self); - return (PyObject *)self; - } -} - - -static int -array_real_set(PyArrayObject *self, PyObject *val) -{ - PyArrayObject *ret; - PyArrayObject *new; - int rint; - - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - if (ret == NULL) return -1; - } - else { - Py_INCREF(self); - ret = self; - } - new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} - rint = PyArray_MoveInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return rint; -} - -static PyObject * -array_imag_get(PyArrayObject *self) -{ - PyArrayObject *ret; - PyArray_Descr *type; - - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 1); - return (PyObject *) ret; - } - else { - type = self->descr; - Py_INCREF(type); - ret = (PyArrayObject *)PyArray_Zeros(self->nd, - self->dimensions, - type, - PyArray_ISFORTRAN(self)); - ret->flags &= ~WRITEABLE; - if (PyArray_CheckExact(self)) - return (PyObject *)ret; - else { - PyObject *newret; - newret = PyArray_View(ret, NULL, self->ob_type); - 
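/*
 * Sketch (editor's annotation, not from the original arrayobject.c): a
 * consumer's view of the PyArrayInterface that array_struct_get above packs
 * into the __array_struct__ CObject.  print_struct_interface is a
 * hypothetical helper mirroring the checks PyArray_FromStructInterface
 * (further below) performs; <stdio.h> is assumed.  The pointers inside the
 * structure stay valid only while the CObject itself is alive.
 *
 *   static int
 *   print_struct_interface(PyObject *any)
 *   {
 *       PyObject *cap = PyObject_GetAttrString(any, "__array_struct__");
 *       PyArrayInterface *inter;
 *       int i;
 *
 *       if (cap == NULL) {
 *           return -1;           (AttributeError already set)
 *       }
 *       if (!PyCObject_Check(cap)) {
 *           PyErr_SetString(PyExc_ValueError, "invalid __array_struct__");
 *           Py_DECREF(cap);
 *           return -1;
 *       }
 *       inter = (PyArrayInterface *)PyCObject_AsVoidPtr(cap);
 *       if (inter->two != 2) {
 *           PyErr_SetString(PyExc_ValueError, "invalid __array_struct__");
 *           Py_DECREF(cap);
 *           return -1;
 *       }
 *       printf("kind=%c itemsize=%d nd=%d\n",
 *              inter->typekind, inter->itemsize, inter->nd);
 *       for (i = 0; i < inter->nd; i++) {
 *           printf("  dim %d: %ld\n", i, (long)inter->shape[i]);
 *       }
 *       Py_DECREF(cap);
 *       return 0;
 *   }
 */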
Py_DECREF(ret); - return newret; - } - } -} - -static int -array_imag_set(PyArrayObject *self, PyObject *val) -{ - if (PyArray_ISCOMPLEX(self)) { - PyArrayObject *ret; - PyArrayObject *new; - int rint; - - ret = _get_part(self, 1); - if (ret == NULL) return -1; - new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} - rint = PyArray_MoveInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return rint; - } - else { - PyErr_SetString(PyExc_TypeError, "array does not have "\ - "imaginary part to set"); - return -1; - } -} - -static PyObject * -array_flat_get(PyArrayObject *self) -{ - return PyArray_IterNew((PyObject *)self); -} - -static int -array_flat_set(PyArrayObject *self, PyObject *val) -{ - PyObject *arr=NULL; - int retval = -1; - PyArrayIterObject *selfit=NULL, *arrit=NULL; - PyArray_Descr *typecode; - int swap; - PyArray_CopySwapFunc *copyswap; - - typecode = self->descr; - Py_INCREF(typecode); - arr = PyArray_FromAny(val, typecode, - 0, 0, FORCECAST | FORTRAN_IF(self), NULL); - if (arr == NULL) return -1; - arrit = (PyArrayIterObject *)PyArray_IterNew(arr); - if (arrit == NULL) goto exit; - selfit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (selfit == NULL) goto exit; - - if (arrit->size == 0) {retval = 0; goto exit;} - - swap = PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(arr); - copyswap = self->descr->f->copyswap; - if (PyDataType_REFCHK(self->descr)) { - while(selfit->index < selfit->size) { - PyArray_Item_XDECREF(selfit->dataptr, self->descr); - PyArray_Item_INCREF(arrit->dataptr, PyArray_DESCR(arr)); - memmove(selfit->dataptr, arrit->dataptr, - sizeof(PyObject **)); - if (swap) - copyswap(selfit->dataptr, NULL, swap, self); - PyArray_ITER_NEXT(selfit); - PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) - PyArray_ITER_RESET(arrit); - } - retval = 0; - goto exit; - } - - while(selfit->index < selfit->size) { - memmove(selfit->dataptr, arrit->dataptr, self->descr->elsize); - if (swap) - copyswap(selfit->dataptr, NULL, swap, self); - PyArray_ITER_NEXT(selfit); - PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) - PyArray_ITER_RESET(arrit); - } - retval = 0; - exit: - Py_XDECREF(selfit); - Py_XDECREF(arrit); - Py_XDECREF(arr); - return retval; -} - -static PyObject * -array_transpose_get(PyArrayObject *self) -{ - return PyArray_Transpose(self, NULL); -} - -/* If this is None, no function call is made - --- default sub-class behavior -*/ -static PyObject * -array_finalize_get(PyArrayObject *self) -{ - Py_INCREF(Py_None); - return Py_None; -} - -static PyGetSetDef array_getsetlist[] = { - {"ndim", - (getter)array_ndim_get, - NULL, NULL}, - {"flags", - (getter)array_flags_get, - NULL, NULL}, - {"shape", - (getter)array_shape_get, - (setter)array_shape_set, - NULL}, - {"strides", - (getter)array_strides_get, - (setter)array_strides_set, - NULL}, - {"data", - (getter)array_data_get, - (setter)array_data_set, - NULL}, - {"itemsize", - (getter)array_itemsize_get, - NULL, NULL}, - {"size", - (getter)array_size_get, - NULL, NULL}, - {"nbytes", - (getter)array_nbytes_get, - NULL, NULL}, - {"base", - (getter)array_base_get, - NULL, NULL}, - {"dtype", - (getter)array_descr_get, - (setter)array_descr_set, - NULL}, - {"real", - (getter)array_real_get, - (setter)array_real_set, - NULL}, - {"imag", - (getter)array_imag_get, - (setter)array_imag_set, - NULL}, - {"flat", - (getter)array_flat_get, - (setter)array_flat_set, - NULL}, - {"ctypes", - (getter)array_ctypes_get, - NULL, NULL}, - 
{"T", - (getter)array_transpose_get, - NULL, NULL}, - {"__array_interface__", - (getter)array_interface_get, - NULL, NULL}, - {"__array_struct__", - (getter)array_struct_get, - NULL, NULL}, - {"__array_priority__", - (getter)array_priority_get, - NULL, NULL}, - {"__array_finalize__", - (getter)array_finalize_get, - NULL, NULL}, - {NULL, NULL, NULL, NULL}, /* Sentinel */ -}; - -/****************** end of attribute get and set routines *******************/ - - -static PyObject * -array_alloc(PyTypeObject *type, Py_ssize_t nitems) -{ - PyObject *obj; - /* nitems will always be 0 */ - obj = (PyObject *)_pya_malloc(sizeof(PyArrayObject)); - PyObject_Init(obj, type); - return obj; -} - - -static PyTypeObject PyArray_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.ndarray", /*tp_name*/ - sizeof(PyArrayObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - /* methods */ - (destructor)array_dealloc, /*tp_dealloc */ - (printfunc)NULL, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)array_repr, /*tp_repr*/ - &array_as_number, /*tp_as_number*/ - &array_as_sequence, /*tp_as_sequence*/ - &array_as_mapping, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)0, /*tp_call*/ - (reprfunc)array_str, /*tp_str*/ - - (getattrofunc)0, /*tp_getattro*/ - (setattrofunc)0, /*tp_setattro*/ - &array_as_buffer, /*tp_as_buffer*/ - (Py_TPFLAGS_DEFAULT - | Py_TPFLAGS_BASETYPE - | Py_TPFLAGS_CHECKTYPES), /*tp_flags*/ - /*Documentation string */ - 0, /*tp_doc*/ - - (traverseproc)0, /*tp_traverse */ - (inquiry)0, /*tp_clear */ - (richcmpfunc)array_richcompare, /*tp_richcompare */ - offsetof(PyArrayObject, weakreflist), /*tp_weaklistoffset */ - - /* Iterator support (use standard) */ - - (getiterfunc)array_iter, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - - /* Sub-classing (new-style object) support */ - - array_methods, /* tp_methods */ - 0, /* tp_members */ - array_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - array_alloc, /* tp_alloc */ - (newfunc)array_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0 /* tp_weaklist */ -}; - -/* The rest of this code is to build the right kind of array from a python */ -/* object. */ - -static int -discover_depth(PyObject *s, int max, int stop_at_string, int stop_at_tuple) -{ - int d=0; - PyObject *e; - - if(max < 1) return -1; - - if(! PySequence_Check(s) || PyInstance_Check(s) || \ - PySequence_Length(s) < 0) { - PyErr_Clear(); return 0; - } - if (PyArray_Check(s)) - return PyArray_NDIM(s); - if (PyArray_IsScalar(s, Generic)) return 0; - if (PyString_Check(s) || PyBuffer_Check(s) || PyUnicode_Check(s)) - return stop_at_string ? 
0:1; - if (stop_at_tuple && PyTuple_Check(s)) return 0; - if ((e=PyObject_GetAttrString(s, "__array_struct__")) != NULL) { - d = -1; - if (PyCObject_Check(e)) { - PyArrayInterface *inter; - inter = (PyArrayInterface *)PyCObject_AsVoidPtr(e); - if (inter->two == 2) { - d = inter->nd; - } - } - Py_DECREF(e); - if (d > -1) return d; - } - else PyErr_Clear(); - if ((e=PyObject_GetAttrString(s, "__array_interface__")) != NULL) { - d = -1; - if (PyDict_Check(e)) { - PyObject *new; - new = PyDict_GetItemString(e, "shape"); - if (new && PyTuple_Check(new)) - d = PyTuple_GET_SIZE(new); - } - Py_DECREF(e); - if (d>-1) return d; - } - else PyErr_Clear(); - - if (PySequence_Length(s) == 0) - return 1; - if ((e=PySequence_GetItem(s,0)) == NULL) return -1; - if(e!=s) { - d=discover_depth(e, max-1, stop_at_string, stop_at_tuple); - if(d >= 0) d++; - } - Py_DECREF(e); - return d; -} - -static int -discover_itemsize(PyObject *s, int nd, int *itemsize) -{ - int n, r, i; - PyObject *e; - - n = PyObject_Length(s); - - if ((nd == 0) || PyString_Check(s) || \ - PyUnicode_Check(s) || PyBuffer_Check(s)) { - *itemsize = MAX(*itemsize, n); - return 0; - } - for (i=0; i n_lower) n_lower = d[1]; - } - d[1] = n_lower; - - return 0; -} - -/* new reference */ -/* doesn't alter refcount of chktype or mintype --- - unless one of them is returned */ -static PyArray_Descr * -_array_small_type(PyArray_Descr *chktype, PyArray_Descr* mintype) -{ - PyArray_Descr *outtype; - int outtype_num, save_num; - - if (PyArray_EquivTypes(chktype, mintype)) { - Py_INCREF(mintype); - return mintype; - } - - - if (chktype->type_num > mintype->type_num) - outtype_num = chktype->type_num; - else { - if (PyDataType_ISOBJECT(chktype) && \ - PyDataType_ISSTRING(mintype)) { - return PyArray_DescrFromType(NPY_OBJECT); - } - else { - outtype_num = mintype->type_num; - } - } - - save_num = outtype_num; - while(outtype_num < PyArray_NTYPES && - !(PyArray_CanCastSafely(chktype->type_num, outtype_num) - && PyArray_CanCastSafely(mintype->type_num, outtype_num))) - outtype_num++; - if (outtype_num == PyArray_NTYPES) { - outtype = PyArray_DescrFromType(save_num); - } - else { - outtype = PyArray_DescrFromType(outtype_num); - } - if (PyTypeNum_ISEXTENDED(outtype->type_num)) { - int testsize = outtype->elsize; - register int chksize, minsize; - chksize = chktype->elsize; - minsize = mintype->elsize; - /* Handle string->unicode case separately - because string itemsize is 4* as large */ - if (outtype->type_num == PyArray_UNICODE && - mintype->type_num == PyArray_STRING) { - testsize = MAX(chksize, 4*minsize); - } - else { - testsize = MAX(chksize, minsize); - } - if (testsize != outtype->elsize) { - PyArray_DESCR_REPLACE(outtype); - outtype->elsize = testsize; - Py_XDECREF(outtype->fields); - outtype->fields = NULL; - Py_XDECREF(outtype->names); - outtype->names = NULL; - } - } - return outtype; -} - -static PyArray_Descr * -_array_find_python_scalar_type(PyObject *op) -{ - if (PyFloat_Check(op)) { - return PyArray_DescrFromType(PyArray_DOUBLE); - } - else if (PyComplex_Check(op)) { - return PyArray_DescrFromType(PyArray_CDOUBLE); - } - else if (PyInt_Check(op)) { - /* bools are a subclass of int */ - if (PyBool_Check(op)) { - return PyArray_DescrFromType(PyArray_BOOL); - } else { - return PyArray_DescrFromType(PyArray_LONG); - } - } - else if (PyLong_Check(op)) { - /* if integer can fit into a longlong then return that - */ - if ((PyLong_AsLongLong(op) == -1) && PyErr_Occurred()) { - PyErr_Clear(); - return PyArray_DescrFromType(PyArray_OBJECT); - } - return 
PyArray_DescrFromType(PyArray_LONGLONG); - } - return NULL; -} - -static PyArray_Descr * -_use_default_type(PyObject *op) -{ - int typenum, l; - PyObject *type; - - typenum = -1; - l = 0; - type = (PyObject *)op->ob_type; - while (l < PyArray_NUMUSERTYPES) { - if (type == (PyObject *)(userdescrs[l]->typeobj)) { - typenum = l + PyArray_USERDEF; - break; - } - l++; - } - if (typenum == -1) { - typenum = PyArray_OBJECT; - } - return PyArray_DescrFromType(typenum); -} - - -/* op is an object to be converted to an ndarray. - - minitype is the minimum type-descriptor needed. - - max is the maximum number of dimensions -- used for recursive call - to avoid infinite recursion... - -*/ - -static PyArray_Descr * -_array_find_type(PyObject *op, PyArray_Descr *minitype, int max) -{ - int l; - PyObject *ip; - PyArray_Descr *chktype=NULL; - PyArray_Descr *outtype; - - /* These need to come first because if op already carries - a descr structure, then we want it to be the result if minitype - is NULL. - */ - - if (PyArray_Check(op)) { - chktype = PyArray_DESCR(op); - Py_INCREF(chktype); - if (minitype == NULL) return chktype; - Py_INCREF(minitype); - goto finish; - } - - if (PyArray_IsScalar(op, Generic)) { - chktype = PyArray_DescrFromScalar(op); - if (minitype == NULL) return chktype; - Py_INCREF(minitype); - goto finish; - } - - if (minitype == NULL) { - minitype = PyArray_DescrFromType(PyArray_BOOL); - } - else Py_INCREF(minitype); - - if (max < 0) goto deflt; - - chktype = _array_find_python_scalar_type(op); - if (chktype) { - goto finish; - } - - if ((ip=PyObject_GetAttrString(op, "__array_interface__"))!=NULL) { - if (PyDict_Check(ip)) { - PyObject *new; - new = PyDict_GetItemString(ip, "typestr"); - if (new && PyString_Check(new)) { - chktype =_array_typedescr_fromstr \ - (PyString_AS_STRING(new)); - } - } - Py_DECREF(ip); - if (chktype) goto finish; - } - else PyErr_Clear(); - - if ((ip=PyObject_GetAttrString(op, "__array_struct__")) != NULL) { - PyArrayInterface *inter; - char buf[40]; - if (PyCObject_Check(ip)) { - inter=(PyArrayInterface *)PyCObject_AsVoidPtr(ip); - if (inter->two == 2) { - snprintf(buf, 40, "|%c%d", inter->typekind, - inter->itemsize); - chktype = _array_typedescr_fromstr(buf); - } - } - Py_DECREF(ip); - if (chktype) goto finish; - } - else PyErr_Clear(); - - if (PyString_Check(op)) { - chktype = PyArray_DescrNewFromType(PyArray_STRING); - chktype->elsize = PyString_GET_SIZE(op); - goto finish; - } - - if (PyUnicode_Check(op)) { - chktype = PyArray_DescrNewFromType(PyArray_UNICODE); - chktype->elsize = PyUnicode_GET_DATA_SIZE(op); -#ifndef Py_UNICODE_WIDE - chktype->elsize <<= 1; -#endif - goto finish; - } - - if (PyBuffer_Check(op)) { - chktype = PyArray_DescrNewFromType(PyArray_VOID); - chktype->elsize = op->ob_type->tp_as_sequence->sq_length(op); - PyErr_Clear(); - goto finish; - } - - if (PyObject_HasAttrString(op, "__array__")) { - ip = PyObject_CallMethod(op, "__array__", NULL); - if(ip && PyArray_Check(ip)) { - chktype = PyArray_DESCR(ip); - Py_INCREF(chktype); - Py_DECREF(ip); - goto finish; - } - Py_XDECREF(ip); - if (PyErr_Occurred()) PyErr_Clear(); - } - - if (PyInstance_Check(op)) goto deflt; - - if (PySequence_Check(op)) { - - l = PyObject_Length(op); - if (l < 0 && PyErr_Occurred()) { - PyErr_Clear(); - goto deflt; - } - if (l == 0 && minitype->type_num == PyArray_BOOL) { - Py_DECREF(minitype); - minitype = PyArray_DescrFromType(PyArray_DEFAULT); - } - while (--l >= 0) { - PyArray_Descr *newtype; - ip = PySequence_GetItem(op, l); - if (ip==NULL) { - 
PyErr_Clear(); - goto deflt; - } - chktype = _array_find_type(ip, minitype, max-1); - newtype = _array_small_type(chktype, minitype); - Py_DECREF(minitype); - minitype = newtype; - Py_DECREF(chktype); - Py_DECREF(ip); - } - chktype = minitype; - Py_INCREF(minitype); - goto finish; - } - - - deflt: - chktype = _use_default_type(op); - - finish: - - outtype = _array_small_type(chktype, minitype); - Py_DECREF(chktype); - Py_DECREF(minitype); - /* VOID Arrays should not occur by "default" - unless input was already a VOID */ - if (outtype->type_num == PyArray_VOID && \ - minitype->type_num != PyArray_VOID) { - Py_DECREF(outtype); - return PyArray_DescrFromType(PyArray_OBJECT); - } - return outtype; -} - -/* adapted from Numarray */ -static int -setArrayFromSequence(PyArrayObject *a, PyObject *s, int dim, intp offset) -{ - Py_ssize_t i, slen = PySequence_Length(s); - int res = 0; - - if (dim > a->nd) { - PyErr_Format(PyExc_ValueError, - "setArrayFromSequence: sequence/array dimensions mismatch."); - return -1; - } - - if (slen != a->dimensions[dim]) { - PyErr_Format(PyExc_ValueError, - "setArrayFromSequence: sequence/array shape mismatch."); - return -1; - } - - for(i=0; ind - dim) > 1) { - res = setArrayFromSequence(a, o, dim+1, offset); - } - else { - res = a->descr->f->setitem(o, (a->data + offset), a); - } - Py_DECREF(o); - if (res < 0) return res; - offset += a->strides[dim]; - } - return 0; -} - - -static int -Assign_Array(PyArrayObject *self, PyObject *v) -{ - if (!PySequence_Check(v)) { - PyErr_SetString(PyExc_ValueError, - "assignment from non-sequence"); - return -1; - } - if (self->nd == 0) { - PyErr_SetString(PyExc_ValueError, - "assignment to 0-d array"); - return -1; - } - - return setArrayFromSequence(self, v, 0, 0); -} - -/* "Array Scalars don't call this code" */ -/* steals reference to typecode -- no NULL*/ -static PyObject * -Array_FromPyScalar(PyObject *op, PyArray_Descr *typecode) -{ - PyArrayObject *ret; - int itemsize; - int type; - - itemsize = typecode->elsize; - type = typecode->type_num; - - if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) { - itemsize = PyObject_Length(op); - if (type == PyArray_UNICODE) itemsize *= 4; - - if (itemsize != typecode->elsize) { - PyArray_DESCR_REPLACE(typecode); - typecode->elsize = itemsize; - } - } - - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, typecode, - 0, NULL, - NULL, NULL, 0, NULL); - if (ret == NULL) return NULL; - if (ret->nd > 0) { - PyErr_SetString(PyExc_ValueError, - "shape-mismatch on array construction"); - Py_DECREF(ret); - return NULL; - } - - ret->descr->f->setitem(op, ret->data, ret); - - if (PyErr_Occurred()) { - Py_DECREF(ret); - return NULL; - } else { - return (PyObject *)ret; - } -} - - -/* If s is not a list, return 0 - Otherwise: - - run object_depth_and_dimension on all the elements - and make sure the returned shape and size - is the same for each element - -*/ -static int -object_depth_and_dimension(PyObject *s, int max, intp *dims) -{ - intp *newdims, *test_dims; - int nd, test_nd; - int i, islist; - intp size; - PyObject *obj; - - islist = PyList_Check(s); - if (!(islist || PyTuple_Check(s)) || - ((size = PySequence_Size(s)) == 0)) - return 0; - if (max < 2) { - if (max < 1) return 0; - dims[0] = size; - return 1; - } - newdims = PyDimMem_NEW(2*(max-1)); - test_dims = newdims + (max-1); - if (islist) obj = PyList_GET_ITEM(s, 0); - else obj = PyTuple_GET_ITEM(s, 0); - nd = object_depth_and_dimension(obj, max-1, newdims); - for (i=1; itype_num; - int itemsize = typecode->elsize; - - check_it 
= (typecode->type != PyArray_CHARLTR); - - stop_at_string = ((type == PyArray_OBJECT) || - (type == PyArray_STRING && - typecode->type == PyArray_STRINGLTR) || - (type == PyArray_UNICODE) || - (type == PyArray_VOID)); - - stop_at_tuple = (type == PyArray_VOID && (typecode->names \ - || typecode->subarray)); - - if (!((nd=discover_depth(s, MAX_DIMS+1, stop_at_string, - stop_at_tuple)) > 0)) { - if (nd==0) - return Array_FromPyScalar(s, typecode); - PyErr_SetString(PyExc_ValueError, - "invalid input sequence"); - goto fail; - } - - if (max_depth && PyTypeNum_ISOBJECT(type) && (nd > max_depth)) { - nd = max_depth; - } - - if ((max_depth && nd > max_depth) || \ - (min_depth && nd < min_depth)) { - PyErr_SetString(PyExc_ValueError, - "invalid number of dimensions"); - goto fail; - } - - if(discover_dimensions(s,nd,d, check_it) == -1) goto fail; - - if (typecode->type == PyArray_CHARLTR && nd > 0 && d[nd-1]==1) { - nd = nd-1; - } - - if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) { - if (discover_itemsize(s, nd, &itemsize) == -1) goto fail; - if (type == PyArray_UNICODE) itemsize*=4; - } - - if (itemsize != typecode->elsize) { - PyArray_DESCR_REPLACE(typecode); - typecode->elsize = itemsize; - } - - r=(PyArrayObject*)PyArray_NewFromDescr(&PyArray_Type, typecode, - nd, d, - NULL, NULL, - fortran, NULL); - - if(!r) return NULL; - if(Assign_Array(r,s) == -1) { - Py_DECREF(r); - return NULL; - } - return (PyObject*)r; - - fail: - Py_DECREF(typecode); - return NULL; -} - - -/*OBJECT_API - Is the typenum valid? -*/ -static int -PyArray_ValidType(int type) -{ - PyArray_Descr *descr; - int res=TRUE; - - descr = PyArray_DescrFromType(type); - if (descr==NULL) res = FALSE; - Py_DECREF(descr); - return res; -} - -/* For backward compatibility */ - -/* steals reference to at --- cannot be NULL*/ -/*OBJECT_API - Cast an array using typecode structure. -*/ -static PyObject * -PyArray_CastToType(PyArrayObject *mp, PyArray_Descr *at, int fortran) -{ - PyObject *out; - int ret; - PyArray_Descr *mpd; - - mpd = mp->descr; - - if (((mpd == at) || ((mpd->type_num == at->type_num) && \ - PyArray_EquivByteorders(mpd->byteorder,\ - at->byteorder) && \ - ((mpd->elsize == at->elsize) || \ - (at->elsize==0)))) && \ - PyArray_ISBEHAVED_RO(mp)) { - Py_DECREF(at); - Py_INCREF(mp); - return (PyObject *)mp; - } - - if (at->elsize == 0) { - PyArray_DESCR_REPLACE(at); - if (at == NULL) return NULL; - if (mpd->type_num == PyArray_STRING && \ - at->type_num == PyArray_UNICODE) - at->elsize = mpd->elsize << 2; - if (mpd->type_num == PyArray_UNICODE && - at->type_num == PyArray_STRING) - at->elsize = mpd->elsize >> 2; - if (at->type_num == PyArray_VOID) - at->elsize = mpd->elsize; - } - - out = PyArray_NewFromDescr(mp->ob_type, at, - mp->nd, - mp->dimensions, - NULL, NULL, - fortran, - (PyObject *)mp); - - if (out == NULL) return NULL; - ret = PyArray_CastTo((PyArrayObject *)out, mp); - if (ret != -1) return out; - - Py_DECREF(out); - return NULL; - -} - -/*OBJECT_API - Get a cast function to cast from the input descriptor to the - output type_number (must be a registered data-type). - Returns NULL if un-successful. 
-*/ -static PyArray_VectorUnaryFunc * -PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) -{ - PyArray_VectorUnaryFunc *castfunc=NULL; - if (type_num < PyArray_NTYPES) { - castfunc = descr->f->cast[type_num]; - } - if (castfunc == NULL) { - PyObject *obj = descr->f->castdict; - if (obj && PyDict_Check(obj)) { - PyObject *key; - PyObject *cobj; - key = PyInt_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); - Py_DECREF(key); - if (PyCObject_Check(cobj)) { - castfunc = PyCObject_AsVoidPtr(cobj); - } - } - if (castfunc) return castfunc; - } - else return castfunc; - - PyErr_SetString(PyExc_ValueError, - "No cast function available."); - return NULL; -} - -/* Reference counts: - copyswapn is used which increases and decreases reference counts for OBJECT arrays. - All that needs to happen is for any reference counts in the buffers to be - decreased when completely finished with the buffers. - - buffers[0] is the destination - buffers[1] is the source -*/ -static void -_strided_buffered_cast(char *dptr, intp dstride, int delsize, int dswap, - PyArray_CopySwapNFunc *dcopyfunc, - char *sptr, intp sstride, int selsize, int sswap, - PyArray_CopySwapNFunc *scopyfunc, - intp N, char **buffers, int bufsize, - PyArray_VectorUnaryFunc *castfunc, - PyArrayObject *dest, PyArrayObject *src) -{ - int i; - if (N <= bufsize) { - /* 1. copy input to buffer and swap - 2. cast input to output - 3. swap output if necessary and copy from output buffer - */ - scopyfunc(buffers[1], selsize, sptr, sstride, N, sswap, src); - castfunc(buffers[1], buffers[0], N, src, dest); - dcopyfunc(dptr, dstride, buffers[0], delsize, N, dswap, dest); - return; - } - - /* otherwise we need to divide up into bufsize pieces */ - i = 0; - while(N > 0) { - int newN; - newN = MIN(N, bufsize); - _strided_buffered_cast(dptr+i*dstride, dstride, delsize, - dswap, dcopyfunc, - sptr+i*sstride, sstride, selsize, - sswap, scopyfunc, - newN, buffers, bufsize, castfunc, dest, src); - i += newN; - N -= bufsize; - } - return; -} - -static int -_broadcast_cast(PyArrayObject *out, PyArrayObject *in, - PyArray_VectorUnaryFunc *castfunc, int iswap, int oswap) -{ - int delsize, selsize, maxaxis, i, N; - PyArrayMultiIterObject *multi; - intp maxdim, ostrides, istrides; - char *buffers[2]; - PyArray_CopySwapNFunc *ocopyfunc, *icopyfunc; - char *obptr; - - NPY_BEGIN_THREADS_DEF - - delsize = PyArray_ITEMSIZE(out); - selsize = PyArray_ITEMSIZE(in); - multi = (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, out, in); - if (multi == NULL) return -1; - - if (multi->size != PyArray_SIZE(out)) { - PyErr_SetString(PyExc_ValueError, - "array dimensions are not "\ - "compatible for copy"); - Py_DECREF(multi); - return -1; - } - - icopyfunc = in->descr->f->copyswapn; - ocopyfunc = out->descr->f->copyswapn; - maxaxis = PyArray_RemoveSmallest(multi); - if (maxaxis < 0) { /* cast 1 0-d array to another */ - N = 1; - maxdim = 1; - ostrides = delsize; - istrides = selsize; - } - else { - maxdim = multi->dimensions[maxaxis]; - N = (int) (MIN(maxdim, PyArray_BUFSIZE)); - ostrides = multi->iters[0]->strides[maxaxis]; - istrides = multi->iters[1]->strides[maxaxis]; - - } - buffers[0] = _pya_malloc(N*delsize); - if (buffers[0] == NULL) { - PyErr_NoMemory(); - return -1; - } - buffers[1] = _pya_malloc(N*selsize); - if (buffers[1] == NULL) { - _pya_free(buffers[0]); - PyErr_NoMemory(); - return -1; - } - if (PyDataType_FLAGCHK(out->descr, NPY_NEEDS_INIT)) - memset(buffers[0], 0, N*delsize); - if (PyDataType_FLAGCHK(in->descr, NPY_NEEDS_INIT)) - memset(buffers[1], 0, 
N*selsize); - -#if NPY_ALLOW_THREADS - if (PyArray_ISNUMBER(in) && PyArray_ISNUMBER(out)) { - NPY_BEGIN_THREADS - } -#endif - - while(multi->index < multi->size) { - _strided_buffered_cast(multi->iters[0]->dataptr, - ostrides, - delsize, oswap, ocopyfunc, - multi->iters[1]->dataptr, - istrides, - selsize, iswap, icopyfunc, - maxdim, buffers, N, - castfunc, out, in); - PyArray_MultiIter_NEXT(multi); - } -#if NPY_ALLOW_THREADS - if (PyArray_ISNUMBER(in) && PyArray_ISNUMBER(out)) { - NPY_END_THREADS - } -#endif - Py_DECREF(multi); - if (PyDataType_REFCHK(in->descr)) { - obptr = buffers[1]; - for (i=0; idescr); - } - if (PyDataType_REFCHK(out->descr)) { - obptr = buffers[0]; - for (i=0; idescr); - } - _pya_free(buffers[0]); - _pya_free(buffers[1]); - if (PyErr_Occurred()) return -1; - return 0; -} - - - -/* Must be broadcastable. - This code is very similar to PyArray_CopyInto/PyArray_MoveInto - except casting is done --- PyArray_BUFSIZE is used - as the size of the casting buffer. -*/ - -/*OBJECT_API - Cast to an already created array. -*/ -static int -PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp) -{ - - int simple; - int same; - PyArray_VectorUnaryFunc *castfunc=NULL; - int mpsize = PyArray_SIZE(mp); - int iswap, oswap; - - NPY_BEGIN_THREADS_DEF - - if (mpsize == 0) return 0; - if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); - return -1; - } - - castfunc = PyArray_GetCastFunc(mp->descr, out->descr->type_num); - if (castfunc == NULL) return -1; - - - same = PyArray_SAMESHAPE(out, mp); - simple = same && ((PyArray_ISCARRAY_RO(mp) && PyArray_ISCARRAY(out)) || - (PyArray_ISFARRAY_RO(mp) && PyArray_ISFARRAY(out))); - - if (simple) { - -#if NPY_ALLOW_THREADS - if (PyArray_ISNUMBER(mp) && PyArray_ISNUMBER(out)) { - NPY_BEGIN_THREADS } -#endif - castfunc(mp->data, out->data, mpsize, mp, out); - -#if NPY_ALLOW_THREADS - if (PyArray_ISNUMBER(mp) && PyArray_ISNUMBER(out)) { - NPY_END_THREADS } -#endif - if (!PyArray_ISNUMBER(mp) && PyErr_Occurred()) return -1; - } - - /* If the input or output is OBJECT, STRING, UNICODE, or VOID */ - /* then getitem and setitem are used for the cast */ - /* and byteswapping is handled by those methods */ - - if (PyArray_ISFLEXIBLE(mp) || PyArray_ISOBJECT(mp) || PyArray_ISOBJECT(out) || - PyArray_ISFLEXIBLE(out)) { - iswap = oswap = 0; - } - else { - iswap = PyArray_ISBYTESWAPPED(mp); - oswap = PyArray_ISBYTESWAPPED(out); - } - - return _broadcast_cast(out, mp, castfunc, iswap, oswap); -} - - -static int -_bufferedcast(PyArrayObject *out, PyArrayObject *in, - PyArray_VectorUnaryFunc *castfunc) -{ - char *inbuffer, *bptr, *optr; - char *outbuffer=NULL; - PyArrayIterObject *it_in=NULL, *it_out=NULL; - register intp i, index; - intp ncopies = PyArray_SIZE(out) / PyArray_SIZE(in); - int elsize=in->descr->elsize; - int nels = PyArray_BUFSIZE; - int el; - int inswap, outswap=0; - int obuf=!PyArray_ISCARRAY(out); - int oelsize = out->descr->elsize; - PyArray_CopySwapFunc *in_csn; - PyArray_CopySwapFunc *out_csn; - int retval = -1; - - in_csn = in->descr->f->copyswap; - out_csn = out->descr->f->copyswap; - - /* If the input or output is STRING, UNICODE, or VOID */ - /* then getitem and setitem are used for the cast */ - /* and byteswapping is handled by those methods */ - - inswap = !(PyArray_ISFLEXIBLE(in) || PyArray_ISNOTSWAPPED(in)); - - inbuffer = PyDataMem_NEW(PyArray_BUFSIZE*elsize); - if (inbuffer == NULL) return -1; - if (PyArray_ISOBJECT(in)) - memset(inbuffer, 0, PyArray_BUFSIZE*elsize); - it_in = 
(PyArrayIterObject *)PyArray_IterNew((PyObject *)in); - if (it_in == NULL) goto exit; - - if (obuf) { - outswap = !(PyArray_ISFLEXIBLE(out) || \ - PyArray_ISNOTSWAPPED(out)); - outbuffer = PyDataMem_NEW(PyArray_BUFSIZE*oelsize); - if (outbuffer == NULL) goto exit; - if (PyArray_ISOBJECT(out)) - memset(outbuffer, 0, PyArray_BUFSIZE*oelsize); - - it_out = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); - if (it_out == NULL) goto exit; - - nels = MIN(nels, PyArray_BUFSIZE); - } - - optr = (obuf) ? outbuffer: out->data; - bptr = inbuffer; - el = 0; - while(ncopies--) { - index = it_in->size; - PyArray_ITER_RESET(it_in); - while(index--) { - in_csn(bptr, it_in->dataptr, inswap, in); - bptr += elsize; - PyArray_ITER_NEXT(it_in); - el += 1; - if ((el == nels) || (index == 0)) { - /* buffer filled, do cast */ - - castfunc(inbuffer, optr, el, in, out); - - if (obuf) { - /* Copy from outbuffer to array */ - for(i=0; idataptr, - optr, outswap, - out); - optr += oelsize; - PyArray_ITER_NEXT(it_out); - } - optr = outbuffer; - } - else { - optr += out->descr->elsize * nels; - } - el = 0; - bptr = inbuffer; - } - } - } - retval = 0; - exit: - Py_XDECREF(it_in); - PyDataMem_FREE(inbuffer); - PyDataMem_FREE(outbuffer); - if (obuf) { - Py_XDECREF(it_out); - } - return retval; -} - -/*OBJECT_API - Cast to an already created array. Arrays don't have to be "broadcastable" - Only requirement is they have the same number of elements. -*/ -static int -PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) -{ - int simple; - PyArray_VectorUnaryFunc *castfunc=NULL; - int mpsize = PyArray_SIZE(mp); - - if (mpsize == 0) return 0; - if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); - return -1; - } - - if (!(mpsize == PyArray_SIZE(out))) { - PyErr_SetString(PyExc_ValueError, - "arrays must have the same number of" - " elements for the cast."); - return -1; - } - - castfunc = PyArray_GetCastFunc(mp->descr, out->descr->type_num); - if (castfunc == NULL) return -1; - - - simple = ((PyArray_ISCARRAY_RO(mp) && PyArray_ISCARRAY(out)) || - (PyArray_ISFARRAY_RO(mp) && PyArray_ISFARRAY(out))); - - if (simple) { - castfunc(mp->data, out->data, mpsize, mp, out); - return 0; - } - - if (PyArray_SAMESHAPE(out, mp)) { - int iswap, oswap; - iswap = PyArray_ISBYTESWAPPED(mp) && !PyArray_ISFLEXIBLE(mp); - oswap = PyArray_ISBYTESWAPPED(out) && !PyArray_ISFLEXIBLE(out); - return _broadcast_cast(out, mp, castfunc, iswap, oswap); - } - - return _bufferedcast(out, mp, castfunc); -} - - - -/* steals reference to newtype --- acc. NULL */ -/*OBJECT_API*/ -static PyObject * -PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) -{ - - PyArrayObject *ret=NULL; - int itemsize; - int copy = 0; - int arrflags; - PyArray_Descr *oldtype; - char *msg = "cannot copy back to a read-only array"; - PyTypeObject *subtype; - - oldtype = PyArray_DESCR(arr); - - subtype = arr->ob_type; - - if (newtype == NULL) {newtype = oldtype; Py_INCREF(oldtype);} - itemsize = newtype->elsize; - if (itemsize == 0) { - PyArray_DESCR_REPLACE(newtype); - if (newtype == NULL) return NULL; - newtype->elsize = oldtype->elsize; - itemsize = newtype->elsize; - } - - /* Can't cast unless ndim-0 array, FORCECAST is specified - or the cast is safe. 
- */ - if (!(flags & FORCECAST) && !PyArray_NDIM(arr)==0 && - !PyArray_CanCastTo(oldtype, newtype)) { - Py_DECREF(newtype); - PyErr_SetString(PyExc_TypeError, - "array cannot be safely cast " \ - "to required type"); - return NULL; - } - - /* Don't copy if sizes are compatible */ - if ((flags & ENSURECOPY) || PyArray_EquivTypes(oldtype, newtype)) { - arrflags = arr->flags; - - copy = (flags & ENSURECOPY) || \ - ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) \ - || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) \ - || (arr->nd > 1 && \ - ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) \ - || ((flags & WRITEABLE) && (!(arrflags & WRITEABLE))); - - if (copy) { - if ((flags & UPDATEIFCOPY) && \ - (!PyArray_ISWRITEABLE(arr))) { - Py_DECREF(newtype); - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if ((flags & ENSUREARRAY)) { - subtype = &PyArray_Type; - } - ret = (PyArrayObject *) \ - PyArray_NewFromDescr(subtype, newtype, - arr->nd, - arr->dimensions, - NULL, NULL, - flags & FORTRAN, - (PyObject *)arr); - if (ret == NULL) return NULL; - if (PyArray_CopyInto(ret, arr) == -1) - {Py_DECREF(ret); return NULL;} - if (flags & UPDATEIFCOPY) { - ret->flags |= UPDATEIFCOPY; - ret->base = (PyObject *)arr; - PyArray_FLAGS(ret->base) &= ~WRITEABLE; - Py_INCREF(arr); - } - } - /* If no copy then just increase the reference - count and return the input */ - else { - Py_DECREF(newtype); - if ((flags & ENSUREARRAY) && - !PyArray_CheckExact(arr)) { - Py_INCREF(arr->descr); - ret = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, - arr->descr, - arr->nd, - arr->dimensions, - arr->strides, - arr->data, - arr->flags,NULL); - if (ret == NULL) return NULL; - ret->base = (PyObject *)arr; - } - else { - ret = arr; - } - Py_INCREF(arr); - } - } - - /* The desired output type is different than the input - array type and copy was not specified */ - else { - if ((flags & UPDATEIFCOPY) && \ - (!PyArray_ISWRITEABLE(arr))) { - Py_DECREF(newtype); - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if ((flags & ENSUREARRAY)) { - subtype = &PyArray_Type; - } - ret = (PyArrayObject *) \ - PyArray_NewFromDescr(subtype, newtype, - arr->nd, arr->dimensions, - NULL, NULL, - flags & FORTRAN, - (PyObject *)arr); - if (ret == NULL) return NULL; - if (PyArray_CastTo(ret, arr) < 0) { - Py_DECREF(ret); - return NULL; - } - if (flags & UPDATEIFCOPY) { - ret->flags |= UPDATEIFCOPY; - ret->base = (PyObject *)arr; - PyArray_FLAGS(ret->base) &= ~WRITEABLE; - Py_INCREF(arr); - } - } - return (PyObject *)ret; -} - -/* new reference */ -static PyArray_Descr * -_array_typedescr_fromstr(char *str) -{ - PyArray_Descr *descr; - int type_num; - char typechar; - int size; - char msg[] = "unsupported typestring"; - int swap; - char swapchar; - - swapchar = str[0]; - str += 1; - -#define _MY_FAIL { \ - PyErr_SetString(PyExc_ValueError, msg); \ - return NULL; \ - } - - typechar = str[0]; - size = atoi(str + 1); - switch (typechar) { - case 'b': - if (size == sizeof(Bool)) - type_num = PyArray_BOOL; - else _MY_FAIL - break; - case 'u': - if (size == sizeof(uintp)) - type_num = PyArray_UINTP; - else if (size == sizeof(char)) - type_num = PyArray_UBYTE; - else if (size == sizeof(short)) - type_num = PyArray_USHORT; - else if (size == sizeof(ulong)) - type_num = PyArray_ULONG; - else if (size == sizeof(int)) - type_num = PyArray_UINT; - else if (size == sizeof(ulonglong)) - type_num = PyArray_ULONGLONG; - else _MY_FAIL - break; - case 'i': - if (size == sizeof(intp)) - type_num = PyArray_INTP; - else if (size == 
sizeof(char)) - type_num = PyArray_BYTE; - else if (size == sizeof(short)) - type_num = PyArray_SHORT; - else if (size == sizeof(long)) - type_num = PyArray_LONG; - else if (size == sizeof(int)) - type_num = PyArray_INT; - else if (size == sizeof(longlong)) - type_num = PyArray_LONGLONG; - else _MY_FAIL - break; - case 'f': - if (size == sizeof(float)) - type_num = PyArray_FLOAT; - else if (size == sizeof(double)) - type_num = PyArray_DOUBLE; - else if (size == sizeof(longdouble)) - type_num = PyArray_LONGDOUBLE; - else _MY_FAIL - break; - case 'c': - if (size == sizeof(float)*2) - type_num = PyArray_CFLOAT; - else if (size == sizeof(double)*2) - type_num = PyArray_CDOUBLE; - else if (size == sizeof(longdouble)*2) - type_num = PyArray_CLONGDOUBLE; - else _MY_FAIL - break; - case 'O': - if (size == sizeof(PyObject *)) - type_num = PyArray_OBJECT; - else _MY_FAIL - break; - case PyArray_STRINGLTR: - type_num = PyArray_STRING; - break; - case PyArray_UNICODELTR: - type_num = PyArray_UNICODE; - size <<= 2; - break; - case 'V': - type_num = PyArray_VOID; - break; - default: - _MY_FAIL - } - -#undef _MY_FAIL - - descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; - swap = !PyArray_ISNBO(swapchar); - if (descr->elsize == 0 || swap) { - /* Need to make a new PyArray_Descr */ - PyArray_DESCR_REPLACE(descr); - if (descr==NULL) return NULL; - if (descr->elsize == 0) - descr->elsize = size; - if (swap) - descr->byteorder = swapchar; - } - return descr; -} - -/*OBJECT_API */ -static PyObject * -PyArray_FromStructInterface(PyObject *input) -{ - PyArray_Descr *thetype=NULL; - char buf[40]; - PyArrayInterface *inter; - PyObject *attr, *r; - char endian = PyArray_NATBYTE; - - attr = PyObject_GetAttrString(input, "__array_struct__"); - if (attr == NULL) { - PyErr_Clear(); - return Py_NotImplemented; - } - if (!PyCObject_Check(attr)) goto fail; - inter = PyCObject_AsVoidPtr(attr); - if (inter->two != 2) goto fail; - if ((inter->flags & NOTSWAPPED) != NOTSWAPPED) { - endian = PyArray_OPPBYTE; - inter->flags &= ~NOTSWAPPED; - } - - if (inter->flags & ARR_HAS_DESCR) { - if (PyArray_DescrConverter(inter->descr, &thetype) == PY_FAIL) { - thetype = NULL; - PyErr_Clear(); - } - } - - if (thetype == NULL) { - snprintf(buf, 40, "%c%c%d", endian, inter->typekind, inter->itemsize); - if (!(thetype=_array_typedescr_fromstr(buf))) { - Py_DECREF(attr); - return NULL; - } - } - - r = PyArray_NewFromDescr(&PyArray_Type, thetype, - inter->nd, inter->shape, - inter->strides, inter->data, - inter->flags, NULL); - Py_INCREF(input); - PyArray_BASE(r) = input; - Py_DECREF(attr); - PyArray_UpdateFlags((PyArrayObject *)r, UPDATE_ALL); - return r; - - fail: - PyErr_SetString(PyExc_ValueError, "invalid __array_struct__"); - Py_DECREF(attr); - return NULL; -} - -#define PyIntOrLong_Check(obj) (PyInt_Check(obj) || PyLong_Check(obj)) - -/*OBJECT_API*/ -static PyObject * -PyArray_FromInterface(PyObject *input) -{ - PyObject *attr=NULL, *item=NULL; - PyObject *tstr=NULL, *shape=NULL; - PyObject *inter=NULL; - PyObject *base=NULL; - PyArrayObject *ret; - PyArray_Descr *type=NULL; - char *data; - Py_ssize_t buffer_len; - int res, i, n; - intp dims[MAX_DIMS], strides[MAX_DIMS]; - int dataflags = BEHAVED; - - /* Get the memory from __array_data__ and __array_offset__ */ - /* Get the shape */ - /* Get the typestring -- ignore array_descr */ - /* Get the strides */ - - inter = PyObject_GetAttrString(input, "__array_interface__"); - if (inter == NULL) {PyErr_Clear(); return Py_NotImplemented;} - if (!PyDict_Check(inter)) 
{Py_DECREF(inter); return Py_NotImplemented;} - - shape = PyDict_GetItemString(inter, "shape"); - if (shape == NULL) {Py_DECREF(inter); return Py_NotImplemented;} - tstr = PyDict_GetItemString(inter, "typestr"); - if (tstr == NULL) {Py_DECREF(inter); return Py_NotImplemented;} - - attr = PyDict_GetItemString(inter, "data"); - base = input; - if ((attr == NULL) || (attr==Py_None) || (!PyTuple_Check(attr))) { - if (attr && (attr != Py_None)) item=attr; - else item=input; - res = PyObject_AsWriteBuffer(item, (void **)&data, - &buffer_len); - if (res < 0) { - PyErr_Clear(); - res = PyObject_AsReadBuffer(item, (const void **)&data, - &buffer_len); - if (res < 0) goto fail; - dataflags &= ~WRITEABLE; - } - attr = PyDict_GetItemString(inter, "offset"); - if (attr) { - longlong num = PyLong_AsLongLong(attr); - if (error_converting(num)) { - PyErr_SetString(PyExc_TypeError, - "offset "\ - "must be an integer"); - goto fail; - } - data += num; - } - base = item; - } - else { - PyObject *dataptr; - if (PyTuple_GET_SIZE(attr) != 2) { - PyErr_SetString(PyExc_TypeError, - "data must return " \ - "a 2-tuple with (data pointer "\ - "integer, read-only flag)"); - goto fail; - } - dataptr = PyTuple_GET_ITEM(attr, 0); - if (PyString_Check(dataptr)) { - res = sscanf(PyString_AsString(dataptr), - "%p", (void **)&data); - if (res < 1) { - PyErr_SetString(PyExc_TypeError, - "data string cannot be " \ - "converted"); - goto fail; - } - } - else if (PyIntOrLong_Check(dataptr)) { - data = PyLong_AsVoidPtr(dataptr); - } - else { - PyErr_SetString(PyExc_TypeError, "first element " \ - "of data tuple must be integer" \ - " or string."); - goto fail; - } - if (PyObject_IsTrue(PyTuple_GET_ITEM(attr,1))) { - dataflags &= ~WRITEABLE; - } - } - attr = tstr; - if (!PyString_Check(attr)) { - PyErr_SetString(PyExc_TypeError, "typestr must be a string"); - goto fail; - } - type = _array_typedescr_fromstr(PyString_AS_STRING(attr)); - if (type==NULL) goto fail; - attr = shape; - if (!PyTuple_Check(attr)) { - PyErr_SetString(PyExc_TypeError, "shape must be a tuple"); - Py_DECREF(type); - goto fail; - } - n = PyTuple_GET_SIZE(attr); - for (i=0; ibase = base; - - attr = PyDict_GetItemString(inter, "strides"); - if (attr != NULL && attr != Py_None) { - if (!PyTuple_Check(attr)) { - PyErr_SetString(PyExc_TypeError, - "strides must be a tuple"); - Py_DECREF(ret); - return NULL; - } - if (n != PyTuple_GET_SIZE(attr)) { - PyErr_SetString(PyExc_ValueError, - "mismatch in length of "\ - "strides and shape"); - Py_DECREF(ret); - return NULL; - } - for (i=0; istrides, strides, n*sizeof(intp)); - } - else PyErr_Clear(); - PyArray_UpdateFlags(ret, UPDATE_ALL); - Py_DECREF(inter); - return (PyObject *)ret; - - fail: - Py_XDECREF(inter); - return NULL; -} - -/*OBJECT_API*/ -static PyObject * -PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) -{ - PyObject *new; - PyObject *array_meth; - - array_meth = PyObject_GetAttrString(op, "__array__"); - if (array_meth == NULL) {PyErr_Clear(); return Py_NotImplemented;} - if (context == NULL) { - if (typecode == NULL) new = PyObject_CallFunction(array_meth, - NULL); - else new = PyObject_CallFunction(array_meth, "O", typecode); - } - else { - if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, "OO", Py_None, - context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - new = PyObject_CallFunction(array_meth, ""); - } - } - else { - new = PyObject_CallFunction(array_meth, "OO", - typecode, context); - if (new == NULL && \ - 
PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - new = PyObject_CallFunction(array_meth, "O", - typecode); - } - } - } - Py_DECREF(array_meth); - if (new == NULL) return NULL; - if (!PyArray_Check(new)) { - PyErr_SetString(PyExc_ValueError, - "object __array__ method not " \ - "producing an array"); - Py_DECREF(new); - return NULL; - } - return new; -} - -/* Does not check for ENSURECOPY and NOTSWAPPED in flags */ -/* Steals a reference to newtype --- which can be NULL */ -/*OBJECT_API*/ -static PyObject * -PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, - int max_depth, int flags, PyObject *context) -{ - /* This is the main code to make a NumPy array from a Python - Object. It is called from lot's of different places which - is why there are so many checks. The comments try to - explain some of the checks. */ - - PyObject *r=NULL; - int seq = FALSE; - - /* Is input object already an array? */ - /* This is where the flags are used */ - if (PyArray_Check(op)) - r = PyArray_FromArray((PyArrayObject *)op, newtype, flags); - else if (PyArray_IsScalar(op, Generic)) { - if (flags & UPDATEIFCOPY) goto err; - r = PyArray_FromScalar(op, newtype); - } else if (newtype == NULL && - (newtype = _array_find_python_scalar_type(op))) { - if (flags & UPDATEIFCOPY) goto err; - r = Array_FromPyScalar(op, newtype); - } - else if (PyArray_HasArrayInterfaceType(op, newtype, context, r)) { - PyObject *new; - if (r == NULL) {Py_XDECREF(newtype); return NULL;} - if (newtype != NULL || flags != 0) { - new = PyArray_FromArray((PyArrayObject *)r, newtype, - flags); - Py_DECREF(r); - r = new; - } - } - else { - int isobject=0; - if (flags & UPDATEIFCOPY) goto err; - if (newtype == NULL) { - newtype = _array_find_type(op, NULL, MAX_DIMS); - } - else if (newtype->type_num == PyArray_OBJECT) { - isobject = 1; - } - if (PySequence_Check(op)) { - PyObject *thiserr=NULL; - /* necessary but not sufficient */ - Py_INCREF(newtype); - r = Array_FromSequence(op, newtype, flags & FORTRAN, - min_depth, max_depth); - if (r == NULL && (thiserr=PyErr_Occurred())) { - if (PyErr_GivenExceptionMatches(thiserr, - PyExc_MemoryError)) - return NULL; - /* If object was explicitly requested, - then try nested list object array creation - */ - PyErr_Clear(); - if (isobject) { - Py_INCREF(newtype); - r = ObjectArray_FromNestedList \ - (op, newtype, flags & FORTRAN); - seq = TRUE; - Py_DECREF(newtype); - } - } - else { - seq = TRUE; - Py_DECREF(newtype); - } - } - if (!seq) - r = Array_FromPyScalar(op, newtype); - } - - /* If we didn't succeed return NULL */ - if (r == NULL) return NULL; - - /* Be sure we succeed here */ - - if(!PyArray_Check(r)) { - PyErr_SetString(PyExc_RuntimeError, - "internal error: PyArray_FromAny "\ - "not producing an array"); - Py_DECREF(r); - return NULL; - } - - if (min_depth != 0 && ((PyArrayObject *)r)->nd < min_depth) { - PyErr_SetString(PyExc_ValueError, - "object of too small depth for desired array"); - Py_DECREF(r); - return NULL; - } - if (max_depth != 0 && ((PyArrayObject *)r)->nd > max_depth) { - PyErr_SetString(PyExc_ValueError, - "object too deep for desired array"); - Py_DECREF(r); - return NULL; - } - return r; - - err: - Py_XDECREF(newtype); - PyErr_SetString(PyExc_TypeError, - "UPDATEIFCOPY used for non-array input."); - return NULL; -} - -/* new reference -- accepts NULL for mintype*/ -/*OBJECT_API*/ -static PyArray_Descr * -PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) -{ - return _array_find_type(op, mintype, MAX_DIMS); -} - -/*OBJECT_API - Return 
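PyArray_FromAny above is the single entry point the rest of the file funnels through: existing arrays, array scalars, objects exporting __array_struct__, __array_interface__ or __array__, and nested sequences all end up here, and the descriptor reference is stolen whether or not the call succeeds. A minimal caller might look like the sketch below; as_double_array is a hypothetical name and an initialized interpreter with import_array() is assumed.

#include <Python.h>
#include <numpy/arrayobject.h>

/* Coerce an arbitrary Python object to a 1-d or 2-d float64 array.
   Note that PyArray_FromAny steals the descriptor reference. */
static PyObject *
as_double_array(PyObject *obj)
{
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_DOUBLE);
    if (descr == NULL) {
        return NULL;
    }
    /* min_depth=1, max_depth=2: reject 0-d and deeper-than-2-d results;
       NPY_FORCECAST permits casts that would otherwise be refused as
       unsafe; NPY_ENSUREARRAY guarantees a base-class ndarray. */
    return PyArray_FromAny(obj, descr, 1, 2,
                           NPY_FORCECAST | NPY_ENSUREARRAY, NULL);
}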
the typecode of the array a Python object would be converted - to -*/ -static int -PyArray_ObjectType(PyObject *op, int minimum_type) -{ - PyArray_Descr *intype; - PyArray_Descr *outtype; - int ret; - - intype = PyArray_DescrFromType(minimum_type); - if (intype == NULL) PyErr_Clear(); - outtype = _array_find_type(op, intype, MAX_DIMS); - ret = outtype->type_num; - Py_DECREF(outtype); - Py_XDECREF(intype); - return ret; -} - - -/* flags is any of - CONTIGUOUS, - FORTRAN, - ALIGNED, - WRITEABLE, - NOTSWAPPED, - ENSURECOPY, - UPDATEIFCOPY, - FORCECAST, - ENSUREARRAY, - ELEMENTSTRIDES - - or'd (|) together - - Any of these flags present means that the returned array should - guarantee that aspect of the array. Otherwise the returned array - won't guarantee it -- it will depend on the object as to whether or - not it has such features. - - Note that ENSURECOPY is enough - to guarantee CONTIGUOUS, ALIGNED and WRITEABLE - and therefore it is redundant to include those as well. - - BEHAVED == ALIGNED | WRITEABLE - CARRAY = CONTIGUOUS | BEHAVED - FARRAY = FORTRAN | BEHAVED - - FORTRAN can be set in the FLAGS to request a FORTRAN array. - Fortran arrays are always behaved (aligned, - notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). - - UPDATEIFCOPY flag sets this flag in the returned array if a copy is - made and the base argument points to the (possibly) misbehaved array. - When the new array is deallocated, the original array held in base - is updated with the contents of the new array. - - FORCECAST will cause a cast to occur regardless of whether or not - it is safe. -*/ - - -/* steals a reference to descr -- accepts NULL */ -/*OBJECT_API*/ -static PyObject * -PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context) -{ - PyObject *obj; - if (requires & NOTSWAPPED) { - if (!descr && PyArray_Check(op) && \ - !PyArray_ISNBO(PyArray_DESCR(op)->byteorder)) { - descr = PyArray_DescrNew(PyArray_DESCR(op)); - } - else if (descr && !PyArray_ISNBO(descr->byteorder)) { - PyArray_DESCR_REPLACE(descr); - } - if (descr) { - descr->byteorder = PyArray_NATIVE; - } - } - - obj = PyArray_FromAny(op, descr, min_depth, max_depth, - requires, context); - if (obj == NULL) return NULL; - if ((requires & ELEMENTSTRIDES) && - !PyArray_ElementStrides(obj)) { - PyObject *new; - new = PyArray_NewCopy((PyArrayObject *)obj, PyArray_ANYORDER); - Py_DECREF(obj); - obj = new; - } - return obj; -} - -/* This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, - ENSUREARRAY) */ -/* that special cases Arrays and PyArray_Scalars up front */ -/* It *steals a reference* to the object */ -/* It also guarantees that the result is PyArray_Type */ - -/* Because it decrefs op if any conversion needs to take place - so it can be used like PyArray_EnsureArray(some_function(...)) */ - -/*OBJECT_API*/ -static PyObject * -PyArray_EnsureArray(PyObject *op) -{ - PyObject *new; - - if (op == NULL) return NULL; - - if (PyArray_CheckExact(op)) return op; - - if (PyArray_Check(op)) { - new = PyArray_View((PyArrayObject *)op, NULL, &PyArray_Type); - Py_DECREF(op); - return new; - } - if (PyArray_IsScalar(op, Generic)) { - new = PyArray_FromScalar(op, NULL); - Py_DECREF(op); - return new; - } - new = PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY, NULL); - Py_DECREF(op); - return new; -} - -/*OBJECT_API*/ -static PyObject * -PyArray_EnsureAnyArray(PyObject *op) -{ - if (op && PyArray_Check(op)) return op; - return PyArray_EnsureArray(op); -} - -/*OBJECT_API - Check the 
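The flag block above is best read as a contract on the returned array: each requirement bit is guaranteed on the result, a copy is made only when needed, and NPY_ENSURECOPY alone already implies a contiguous, aligned, writeable result. Two common ways of asking for that contract from extension code are sketched below (hypothetical helper names; import_array() assumed); only PyArray_CheckFromAny honours the byte-order and element-strides requirements.

#include <Python.h>
#include <numpy/arrayobject.h>

/* A C-contiguous, aligned, writeable float64 array ("behaved" C array),
   copied only if the input does not already satisfy the requirements. */
static PyObject *
as_behaved_carray(PyObject *obj)
{
    /* NPY_CARRAY is NPY_CONTIGUOUS | NPY_ALIGNED | NPY_WRITEABLE */
    return PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_CARRAY);
}

/* Same idea, but additionally requiring native byte order, which
   PyArray_FromAny itself never checks. */
static PyObject *
as_native_carray(PyObject *obj)
{
    return PyArray_CheckFromAny(obj, NULL, 0, 0,
                                NPY_CARRAY | NPY_NOTSWAPPED, NULL);
}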
type coercion rules. -*/ -static int -PyArray_CanCastSafely(int fromtype, int totype) -{ - PyArray_Descr *from, *to; - register int felsize, telsize; - - if (fromtype == totype) return 1; - if (fromtype == PyArray_BOOL) return 1; - if (totype == PyArray_BOOL) return 0; - if (totype == PyArray_OBJECT || totype == PyArray_VOID) return 1; - if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) return 0; - - from = PyArray_DescrFromType(fromtype); - /* cancastto is a PyArray_NOTYPE terminated C-int-array of types that - the data-type can be cast to safely. - */ - if (from->f->cancastto) { - int *curtype; - curtype = from->f->cancastto; - while (*curtype != PyArray_NOTYPE) { - if (*curtype++ == totype) return 1; - } - } - if (PyTypeNum_ISUSERDEF(totype)) return 0; - - to = PyArray_DescrFromType(totype); - telsize = to->elsize; - felsize = from->elsize; - Py_DECREF(from); - Py_DECREF(to); - - switch(fromtype) { - case PyArray_BYTE: - case PyArray_SHORT: - case PyArray_INT: - case PyArray_LONG: - case PyArray_LONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISUNSIGNED(totype)) { - return 0; - } - else { - return (telsize >= felsize); - } - } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); - } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); - } - else return totype > fromtype; - case PyArray_UBYTE: - case PyArray_USHORT: - case PyArray_UINT: - case PyArray_ULONG: - case PyArray_ULONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISSIGNED(totype)) { - return (telsize > felsize); - } - else { - return (telsize >= felsize); - } - } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); - } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); - } - else return totype > fromtype; - case PyArray_FLOAT: - case PyArray_DOUBLE: - case PyArray_LONGDOUBLE: - if (PyTypeNum_ISCOMPLEX(totype)) - return ((telsize >> 1) >= felsize); - else - return (totype > fromtype); - case PyArray_CFLOAT: - case PyArray_CDOUBLE: - case PyArray_CLONGDOUBLE: - return (totype > fromtype); - case PyArray_STRING: - case PyArray_UNICODE: - return (totype > fromtype); - default: - return 0; - } -} - -/* leaves reference count alone --- cannot be NULL*/ -/*OBJECT_API*/ -static Bool -PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) -{ - int fromtype=from->type_num; - int totype=to->type_num; - Bool ret; - - ret = (Bool) PyArray_CanCastSafely(fromtype, totype); - if (ret) { /* Check String and Unicode more closely */ - if (fromtype == PyArray_STRING) { - if (totype == PyArray_STRING) { - ret = (from->elsize <= to->elsize); - } - else if (totype == PyArray_UNICODE) { - ret = (from->elsize << 2 \ - <= to->elsize); - } - } - else if (fromtype == PyArray_UNICODE) { - if (totype == PyArray_UNICODE) { - ret = (from->elsize <= to->elsize); - } - } - /* TODO: If totype is STRING or unicode - see if the length is long enough to hold the - stringified value of the object. - */ - } - return ret; -} - -/*OBJECT_API - See if array scalars can be cast. 
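The switch above reduces to a few memorable rules: bool casts safely to everything, nothing except bool casts to bool, every type casts safely to object and void, signed integers never cast safely to unsigned ones, and otherwise the destination must be at least as wide (with the half-width adjustment for float-to-complex). A few concrete probes of the function (illustrative; import_array() assumed):

#include <Python.h>
#include <numpy/arrayobject.h>

/* Sample queries against the rules implemented above. */
static void
show_cast_rules(void)
{
    /* widening (or same-size) signed -> signed integer: safe */
    printf("int -> longlong : %d\n",
           PyArray_CanCastSafely(NPY_INT, NPY_LONGLONG));
    /* signed -> unsigned of any size: never safe */
    printf("int -> ulonglong: %d\n",
           PyArray_CanCastSafely(NPY_INT, NPY_ULONGLONG));
    /* every type casts safely to object */
    printf("cdouble -> object: %d\n",
           PyArray_CanCastSafely(NPY_CDOUBLE, NPY_OBJECT));
}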
-*/ -static Bool -PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) -{ - int fromtype; - int totype; - - fromtype = _typenum_fromtypeobj((PyObject *)from, 0); - totype = _typenum_fromtypeobj((PyObject *)to, 0); - if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) - return FALSE; - return (Bool) PyArray_CanCastSafely(fromtype, totype); -} - - -/*********************** Element-wise Array Iterator ***********************/ -/* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/ -/* and Python's array iterator ***/ - -/*OBJECT_API - Get Iterator. -*/ -static PyObject * -PyArray_IterNew(PyObject *obj) -{ - PyArrayIterObject *it; - int i, nd; - PyArrayObject *ao = (PyArrayObject *)obj; - - if (!PyArray_Check(ao)) { - PyErr_BadInternalCall(); - return NULL; - } - - it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ - if (it == NULL) - return NULL; - - nd = ao->nd; - PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; - Py_INCREF(ao); - it->ao = ao; - it->size = PyArray_SIZE(ao); - it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; - for (i=0; i < nd; i++) { - it->dims_m1[i] = ao->dimensions[i] - 1; - it->strides[i] = ao->strides[i]; - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - ao->dimensions[nd-i]; - } - PyArray_ITER_RESET(it); - - return (PyObject *)it; -} - -/*MULTIARRAY_API - Get Iterator broadcast to a particular shape -*/ -static PyObject * -PyArray_BroadcastToShape(PyObject *obj, intp *dims, int nd) -{ - PyArrayIterObject *it; - int i, diff, j, compat, k; - PyArrayObject *ao = (PyArrayObject *)obj; - - if (ao->nd > nd) goto err; - compat = 1; - diff = j = nd - ao->nd; - for (i=0; ind; i++, j++) { - if (ao->dimensions[i] == 1) continue; - if (ao->dimensions[i] != dims[j]) { - compat = 0; - break; - } - } - if (!compat) goto err; - - it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - - if (it == NULL) - return NULL; - - PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; - Py_INCREF(ao); - it->ao = ao; - it->size = PyArray_MultiplyList(dims, nd); - it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; - for (i=0; i < nd; i++) { - it->dims_m1[i] = dims[i] - 1; - k = i - diff; - if ((k < 0) || - ao->dimensions[k] != dims[i]) { - it->contiguous = 0; - it->strides[i] = 0; - } - else { - it->strides[i] = ao->strides[k]; - } - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - dims[nd-i]; - } - PyArray_ITER_RESET(it); - - return (PyObject *)it; - - err: - PyErr_SetString(PyExc_ValueError, "array is not broadcastable to "\ - "correct shape"); - return NULL; -} - - - - - -/*OBJECT_API - Get Iterator that iterates over all but one axis (don't use this with - PyArray_ITER_GOTO1D). The axis will be over-written if negative - with the axis having the smallest stride. 
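PyArray_IterNew above builds the flat iterator the rest of this file relies on; the factors and backstrides bookkeeping is hidden behind the public ITER macros, which walk any array in C order regardless of its strides. A small consumer might look like this sketch (sum_with_flatiter is a hypothetical name, the input is assumed to be NPY_DOUBLE, and import_array() is assumed to have run):

#include <Python.h>
#include <numpy/arrayobject.h>

/* Sum all elements of a float64 array by walking it with a flat iterator.
   The iterator visits elements in C order even for strided input. */
static double
sum_with_flatiter(PyArrayObject *arr)      /* must be NPY_DOUBLE */
{
    double total = 0.0;
    PyObject *it = PyArray_IterNew((PyObject *)arr);

    if (it == NULL) {
        return -1.0;      /* exception already set */
    }
    while (PyArray_ITER_NOTDONE(it)) {
        total += *(double *)PyArray_ITER_DATA(it);
        PyArray_ITER_NEXT(it);
    }
    Py_DECREF(it);
    return total;
}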
-*/ -static PyObject * -PyArray_IterAllButAxis(PyObject *obj, int *inaxis) -{ - PyArrayIterObject *it; - int axis; - it = (PyArrayIterObject *)PyArray_IterNew(obj); - if (it == NULL) return NULL; - - if (PyArray_NDIM(obj)==0) - return (PyObject *)it; - if (*inaxis < 0) { - int i, minaxis=0; - intp minstride=0; - i = 0; - while (minstride==0 && i 0 && - PyArray_STRIDE(obj, i) < minstride) { - minaxis = i; - minstride = PyArray_STRIDE(obj,i); - } - } - *inaxis = minaxis; - } - axis = *inaxis; - /* adjust so that will not iterate over axis */ - it->contiguous = 0; - if (it->size != 0) { - it->size /= PyArray_DIM(obj,axis); - } - it->dims_m1[axis] = 0; - it->backstrides[axis] = 0; - - /* (won't fix factors so don't use - PyArray_ITER_GOTO1D with this iterator) */ - return (PyObject *)it; -} - - -/* don't use with PyArray_ITER_GOTO1D because factors are not - adjusted */ - -/*OBJECT_API - Adjusts previously broadcasted iterators so that the axis with - the smallest sum of iterator strides is not iterated over. - Returns dimension which is smallest in the range [0,multi->nd). - A -1 is returned if multi->nd == 0. -*/ -static int -PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) -{ - PyArrayIterObject *it; - int i, j; - int axis; - intp smallest; - intp sumstrides[NPY_MAXDIMS]; - - if (multi->nd == 0) return -1; - - - for (i=0; ind; i++) { - sumstrides[i] = 0; - for (j=0; jnumiter; j++) { - sumstrides[i] += multi->iters[j]->strides[i]; - } - } - axis=0; - smallest = sumstrides[0]; - /* Find longest dimension */ - for (i=1; ind; i++) { - if (sumstrides[i] < smallest) { - axis = i; - smallest = sumstrides[i]; - } - } - - for (i=0; inumiter; i++) { - it = multi->iters[i]; - it->contiguous = 0; - if (it->size != 0) - it->size /= (it->dims_m1[axis]+1); - it->dims_m1[axis] = 0; - it->backstrides[axis] = 0; - } - - multi->size = multi->iters[0]->size; - return axis; -} - -/* Returns an array scalar holding the element desired */ - -static PyObject * -arrayiter_next(PyArrayIterObject *it) -{ - PyObject *ret; - - if (it->index < it->size) { - ret = PyArray_ToScalar(it->dataptr, it->ao); - PyArray_ITER_NEXT(it); - return ret; - } - return NULL; -} - -static void -arrayiter_dealloc(PyArrayIterObject *it) -{ - Py_XDECREF(it->ao); - _pya_free(it); -} - -static Py_ssize_t -iter_length(PyArrayIterObject *self) -{ - return self->size; -} - - -static PyObject * -iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind) -{ - int index, strides, itemsize; - intp count=0; - char *dptr, *optr; - PyObject *r; - int swap; - PyArray_CopySwapFunc *copyswap; - - - if (ind->nd != 1) { - PyErr_SetString(PyExc_ValueError, - "boolean index array should have 1 dimension"); - return NULL; - } - index = ind->dimensions[0]; - if (index > self->size) { - PyErr_SetString(PyExc_ValueError, - "too many boolean indices"); - return NULL; - } - - strides = ind->strides[0]; - dptr = ind->data; - /* Get size of return array */ - while(index--) { - if (*((Bool *)dptr) != 0) - count++; - dptr += strides; - } - itemsize = self->ao->descr->elsize; - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(self->ao->ob_type, - self->ao->descr, 1, &count, - NULL, NULL, - 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - - /* Set up loop */ - optr = PyArray_DATA(r); - index = ind->dimensions[0]; - dptr = ind->data; - - copyswap = self->ao->descr->f->copyswap; - /* Loop over Boolean array */ - swap = (PyArray_ISNOTSWAPPED(self->ao) != PyArray_ISNOTSWAPPED(r)); - while(index--) { - if (*((Bool *)dptr) != 0) { - copyswap(optr, 
self->dataptr, swap, self->ao); - optr += itemsize; - } - dptr += strides; - PyArray_ITER_NEXT(self); - } - PyArray_ITER_RESET(self); - return r; -} - -static PyObject * -iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind) -{ - intp num; - PyObject *r; - PyArrayIterObject *ind_it; - int itemsize; - int swap; - char *optr; - int index; - PyArray_CopySwapFunc *copyswap; - - itemsize = self->ao->descr->elsize; - if (ind->nd == 0) { - num = *((intp *)ind->data); - if (num < 0) num += self->size; - if (num < 0 || num >= self->size) { - PyErr_Format(PyExc_IndexError, - "index %d out of bounds" \ - " 0<=index<%d", (int) num, - (int) self->size); - r = NULL; - } - else { - PyArray_ITER_GOTO1D(self, num); - r = PyArray_ToScalar(self->dataptr, self->ao); - } - PyArray_ITER_RESET(self); - return r; - } - - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(self->ao->ob_type, self->ao->descr, - ind->nd, ind->dimensions, - NULL, NULL, - 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - - optr = PyArray_DATA(r); - ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) {Py_DECREF(r); return NULL;} - index = ind_it->size; - copyswap = PyArray_DESCR(r)->f->copyswap; - swap = (PyArray_ISNOTSWAPPED(r) != PyArray_ISNOTSWAPPED(self->ao)); - while(index--) { - num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; - if (num < 0 || num >= self->size) { - PyErr_Format(PyExc_IndexError, - "index %d out of bounds" \ - " 0<=index<%d", (int) num, - (int) self->size); - Py_DECREF(ind_it); - Py_DECREF(r); - PyArray_ITER_RESET(self); - return NULL; - } - PyArray_ITER_GOTO1D(self, num); - copyswap(optr, self->dataptr, swap, r); - optr += itemsize; - PyArray_ITER_NEXT(ind_it); - } - Py_DECREF(ind_it); - PyArray_ITER_RESET(self); - return r; -} - - -static PyObject * -iter_subscript(PyArrayIterObject *self, PyObject *ind) -{ - PyArray_Descr *indtype=NULL; - intp start, step_size; - intp n_steps; - PyObject *r; - char *dptr; - int size; - PyObject *obj = NULL; - int swap; - PyArray_CopySwapFunc *copyswap; - - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; - } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) goto fail; - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); - } - - /* Tuples >1d not accepted --- i.e. 
no newaxis */ - /* Could implement this with adjusted strides - and dimensions in iterator */ - - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ - PyArray_ITER_RESET(self); - - if (PyBool_Check(ind)) { - if (PyObject_IsTrue(ind)) { - return PyArray_ToScalar(self->dataptr, self->ao); - } - else { /* empty array */ - intp ii = 0; - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(self->ao->ob_type, - self->ao->descr, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return r; - } - } - - /* Check for Integer or Slice */ - - if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, - self->size); - if (start == -1) - goto fail; - if (n_steps == RubberIndex || n_steps == PseudoIndex) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; - } - PyArray_ITER_GOTO1D(self, start) - if (n_steps == SingleIndex) { /* Integer */ - r = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - return r; - } - size = self->ao->descr->elsize; - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(self->ao->ob_type, - self->ao->descr, - 1, &n_steps, - NULL, NULL, - 0, (PyObject *)self->ao); - if (r==NULL) goto fail; - dptr = PyArray_DATA(r); - swap = !PyArray_ISNOTSWAPPED(self->ao); - copyswap = PyArray_DESCR(r)->f->copyswap; - while(n_steps--) { - copyswap(dptr, self->dataptr, swap, r); - start += step_size; - PyArray_ITER_GOTO1D(self, start) - dptr += size; - } - PyArray_ITER_RESET(self); - return r; - } - - /* convert to INTP array if Integer array scalar or List */ - - indtype = PyArray_DescrFromType(PyArray_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - if (obj == NULL) goto fail; - } - else { - Py_INCREF(ind); - obj = ind; - } - - if (PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE(obj)==PyArray_BOOL) { - r = iter_subscript_Bool(self, (PyArrayObject *)obj); - Py_DECREF(indtype); - } - /* Check for integer array */ - else if (PyArray_ISINTEGER(obj)) { - PyObject *new; - new = PyArray_FromAny(obj, indtype, 0, 0, - FORCECAST | ALIGNED, NULL); - if (new==NULL) goto fail; - Py_DECREF(obj); - obj = new; - r = iter_subscript_int(self, (PyArrayObject *)obj); - } - else { - goto fail; - } - Py_DECREF(obj); - return r; - } - else Py_DECREF(indtype); - - - fail: - if (!PyErr_Occurred()) - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - Py_XDECREF(indtype); - Py_XDECREF(obj); - return NULL; - -} - - -static int -iter_ass_sub_Bool(PyArrayIterObject *self, PyArrayObject *ind, - PyArrayIterObject *val, int swap) -{ - int index, strides; - char *dptr; - PyArray_CopySwapFunc *copyswap; - - if (ind->nd != 1) { - PyErr_SetString(PyExc_ValueError, - "boolean index array should have 1 dimension"); - return -1; - } - - index = ind->dimensions[0]; - if (index > self->size) { - PyErr_SetString(PyExc_ValueError, - "boolean index array has too many values"); - return -1; - } - - strides = ind->strides[0]; - dptr = ind->data; - PyArray_ITER_RESET(self); - /* Loop over Boolean array */ - copyswap = self->ao->descr->f->copyswap; - while(index--) { - if (*((Bool *)dptr) != 0) { - copyswap(self->dataptr, val->dataptr, swap, self->ao); - PyArray_ITER_NEXT(val); - if (val->index==val->size) - PyArray_ITER_RESET(val); - } - dptr += strides; - PyArray_ITER_NEXT(self); - } - PyArray_ITER_RESET(self); - return 0; -} - -static int 
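iter_subscript above is what a.flat[...] resolves to: booleans, integer index arrays, single integers and slices each take their own branch and produce either an array scalar or a 1-d copy. From C the same path is reached through ordinary attribute and item access, as in the sketch below (hypothetical helper, Python 2 API, import_array() assumed).

#include <Python.h>
#include <numpy/arrayobject.h>

/* Equivalent of arr.flat[start:stop]: returns a 1-d copy of that flat
   slice by routing through the iterator's mp_subscript (iter_subscript). */
static PyObject *
flat_slice(PyObject *arr, long start, long stop)
{
    PyObject *flat, *lo, *hi, *slice, *result = NULL;

    flat = PyObject_GetAttrString(arr, "flat");
    if (flat == NULL) {
        return NULL;
    }
    lo = PyInt_FromLong(start);
    hi = PyInt_FromLong(stop);
    slice = (lo && hi) ? PySlice_New(lo, hi, NULL) : NULL;
    Py_XDECREF(lo);
    Py_XDECREF(hi);
    if (slice != NULL) {
        result = PyObject_GetItem(flat, slice);
        Py_DECREF(slice);
    }
    Py_DECREF(flat);
    return result;
}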
-iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, - PyArrayIterObject *val, int swap) -{ - PyArray_Descr *typecode; - intp num; - PyArrayIterObject *ind_it; - int index; - PyArray_CopySwapFunc *copyswap; - - typecode = self->ao->descr; - copyswap = self->ao->descr->f->copyswap; - if (ind->nd == 0) { - num = *((intp *)ind->data); - PyArray_ITER_GOTO1D(self, num); - copyswap(self->dataptr, val->dataptr, swap, self->ao); - return 0; - } - ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) return -1; - index = ind_it->size; - while(index--) { - num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; - if ((num < 0) || (num >= self->size)) { - PyErr_Format(PyExc_IndexError, - "index %d out of bounds" \ - " 0<=index<%d", (int) num, - (int) self->size); - Py_DECREF(ind_it); - return -1; - } - PyArray_ITER_GOTO1D(self, num); - copyswap(self->dataptr, val->dataptr, swap, self->ao); - PyArray_ITER_NEXT(ind_it); - PyArray_ITER_NEXT(val); - if (val->index == val->size) - PyArray_ITER_RESET(val); - } - Py_DECREF(ind_it); - return 0; -} - -static int -iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) -{ - PyObject *arrval=NULL; - PyArrayIterObject *val_it=NULL; - PyArray_Descr *type; - PyArray_Descr *indtype=NULL; - int swap, retval=-1; - intp start, step_size; - intp n_steps; - PyObject *obj=NULL; - PyArray_CopySwapFunc *copyswap; - - - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; - } - - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) goto finish; - ind = PyTuple_GET_ITEM(ind, 0); - } - - type = self->ao->descr; - - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ - - if (PyBool_Check(ind)) { - retval = 0; - if (PyObject_IsTrue(ind)) { - retval = type->f->setitem(val, self->dataptr, self->ao); - } - goto finish; - } - - if (PySequence_Check(ind) || PySlice_Check(ind)) goto skip; - start = PyArray_PyIntAsIntp(ind); - if (start==-1 && PyErr_Occurred()) PyErr_Clear(); - else { - if (start < -self->size || start >= self->size) { - PyErr_Format(PyExc_ValueError, - "index (%" NPY_INTP_FMT \ - ") out of range", start); - goto finish; - } - retval = 0; - PyArray_ITER_GOTO1D(self, start); - retval = type->f->setitem(val, self->dataptr, self->ao); - PyArray_ITER_RESET(self); - if (retval < 0) { - PyErr_SetString(PyExc_ValueError, - "Error setting single item of array."); - } - goto finish; - } - - skip: - Py_INCREF(type); - arrval = PyArray_FromAny(val, type, 0, 0, 0, NULL); - if (arrval==NULL) return -1; - val_it = (PyArrayIterObject *)PyArray_IterNew(arrval); - if (val_it==NULL) goto finish; - if (val_it->size == 0) {retval = 0; goto finish;} - - copyswap = PyArray_DESCR(arrval)->f->copyswap; - swap = (PyArray_ISNOTSWAPPED(self->ao)!=PyArray_ISNOTSWAPPED(arrval)); - - /* Check Slice */ - - if (PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, - self->size); - if (start == -1) goto finish; - if (n_steps == RubberIndex || n_steps == PseudoIndex) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto finish; - } - PyArray_ITER_GOTO1D(self, start); - if (n_steps == SingleIndex) { /* Integer */ - copyswap(self->dataptr, PyArray_DATA(arrval), - swap, arrval); - PyArray_ITER_RESET(self); - retval=0; - goto finish; - } - while(n_steps--) { - copyswap(self->dataptr, val_it->dataptr, - swap, arrval); - start += 
step_size; - PyArray_ITER_GOTO1D(self, start) - PyArray_ITER_NEXT(val_it); - if (val_it->index == val_it->size) - PyArray_ITER_RESET(val_it); - } - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } - - /* convert to INTP array if Integer array scalar or List */ - - indtype = PyArray_DescrFromType(PyArray_INTP); - if (PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; - } - - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE(obj)==PyArray_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, swap) < 0) - goto finish; - retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER(obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - FORCECAST | BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new==NULL) goto finish; - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, swap) < 0) - goto finish; - retval=0; - } - } - - finish: - if (!PyErr_Occurred() && retval < 0) - PyErr_SetString(PyExc_IndexError, - "unsupported iterator index"); - Py_XDECREF(indtype); - Py_XDECREF(obj); - Py_XDECREF(val_it); - Py_XDECREF(arrval); - return retval; - -} - - -static PyMappingMethods iter_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)iter_length, /*mp_length*/ -#else - (inquiry)iter_length, /*mp_length*/ -#endif - (binaryfunc)iter_subscript, /*mp_subscript*/ - (objobjargproc)iter_ass_subscript, /*mp_ass_subscript*/ -}; - - - -static PyObject * -iter_array(PyArrayIterObject *it, PyObject *op) -{ - - PyObject *r; - intp size; - - /* Any argument ignored */ - - /* Two options: - 1) underlying array is contiguous - -- return 1-d wrapper around it - 2) underlying array is not contiguous - -- make new 1-d contiguous array with updateifcopy flag set - to copy back to the old array - */ - - size = PyArray_SIZE(it->ao); - Py_INCREF(it->ao->descr); - if (PyArray_ISCONTIGUOUS(it->ao)) { - r = PyArray_NewFromDescr(&PyArray_Type, - it->ao->descr, - 1, &size, - NULL, it->ao->data, - it->ao->flags, - (PyObject *)it->ao); - if (r==NULL) return NULL; - } - else { - r = PyArray_NewFromDescr(&PyArray_Type, - it->ao->descr, - 1, &size, - NULL, NULL, - 0, (PyObject *)it->ao); - if (r==NULL) return NULL; - if (_flat_copyinto(r, (PyObject *)it->ao, - PyArray_CORDER) < 0) { - Py_DECREF(r); - return NULL; - } - PyArray_FLAGS(r) |= UPDATEIFCOPY; - it->ao->flags &= ~WRITEABLE; - } - Py_INCREF(it->ao); - PyArray_BASE(r) = (PyObject *)it->ao; - return r; - -} - -static PyObject * -iter_copy(PyArrayIterObject *it, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) return NULL; - return PyArray_Flatten(it->ao, 0); -} - -static PyMethodDef iter_methods[] = { - /* to get array */ - {"__array__", (PyCFunction)iter_array, 1, NULL}, - {"copy", (PyCFunction)iter_copy, 1, NULL}, - {NULL, NULL} /* sentinel */ -}; - -static PyObject * -iter_richcompare(PyArrayIterObject *self, PyObject *other, int cmp_op) -{ - PyArrayObject *new; - PyObject *ret; - new = (PyArrayObject *)iter_array(self, NULL); - if (new == NULL) return NULL; - ret = array_richcompare(new, other, cmp_op); - Py_DECREF(new); - return ret; -} - - -static PyMemberDef iter_members[] = { - {"base", T_OBJECT, offsetof(PyArrayIterObject, ao), RO, NULL}, - {"index", T_INT, offsetof(PyArrayIterObject, index), RO, NULL}, - {NULL}, -}; - -static PyObject * -iter_coords_get(PyArrayIterObject *self) -{ - int nd; - nd = self->ao->nd; - if 
(self->contiguous) { /* coordinates not kept track of --- need to generate - from index */ - intp val; - int i; - val = self->index; - for (i=0;icoordinates[i] = val / self->factors[i]; - val = val % self->factors[i]; - } - } - return PyArray_IntTupleFromIntp(nd, self->coordinates); -} - -static PyGetSetDef iter_getsets[] = { - {"coords", - (getter)iter_coords_get, - NULL, - NULL}, - {NULL, NULL, NULL, NULL}, -}; - -static PyTypeObject PyArrayIter_Type = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.flatiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arrayiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &iter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)iter_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arrayiter_next, /* tp_iternext */ - iter_methods, /* tp_methods */ - iter_members, /* tp_members */ - iter_getsets, /* tp_getset */ - -}; - -/** END of Array Iterator **/ - - - -/*********************** Subscript Array Iterator ************************* - * * - * This object handles subscript behavior for array objects. * - * It is an iterator object with a next method * - * It abstracts the n-dimensional mapping behavior to make the looping * - * code more understandable (maybe) * - * and so that indexing can be set up ahead of time * - */ - - -static int _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters); -/* convert an indexing object to an INTP indexing array iterator - if possible -- otherwise, it is a Slice or Ellipsis object - and has to be interpreted on bind to a particular - array so leave it NULL for now. -*/ -static int -_convert_obj(PyObject *obj, PyArrayIterObject **iter) -{ - PyArray_Descr *indtype; - PyObject *arr; - - if (PySlice_Check(obj) || (obj == Py_Ellipsis)) - return 0; - else if (PyArray_Check(obj) && PyArray_ISBOOL(obj)) { - return _nonzero_indices(obj, iter); - } - else { - indtype = PyArray_DescrFromType(PyArray_INTP); - arr = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; - *iter = (PyArrayIterObject *)PyArray_IterNew(arr); - Py_DECREF(arr); - if (*iter == NULL) return -1; - } - return 1; -} - -/* Adjust dimensionality and strides for index object iterators - --- i.e. 
broadcast -*/ -/*OBJECT_API*/ -static int -PyArray_Broadcast(PyArrayMultiIterObject *mit) -{ - int i, nd, k, j; - intp tmp; - PyArrayIterObject *it; - - /* Discover the broadcast number of dimensions */ - for (i=0, nd=0; inumiter; i++) - nd = MAX(nd, mit->iters[i]->ao->nd); - mit->nd = nd; - - /* Discover the broadcast shape in each dimension */ - for (i=0; idimensions[i] = 1; - for (j=0; jnumiter; j++) { - it = mit->iters[j]; - /* This prepends 1 to shapes not already - equal to nd */ - k = i + it->ao->nd - nd; - if (k>=0) { - tmp = it->ao->dimensions[k]; - if (tmp == 1) continue; - if (mit->dimensions[i] == 1) - mit->dimensions[i] = tmp; - else if (mit->dimensions[i] != tmp) { - PyErr_SetString(PyExc_ValueError, - "shape mismatch: objects" \ - " cannot be broadcast" \ - " to a single shape"); - return -1; - } - } - } - } - - /* Reset the iterator dimensions and strides of each iterator - object -- using 0 valued strides for broadcasting */ - - tmp = PyArray_MultiplyList(mit->dimensions, mit->nd); - mit->size = tmp; - for (i=0; inumiter; i++) { - it = mit->iters[i]; - it->nd_m1 = mit->nd - 1; - it->size = tmp; - nd = it->ao->nd; - it->factors[mit->nd-1] = 1; - for (j=0; j < mit->nd; j++) { - it->dims_m1[j] = mit->dimensions[j] - 1; - k = j + nd - mit->nd; - /* If this dimension was added or shape - of underlying array was 1 */ - if ((k < 0) || \ - it->ao->dimensions[k] != mit->dimensions[j]) { - it->contiguous = 0; - it->strides[j] = 0; - } - else { - it->strides[j] = it->ao->strides[k]; - } - it->backstrides[j] = it->strides[j] * \ - it->dims_m1[j]; - if (j > 0) - it->factors[mit->nd-j-1] = \ - it->factors[mit->nd-j] * \ - mit->dimensions[mit->nd-j]; - } - PyArray_ITER_RESET(it); - } - return 0; -} - -/* Reset the map iterator to the beginning */ -static void -PyArray_MapIterReset(PyArrayMapIterObject *mit) -{ - int i,j; intp coord[MAX_DIMS]; - PyArrayIterObject *it; - PyArray_CopySwapFunc *copyswap; - - mit->index = 0; - - copyswap = mit->iters[0]->ao->descr->f->copyswap; - - if (mit->subspace != NULL) { - memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); - PyArray_ITER_RESET(mit->subspace); - for (i=0; inumiter; i++) { - it = mit->iters[i]; - PyArray_ITER_RESET(it); - j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->subspace->dataptr = mit->ait->dataptr; - mit->dataptr = mit->subspace->dataptr; - } - else { - for (i=0; inumiter; i++) { - it = mit->iters[i]; - if (it->size != 0) { - PyArray_ITER_RESET(it); - copyswap(coord+i,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - else coord[i] = 0; - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->dataptr = mit->ait->dataptr; - } - return; -} - -/* This function needs to update the state of the map iterator - and point mit->dataptr to the memory-location of the next object -*/ -static void -PyArray_MapIterNext(PyArrayMapIterObject *mit) -{ - int i, j; - intp coord[MAX_DIMS]; - PyArrayIterObject *it; - PyArray_CopySwapFunc *copyswap; - - mit->index += 1; - if (mit->index >= mit->size) return; - copyswap = mit->iters[0]->ao->descr->f->copyswap; - /* Sub-space iteration */ - if (mit->subspace != NULL) { - PyArray_ITER_NEXT(mit->subspace); - if (mit->subspace->index >= mit->subspace->size) { - /* reset coord to coordinates of - beginning of the subspace */ - memcpy(coord, mit->bscoord, - sizeof(intp)*mit->ait->ao->nd); - PyArray_ITER_RESET(mit->subspace); - for (i=0; inumiter; i++) { - it = mit->iters[i]; - 
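PyArray_Broadcast above applies the standard rule: the result rank is the largest input rank, shapes are treated as if left-padded with ones, and in every dimension the sizes must either match or be one (a one is then iterated with a zero stride). The shape part of that rule, stated on its own as a plain helper (illustrative only, not a NumPy API):

#include <Python.h>
#include <numpy/arrayobject.h>

/* Compute the broadcast of two shapes into out[] (length max(nd1, nd2)).
   Returns the broadcast rank, or -1 on a shape mismatch, matching the
   check PyArray_Broadcast performs across all of its iterators. */
static int
broadcast_shapes(int nd1, npy_intp *shape1,
                 int nd2, npy_intp *shape2, npy_intp *out)
{
    int nd = (nd1 > nd2) ? nd1 : nd2;
    int i;

    for (i = 0; i < nd; i++) {
        /* missing leading dimensions behave like size 1 */
        npy_intp d1 = (i < nd - nd1) ? 1 : shape1[i - (nd - nd1)];
        npy_intp d2 = (i < nd - nd2) ? 1 : shape2[i - (nd - nd2)];

        if (d1 == d2 || d1 == 1 || d2 == 1) {
            out[i] = (d1 == 1) ? d2 : d1;
        }
        else {
            return -1;  /* objects cannot be broadcast to a single shape */
        }
    }
    return nd;
}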
PyArray_ITER_NEXT(it); - j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->subspace->dataptr = mit->ait->dataptr; - } - mit->dataptr = mit->subspace->dataptr; - } - else { - for (i=0; inumiter; i++) { - it = mit->iters[i]; - PyArray_ITER_NEXT(it); - copyswap(coord+i,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->dataptr = mit->ait->dataptr; - } - return; -} - -/* Bind a mapiteration to a particular array */ - -/* Determine if subspace iteration is necessary. If so, - 1) Fill in mit->iteraxes - 2) Create subspace iterator - 3) Update nd, dimensions, and size. - - Subspace iteration is necessary if: arr->nd > mit->numiter -*/ - -/* Need to check for index-errors somewhere. - - Let's do it at bind time and also convert all <0 values to >0 here - as well. -*/ -static void -PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr) -{ - int subnd; - PyObject *sub, *obj=NULL; - int i, j, n, curraxis, ellipexp, noellip; - PyArrayIterObject *it; - intp dimsize; - intp *indptr; - - subnd = arr->nd - mit->numiter; - if (subnd < 0) { - PyErr_SetString(PyExc_ValueError, - "too many indices for array"); - return; - } - - mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr); - if (mit->ait == NULL) return; - - /* no subspace iteration needed. Finish up and Return */ - if (subnd == 0) { - n = arr->nd; - for (i=0; iiteraxes[i] = i; - } - goto finish; - } - - /* all indexing arrays have been converted to 0 - therefore we can extract the subspace with a simple - getitem call which will use view semantics - */ - /* But, be sure to do it with a true array. - */ - if (PyArray_CheckExact(arr)) { - sub = array_subscript_simple(arr, mit->indexobj); - } - else { - Py_INCREF(arr); - obj = PyArray_EnsureArray((PyObject *)arr); - if (obj == NULL) goto fail; - sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj); - Py_DECREF(obj); - } - - if (sub == NULL) goto fail; - mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub); - Py_DECREF(sub); - if (mit->subspace == NULL) goto fail; - - /* Expand dimensions of result */ - n = mit->subspace->ao->nd; - for (i=0; idimensions[mit->nd+i] = mit->subspace->ao->dimensions[i]; - mit->nd += n; - - /* Now, we still need to interpret the ellipsis and slice objects - to determine which axes the indexing arrays are referring to - */ - n = PyTuple_GET_SIZE(mit->indexobj); - - /* The number of dimensions an ellipsis takes up */ - ellipexp = arr->nd - n + 1; - /* Now fill in iteraxes -- remember indexing arrays have been - converted to 0's in mit->indexobj */ - curraxis = 0; - j = 0; - noellip = 1; /* Only expand the first ellipsis */ - memset(mit->bscoord, 0, sizeof(intp)*arr->nd); - for (i=0; iindexobj, i); - if (PyInt_Check(obj) || PyLong_Check(obj)) - mit->iteraxes[j++] = curraxis++; - else if (noellip && obj == Py_Ellipsis) { - curraxis += ellipexp; - noellip = 0; - } - else { - intp start=0; - intp stop, step; - /* Should be slice object or - another Ellipsis */ - if (obj == Py_Ellipsis) { - mit->bscoord[curraxis] = 0; - } - else if (!PySlice_Check(obj) || \ - (slice_GetIndices((PySliceObject *)obj, - arr->dimensions[curraxis], - &start, &stop, &step, - &dimsize) < 0)) { - PyErr_Format(PyExc_ValueError, - "unexpected object " \ - "(%s) in selection position %d", - obj->ob_type->tp_name, i); - goto fail; - } - else { - mit->bscoord[curraxis] = start; - } - curraxis += 1; - } - } - finish: - /* 
Here check the indexes (now that we have iteraxes) */ - mit->size = PyArray_MultiplyList(mit->dimensions, mit->nd); - if (mit->ait->size == 0 && mit->size != 0) { - PyErr_SetString(PyExc_ValueError, - "invalid index into a 0-size array"); - goto fail; - } - - for (i=0; inumiter; i++) { - intp indval; - it = mit->iters[i]; - PyArray_ITER_RESET(it); - dimsize = arr->dimensions[mit->iteraxes[i]]; - while(it->index < it->size) { - indptr = ((intp *)it->dataptr); - indval = *indptr; - if (indval < 0) indval += dimsize; - if (indval < 0 || indval >= dimsize) { - PyErr_Format(PyExc_IndexError, - "index (%d) out of range "\ - "(0<=index<=%d) in dimension %d", - (int) indval, (int) (dimsize-1), - mit->iteraxes[i]); - goto fail; - } - PyArray_ITER_NEXT(it); - } - PyArray_ITER_RESET(it); - } - return; - - fail: - Py_XDECREF(mit->subspace); - Py_XDECREF(mit->ait); - mit->subspace = NULL; - mit->ait = NULL; - return; -} - -/* This function takes a Boolean array and constructs index objects and - iterators as if nonzero(Bool) had been called -*/ -static int -_nonzero_indices(PyObject *myBool, PyArrayIterObject **iters) -{ - PyArray_Descr *typecode; - PyArrayObject *ba =NULL, *new=NULL; - int nd, j; - intp size, i, count; - Bool *ptr; - intp coords[MAX_DIMS], dims_m1[MAX_DIMS]; - intp *dptr[MAX_DIMS]; - - typecode=PyArray_DescrFromType(PyArray_BOOL); - ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0, - CARRAY, NULL); - if (ba == NULL) return -1; - nd = ba->nd; - for (j=0; jdata; - count = 0; - - /* pre-determine how many nonzero entries there are */ - for (i=0; iao->data; - coords[j] = 0; - dims_m1[j] = ba->dimensions[j]-1; - } - - ptr = (Bool *)ba->data; - - if (count == 0) goto finish; - - /* Loop through the Boolean array and copy coordinates - for non-zero entries */ - for (i=0; i=0; j--) { - if (coords[j] < dims_m1[j]) { - coords[j]++; - break; - } - else { - coords[j] = 0; - } - } - } - - finish: - Py_DECREF(ba); - return nd; - - fail: - for (j=0; jiters[i] = NULL; - mit->index = 0; - mit->ait = NULL; - mit->subspace = NULL; - mit->numiter = 0; - mit->consec = 1; - Py_INCREF(indexobj); - mit->indexobj = indexobj; - - if (fancy == SOBJ_LISTTUP) { - PyObject *newobj; - newobj = PySequence_Tuple(indexobj); - if (newobj == NULL) goto fail; - Py_DECREF(indexobj); - indexobj = newobj; - mit->indexobj = indexobj; - } - -#undef SOBJ_NOTFANCY -#undef SOBJ_ISFANCY -#undef SOBJ_BADARRAY -#undef SOBJ_TOOMANY -#undef SOBJ_LISTTUP - - if (oned) return (PyObject *)mit; - - /* Must have some kind of fancy indexing if we are here */ - /* indexobj is either a list, an arrayobject, or a tuple - (with at least 1 list or arrayobject or Bool object), */ - - /* convert all inputs to iterators */ - if (PyArray_Check(indexobj) && \ - (PyArray_TYPE(indexobj) == PyArray_BOOL)) { - mit->numiter = _nonzero_indices(indexobj, mit->iters); - if (mit->numiter < 0) goto fail; - mit->nd = 1; - mit->dimensions[0] = mit->iters[0]->dims_m1[0]+1; - Py_DECREF(mit->indexobj); - mit->indexobj = PyTuple_New(mit->numiter); - if (mit->indexobj == NULL) goto fail; - for (i=0; inumiter; i++) { - PyTuple_SET_ITEM(mit->indexobj, i, - PyInt_FromLong(0)); - } - } - - else if (PyArray_Check(indexobj) || !PyTuple_Check(indexobj)) { - mit->numiter = 1; - indtype = PyArray_DescrFromType(PyArray_INTP); - arr = PyArray_FromAny(indexobj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) goto fail; - mit->iters[0] = (PyArrayIterObject *)PyArray_IterNew(arr); - if (mit->iters[0] == NULL) {Py_DECREF(arr); goto fail;} - mit->nd = 
PyArray_NDIM(arr); - memcpy(mit->dimensions,PyArray_DIMS(arr),mit->nd*sizeof(intp)); - mit->size = PyArray_SIZE(arr); - Py_DECREF(arr); - Py_DECREF(mit->indexobj); - mit->indexobj = Py_BuildValue("(N)", PyInt_FromLong(0)); - } - else { /* must be a tuple */ - PyObject *obj; - PyArrayIterObject **iterp; - PyObject *new; - int numiters, j, n2; - /* Make a copy of the tuple -- we will be replacing - index objects with 0's */ - n = PyTuple_GET_SIZE(indexobj); - n2 = n; - new = PyTuple_New(n2); - if (new == NULL) goto fail; - started = 0; - nonindex = 0; - j = 0; - for (i=0; iiters + mit->numiter; - if ((numiters=_convert_obj(obj, iterp)) < 0) { - Py_DECREF(new); - goto fail; - } - if (numiters > 0) { - started = 1; - if (nonindex) mit->consec = 0; - mit->numiter += numiters; - if (numiters == 1) { - PyTuple_SET_ITEM(new,j++, - PyInt_FromLong(0)); - } - else { /* we need to grow the - new indexing object and fill - it with 0s for each of the iterators - produced */ - int k; - n2 += numiters - 1; - if (_PyTuple_Resize(&new, n2) < 0) - goto fail; - for (k=0;kindexobj); - mit->indexobj = new; - /* Store the number of iterators actually converted */ - /* These will be mapped to actual axes at bind time */ - if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) - goto fail; - } - - return (PyObject *)mit; - - fail: - Py_DECREF(mit); - return NULL; -} - - -static void -arraymapiter_dealloc(PyArrayMapIterObject *mit) -{ - int i; - Py_XDECREF(mit->indexobj); - Py_XDECREF(mit->ait); - Py_XDECREF(mit->subspace); - for (i=0; inumiter; i++) - Py_XDECREF(mit->iters[i]); - _pya_free(mit); -} - -/* The mapiter object must be created new each time. It does not work - to bind to a new array, and continue. - - This was the orginal intention, but currently that does not work. - Do not expose the MapIter_Type to Python. - - It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); - mapiter is equivalent to a[indexobj].flat but the latter gets to use - slice syntax. -*/ - -static PyTypeObject PyArrayMapIter_Type = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.mapiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arraymapiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0 /* tp_weaklist */ - -}; - -/** END of Subscript Iterator **/ - - -/*OBJECT_API - Get MultiIterator, -*/ -static PyObject * -PyArray_MultiIterNew(int n, ...) 
-{ - va_list va; - PyArrayMultiIterObject *multi; - PyObject *current; - PyObject *arr; - - int i, err=0; - - if (n < 2 || n > NPY_MAXARGS) { - PyErr_Format(PyExc_ValueError, - "Need between 2 and (%d) " \ - "array objects (inclusive).", NPY_MAXARGS); - return NULL; - } - - /* fprintf(stderr, "multi new...");*/ - - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); - PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - - for (i=0; iiters[i] = NULL; - multi->numiter = n; - multi->index = 0; - - va_start(va, n); - for (i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); - Py_DECREF(arr); - } - } - - va_end(va); - - if (!err && PyArray_Broadcast(multi) < 0) err=1; - - if (err) { - Py_DECREF(multi); - return NULL; - } - - PyArray_MultiIter_RESET(multi); - - return (PyObject *)multi; -} - -static PyObject * -arraymultiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) -{ - - int n, i; - PyArrayMultiIterObject *multi; - PyObject *arr; - - if (kwds != NULL) { - PyErr_SetString(PyExc_ValueError, - "keyword arguments not accepted."); - return NULL; - } - - n = PyTuple_Size(args); - if (n < 2 || n > NPY_MAXARGS) { - if (PyErr_Occurred()) return NULL; - PyErr_Format(PyExc_ValueError, - "Need at least two and fewer than (%d) " \ - "array objects.", NPY_MAXARGS); - return NULL; - } - - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); - PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - - multi->numiter = n; - multi->index = 0; - for (i=0; iiters[i] = NULL; - for (i=0; iiters[i] = \ - (PyArrayIterObject *)PyArray_IterNew(arr))==NULL) - goto fail; - Py_DECREF(arr); - } - if (PyArray_Broadcast(multi) < 0) goto fail; - PyArray_MultiIter_RESET(multi); - - return (PyObject *)multi; - - fail: - Py_DECREF(multi); - return NULL; -} - -static PyObject * -arraymultiter_next(PyArrayMultiIterObject *multi) -{ - PyObject *ret; - int i, n; - - n = multi->numiter; - ret = PyTuple_New(n); - if (ret == NULL) return NULL; - if (multi->index < multi->size) { - for (i=0; i < n; i++) { - PyArrayIterObject *it=multi->iters[i]; - PyTuple_SET_ITEM(ret, i, - PyArray_ToScalar(it->dataptr, it->ao)); - PyArray_ITER_NEXT(it); - } - multi->index++; - return ret; - } - return NULL; -} - -static void -arraymultiter_dealloc(PyArrayMultiIterObject *multi) -{ - int i; - - for (i=0; inumiter; i++) - Py_XDECREF(multi->iters[i]); - multi->ob_type->tp_free((PyObject *)multi); -} - -static PyObject * -arraymultiter_size_get(PyArrayMultiIterObject *self) -{ -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) self->size); -#else - if (self->size < MAX_LONG) - return PyInt_FromLong((long) self->size); - else - return PyLong_FromLongLong((longlong) self->size); -#endif -} - -static PyObject * -arraymultiter_index_get(PyArrayMultiIterObject *self) -{ -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) self->index); -#else - if (self->size < MAX_LONG) - return PyInt_FromLong((long) self->index); - else - return PyLong_FromLongLong((longlong) self->index); -#endif -} - -static PyObject * -arraymultiter_shape_get(PyArrayMultiIterObject *self) -{ - return PyArray_IntTupleFromIntp(self->nd, self->dimensions); -} - -static PyObject * -arraymultiter_iters_get(PyArrayMultiIterObject *self) -{ - PyObject *res; - int i, n; - n = self->numiter; - res = PyTuple_New(n); - if (res == NULL) return res; - for (i=0; iiters[i]); - PyTuple_SET_ITEM(res, i, (PyObject *)self->iters[i]); - } - return res; -} - 
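PyArrayMultiIterObject, exposed to Python as numpy.broadcast, packages the machinery above for C consumers: build one from several operands, then the MultiIter macros step all of them in lock-step over the broadcast shape. A minimal element-wise addition sketch follows (hypothetical name; both inputs are assumed to already be NPY_DOUBLE arrays, and import_array() is assumed to have run).

#include <Python.h>
#include <numpy/arrayobject.h>

/* c = a + b with broadcasting, for float64 inputs, using the
   multi-iterator defined above. Returns a new array or NULL on error. */
static PyObject *
broadcast_add(PyObject *a, PyObject *b)
{
    PyArrayMultiIterObject *multi;
    PyArrayObject *out;
    double *dst;

    multi = (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
    if (multi == NULL) {
        return NULL;
    }
    out = (PyArrayObject *)PyArray_SimpleNew(multi->nd, multi->dimensions,
                                             NPY_DOUBLE);
    if (out == NULL) {
        Py_DECREF(multi);
        return NULL;
    }
    /* the multi-iterator and the fresh C-contiguous output both advance
       in C order, so a running output pointer is enough */
    dst = (double *)PyArray_DATA(out);
    while (PyArray_MultiIter_NOTDONE(multi)) {
        *dst++ = *(double *)PyArray_MultiIter_DATA(multi, 0)
               + *(double *)PyArray_MultiIter_DATA(multi, 1);
        PyArray_MultiIter_NEXT(multi);
    }
    Py_DECREF(multi);
    return (PyObject *)out;
}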
-static PyGetSetDef arraymultiter_getsetlist[] = { - {"size", - (getter)arraymultiter_size_get, - NULL, NULL}, - {"index", - (getter)arraymultiter_index_get, - NULL, NULL}, - {"shape", - (getter)arraymultiter_shape_get, - NULL, NULL}, - {"iters", - (getter)arraymultiter_iters_get, - NULL, NULL}, - {NULL, NULL, NULL, NULL}, -}; - -static PyMemberDef arraymultiter_members[] = { - {"numiter", T_INT, offsetof(PyArrayMultiIterObject, numiter), - RO, NULL}, - {"nd", T_INT, offsetof(PyArrayMultiIterObject, nd), RO, NULL}, - {NULL}, -}; - -static PyObject * -arraymultiter_reset(PyArrayMultiIterObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) return NULL; - - PyArray_MultiIter_RESET(self); - Py_INCREF(Py_None); - return Py_None; -} - -static PyMethodDef arraymultiter_methods[] = { - {"reset", (PyCFunction) arraymultiter_reset, METH_VARARGS, NULL}, - {NULL, NULL}, -}; - -static PyTypeObject PyArrayMultiIter_Type = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.broadcast", /* tp_name */ - sizeof(PyArrayMultiIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arraymultiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arraymultiter_next, /* tp_iternext */ - arraymultiter_methods, /* tp_methods */ - arraymultiter_members, /* tp_members */ - arraymultiter_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - arraymultiter_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0 /* tp_weaklist */ -}; - -/*OBJECT_API*/ -static PyArray_Descr * -PyArray_DescrNewFromType(int type_num) -{ - PyArray_Descr *old; - PyArray_Descr *new; - - old = PyArray_DescrFromType(type_num); - new = PyArray_DescrNew(old); - Py_DECREF(old); - return new; -} - -/*** Array Descr Objects for dynamic types **/ - -/** There are some statically-defined PyArray_Descr objects corresponding - to the basic built-in types. - These can and should be DECREF'd and INCREF'd as appropriate, anyway. - If a mistake is made in reference counting, deallocation on these - builtins will be attempted leading to problems. - - This let's us deal with all PyArray_Descr objects using reference - counting (regardless of whether they are statically or dynamically - allocated). 
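The comment below states the invariant the descriptor code relies on: builtin descriptors are statically allocated yet still reference counted, so every PyArray_DescrFromType result must eventually be released, and anything that needs to modify a descriptor must first take a private copy with PyArray_DescrNew (which is what PyArray_DESCR_REPLACE does). A short sketch of that pattern, producing a byte-swapped float64 descriptor (hypothetical helper, import_array() assumed):

#include <Python.h>
#include <numpy/arrayobject.h>

/* Return a new float64 descriptor with the opposite byte order.
   The builtin descriptor itself is never modified; only the
   PyArray_DescrNew copy is, as the array-interface code above does. */
static PyArray_Descr *
swapped_double_descr(void)
{
    PyArray_Descr *builtin, *copy;

    builtin = PyArray_DescrFromType(NPY_DOUBLE);   /* new ref to builtin */
    if (builtin == NULL) {
        return NULL;
    }
    copy = PyArray_DescrNew(builtin);              /* private, mutable copy */
    Py_DECREF(builtin);
    if (copy == NULL) {
        return NULL;
    }
    copy->byteorder = PyArray_ISNBO(copy->byteorder) ? NPY_OPPBYTE
                                                     : NPY_NATBYTE;
    return copy;
}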
-**/ - -/* base cannot be NULL */ -/*OBJECT_API*/ -static PyArray_Descr * -PyArray_DescrNew(PyArray_Descr *base) -{ - PyArray_Descr *new; - - new = PyObject_New(PyArray_Descr, &PyArrayDescr_Type); - if (new == NULL) return NULL; - /* Don't copy PyObject_HEAD part */ - memcpy((char *)new+sizeof(PyObject), - (char *)base+sizeof(PyObject), - sizeof(PyArray_Descr)-sizeof(PyObject)); - - if (new->fields == Py_None) new->fields = NULL; - Py_XINCREF(new->fields); - Py_XINCREF(new->names); - if (new->subarray) { - new->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); - memcpy(new->subarray, base->subarray, - sizeof(PyArray_ArrayDescr)); - Py_INCREF(new->subarray->shape); - Py_INCREF(new->subarray->base); - } - Py_XINCREF(new->typeobj); - return new; -} - -/* should never be called for builtin-types unless - there is a reference-count problem -*/ -static void -arraydescr_dealloc(PyArray_Descr *self) -{ - if (self->fields == Py_None) { - fprintf(stderr, "*** Reference count error detected: \n" \ - "an attempt was made to deallocate %d (%c) ***\n", - self->type_num, self->type); - Py_INCREF(self); - Py_INCREF(self); - return; - } - Py_XDECREF(self->typeobj); - Py_XDECREF(self->names); - Py_XDECREF(self->fields); - if (self->subarray) { - Py_DECREF(self->subarray->shape); - Py_DECREF(self->subarray->base); - _pya_free(self->subarray); - } - self->ob_type->tp_free((PyObject *)self); -} - -/* we need to be careful about setting attributes because these - objects are pointed to by arrays that depend on them for interpreting - data. Currently no attributes of dtype objects can be set. -*/ -static PyMemberDef arraydescr_members[] = { - {"type", T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, - {"kind", T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, - {"char", T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, - {"num", T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, - {"byteorder", T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, - {"itemsize", T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, - {"alignment", T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, - {"flags", T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, - {"names", T_OBJECT, offsetof(PyArray_Descr, names), RO, NULL}, - {NULL}, -}; - -static PyObject * -arraydescr_subdescr_get(PyArray_Descr *self) -{ - if (self->subarray == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - return Py_BuildValue("OO", (PyObject *)self->subarray->base, - self->subarray->shape); -} - -static PyObject * -arraydescr_protocol_typestr_get(PyArray_Descr *self) -{ - char basic_=self->kind; - char endian = self->byteorder; - int size=self->elsize; - - if (endian == '=') { - endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; - } - - if (self->type_num == PyArray_UNICODE) { - size >>= 2; - } - return PyString_FromFormat("%c%c%d", endian, basic_, size); -} - -static PyObject * -arraydescr_typename_get(PyArray_Descr *self) -{ - int len; - PyTypeObject *typeobj = self->typeobj; - PyObject *res; - char *s; - static int prefix_len=0; - - if (PyTypeNum_ISUSERDEF(self->type_num)) { - s = strrchr(typeobj->tp_name, '.'); - if (s == NULL) { - res = PyString_FromString(typeobj->tp_name); - } - else { - res = PyString_FromStringAndSize(s+1, strlen(s)-1); - } - return res; - } - else { - if (prefix_len == 0) - prefix_len = strlen("numpy."); - - len = strlen(typeobj->tp_name); - if (*(typeobj->tp_name + (len-1)) == '_') - len-=1; - len -= prefix_len; - res = PyString_FromStringAndSize(typeobj->tp_name+prefix_len, len); - } - if 
(PyTypeNum_ISFLEXIBLE(self->type_num) && self->elsize != 0) { - PyObject *p; - p = PyString_FromFormat("%d", self->elsize * 8); - PyString_ConcatAndDel(&res, p); - } - return res; -} - -static PyObject * -arraydescr_base_get(PyArray_Descr *self) -{ - if (self->subarray == NULL) { - Py_INCREF(self); - return (PyObject *)self; - } - Py_INCREF(self->subarray->base); - return (PyObject *)(self->subarray->base); -} - -static PyObject * -arraydescr_shape_get(PyArray_Descr *self) -{ - if (self->subarray == NULL) { - return PyTuple_New(0); - } - if (PyTuple_Check(self->subarray->shape)) { - Py_INCREF(self->subarray->shape); - return (PyObject *)(self->subarray->shape); - } - return Py_BuildValue("(O)", self->subarray->shape); -} - -static PyObject * -arraydescr_protocol_descr_get(PyArray_Descr *self) -{ - PyObject *dobj, *res; - PyObject *_numpy_internal; - - if (self->names == NULL) { - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; - PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); - PyTuple_SET_ITEM(dobj, 1, \ - arraydescr_protocol_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} - PyList_SET_ITEM(res, 0, dobj); - return res; - } - - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - res = PyObject_CallMethod(_numpy_internal, "_array_descr", - "O", self); - Py_DECREF(_numpy_internal); - return res; -} - -/* returns 1 for a builtin type - and 2 for a user-defined data-type descriptor - return 0 if neither (i.e. it's a copy of one) -*/ -static PyObject * -arraydescr_isbuiltin_get(PyArray_Descr *self) -{ - long val; - val = 0; - if (self->fields == Py_None) val = 1; - if (PyTypeNum_ISUSERDEF(self->type_num)) val = 2; - return PyInt_FromLong(val); -} - -static int -_arraydescr_isnative(PyArray_Descr *self) -{ - if (self->names == NULL) { - return PyArray_ISNBO(self->byteorder); - } - else { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while(PyDict_Next(self->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return -1; - if (!_arraydescr_isnative(new)) return 0; - } - } - return 1; -} - -/* return Py_True if this data-type descriptor - has native byteorder if no fields are defined - - or if all sub-fields have native-byteorder if - fields are defined -*/ -static PyObject * -arraydescr_isnative_get(PyArray_Descr *self) -{ - PyObject *ret; - int retval; - retval = _arraydescr_isnative(self); - if (retval == -1) return NULL; - ret = (retval ? 
Py_True : Py_False); - Py_INCREF(ret); - return ret; -} - -static PyObject * -arraydescr_fields_get(PyArray_Descr *self) -{ - if (self->names == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - return PyDictProxy_New(self->fields); -} - -static PyObject * -arraydescr_hasobject_get(PyArray_Descr *self) -{ - PyObject *res; - if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) - res = Py_True; - else - res = Py_False; - Py_INCREF(res); - return res; -} - -static PyGetSetDef arraydescr_getsets[] = { - {"subdtype", - (getter)arraydescr_subdescr_get, - NULL, NULL}, - {"descr", - (getter)arraydescr_protocol_descr_get, - NULL, NULL}, - {"str", - (getter)arraydescr_protocol_typestr_get, - NULL, NULL}, - {"name", - (getter)arraydescr_typename_get, - NULL, NULL}, - {"base", - (getter)arraydescr_base_get, - NULL, NULL}, - {"shape", - (getter)arraydescr_shape_get, - NULL, NULL}, - {"isbuiltin", - (getter)arraydescr_isbuiltin_get, - NULL, NULL}, - {"isnative", - (getter)arraydescr_isnative_get, - NULL, NULL}, - {"fields", - (getter)arraydescr_fields_get, - NULL, NULL}, - {"hasobject", - (getter)arraydescr_hasobject_get, - NULL, NULL}, - {NULL, NULL, NULL, NULL}, -}; - -static PyObject * -arraydescr_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) -{ - PyObject *odescr; - PyArray_Descr *descr, *conv; - Bool align=FALSE; - Bool copy=FALSE; - static char *kwlist[] = {"dtype", "align", "copy", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&", - kwlist, &odescr, - PyArray_BoolConverter, &align, - PyArray_BoolConverter, ©)) - return NULL; - - if (align) { - if (!PyArray_DescrAlignConverter(odescr, &conv)) - return NULL; - } - else if (!PyArray_DescrConverter(odescr, &conv)) - return NULL; - /* Get a new copy of it unless it's already a copy */ - if (copy && conv->fields == Py_None) { - descr = PyArray_DescrNew(conv); - Py_DECREF(conv); - conv = descr; - } - return (PyObject *)conv; -} - - -/* return a tuple of (callable object, args, state). */ -static PyObject * -arraydescr_reduce(PyArray_Descr *self, PyObject *args) -{ - /* version number of this pickle type. Increment if we need to - change the format. Be sure to handle the old versions in - arraydescr_setstate. 
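/*
 * [editor's sketch -- not part of the original file] The typestr getter above
 * and __reduce__ below never emit '=': the native-order marker is resolved to
 * a concrete '<' or '>' before formatting "%c%c%d" (byte order, kind, size).
 * A minimal standalone illustration, assuming only ISO C:
 */
#include <stdio.h>

static int sketch_is_little_endian(void)
{
    unsigned int one = 1;
    return *(unsigned char *)&one == 1;   /* low byte stored first */
}

int main(void)
{
    char endian = '=';
    if (endian == '=')
        endian = sketch_is_little_endian() ? '<' : '>';
    /* prints e.g. "<f8" for a native double on a little-endian machine */
    printf("%c%c%d\n", endian, 'f', (int)sizeof(double));
    return 0;
}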
*/ - const int version = 3; - PyObject *ret, *mod, *obj; - PyObject *state; - char endian; - int elsize, alignment; - - ret = PyTuple_New(3); - if (ret == NULL) return NULL; - mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} - obj = PyObject_GetAttrString(mod, "dtype"); - Py_DECREF(mod); - if (obj == NULL) {Py_DECREF(ret); return NULL;} - PyTuple_SET_ITEM(ret, 0, obj); - if (PyTypeNum_ISUSERDEF(self->type_num) || \ - ((self->type_num == PyArray_VOID && \ - self->typeobj != &PyVoidArrType_Type))) { - obj = (PyObject *)self->typeobj; - Py_INCREF(obj); - } - else { - elsize = self->elsize; - if (self->type_num == PyArray_UNICODE) { - elsize >>= 2; - } - obj = PyString_FromFormat("%c%d",self->kind, elsize); - } - PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1)); - - /* Now return the state which is at least - byteorder, subarray, and fields */ - endian = self->byteorder; - if (endian == '=') { - endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; - } - state = PyTuple_New(8); - PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); - PyTuple_SET_ITEM(state, 1, PyString_FromFormat("%c", endian)); - PyTuple_SET_ITEM(state, 2, arraydescr_subdescr_get(self)); - if (self->names) { - Py_INCREF(self->names); - Py_INCREF(self->fields); - PyTuple_SET_ITEM(state, 3, self->names); - PyTuple_SET_ITEM(state, 4, self->fields); - } - else { - PyTuple_SET_ITEM(state, 3, Py_None); - PyTuple_SET_ITEM(state, 4, Py_None); - Py_INCREF(Py_None); - Py_INCREF(Py_None); - } - - /* for extended types it also includes elsize and alignment */ - if (PyTypeNum_ISEXTENDED(self->type_num)) { - elsize = self->elsize; - alignment = self->alignment; - } - else {elsize = -1; alignment = -1;} - - PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize)); - PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment)); - PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->hasobject)); - - PyTuple_SET_ITEM(ret, 2, state); - return ret; -} - -/* returns 1 if this data-type has an object portion - used when setting the state because hasobject is not stored. 
-*/ -static int -_descr_find_object(PyArray_Descr *self) -{ - if (self->hasobject || self->type_num == PyArray_OBJECT || - self->kind == 'O') - return NPY_OBJECT_DTYPE_FLAGS; - if (PyDescr_HASFIELDS(self)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos=0; - while (PyDict_Next(self->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - PyErr_Clear(); - return 0; - } - if (_descr_find_object(new)) { - new->hasobject = NPY_OBJECT_DTYPE_FLAGS; - return NPY_OBJECT_DTYPE_FLAGS; - } - } - } - return 0; -} - -/* state is at least byteorder, subarray, and fields but could include elsize - and alignment for EXTENDED arrays -*/ - -static PyObject * -arraydescr_setstate(PyArray_Descr *self, PyObject *args) -{ - int elsize = -1, alignment = -1; - int version = 3; - char endian; - PyObject *subarray, *fields, *names=NULL; - int incref_names = 1; - int dtypeflags=0; - - if (self->fields == Py_None) {Py_INCREF(Py_None); return Py_None;} - - if (PyTuple_GET_SIZE(args) != 1 || - !(PyTuple_Check(PyTuple_GET_ITEM(args, 0)))) { - PyErr_BadInternalCall(); - return NULL; - } - switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { - case 8: - if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &dtypeflags)) { - return NULL; - } - break; - case 7: - if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - case 6: - if (!PyArg_ParseTuple(args, "(icOOii)", &version, - &endian, &subarray, &fields, - &elsize, &alignment)) { - PyErr_Clear(); - } - break; - case 5: - version = 0; - if (!PyArg_ParseTuple(args, "(cOOii)", - &endian, &subarray, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - default: - version = -1; /* raise an error */ - } - - /* If we ever need another pickle format, increment the version - number. But we should still be able to handle the old versions. 
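/*
 * [editor's sketch -- not part of the original file] PyArray_DescrNewByteorder,
 * further below, either swaps a descriptor's byte-order character or forces it
 * to a requested one, leaves '|' (not applicable) untouched, and recurses into
 * fields and subarrays. The per-character rule, standalone; the characters 's'
 * ("swap") and '|' ("ignore") are assumptions standing in for the NPY_SWAP and
 * NPY_IGNORE constants from the NumPy headers:
 */
#include <stdio.h>

static char sketch_new_byteorder(char current, char requested, char native)
{
    if (current == '|')                 /* single-byte types: nothing to do */
        return current;
    if (requested == 's')               /* swap relative to native order */
        return (current == native || current == '=')
                   ? (native == '<' ? '>' : '<')
                   : native;
    if (requested != '|')               /* force a specific order */
        return requested;
    return current;
}

int main(void)
{
    /* on a little-endian machine the native character is '<' */
    printf("%c %c %c\n",
           sketch_new_byteorder('=', 's', '<'),   /* '>' */
           sketch_new_byteorder('>', 's', '<'),   /* '<' */
           sketch_new_byteorder('<', '>', '<'));  /* '>' */
    return 0;
}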
- */ - if (version < 0 || version > 3) { - PyErr_Format(PyExc_ValueError, - "can't handle version %d of numpy.dtype pickle", - version); - return NULL; - } - - if (version == 1 || version == 0) { - if (fields != Py_None) { - PyObject *key, *list; - key = PyInt_FromLong(-1); - list = PyDict_GetItem(fields, key); - if (!list) return NULL; - Py_INCREF(list); - names = list; - PyDict_DelItem(fields, key); - incref_names = 0; - } - else { - names = Py_None; - } - } - - - if ((fields == Py_None && names != Py_None) || \ - (names == Py_None && fields != Py_None)) { - PyErr_Format(PyExc_ValueError, - "inconsistent fields and names"); - return NULL; - } - - if (endian != '|' && - PyArray_IsNativeByteOrder(endian)) endian = '='; - - self->byteorder = endian; - if (self->subarray) { - Py_XDECREF(self->subarray->base); - Py_XDECREF(self->subarray->shape); - _pya_free(self->subarray); - } - self->subarray = NULL; - - if (subarray != Py_None) { - self->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); - self->subarray->base = (PyArray_Descr *)PyTuple_GET_ITEM(subarray, 0); - Py_INCREF(self->subarray->base); - self->subarray->shape = PyTuple_GET_ITEM(subarray, 1); - Py_INCREF(self->subarray->shape); - } - - if (fields != Py_None) { - Py_XDECREF(self->fields); - self->fields = fields; - Py_INCREF(fields); - Py_XDECREF(self->names); - self->names = names; - if (incref_names) - Py_INCREF(names); - } - - if (PyTypeNum_ISEXTENDED(self->type_num)) { - self->elsize = elsize; - self->alignment = alignment; - } - - self->hasobject = dtypeflags; - if (version < 3) { - self->hasobject = _descr_find_object(self); - } - Py_INCREF(Py_None); - return Py_None; -} - - -/* returns a copy of the PyArray_Descr structure with the byteorder - altered: - no arguments: The byteorder is swapped (in all subfields as well) - single argument: The byteorder is forced to the given state - (in all subfields as well) - - Valid states: ('big', '>') or ('little' or '<') - ('native', or '=') - - If a descr structure with | is encountered it's own - byte-order is not changed but any fields are: -*/ - -/*OBJECT_API - Deep bytorder change of a data-type descriptor - *** Leaves reference count of self unchanged --- does not DECREF self *** - */ -static PyArray_Descr * -PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) -{ - PyArray_Descr *new; - char endian; - - new = PyArray_DescrNew(self); - endian = new->byteorder; - if (endian != PyArray_IGNORE) { - if (newendian == PyArray_SWAP) { /* swap byteorder */ - if PyArray_ISNBO(endian) endian = PyArray_OPPBYTE; - else endian = PyArray_NATBYTE; - new->byteorder = endian; - } - else if (newendian != PyArray_IGNORE) { - new->byteorder = newendian; - } - } - if (new->names) { - PyObject *newfields; - PyObject *key, *value; - PyObject *newvalue; - PyObject *old; - PyArray_Descr *newdescr; - Py_ssize_t pos = 0; - int len, i; - newfields = PyDict_New(); - /* make new dictionary with replaced */ - /* PyArray_Descr Objects */ - while(PyDict_Next(self->fields, &pos, &key, &value)) { - if (!PyString_Check(key) || \ - !PyTuple_Check(value) || \ - ((len=PyTuple_GET_SIZE(value)) < 2)) - continue; - - old = PyTuple_GET_ITEM(value, 0); - if (!PyArray_DescrCheck(old)) continue; - newdescr = PyArray_DescrNewByteorder \ - ((PyArray_Descr *)old, newendian); - if (newdescr == NULL) { - Py_DECREF(newfields); Py_DECREF(new); - return NULL; - } - newvalue = PyTuple_New(len); - PyTuple_SET_ITEM(newvalue, 0, \ - (PyObject *)newdescr); - for(i=1; ifields); - new->fields = newfields; - } - if (new->subarray) 
{ - Py_DECREF(new->subarray->base); - new->subarray->base = PyArray_DescrNewByteorder \ - (self->subarray->base, newendian); - } - return new; -} - - -static PyObject * -arraydescr_newbyteorder(PyArray_Descr *self, PyObject *args) -{ - char endian=PyArray_SWAP; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - - return (PyObject *)PyArray_DescrNewByteorder(self, endian); -} - -static PyMethodDef arraydescr_methods[] = { - /* for pickling */ - {"__reduce__", (PyCFunction)arraydescr_reduce, METH_VARARGS, - NULL}, - {"__setstate__", (PyCFunction)arraydescr_setstate, METH_VARARGS, - NULL}, - {"newbyteorder", (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, - NULL}, - {NULL, NULL} /* sentinel */ -}; - -static PyObject * -arraydescr_str(PyArray_Descr *self) -{ - PyObject *sub; - - if (self->names) { - PyObject *lst; - lst = arraydescr_protocol_descr_get(self); - if (!lst) { - sub = PyString_FromString(""); - PyErr_Clear(); - } - else sub = PyObject_Str(lst); - Py_XDECREF(lst); - if (self->type_num != PyArray_VOID) { - PyObject *p; - PyObject *t=PyString_FromString("'"); - p = arraydescr_protocol_typestr_get(self); - PyString_Concat(&p, t); - PyString_ConcatAndDel(&t, p); - p = PyString_FromString("("); - PyString_ConcatAndDel(&p, t); - PyString_ConcatAndDel(&p, PyString_FromString(", ")); - PyString_ConcatAndDel(&p, sub); - PyString_ConcatAndDel(&p, PyString_FromString(")")); - sub = p; - } - } - else if (self->subarray) { - PyObject *p; - PyObject *t = PyString_FromString("("); - PyObject *sh; - p = arraydescr_str(self->subarray->base); - if (!self->subarray->base->names && !self->subarray->base->subarray) { - PyObject *t=PyString_FromString("'"); - PyString_Concat(&p, t); - PyString_ConcatAndDel(&t, p); - p = t; - } - PyString_ConcatAndDel(&t, p); - PyString_ConcatAndDel(&t, PyString_FromString(",")); - if (!PyTuple_Check(self->subarray->shape)) { - sh = Py_BuildValue("(O)", self->subarray->shape); - } - else { - sh = self->subarray->shape; - Py_INCREF(sh); - } - PyString_ConcatAndDel(&t, PyObject_Str(sh)); - Py_DECREF(sh); - PyString_ConcatAndDel(&t, PyString_FromString(")")); - sub = t; - } - else if (PyDataType_ISFLEXIBLE(self) || !PyArray_ISNBO(self->byteorder)) { - sub = arraydescr_protocol_typestr_get(self); - } - else { - sub = arraydescr_typename_get(self); - } - return sub; -} - -static PyObject * -arraydescr_repr(PyArray_Descr *self) -{ - PyObject *sub, *s; - s = PyString_FromString("dtype("); - sub = arraydescr_str(self); - if (!self->names && !self->subarray) { - PyObject *t=PyString_FromString("'"); - PyString_Concat(&sub, t); - PyString_ConcatAndDel(&t, sub); - sub = t; - } - PyString_ConcatAndDel(&s, sub); - sub = PyString_FromString(")"); - PyString_ConcatAndDel(&s, sub); - return s; -} - -static PyObject * -arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) -{ - PyArray_Descr *new=NULL; - PyObject *result = Py_NotImplemented; - if (!PyArray_DescrCheck(other)) { - if (PyArray_DescrConverter(other, &new) == PY_FAIL) - return NULL; - } - else { - new = (PyArray_Descr *)other; - Py_INCREF(new); - } - switch (cmp_op) { - case Py_LT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_LE: - if (PyArray_CanCastTo(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_EQ: - if (PyArray_EquivTypes(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_NE: - if (PyArray_EquivTypes(self, 
new)) - result = Py_False; - else - result = Py_True; - break; - case Py_GT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) - result = Py_True; - else - result = Py_False; - break; - case Py_GE: - if (PyArray_CanCastTo(new, self)) - result = Py_True; - else - result = Py_False; - break; - default: - result = Py_NotImplemented; - } - - Py_XDECREF(new); - Py_INCREF(result); - return result; -} - -/************************************************************************* - **************** Implement Mapping Protocol *************************** - *************************************************************************/ - -static Py_ssize_t -descr_length(PyObject *self0) -{ - - PyArray_Descr *self = (PyArray_Descr *)self0; - - if (self->names) - return PyTuple_GET_SIZE(self->names); - else return 0; -} - -static PyObject * -descr_repeat(PyObject *self, Py_ssize_t length) -{ - PyObject *tup; - PyArray_Descr *new; - if (length < 0) - return PyErr_Format(PyExc_ValueError, -#if (PY_VERSION_HEX < 0x02050000) - "Array length must be >= 0, not %d", -#else - "Array length must be >= 0, not %zd", -#endif - length); - tup = Py_BuildValue("O" NPY_SSIZE_T_PYFMT, self, length); - if (tup == NULL) return NULL; - PyArray_DescrConverter(tup, &new); - Py_DECREF(tup); - return (PyObject *)new; -} - -static PyObject * -descr_subscript(PyArray_Descr *self, PyObject *op) -{ - - if (self->names) { - if (PyString_Check(op) || PyUnicode_Check(op)) { - PyObject *obj; - obj = PyDict_GetItem(self->fields, op); - if (obj != NULL) { - PyObject *descr; - descr = PyTuple_GET_ITEM(obj, 0); - Py_INCREF(descr); - return descr; - } - else { - PyErr_Format(PyExc_KeyError, - "field named \'%s\' not found.", - PyString_AsString(op)); - } - } - else { - PyObject *name; - int value; - value = PyArray_PyIntAsInt(op); - if (!PyErr_Occurred()) { - int size; - size = PyTuple_GET_SIZE(self->names); - if (value < 0) value += size; - if (value < 0 || value >= size) { - PyErr_Format(PyExc_IndexError, - "0<=index<%d not %d", - size, value); - return NULL; - } - name = PyTuple_GET_ITEM(self->names, value); - return descr_subscript(self, name); - } - } - PyErr_SetString(PyExc_ValueError, - "only integers, strings or unicode values " - "allowed for getting fields."); - } - else { - PyObject *astr; - astr = arraydescr_str(self); - PyErr_Format(PyExc_KeyError, - "there are no fields in dtype %s.", - PyString_AsString(astr)); - Py_DECREF(astr); - } - return NULL; -} - -static PySequenceMethods descr_as_sequence = { - descr_length, - (binaryfunc)NULL, - descr_repeat, -}; - -static PyMappingMethods descr_as_mapping = { - descr_length, /*mp_length*/ - (binaryfunc)descr_subscript, /*mp_subscript*/ - (objobjargproc)NULL, /*mp_ass_subscript*/ -}; - -/****************** End of Mapping Protocol ******************************/ - - -static PyTypeObject PyArrayDescr_Type = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.dtype", /* tp_name */ - sizeof(PyArray_Descr), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arraydescr_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - (reprfunc)arraydescr_repr, /* tp_repr */ - 0, /* tp_as_number */ - &descr_as_sequence, /* tp_as_sequence */ - &descr_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arraydescr_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse 
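/*
 * [editor's sketch -- not part of the original file] descr_subscript above
 * accepts an integer as well as a field name; negative values count from the
 * end, mirroring Python sequence indexing, and out-of-range values raise
 * IndexError. A tiny standalone version of that normalisation:
 */
#include <stdio.h>

static int sketch_normalize_field_index(int value, int size)
{
    if (value < 0)
        value += size;                  /* count from the end */
    if (value < 0 || value >= size)
        return -1;                      /* would raise IndexError */
    return value;
}

int main(void)
{
    /* for a 3-field dtype: 1 -> 1, -1 -> 2, 5 -> out of range */
    printf("%d %d %d\n",
           sketch_normalize_field_index(1, 3),
           sketch_normalize_field_index(-1, 3),
           sketch_normalize_field_index(5, 3));
    return 0;
}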
*/ - 0, /* tp_clear */ - (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - arraydescr_methods, /* tp_methods */ - arraydescr_members, /* tp_members */ - arraydescr_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arraydescr_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0 /* tp_weaklist */ -}; - - -/** Array Flags Object **/ - -/*OBJECT_API - Get New ArrayFlagsObject -*/ -static PyObject * -PyArray_NewFlagsObject(PyObject *obj) -{ - PyObject *flagobj; - int flags; - if (obj == NULL) { - flags = CONTIGUOUS | OWNDATA | FORTRAN | ALIGNED; - } - else { - flags = PyArray_FLAGS(obj); - } - flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; - Py_XINCREF(obj); - ((PyArrayFlagsObject *)flagobj)->arr = obj; - ((PyArrayFlagsObject *)flagobj)->flags = flags; - - return flagobj; -} - -static void -arrayflags_dealloc(PyArrayFlagsObject *self) -{ - Py_XDECREF(self->arr); - self->ob_type->tp_free((PyObject *)self); -} - - -#define _define_get(UPPER, lower) \ - static PyObject * \ - arrayflags_ ## lower ## _get(PyArrayFlagsObject *self) \ - { \ - PyObject *item; \ - item = ((self->flags & (UPPER)) == (UPPER)) ? Py_True : Py_False; \ - Py_INCREF(item); \ - return item; \ - } - -_define_get(CONTIGUOUS, contiguous) -_define_get(FORTRAN, fortran) -_define_get(UPDATEIFCOPY, updateifcopy) -_define_get(OWNDATA, owndata) -_define_get(ALIGNED, aligned) -_define_get(WRITEABLE, writeable) - -_define_get(ALIGNED|WRITEABLE, behaved) -_define_get(ALIGNED|WRITEABLE|CONTIGUOUS, carray) - -static PyObject * -arrayflags_forc_get(PyArrayFlagsObject *self) -{ - PyObject *item; - - if (((self->flags & FORTRAN) == FORTRAN) || - ((self->flags & CONTIGUOUS) == CONTIGUOUS)) - item = Py_True; - else - item = Py_False; - - Py_INCREF(item); - return item; -} - -static PyObject * -arrayflags_fnc_get(PyArrayFlagsObject *self) -{ - PyObject *item; - - if (((self->flags & FORTRAN) == FORTRAN) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) - item = Py_True; - else - item = Py_False; - - Py_INCREF(item); - return item; -} - -static PyObject * -arrayflags_farray_get(PyArrayFlagsObject *self) -{ - PyObject *item; - - if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == \ - (ALIGNED|WRITEABLE|FORTRAN)) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) - item = Py_True; - else - item = Py_False; - - Py_INCREF(item); - return item; -} - -static PyObject * -arrayflags_num_get(PyArrayFlagsObject *self) -{ - return PyInt_FromLong(self->flags); -} - -/* relies on setflags order being write, align, uic */ -static int -arrayflags_updateifcopy_set(PyArrayFlagsObject *self, PyObject *obj) -{ - PyObject *res; - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot set flags on array scalars."); - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, - (PyObject_IsTrue(obj) ? 
Py_True : Py_False)); - if (res == NULL) return -1; - Py_DECREF(res); - return 0; -} - -static int -arrayflags_aligned_set(PyArrayFlagsObject *self, PyObject *obj) -{ - PyObject *res; - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot set flags on array scalars."); - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, - (PyObject_IsTrue(obj) ? Py_True : Py_False), - Py_None); - if (res == NULL) return -1; - Py_DECREF(res); - return 0; -} - -static int -arrayflags_writeable_set(PyArrayFlagsObject *self, PyObject *obj) -{ - PyObject *res; - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot set flags on array scalars."); - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", - (PyObject_IsTrue(obj) ? Py_True : Py_False), - Py_None, Py_None); - if (res == NULL) return -1; - Py_DECREF(res); - return 0; -} - - -static PyGetSetDef arrayflags_getsets[] = { - {"contiguous", - (getter)arrayflags_contiguous_get, - NULL, - ""}, - {"c_contiguous", - (getter)arrayflags_contiguous_get, - NULL, - ""}, - {"f_contiguous", - (getter)arrayflags_fortran_get, - NULL, - ""}, - {"fortran", - (getter)arrayflags_fortran_get, - NULL, - ""}, - {"updateifcopy", - (getter)arrayflags_updateifcopy_get, - (setter)arrayflags_updateifcopy_set, - ""}, - {"owndata", - (getter)arrayflags_owndata_get, - NULL, - ""}, - {"aligned", - (getter)arrayflags_aligned_get, - (setter)arrayflags_aligned_set, - ""}, - {"writeable", - (getter)arrayflags_writeable_get, - (setter)arrayflags_writeable_set, - ""}, - {"fnc", - (getter)arrayflags_fnc_get, - NULL, - ""}, - {"forc", - (getter)arrayflags_forc_get, - NULL, - ""}, - {"behaved", - (getter)arrayflags_behaved_get, - NULL, - ""}, - {"carray", - (getter)arrayflags_carray_get, - NULL, - ""}, - {"farray", - (getter)arrayflags_farray_get, - NULL, - ""}, - {"num", - (getter)arrayflags_num_get, - NULL, - ""}, - {NULL, NULL, NULL, NULL}, -}; - -static PyObject * -arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind) -{ - char *key; - int n; - if (!PyString_Check(ind)) goto fail; - key = PyString_AS_STRING(ind); - n = PyString_GET_SIZE(ind); - switch(n) { - case 1: - switch(key[0]) { - case 'C': - return arrayflags_contiguous_get(self); - case 'F': - return arrayflags_fortran_get(self); - case 'W': - return arrayflags_writeable_get(self); - case 'B': - return arrayflags_behaved_get(self); - case 'O': - return arrayflags_owndata_get(self); - case 'A': - return arrayflags_aligned_get(self); - case 'U': - return arrayflags_updateifcopy_get(self); - default: - goto fail; - } - break; - case 2: - if (strncmp(key, "CA", n)==0) - return arrayflags_carray_get(self); - if (strncmp(key, "FA", n)==0) - return arrayflags_farray_get(self); - break; - case 3: - if (strncmp(key, "FNC", n)==0) - return arrayflags_fnc_get(self); - break; - case 4: - if (strncmp(key, "FORC", n)==0) - return arrayflags_forc_get(self); - break; - case 6: - if (strncmp(key, "CARRAY", n)==0) - return arrayflags_carray_get(self); - if (strncmp(key, "FARRAY", n)==0) - return arrayflags_farray_get(self); - break; - case 7: - if (strncmp(key,"FORTRAN",n)==0) - return arrayflags_fortran_get(self); - if (strncmp(key,"BEHAVED",n)==0) - return arrayflags_behaved_get(self); - if (strncmp(key,"OWNDATA",n)==0) - return arrayflags_owndata_get(self); - if (strncmp(key,"ALIGNED",n)==0) - return arrayflags_aligned_get(self); - break; - case 9: - if (strncmp(key,"WRITEABLE",n)==0) - return arrayflags_writeable_get(self); - break; - case 10: - if 
(strncmp(key,"CONTIGUOUS",n)==0) - return arrayflags_contiguous_get(self); - break; - case 12: - if (strncmp(key, "UPDATEIFCOPY", n)==0) - return arrayflags_updateifcopy_get(self); - if (strncmp(key, "C_CONTIGUOUS", n)==0) - return arrayflags_contiguous_get(self); - if (strncmp(key, "F_CONTIGUOUS", n)==0) - return arrayflags_fortran_get(self); - break; - } - - fail: - PyErr_SetString(PyExc_KeyError, "Unknown flag"); - return NULL; -} - -static int -arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item) -{ - char *key; - int n; - if (!PyString_Check(ind)) goto fail; - key = PyString_AS_STRING(ind); - n = PyString_GET_SIZE(ind); - if (((n==9) && (strncmp(key, "WRITEABLE", n)==0)) || - ((n==1) && (strncmp(key, "W", n)==0))) - return arrayflags_writeable_set(self, item); - else if (((n==7) && (strncmp(key, "ALIGNED", n)==0)) || - ((n==1) && (strncmp(key, "A", n)==0))) - return arrayflags_aligned_set(self, item); - else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n)==0)) || - ((n==1) && (strncmp(key, "U", n)==0))) - return arrayflags_updateifcopy_set(self, item); - - fail: - PyErr_SetString(PyExc_KeyError, "Unknown flag"); - return -1; -} - -static char * -_torf_(int flags, int val) -{ - if ((flags & val) == val) return "True"; - else return "False"; -} - -static PyObject * -arrayflags_print(PyArrayFlagsObject *self) -{ - int fl = self->flags; - - return PyString_FromFormat(" %s : %s\n %s : %s\n %s : %s\n"\ - " %s : %s\n %s : %s\n %s : %s", - "C_CONTIGUOUS", _torf_(fl, CONTIGUOUS), - "F_CONTIGUOUS", _torf_(fl, FORTRAN), - "OWNDATA", _torf_(fl, OWNDATA), - "WRITEABLE", _torf_(fl, WRITEABLE), - "ALIGNED", _torf_(fl, ALIGNED), - "UPDATEIFCOPY", _torf_(fl, UPDATEIFCOPY)); -} - - -static int -arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) -{ - if (self->flags == other->flags) - return 0; - else if (self->flags < other->flags) - return -1; - else - return 1; -} - -static PyMappingMethods arrayflags_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)NULL, /*mp_length*/ -#else - (inquiry)NULL, /*mp_length*/ -#endif - (binaryfunc)arrayflags_getitem, /*mp_subscript*/ - (objobjargproc)arrayflags_setitem, /*mp_ass_subscript*/ -}; - - -static PyObject * -arrayflags_new(PyTypeObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *arg=NULL; - if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) - return NULL; - - if ((arg != NULL) && PyArray_Check(arg)) { - return PyArray_NewFlagsObject(arg); - } - else { - return PyArray_NewFlagsObject(NULL); - } -} - -static PyTypeObject PyArrayFlags_Type = { - PyObject_HEAD_INIT(NULL) - 0, - "numpy.flagsobj", - sizeof(PyArrayFlagsObject), - 0, /* tp_itemsize */ - /* methods */ - (destructor)arrayflags_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - (cmpfunc)arrayflags_compare, /* tp_compare */ - (reprfunc)arrayflags_print, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &arrayflags_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arrayflags_print, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - arrayflags_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* 
tp_init */ - 0, /* tp_alloc */ - arrayflags_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0 /* tp_weaklist */ -}; diff --git a/numpy/core/src/arraytypes.inc.src b/numpy/core/src/arraytypes.inc.src deleted file mode 100644 index 2499fc059..000000000 --- a/numpy/core/src/arraytypes.inc.src +++ /dev/null @@ -1,2545 +0,0 @@ -/* -*- c -*- */ - -static longlong -MyPyLong_AsLongLong(PyObject *vv) -{ - longlong ret; - - if (!PyLong_Check(vv)) { - PyObject *mylong; - mylong = PyNumber_Long(vv); - if (mylong == NULL) return (longlong) -1; - vv = mylong; - } - else Py_INCREF(vv); - - ret = PyLong_AsLongLong(vv); - Py_DECREF(vv); - return ret; -} - -static ulong -MyPyLong_AsUnsignedLong(PyObject *vv) -{ - longlong val; - - if (!PyLong_Check(vv)) { - PyObject *mylong; - mylong = PyNumber_Long(vv); - if (mylong == NULL) return (ulong) -1; - vv = mylong; - } - else Py_INCREF(vv); - - val = PyLong_AsLongLong(vv); - Py_DECREF(vv); - return (ulong) val; -} - -static ulonglong -MyPyLong_AsUnsignedLongLong(PyObject *vv) -{ - ulonglong ret; - - if (!PyLong_Check(vv)) { - PyObject *mylong; - mylong = PyNumber_Long(vv); - if (mylong == NULL) return (ulonglong) -1; - vv = mylong; - } - else Py_INCREF(vv); - - ret = PyLong_AsUnsignedLongLong(vv); - if (PyErr_Occurred()) { - longlong new; - PyErr_Clear(); - new = PyLong_AsLongLong(vv); - if (!PyErr_Occurred() && new < 0) - ret = (ulonglong) new; - ret = NPY_MAX_ULONGLONG; - } - Py_DECREF(vv); - return ret; -} - - -static double -_getNAN(void) { -#ifdef NAN - return NAN; -#else - static double nan=0; - - if (nan == 0) { - double mul = 1e100; - double tmp = 0.0; - double pinf=0; - pinf = mul; - for (;;) { - pinf *= mul; - if (pinf == tmp) break; - tmp = pinf; - } - nan = pinf / pinf; - } - return nan; -#endif -} - -static double -MyPyFloat_AsDouble(PyObject *obj) -{ - if (obj == Py_None) return _getNAN(); - return PyFloat_AsDouble(obj); -} - - -/****************** getitem and setitem **********************/ - -/**begin repeat - -#TYP=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,LONG,UINT,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE# -#func1=PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, PyLong_FromLongLong, PyLong_FromUnsignedLongLong, PyFloat_FromDouble*2# -#func2=PyObject_IsTrue, PyInt_AsLong*6, MyPyLong_AsUnsignedLong*2, MyPyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, MyPyFloat_AsDouble*2# -#typ=Bool, byte, ubyte, short, ushort, int, long, uint, ulong, longlong, ulonglong, float, double# -#typ1=long*7, ulong*2, longlong, ulonglong, float, double# -#kind=Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, LongLong, ULongLong, Float, Double# -*/ - -static PyObject * -@TYP@_getitem(char *ip, PyArrayObject *ap) { - @typ@ t1; - - if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) { - t1 = *((@typ@ *)ip); - return @func1@((@typ1@)t1); - } - else { - ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), - ap); - return @func1@((@typ1@)t1); - } -} - -static int -@TYP@_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - @typ@ temp; /* ensures alignment */ - - - if (PyArray_IsScalar(op, @kind@)) { - temp = ((Py@kind@ScalarObject *)op)->obval; - } - else { - temp = (@typ@)@func2@(op); - } - if (PyErr_Occurred()) { - if (PySequence_Check(op)) { - PyErr_Clear(); - PyErr_SetString(PyExc_ValueError, "setting an array" \ - " element with a sequence."); - } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) - *((@typ@ *)ov)=temp; - else { - ap->descr->f->copyswap(ov, 
&temp, !PyArray_ISNOTSWAPPED(ap), - ap); - } - - return 0; -} - -/**end repeat**/ - - -/**begin repeat - -#TYP=CFLOAT,CDOUBLE# -#typ=float, double# -*/ - -static PyObject * -@TYP@_getitem(char *ip, PyArrayObject *ap) { - @typ@ t1, t2; - - if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) { - return PyComplex_FromDoubles((double)((@typ@ *)ip)[0], - (double)((@typ@ *)ip)[1]); - } - else { - int size = sizeof(@typ@); - Bool swap = !PyArray_ISNOTSWAPPED(ap); - copy_and_swap(&t1, ip, size, 1, 0, swap); - copy_and_swap(&t2, ip+size, size, 1, 0, swap); - return PyComplex_FromDoubles((double)t1, (double)t2); - } -} -/**end repeat**/ - -/**begin repeat - -#TYP=CFLOAT, CDOUBLE, CLONGDOUBLE# -#typ=float, double, longdouble# -#kind=CFloat, CDouble, CLongDouble# -*/ -static int -@TYP@_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - Py_complex oop; - PyObject *op2; - c@typ@ temp; - int rsize; - - if (!(PyArray_IsScalar(op, @kind@))) { - if (PyArray_Check(op) && (PyArray_NDIM(op)==0)) { - op2 = ((PyArrayObject *)op)->descr->f->getitem \ - (((PyArrayObject *)op)->data, - (PyArrayObject *)op); - } - else { - op2 = op; Py_INCREF(op); - } - if (op2 == Py_None) { - oop.real = oop.imag = _getNAN(); - } - else { - oop = PyComplex_AsCComplex (op2); - } - Py_DECREF(op2); - if (PyErr_Occurred()) return -1; - temp.real = (@typ@) oop.real; - temp.imag = (@typ@) oop.imag; - } - else { - temp = ((Py@kind@ScalarObject *)op)->obval; - } - - memcpy(ov, &temp, ap->descr->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) - byte_swap_vector(ov, 2, sizeof(@typ@)); - - rsize = sizeof(@typ@); - copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap)); - return 0; -} -/**end repeat**/ - -static PyObject * -LONGDOUBLE_getitem(char *ip, PyArrayObject *ap) -{ - return PyArray_Scalar(ip, ap->descr, NULL); -} - -static int -LONGDOUBLE_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - longdouble temp; /* ensures alignment */ - - if (PyArray_IsScalar(op, LongDouble)) { - temp = ((PyLongDoubleScalarObject *)op)->obval; - } - else { - temp = (longdouble) MyPyFloat_AsDouble(op); - } - if (PyErr_Occurred()) return -1; - if (ap == NULL || PyArray_ISBEHAVED(ap)) - *((longdouble *)ov)=temp; - else { - copy_and_swap(ov, &temp, ap->descr->elsize, 1, 0, - !PyArray_ISNOTSWAPPED(ap)); - } - return 0; -} - -static PyObject * -CLONGDOUBLE_getitem(char *ip, PyArrayObject *ap) -{ - return PyArray_Scalar(ip, ap->descr, NULL); -} - - -/* UNICODE */ -static PyObject * -UNICODE_getitem(char *ip, PyArrayObject *ap) -{ - PyObject *obj; - int mysize; - PyArray_UCS4 *dptr; - char *buffer; - int alloc=0; - - mysize = ap->descr->elsize >> 2; - dptr = (PyArray_UCS4 *)ip + mysize-1; - while(mysize > 0 && *dptr-- == 0) mysize--; - if (!PyArray_ISBEHAVED(ap)) { - buffer = _pya_malloc(mysize << 2); - if (buffer == NULL) - return PyErr_NoMemory(); - alloc = 1; - memcpy(buffer, ip, mysize << 2); - if (!PyArray_ISNOTSWAPPED(ap)) { - byte_swap_vector(buffer, mysize, 4); - } - } - else buffer = ip; -#ifdef Py_UNICODE_WIDE - obj = PyUnicode_FromUnicode((const Py_UNICODE *)buffer, mysize); -#else - /* create new empty unicode object of length mysize*2 */ - obj = MyPyUnicode_New(mysize*2); - if (obj == NULL) {if (alloc) _pya_free(buffer); return obj;} - mysize = PyUCS2Buffer_FromUCS4(((PyUnicodeObject *)obj)->str, - (PyArray_UCS4 *)buffer, mysize); - /* reset length of unicode object to ucs2size */ - if (MyPyUnicode_Resize((PyUnicodeObject *)obj, mysize) < 0) { - if (alloc) _pya_free(buffer); - Py_DECREF(obj); - return NULL; - } -#endif - if (alloc) 
_pya_free(buffer); - - return obj; -} - -static int -UNICODE_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - PyObject *temp; - Py_UNICODE *ptr; - int datalen; -#ifndef Py_UNICODE_WIDE - char *buffer; -#endif - - if (!PyString_Check(op) && !PyUnicode_Check(op) && - PySequence_Check(op) && PySequence_Size(op) > 0) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence"); - return -1; - } - /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) PyErr_Clear(); - if ((temp=PyObject_Unicode(op)) == NULL) return -1; - ptr = PyUnicode_AS_UNICODE(temp); - if ((ptr == NULL) || (PyErr_Occurred())) { - Py_DECREF(temp); - return -1; - } - datalen = PyUnicode_GET_DATA_SIZE(temp); - -#ifdef Py_UNICODE_WIDE - memcpy(ov, ptr, MIN(ap->descr->elsize, datalen)); -#else - if (!PyArray_ISALIGNED(ap)) { - buffer = _pya_malloc(ap->descr->elsize); - if (buffer == NULL) { - Py_DECREF(temp); - PyErr_NoMemory(); - return -1; - } - } - else buffer = ov; - datalen = PyUCS2Buffer_AsUCS4(ptr, (PyArray_UCS4 *)buffer, - datalen >> 1, - ap->descr->elsize >> 2); - datalen <<= 2; - if (!PyArray_ISALIGNED(ap)) { - memcpy(ov, buffer, datalen); - _pya_free(buffer); - } -#endif - /* Fill in the rest of the space with 0 */ - if (ap->descr->elsize > datalen) { - memset(ov + datalen, 0, (ap->descr->elsize - datalen)); - } - - if (!PyArray_ISNOTSWAPPED(ap)) - byte_swap_vector(ov, ap->descr->elsize >> 2, 4); - Py_DECREF(temp); - return 0; -} - -/* STRING -- can handle both NULL-terminated and not NULL-terminated cases - will truncate all ending NULLs in returned string. -*/ -static PyObject * -STRING_getitem(char *ip, PyArrayObject *ap) -{ - /* Will eliminate NULLs at the end */ - char *ptr; - int size = ap->descr->elsize; - - ptr = ip + size-1; - while (*ptr-- == '\0' && size > 0) size--; - return PyString_FromStringAndSize(ip,size); -} - -static int -STRING_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - char *ptr; - Py_ssize_t len; - PyObject *temp=NULL; - - if (!PyString_Check(op) && !PyUnicode_Check(op) && - PySequence_Check(op) && PySequence_Size(op) > 0) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence"); - return -1; - } - /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) PyErr_Clear(); - if ((temp = PyObject_Str(op)) == NULL) return -1; - - if (PyString_AsStringAndSize(temp, &ptr, &len) == -1) { - Py_DECREF(temp); - return -1; - } - memcpy(ov, ptr, MIN(ap->descr->elsize,len)); - /* If string lenth is smaller than room in array - Then fill the rest of the element size - with NULL */ - if (ap->descr->elsize > len) { - memset(ov + len, 0, (ap->descr->elsize - len)); - } - Py_DECREF(temp); - return 0; -} - -/* OBJECT */ - -static PyObject * -OBJECT_getitem(char *ip, PyArrayObject *ap) -{ - if (*(PyObject **)ip == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - if (!ap || PyArray_ISALIGNED(ap)) { - Py_INCREF(*(PyObject **)ip); - return *(PyObject **)ip; - } - else { - PyObject **obj; - obj = (PyObject **)ip; - Py_INCREF(*obj); - return *obj; - } -} - - -static int -OBJECT_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - Py_INCREF(op); - if (!ap || PyArray_ISALIGNED(ap)) { - Py_XDECREF(*(PyObject **)ov); - *(PyObject **)ov = op; - } - else { - PyObject **obj; - obj = (PyObject **)ov; - Py_XDECREF(*obj); - memcpy(ov, &op, sizeof(PyObject *)); - } - return PyErr_Occurred() ? 
-1:0; -} - -/* VOID */ - -static PyObject * -VOID_getitem(char *ip, PyArrayObject *ap) -{ - PyObject *u=NULL; - PyArray_Descr* descr; - int itemsize; - - descr = ap->descr; - if (descr->names) { - PyObject *key; - PyObject *names; - int i, n; - PyObject *ret; - PyObject *tup, *title; - PyArray_Descr *new; - int offset; - int savedflags; - - /* get the names from the fields dictionary*/ - names = descr->names; - if (!names) goto finish; - n = PyTuple_GET_SIZE(names); - ret = PyTuple_New(n); - savedflags = ap->flags; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { - Py_DECREF(ret); - ap->descr = descr; - return NULL; - } - ap->descr = new; - /* update alignment based on offset */ - if ((new->alignment > 1) && \ - ((((intp)(ip+offset)) % new->alignment) != 0)) - ap->flags &= ~ALIGNED; - else - ap->flags |= ALIGNED; - - PyTuple_SET_ITEM(ret, i, \ - new->f->getitem(ip+offset, ap)); - ap->flags = savedflags; - } - ap->descr = descr; - return ret; - } - - if (descr->subarray) { - /* return an array of the basic type */ - PyArray_Dims shape={NULL,-1}; - PyObject *ret; - if (!(PyArray_IntpConverter(descr->subarray->shape, - &shape))) { - PyDimMem_FREE(shape.ptr); - PyErr_SetString(PyExc_ValueError, - "invalid shape in fixed-type tuple."); - return NULL; - } - Py_INCREF(descr->subarray->base); - ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, - shape.len, shape.ptr, - NULL, ip, ap->flags, NULL); - PyDimMem_FREE(shape.ptr); - if (!ret) return NULL; - PyArray_BASE(ret) = (PyObject *)ap; - Py_INCREF(ap); - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - return ret; - } - - finish: - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "tried to get void-array with object" - " members as buffer."); - return NULL; - } - - itemsize=ap->descr->elsize; - if (PyArray_ISWRITEABLE(ap)) - u = PyBuffer_FromReadWriteMemory(ip, itemsize); - else - u = PyBuffer_FromMemory(ip, itemsize); - if (u==NULL) goto fail; - - /* default is to return buffer object pointing to current item */ - /* a view of it */ - return u; - - fail: - return NULL; -} - - - -static int PyArray_CopyObject(PyArrayObject *, PyObject *); - -static int -VOID_setitem(PyObject *op, char *ip, PyArrayObject *ap) -{ - PyArray_Descr* descr; - int itemsize=ap->descr->elsize; - int res; - - descr = ap->descr; - if (descr->names && PyTuple_Check(op)) { - PyObject *key; - PyObject *names; - int i, n; - PyObject *tup, *title; - PyArray_Descr *new; - int offset; - int savedflags; - res = -1; - /* get the names from the fields dictionary*/ - names = descr->names; - n = PyTuple_GET_SIZE(names); - if (PyTuple_GET_SIZE(op) != n) { - PyErr_SetString(PyExc_ValueError, - "size of tuple must match "\ - "number of fields."); - return -1; - } - savedflags = ap->flags; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { - ap->descr = descr; - return -1; - } - ap->descr = new; - /* remember to update alignment flags */ - if ((new->alignment > 1) && \ - ((((intp)(ip+offset)) % new->alignment) != 0)) - ap->flags &= ~ALIGNED; - else - ap->flags |= ALIGNED; - - res = new->f->setitem(PyTuple_GET_ITEM(op, i), - ip+offset, ap); - ap->flags = savedflags; - if (res < 0) break; - } - ap->descr = descr; - return res; - } - - if (descr->subarray) { - /* copy into an array of the same basic type */ - PyArray_Dims shape={NULL,-1}; - PyObject *ret; - if 
(!(PyArray_IntpConverter(descr->subarray->shape, - &shape))) { - PyDimMem_FREE(shape.ptr); - PyErr_SetString(PyExc_ValueError, - "invalid shape in fixed-type tuple."); - return -1; - } - Py_INCREF(descr->subarray->base); - ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, - shape.len, shape.ptr, - NULL, ip, ap->flags, NULL); - PyDimMem_FREE(shape.ptr); - if (!ret) return -1; - PyArray_BASE(ret) = (PyObject *)ap; - Py_INCREF(ap); - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - res = PyArray_CopyObject((PyArrayObject *)ret, op); - Py_DECREF(ret); - return res; - } - - /* Default is to use buffer interface to set item */ - { - const void *buffer; - Py_ssize_t buflen; - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "tried to set void-array with object" - " members using buffer."); - return -1; - } - res = PyObject_AsReadBuffer(op, &buffer, &buflen); - if (res == -1) goto fail; - memcpy(ip, buffer, NPY_MIN(buflen, itemsize)); - if (itemsize > buflen) { - memset(ip+buflen, 0, (itemsize-buflen)); - } - } - return 0; - - fail: - return -1; -} - - -/****************** XXX_to_YYY *******************************/ - -/* Assumes contiguous, and aligned, from and to */ - - -/**begin repeat -#to=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*16# -#from=BYTE*13,UBYTE*13,SHORT*13,USHORT*13,INT*13,UINT*13,LONG*13,ULONG*13,LONGLONG*13,ULONGLONG*13,FLOAT*13,DOUBLE*13,LONGDOUBLE*13,CFLOAT*13,CDOUBLE*13,CLONGDOUBLE*13# -#totyp=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*16# -#fromtyp=byte*13, ubyte*13, short*13, ushort*13, int*13, uint*13, long*13, ulong*13, longlong*13, ulonglong*13, float*13, double*13, longdouble*13, float*13, double*13, longdouble*13# -#incr= ip++*169,ip+=2*39# -*/ -static void -@from@_to_@to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, - PyArrayObject *aip, PyArrayObject *aop) -{ - while (n--) { - *op++ = (@totyp@)*ip; - @incr@; - } -} -/**end repeat**/ - -/**begin repeat -#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -*/ -static void -@from@_to_BOOL(register @fromtyp@ *ip, register Bool *op, register intp n, - PyArrayObject *aip, PyArrayObject *aop) -{ - while (n--) { - *op++ = (Bool)(*ip++ != FALSE); - } -} -/**end repeat**/ - -/**begin repeat -#from=CFLOAT, CDOUBLE, CLONGDOUBLE# -#fromtyp=cfloat, cdouble, clongdouble# -*/ -static void -@from@_to_BOOL(register @fromtyp@ *ip, register Bool *op, register intp n, - PyArrayObject *aip, PyArrayObject *aop) -{ - while (n--) { - *op = (Bool)(((*ip).real != FALSE) || ((*ip).imag != FALSE)); - op++; ip++; - } -} -/**end repeat**/ - -/**begin repeat -#to=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#totyp=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -*/ -static void -BOOL_to_@to@(register Bool *ip, register @totyp@ *op, register intp n, - PyArrayObject *aip, PyArrayObject *aop) -{ - while (n--) { - *op++ = (@totyp@)(*ip++ != FALSE); - } -} -/**end repeat**/ - -/**begin repeat - -#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*14# -#from=BOOL*3,BYTE*3,UBYTE*3,SHORT*3,USHORT*3,INT*3,UINT*3,LONG*3,ULONG*3,LONGLONG*3,ULONGLONG*3,FLOAT*3,DOUBLE*3,LONGDOUBLE*3# 
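/*
 * [editor's sketch -- not part of the original file] conv_template expands the
 * surrounding real -> complex repeat block into one cast loop per (from, to)
 * pair; each generated body stores the input value in the real slot and writes
 * 0.0 to the imaginary slot. Roughly what DOUBLE_to_CFLOAT expands to, written
 * as a standalone function over a local struct:
 */
#include <stdio.h>

typedef struct { float real, imag; } sketch_cfloat;

static void sketch_double_to_cfloat(const double *ip, sketch_cfloat *op, long n)
{
    while (n--) {
        op->real = (float)*ip++;
        op->imag = 0.0f;                /* real input has no imaginary part */
        op++;
    }
}

int main(void)
{
    double in[2] = {1.5, -2.0};
    sketch_cfloat out[2];
    sketch_double_to_cfloat(in, out, 2);
    printf("(%g,%g) (%g,%g)\n",
           out[0].real, out[0].imag, out[1].real, out[1].imag);
    return 0;
}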
-#fromtyp=Bool*3,byte*3, ubyte*3, short*3, ushort*3, int*3, uint*3, long*3, ulong*3, longlong*3, ulonglong*3, float*3, double*3, longdouble*3# -#totyp= (float, double, longdouble)*14# -*/ -static void -@from@_to_@to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, - PyArrayObject *aip, PyArrayObject *aop) -{ - while (n--) { - *op++ = (@totyp@)*ip++; - *op++ = 0.0; - } - -} -/**end repeat**/ - -/**begin repeat - -#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*3# -#from=CFLOAT*3,CDOUBLE*3,CLONGDOUBLE*3# -#totyp=(float, double, longdouble)*3# -#fromtyp=float*3, double*3, longdouble*3# -*/ -static void -@from@_to_@to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, - PyArrayObject *aip, PyArrayObject *aop) -{ - n <<= 1; - while (n--) { - *op++ = (@totyp@)*ip++; - } - -} -/**end repeat**/ - -/**begin repeat - -#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT# -#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char, PyObject *# -#skip= 1*17, aip->descr->elsize*3, 1# -*/ -static void -@from@_to_OBJECT(@fromtyp@ *ip, PyObject **op, intp n, PyArrayObject *aip, - PyArrayObject *aop) -{ - register intp i; - int skip=@skip@; - for(i=0;idescr->elsize*3# -*/ -static void -OBJECT_to_@to@(PyObject **ip, @totyp@ *op, intp n, PyArrayObject *aip, - PyArrayObject *aop) -{ - register intp i; - int skip=@skip@; - for(i=0;idescr->elsize*3)*3# -#convert=1*17,0*3,1*17,0*3,0*20# -#convstr=(Int*9,Long*2,Float*3,Complex*3,Tuple*3)*3# -*/ -static void -@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, - PyArrayObject *aop) -{ - register intp i; - PyObject *temp=NULL; - int skip=aip->descr->elsize; - int oskip=@oskip@; - for(i=0; idescr->elsize; - for(i=0; i= 0x02040000) || defined(PyOS_ascii_strtod) -static int -@fname@_fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *ignore) -{ - double result; - - result = PyOS_ascii_strtod(str, endptr); - *ip = (@type@) result; - return 0; -} -#else -#define @fname@_fromstr NULL -#endif -/**end repeat**/ - - - -/**begin repeat -#fname=BOOL,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ -#define @fname@_fromstr NULL -/**end repeat**/ - - -/****************** copyswapn *************************************/ - -/**begin repeat - -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#fsize=SHORT,SHORT,INT,INT,LONG,LONG,LONGLONG,LONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble# -*/ -static void -@fname@_copyswapn (void *dst, intp dstride, void *src, intp sstride, - intp n, int swap, void *arr) -{ - if (src != NULL) { - if (sstride == sizeof(@type@) && dstride == sizeof(@type@)) { - memcpy(dst, src, n*sizeof(@type@)); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, sstride, - n, sizeof(@type@)); - } - } - if (swap) { - _strided_byte_swap(dst, dstride, n, sizeof(@type@)); - } -} - -static void -@fname@_copyswap (void *dst, void *src, int swap, void *arr) -{ - - if (src != NULL) /* copy first if needed */ - memcpy(dst, src, sizeof(@type@)); - - if (swap) { - register char *a, *b, c; - a = (char *)dst; -#if SIZEOF_@fsize@ == 2 - b = a + 1; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 4 - b = a + 3; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 8 - b = a + 7; - 
c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 10 - b = a + 9; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 12 - b = a + 11; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 16 - b = a + 15; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#else - { - register int i, nn; - b = a + (SIZEOF_@fsize@-1); - nn = SIZEOF_@fsize@ / 2; - for (i=0; idescr->elsize; - if (dstride == itemsize && sstride == itemsize) { - memcpy(dst, src, itemsize * n); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, sstride, n, itemsize); - } - } - return; -} - -/* */ -static void -VOID_copyswapn (char *dst, intp dstride, char *src, intp sstride, - intp n, int swap, PyArrayObject *arr) -{ - if (arr == NULL) return; - if (PyArray_HASFIELDS(arr)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new, *descr; - int offset; - Py_ssize_t pos=0; - descr = arr->descr; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - arr->descr=descr;return; - } - arr->descr = new; - new->f->copyswapn(dst+offset, dstride, - (src != NULL ? src+offset : NULL), - sstride, n, swap, arr); - } - arr->descr = descr; - return; - } - if (swap && arr->descr->subarray != NULL) { - PyArray_Descr *descr, *new; - npy_intp num; - npy_intp i; - int subitemsize; - char *dstptr, *srcptr; - descr = arr->descr; - new = descr->subarray->base; - arr->descr = new; - dstptr = dst; - srcptr = src; - subitemsize = new->elsize; - num = descr->elsize / subitemsize; - for (i=0; if->copyswapn(dstptr, subitemsize, srcptr, - subitemsize, num, swap, arr); - dstptr += dstride; - if (srcptr) srcptr += sstride; - } - arr->descr = descr; - return; - } - if (src != NULL) { - memcpy(dst, src, arr->descr->elsize * n); - } - return; -} - -static void -VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) -{ - if (arr==NULL) return; - if (PyArray_HASFIELDS(arr)) { - PyObject *key, *value, *title=NULL; - PyArray_Descr *new, *descr; - int offset; - Py_ssize_t pos=0; - descr = arr->descr; /* Save it */ - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - arr->descr=descr;return; - } - arr->descr = new; - new->f->copyswap(dst+offset, - (src != NULL ? 
src+offset : NULL), - swap, arr); - } - arr->descr = descr; - return; - } - if (swap && arr->descr->subarray != NULL) { - PyArray_Descr *descr, *new; - npy_intp num; - int itemsize; - descr = arr->descr; - new = descr->subarray->base; - arr->descr = new; - itemsize = new->elsize; - num = descr->elsize / itemsize; - new->f->copyswapn(dst, itemsize, src, - itemsize, num, swap, arr); - arr->descr = descr; - return; - } - if (src != NULL) { - memcpy(dst, src, arr->descr->elsize); - } - return; -} - - -static void -UNICODE_copyswapn (char *dst, intp dstride, char *src, intp sstride, - intp n, int swap, PyArrayObject *arr) -{ - int itemsize; - if (arr==NULL) return; - itemsize = arr->descr->elsize; - if (src != NULL) { - if (dstride == itemsize && sstride == itemsize) - memcpy(dst, src, n * itemsize); - else - _unaligned_strided_byte_copy(dst, dstride, src, - sstride, n, itemsize); - } - - n *= itemsize; - if (swap) { - register char *a, *b, c; - n >>= 2; /* n is the number of unicode characters to swap */ - for (a = (char *)dst; n>0; n--) { - b = a + 3; - c=*a; *a++ = *b; *b-- = c; - c=*a; *a++ = *b; *b-- = c; - a += 2; - } - } -} - - -static void -STRING_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) -{ - if (src != NULL && arr != NULL) { - memcpy(dst, src, arr->descr->elsize); - } -} - -static void -UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) -{ - int itemsize; - if (arr == NULL) return; - itemsize = arr->descr->elsize; - if (src != NULL) { - memcpy(dst, src, itemsize); - } - - if (swap) { - register char *a, *b, c; - itemsize >>= 2; - for (a = (char *)dst; itemsize>0; itemsize--) { - b = a + 3; - c=*a; *a++ = *b; *b-- = c; - c=*a; *a++ = *b; *b-- = c; - a += 2; - } - } -} - - -/****************** nonzero **********************************/ - -/**begin repeat -#fname=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -*/ -static Bool -@fname@_nonzero (@type@ *ip, PyArrayObject *ap) -{ - @type@ t1; - if (ap==NULL || PyArray_ISBEHAVED_RO(ap)) - return (Bool) (*ip != 0); - else { - /* don't worry about swap, since we are just testing - whether or not equal to 0 */ - memcpy(&t1, ip, sizeof(@type@)); - return (Bool) (t1 != 0); - } -} -/**end repeat**/ - -/**begin repeat -#fname=CFLOAT,CDOUBLE,CLONGDOUBLE# -#type=cfloat, cdouble, clongdouble# -*/ -static Bool -@fname@_nonzero (@type@ *ip, PyArrayObject *ap) -{ - @type@ t1; - if (ap==NULL || PyArray_ISBEHAVED_RO(ap)) - return (Bool) ((ip->real != 0) || (ip->imag != 0)); - else { - /* don't worry about swap, since we are just testing - whether or not equal to 0 */ - memcpy(&t1, ip, sizeof(@type@)); - return (Bool) ((t1.real != 0) || (t1.imag != 0)); - } -} -/**end repeat**/ - - -#define WHITESPACE " \t\n\r\v\f" -#define WHITELEN 6 - -static Bool -Py_STRING_ISSPACE(char ch) -{ - char white[] = WHITESPACE; - int j; - Bool space=FALSE; - for (j=0; jdescr->elsize; - int i; - Bool nonz = FALSE; - - for (i=0; idescr->elsize >> 2; - int i; - Bool nonz = FALSE; - char *buffer=NULL; - - if ((!PyArray_ISNOTSWAPPED(ap)) || \ - (!PyArray_ISALIGNED(ap))) { - buffer = _pya_malloc(ap->descr->elsize); - if (buffer == NULL) { - return nonz; - } - memcpy(buffer, ip, ap->descr->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) { - byte_swap_vector(buffer, len, 4); - } - ip = (PyArray_UCS4 *)buffer; - } - - for (i=0; idescr; - savedflags = ap->flags; - while (PyDict_Next(descr->fields, 
&pos, &key, &value)) { - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) {PyErr_Clear(); continue;} - ap->descr = new; - ap->flags = savedflags; - if ((new->alignment > 1) && !__ALIGNED(ip+offset, new->alignment)) - ap->flags &= ~ALIGNED; - else - ap->flags |= ALIGNED; - if (new->f->nonzero(ip+offset, ap)) { - nonz=TRUE; - break; - } - } - ap->descr = descr; - ap->flags = savedflags; - return nonz; - } - len = ap->descr->elsize; - for (i=0; idescr->elsize); -} - -/* taken from Python */ -static int -UNICODE_compare(register PyArray_UCS4 *ip1, register PyArray_UCS4 *ip2, - PyArrayObject *ap) -{ - register int itemsize=ap->descr->elsize; - register PyArray_UCS4 c1, c2; - - if (itemsize < 0) return 0; - - while(itemsize-- > 0) { - c1 = *ip1++; - c2 = *ip2++; - - if (c1 != c2) - return (c1 < c2) ? -1 : 1; - } - return 0; -} - -/* If fields are defined, then compare on first field and if equal - compare on second field. Continue until done or comparison results - in not_equal. - - Must align data passed on to sub-comparisons. -*/ - -static int -VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) -{ - PyArray_Descr *descr, *new; - PyObject *names, *key; - PyObject *tup, *title; - char *nip1, *nip2; - int i, offset, res=0; - - if (!PyArray_HASFIELDS(ap)) - return STRING_compare(ip1, ip2, ap); - - descr = ap->descr; - /* Compare on the first-field. If equal, then - compare on the second-field, etc. - */ - names = descr->names; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { - goto finish; - } - ap->descr = new; - nip1 = ip1+offset; - nip2 = ip2+offset; - if (new->alignment > 1) { - if (((intp)(nip1) % new->alignment) != 0) { - /* create buffer and copy */ - nip1 = _pya_malloc(new->elsize); - if (nip1 == NULL) goto finish; - memcpy(nip1, ip1+offset, new->elsize); - } - if (((intp)(nip2) % new->alignment) != 0) { - /* copy data to a buffer */ - nip2 = _pya_malloc(new->elsize); - if (nip2 == NULL) { - if (nip1 != ip1+offset) - _pya_free(nip1); - goto finish; - } - memcpy(nip2, ip2+offset, new->elsize); - } - } - res = new->f->compare(nip1, nip2, ap); - if (new->alignment > 1) { - if (nip1 != ip1+offset) { - _pya_free(nip1); - } - if (nip2 != ip2+offset) { - _pya_free(nip2); - } - } - if (res != 0) break; - } - - finish: - ap->descr = descr; - return res; -} - -/****************** argfunc **********************************/ - -/**begin repeat - -#fname= BOOL,BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# -#type= Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, float, double, longdouble# -#incr= ip++*14, ip+=2*3# -*/ - -static int -@fname@_argmax(@type@ *ip, intp n, intp *max_ind, PyArrayObject *aip) -{ - register intp i; - @type@ mp=*ip; - *max_ind=0; - for (i=1; i mp) { - mp = *ip; - *max_ind = i; - } - } - return 0; -} - -/**end repeat**/ - -static int -OBJECT_argmax(PyObject **ip, intp n, intp *max_ind, PyArrayObject *aip) -{ - register intp i; - PyObject *mp=ip[0]; *max_ind=0; - i = 1; - while(i 0) { - mp = *ip; - *max_ind=i; - } - } - return 0; -} - -/**begin repeat - -#fname= STRING, UNICODE# -#type= char, PyArray_UCS4# - -*/ -static int -@fname@_argmax(@type@ *ip, intp n, intp *max_ind, PyArrayObject *aip) -{ - register intp i; - int elsize = aip->descr->elsize; - @type@ *mp = (@type@ *)_pya_malloc(elsize); - - if (mp==NULL) return 0; - memcpy(mp, ip, elsize); - *max_ind = 0; - for(i=1; i 0) { - 
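The generated @fname@_argmax bodies above all follow the same single-pass pattern; a plain-C sketch for doubles (argmax_double is a hypothetical name; n is assumed to be at least 1):

#include <stddef.h>

/* Keep the index of the first element strictly greater than the running
   maximum, so ties resolve to the earliest position. */
static size_t
argmax_double(const double *ip, size_t n)
{
    size_t i, max_ind = 0;
    double mp = ip[0];
    for (i = 1; i < n; i++) {
        if (ip[i] > mp) {
            mp = ip[i];
            max_ind = i;
        }
    }
    return max_ind;
}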
memcpy(mp, ip, elsize); - *max_ind=i; - } - } - _pya_free(mp); - return 0; -} - -/**end repeat**/ - -#define VOID_argmax NULL - -static void -BOOL_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n, - void *ignore) -{ - register Bool tmp=FALSE; - register intp i; - for(i=0;ireal; - start.imag = buffer->imag; - delta.real = buffer[1].real; - delta.imag = buffer[1].imag; - delta.real -= start.real; - delta.imag -= start.imag; - buffer += 2; - for (i=2; ireal = start.real + i*delta.real; - buffer->imag = start.imag + i*delta.imag; - } -} -/**end repeat**/ - - -/* this requires buffer to be filled with objects or NULL */ -static void -OBJECT_fillwithscalar(PyObject **buffer, intp length, PyObject **value, void *ignored) -{ - intp i; - PyObject *val = *value; - for (i=0; i max_val) { - out[i] = max_val; - } - } - return; - } - - for (i = 0; i < ni; i++) { - if (in[i] < min_val) { - out[i] = min_val; - } else if (in[i] > max_val) { - out[i] = max_val; - } - } - - return; -} -/**end repeat**/ - -/**begin repeat -#name=CFLOAT, CDOUBLE, CLONGDOUBLE# -#type= cfloat, cdouble, clongdouble# -*/ -static void -@name@_fastclip(@type@ *in, intp ni, @type@ *min, @type@ *max, @type@ *out) -{ - register npy_intp i; - @type@ max_val, min_val; - - min_val = *min; - max_val = *max; - - if (max != NULL) - max_val = *max; - if (min != NULL) - min_val = *min; - - if (max == NULL) { - for (i = 0; i < ni; i++) { - if (PyArray_CLT(in[i],min_val)) { - out[i] = min_val; - } - } - return; - } - - if (min == NULL) { - for (i = 0; i < ni; i++) { - if (PyArray_CGT(in[i], max_val)) { - out[i] = max_val; - } - } - return; - } - - for (i = 0; i < ni; i++) { - if (PyArray_CLT(in[i], min_val)) { - out[i] = min_val; - } else if (PyArray_CGT(in[i], max_val)) { - out[i] = max_val; - } - } - return; -} - -/**end repeat**/ - -#define OBJECT_fastclip NULL - -/************************ - * Fast putmask functions - *************************/ - -/**begin repeat -#name=BOOL,BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE,CFLOAT, CDOUBLE, CLONGDOUBLE# -#type= Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble,cfloat, cdouble, clongdouble# -*/ -static void -@name@_fastputmask(@type@ *in, Bool *mask, intp ni, @type@ *vals, intp nv) -{ - register npy_intp i; - @type@ s_val; - - if (nv == 1) { - s_val = *vals; - for (i = 0; i < ni; i++) { - if (mask[i]) { - in[i] = s_val; - } - } - } - else { - for (i = 0; i < ni; i++) { - if (mask[i]) { - in[i] = vals[i%nv]; - } - } - } - return; -} -/**end repeat**/ - -#define OBJECT_fastputmask NULL - - -#define _ALIGN(type) offsetof(struct {char c; type v;},v) - -/* Disable harmless compiler warning "4116: unnamed type definition in - parentheses" which is caused by the _ALIGN macro. 
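The @name@_fastclip loops above split the work into three branch-light loops (max only, min only, both bounds). A condensed in-place sketch for doubles, under the assumption that the caller has already copied the input into the buffer being clipped, as PyArray_Clip does before invoking the fast path (fastclip_double is a hypothetical name):

#include <stddef.h>

/* Clip each element to [*min, *max]; a NULL bound means "unbounded on
   that side".  The real code keeps separate loops per case to avoid the
   extra NULL tests inside the hot loop. */
static void
fastclip_double(double *io, size_t n, const double *min, const double *max)
{
    size_t i;
    for (i = 0; i < n; i++) {
        if (min != NULL && io[i] < *min)
            io[i] = *min;
        else if (max != NULL && io[i] > *max)
            io[i] = *max;
    }
}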
*/ - -#if defined(_MSC_VER) -#pragma warning(disable:4116) -#endif - - -/**begin repeat - -#from= VOID, STRING, UNICODE# -#align= char, char, PyArray_UCS4# -#NAME= Void, String, Unicode# -#endian= |, |, =# -*/ - -static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { - { - (PyArray_VectorUnaryFunc*)@from@_to_BOOL, - (PyArray_VectorUnaryFunc*)@from@_to_BYTE, - (PyArray_VectorUnaryFunc*)@from@_to_UBYTE, - (PyArray_VectorUnaryFunc*)@from@_to_SHORT, - (PyArray_VectorUnaryFunc*)@from@_to_USHORT, - (PyArray_VectorUnaryFunc*)@from@_to_INT, - (PyArray_VectorUnaryFunc*)@from@_to_UINT, - (PyArray_VectorUnaryFunc*)@from@_to_LONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONG, - (PyArray_VectorUnaryFunc*)@from@_to_LONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_FLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_DOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_LONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CFLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_CDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CLONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_OBJECT, - (PyArray_VectorUnaryFunc*)@from@_to_STRING, - (PyArray_VectorUnaryFunc*)@from@_to_UNICODE, - (PyArray_VectorUnaryFunc*)@from@_to_VOID - }, - (PyArray_GetItemFunc*)@from@_getitem, - (PyArray_SetItemFunc*)@from@_setitem, - (PyArray_CopySwapNFunc*)@from@_copyswapn, - (PyArray_CopySwapFunc*)@from@_copyswap, - (PyArray_CompareFunc*)@from@_compare, - (PyArray_ArgFunc*)@from@_argmax, - (PyArray_DotFunc*)NULL, - (PyArray_ScanFunc*)@from@_scan, - (PyArray_FromStrFunc*)@from@_fromstr, - (PyArray_NonzeroFunc*)@from@_nonzero, - (PyArray_FillFunc*)NULL, - (PyArray_FillWithScalarFunc*)NULL, - { - NULL, NULL, NULL - }, - { - NULL, NULL, NULL - }, - NULL, - (PyArray_ScalarKindFunc*)NULL, - NULL, - NULL, - (PyArray_FastClipFunc *)NULL, - (PyArray_FastPutmaskFunc *)NULL -}; - -static PyArray_Descr @from@_Descr = { - PyObject_HEAD_INIT(&PyArrayDescr_Type) - &Py@NAME@ArrType_Type, - PyArray_@from@LTR, - PyArray_@from@LTR, - '@endian@', 0, - PyArray_@from@, 0, - _ALIGN(@align@), - NULL, - NULL, - NULL, - &_Py@NAME@_ArrFuncs, -}; - -/**end repeat**/ - - -/**begin repeat - -#from= BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT# -#num= 1*14,2*3,1# -#fromtyp= Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, float, double, longdouble, PyObject *# -#NAME= Bool, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, Object# -#kind= GENBOOL, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, FLOATING, FLOATING, FLOATING, COMPLEX, COMPLEX, COMPLEX, OBJECT# -#endian= |*3, =*14, |# -#isobject= 0*17,NPY_OBJECT_DTYPE_FLAGS# -*/ - -static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { - { - (PyArray_VectorUnaryFunc*)@from@_to_BOOL, - (PyArray_VectorUnaryFunc*)@from@_to_BYTE, - (PyArray_VectorUnaryFunc*)@from@_to_UBYTE, - (PyArray_VectorUnaryFunc*)@from@_to_SHORT, - (PyArray_VectorUnaryFunc*)@from@_to_USHORT, - (PyArray_VectorUnaryFunc*)@from@_to_INT, - (PyArray_VectorUnaryFunc*)@from@_to_UINT, - (PyArray_VectorUnaryFunc*)@from@_to_LONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONG, - (PyArray_VectorUnaryFunc*)@from@_to_LONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_FLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_DOUBLE, - 
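The _ALIGN(type) macro used in the descriptor tables above derives a type's alignment from the offset of a member that follows a single char. A standalone illustration using a named struct (align_probe is a hypothetical name; the anonymous-struct form is what triggers the MSC warning 4116 being silenced above):

#include <stddef.h>
#include <stdio.h>

struct align_probe { char c; double v; };

int main(void)
{
    /* offsetof(struct, v) equals the alignment requirement of double */
    printf("double alignment: %lu\n",
           (unsigned long)offsetof(struct align_probe, v));
    return 0;
}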
(PyArray_VectorUnaryFunc*)@from@_to_LONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CFLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_CDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CLONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_OBJECT, - (PyArray_VectorUnaryFunc*)@from@_to_STRING, - (PyArray_VectorUnaryFunc*)@from@_to_UNICODE, - (PyArray_VectorUnaryFunc*)@from@_to_VOID - }, - (PyArray_GetItemFunc*)@from@_getitem, - (PyArray_SetItemFunc*)@from@_setitem, - (PyArray_CopySwapNFunc*)@from@_copyswapn, - (PyArray_CopySwapFunc*)@from@_copyswap, - (PyArray_CompareFunc*)@from@_compare, - (PyArray_ArgFunc*)@from@_argmax, - (PyArray_DotFunc*)@from@_dot, - (PyArray_ScanFunc*)@from@_scan, - (PyArray_FromStrFunc*)@from@_fromstr, - (PyArray_NonzeroFunc*)@from@_nonzero, - (PyArray_FillFunc*)@from@_fill, - (PyArray_FillWithScalarFunc*)@from@_fillwithscalar, - { - NULL, NULL, NULL - }, - { - NULL, NULL, NULL - }, - NULL, - (PyArray_ScalarKindFunc*)NULL, - NULL, - NULL, - (PyArray_FastClipFunc*)@from@_fastclip, - (PyArray_FastPutmaskFunc*)@from@_fastputmask -}; - -static PyArray_Descr @from@_Descr = { - PyObject_HEAD_INIT(&PyArrayDescr_Type) - &Py@NAME@ArrType_Type, - PyArray_@kind@LTR, - PyArray_@from@LTR, - '@endian@', @isobject@, - PyArray_@from@, - @num@*sizeof(@fromtyp@), - _ALIGN(@fromtyp@), - NULL, - NULL, - NULL, - &_Py@NAME@_ArrFuncs, -}; - -/**end repeat**/ - -#define _MAX_LETTER 128 -static char _letter_to_num[_MAX_LETTER]; - -static PyArray_Descr *_builtin_descrs[] = { - &BOOL_Descr, - &BYTE_Descr, - &UBYTE_Descr, - &SHORT_Descr, - &USHORT_Descr, - &INT_Descr, - &UINT_Descr, - &LONG_Descr, - &ULONG_Descr, - &LONGLONG_Descr, - &ULONGLONG_Descr, - &FLOAT_Descr, - &DOUBLE_Descr, - &LONGDOUBLE_Descr, - &CFLOAT_Descr, - &CDOUBLE_Descr, - &CLONGDOUBLE_Descr, - &OBJECT_Descr, - &STRING_Descr, - &UNICODE_Descr, - &VOID_Descr, -}; - -/*OBJECT_API - Get the PyArray_Descr structure for a type. 
-*/ -static PyArray_Descr * -PyArray_DescrFromType(int type) -{ - PyArray_Descr *ret=NULL; - - if (type < PyArray_NTYPES) { - ret = _builtin_descrs[type]; - } - else if (type == PyArray_NOTYPE) { - /* This needs to not raise an error so - that PyArray_DescrFromType(PyArray_NOTYPE) - works for backwards-compatible C-API - */ - return NULL; - } - else if ((type == PyArray_CHAR) || \ - (type == PyArray_CHARLTR)) { - ret = PyArray_DescrNew(_builtin_descrs[PyArray_STRING]); - ret->elsize = 1; - ret->type = PyArray_CHARLTR; - return ret; - } - else if PyTypeNum_ISUSERDEF(type) { - ret = userdescrs[type-PyArray_USERDEF]; - } - else { - int num=PyArray_NTYPES; - if (type < _MAX_LETTER) - num = (int) _letter_to_num[type]; - if (num >= PyArray_NTYPES) - ret = NULL; - else - ret = _builtin_descrs[num]; - } - if (ret==NULL) { - PyErr_SetString(PyExc_ValueError, - "Invalid data-type for array"); - } - else Py_INCREF(ret); - return ret; -} - - -static int -set_typeinfo(PyObject *dict) -{ - PyObject *infodict, *s; - int i; - - for (i=0; i<_MAX_LETTER; i++) { - _letter_to_num[i] = PyArray_NTYPES; - } - -/**begin repeat -#name=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,INTP,UINTP,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ - _letter_to_num[PyArray_@name@LTR] = PyArray_@name@; -/**end repeat**/ - _letter_to_num[PyArray_STRINGLTR2] = PyArray_STRING; - -/**begin repeat -#name=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ - @name@_Descr.fields = Py_None; -/**end repeat**/ - - /* Set a dictionary with type information */ - infodict = PyDict_New(); - if (infodict == NULL) return -1; - -#define BITSOF_INTP CHAR_BIT*SIZEOF_PY_INTPTR_T -#define BITSOF_BYTE CHAR_BIT - -/**begin repeat - -#name=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,INTP,UINTP,LONG,ULONG,LONGLONG,ULONGLONG# -#uname=BOOL,BYTE*2,SHORT*2,INT*2,INTP*2,LONG*2,LONGLONG*2# -#Name=Bool,Byte,UByte,Short,UShort,Int,UInt,Intp,UIntp,Long,ULong,LongLong,ULongLong# -#type=Bool,byte,ubyte,short,ushort,int,uint,intp,uintp,long,ulong,longlong,ulonglong# -#max=1,MAX_BYTE,MAX_UBYTE,MAX_SHORT,MAX_USHORT,MAX_INT,PyLong_FromUnsignedLong(MAX_UINT),PyLong_FromLongLong((longlong) MAX_INTP),PyLong_FromUnsignedLongLong((ulonglong) MAX_UINTP),MAX_LONG,PyLong_FromUnsignedLong((unsigned long) MAX_ULONG),PyLong_FromLongLong((longlong) MAX_LONGLONG), PyLong_FromUnsignedLongLong((ulonglong) MAX_ULONGLONG)# -#min=0,MIN_BYTE,0,MIN_SHORT,0,MIN_INT,0,PyLong_FromLongLong((longlong) MIN_INTP),0,MIN_LONG,0,PyLong_FromLongLong((longlong) MIN_LONGLONG),0# -#cx=i*6,N,N,N,l,N,N,N# -#cn=i*7,N,i,l,i,N,i# -*/ - PyDict_SetItemString(infodict, "@name@", - s=Py_BuildValue("ciii@cx@@cn@O", - PyArray_@name@LTR, - PyArray_@name@, - BITSOF_@uname@, - _ALIGN(@type@), - @max@, @min@, - (PyObject *)&Py@Name@ArrType_Type)); - Py_DECREF(s); -/**end repeat**/ - -#define BITSOF_CFLOAT 2*BITSOF_FLOAT -#define BITSOF_CDOUBLE 2*BITSOF_DOUBLE -#define BITSOF_CLONGDOUBLE 2*BITSOF_LONGDOUBLE - -/**begin repeat - -#type=float,double,longdouble,cfloat,cdouble,clongdouble# -#name=FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# -#Name=Float,Double,LongDouble,CFloat,CDouble,CLongDouble# -*/ - PyDict_SetItemString(infodict, "@name@", - s=Py_BuildValue("ciiiO", PyArray_@name@LTR, - PyArray_@name@, BITSOF_@name@, - _ALIGN(@type@), - (PyObject *)\ - &Py@Name@ArrType_Type)); - Py_DECREF(s); -/**end repeat**/ - - PyDict_SetItemString(infodict, "OBJECT", 
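PyArray_DescrFromType and set_typeinfo above rely on a 128-entry character table filled with an out-of-range sentinel; a reduced sketch with two hypothetical type numbers (T_FLOAT, T_DOUBLE) standing in for the full PyArray_* enumeration:

enum { T_FLOAT, T_DOUBLE, T_NTYPES };

static int letter_to_num[128];

static void
init_letter_table(void)
{
    int i;
    for (i = 0; i < 128; i++)
        letter_to_num[i] = T_NTYPES;     /* sentinel: no such type */
    letter_to_num['f'] = T_FLOAT;
    letter_to_num['d'] = T_DOUBLE;
}

static int
num_from_letter(unsigned char c)
{
    return (c < 128) ? letter_to_num[c] : T_NTYPES;
}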
- s=Py_BuildValue("ciiiO", PyArray_OBJECTLTR, - PyArray_OBJECT, - sizeof(PyObject *)*CHAR_BIT, - _ALIGN(PyObject *), - (PyObject *)\ - &PyObjectArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "STRING", - s=Py_BuildValue("ciiiO", PyArray_STRINGLTR, - PyArray_STRING, 0, - _ALIGN(char), - (PyObject *)\ - &PyStringArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "UNICODE", - s=Py_BuildValue("ciiiO", PyArray_UNICODELTR, - PyArray_UNICODE, 0, - _ALIGN(PyArray_UCS4), - (PyObject *)\ - &PyUnicodeArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "VOID", - s=Py_BuildValue("ciiiO", PyArray_VOIDLTR, - PyArray_VOID, 0, - _ALIGN(char), - (PyObject *)\ - &PyVoidArrType_Type)); - Py_DECREF(s); - -#define SETTYPE(name) \ - Py_INCREF(&Py##name##ArrType_Type); \ - PyDict_SetItemString(infodict, #name, \ - (PyObject *)&Py##name##ArrType_Type); - - SETTYPE(Generic) - SETTYPE(Number) - SETTYPE(Integer) - SETTYPE(Inexact) - SETTYPE(SignedInteger) - SETTYPE(UnsignedInteger) - SETTYPE(Floating) - SETTYPE(ComplexFloating) - SETTYPE(Flexible) - SETTYPE(Character) - -#undef SETTYPE - - PyDict_SetItemString(dict, "typeinfo", infodict); - Py_DECREF(infodict); - return 0; -} - -#undef _MAX_LETTER diff --git a/numpy/core/src/multiarraymodule.c b/numpy/core/src/multiarraymodule.c deleted file mode 100644 index c67d1db0a..000000000 --- a/numpy/core/src/multiarraymodule.c +++ /dev/null @@ -1,7666 +0,0 @@ -/* - Python Multiarray Module -- A useful collection of functions for creating and - using ndarrays - - Original file - Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - - Modified for numpy in 2005 - - Travis E. Oliphant - oliphant@ee.byu.edu - Brigham Young University -*/ - -/* $Id: multiarraymodule.c,v 1.36 2005/09/14 00:14:00 teoliphant Exp $ */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" - -#define PyAO PyArrayObject - - -static PyObject *typeDict=NULL; /* Must be explicitly loaded */ - -static PyArray_Descr * -_arraydescr_fromobj(PyObject *obj) -{ - PyObject *dtypedescr; - PyArray_Descr *new; - int ret; - - dtypedescr = PyObject_GetAttrString(obj, "dtype"); - PyErr_Clear(); - if (dtypedescr) { - ret = PyArray_DescrConverter(dtypedescr, &new); - Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) return new; - PyErr_Clear(); - } - /* Understand basic ctypes */ - dtypedescr = PyObject_GetAttrString(obj, "_type_"); - PyErr_Clear(); - if (dtypedescr) { - ret = PyArray_DescrConverter(dtypedescr, &new); - Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) { - PyObject *length; - length = PyObject_GetAttrString(obj, "_length_"); - PyErr_Clear(); - if (length) { /* derived type */ - PyObject *newtup; - PyArray_Descr *derived; - newtup = Py_BuildValue("NO", new, length); - ret = PyArray_DescrConverter(newtup, &derived); - Py_DECREF(newtup); - if (ret == PY_SUCCEED) return derived; - PyErr_Clear(); - return NULL; - } - return new; - } - PyErr_Clear(); - return NULL; - } - /* Understand ctypes structures -- - bit-fields are not supported - automatically aligns */ - dtypedescr = PyObject_GetAttrString(obj, "_fields_"); - PyErr_Clear(); - if (dtypedescr) { - ret = PyArray_DescrAlignConverter(dtypedescr, &new); - Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) return new; - PyErr_Clear(); - } - return NULL; -} - - -/* Including this file is the only way I know how to declare functions - static in each file, and store the pointers from functions in both - 
arrayobject.c and multiarraymodule.c for the C-API - - Declarying an external pointer-containing variable in arrayobject.c - and trying to copy it to PyArray_API, did not work. - - Think about two modules with a common api that import each other... - - This file would just be the module calls. -*/ - -#include "arrayobject.c" - - -/* An Error object -- rarely used? */ -static PyObject *MultiArrayError; - -/*MULTIARRAY_API - Multiply a List of ints -*/ -static int -PyArray_MultiplyIntList(register int *l1, register int n) -{ - register int s=1; - while (n--) s *= (*l1++); - return s; -} - -/*MULTIARRAY_API - Multiply a List -*/ -static intp -PyArray_MultiplyList(register intp *l1, register int n) -{ - register intp s=1; - while (n--) s *= (*l1++); - return s; -} - -/*MULTIARRAY_API - Produce a pointer into array -*/ -static void * -PyArray_GetPtr(PyArrayObject *obj, register intp* ind) -{ - register int n = obj->nd; - register intp *strides = obj->strides; - register char *dptr = obj->data; - - while (n--) dptr += (*strides++) * (*ind++); - return (void *)dptr; -} - -/*MULTIARRAY_API - Get axis from an object (possibly None) -- a converter function, -*/ -static int -PyArray_AxisConverter(PyObject *obj, int *axis) -{ - if (obj == Py_None) { - *axis = MAX_DIMS; - } - else { - *axis = (int) PyInt_AsLong(obj); - if (PyErr_Occurred()) { - return PY_FAIL; - } - } - return PY_SUCCEED; -} - -/*MULTIARRAY_API - Compare Lists -*/ -static int -PyArray_CompareLists(intp *l1, intp *l2, int n) -{ - int i; - for(i=0;iob_type; - - Py_INCREF(self->descr); - new = PyArray_NewFromDescr(subtype, - self->descr, - self->nd, self->dimensions, - self->strides, - self->data, - self->flags, (PyObject *)self); - - if (new==NULL) return NULL; - Py_INCREF(self); - PyArray_BASE(new) = (PyObject *)self; - - if (type != NULL) { - if (PyObject_SetAttrString(new, "dtype", - (PyObject *)type) < 0) { - Py_DECREF(new); - Py_DECREF(type); - return NULL; - } - Py_DECREF(type); - } - return new; -} - -/* Returns a contiguous array */ - -/*MULTIARRAY_API - Ravel -*/ -static PyObject * -PyArray_Ravel(PyArrayObject *a, NPY_ORDER fortran) -{ - PyArray_Dims newdim = {NULL,1}; - intp val[1] = {-1}; - - if (fortran == PyArray_ANYORDER) - fortran = PyArray_ISFORTRAN(a); - - newdim.ptr = val; - if (!fortran && PyArray_ISCONTIGUOUS(a)) { - return PyArray_Newshape(a, &newdim, PyArray_CORDER); - } - else if (fortran && PyArray_ISFORTRAN(a)) { - return PyArray_Newshape(a, &newdim, PyArray_FORTRANORDER); - } - else - return PyArray_Flatten(a, fortran); -} - -static double -power_of_ten(int n) -{ - static const double p10[] = {1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8}; - double ret; - if (n < 9) - ret = p10[n]; - else { - ret = 1e9; - while (n-- > 9) - ret *= 10.; - } - return ret; -} - -/*MULTIARRAY_API - Round -*/ -static PyObject * -PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) -{ - PyObject *f, *ret=NULL, *tmp, *op1, *op2; - int ret_int=0; - PyArray_Descr *my_descr; - if (out && (PyArray_SIZE(out) != PyArray_SIZE(a))) { - PyErr_SetString(PyExc_ValueError, - "invalid output shape"); - return NULL; - } - if (PyArray_ISCOMPLEX(a)) { - PyObject *part; - PyObject *round_part; - PyObject *new; - int res; - if (out) { - new = (PyObject *)out; - Py_INCREF(new); - } - else { - new = PyArray_Copy(a); - if (new == NULL) return NULL; - } - - /* new.real = a.real.round(decimals) */ - part = PyObject_GetAttrString(new, "real"); - if (part == NULL) {Py_DECREF(new); return NULL;} - part = PyArray_EnsureAnyArray(part); - round_part = 
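PyArray_GetPtr above is the canonical strided address computation; the same thing written without the NumPy types (strided_ptr is a hypothetical name; strides are in bytes, indices in elements):

#include <stddef.h>

/* Address of element (ind[0], ..., ind[nd-1]):
   data + strides[0]*ind[0] + ... + strides[nd-1]*ind[nd-1]. */
static void *
strided_ptr(char *data, const ptrdiff_t *strides, const ptrdiff_t *ind, int nd)
{
    while (nd--)
        data += (*strides++) * (*ind++);
    return (void *)data;
}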
PyArray_Round((PyArrayObject *)part, - decimals, NULL); - Py_DECREF(part); - if (round_part == NULL) {Py_DECREF(new); return NULL;} - res = PyObject_SetAttrString(new, "real", round_part); - Py_DECREF(round_part); - if (res < 0) {Py_DECREF(new); return NULL;} - - /* new.imag = a.imag.round(decimals) */ - part = PyObject_GetAttrString(new, "imag"); - if (part == NULL) {Py_DECREF(new); return NULL;} - part = PyArray_EnsureAnyArray(part); - round_part = PyArray_Round((PyArrayObject *)part, - decimals, NULL); - Py_DECREF(part); - if (round_part == NULL) {Py_DECREF(new); return NULL;} - res = PyObject_SetAttrString(new, "imag", round_part); - Py_DECREF(round_part); - if (res < 0) {Py_DECREF(new); return NULL;} - return new; - } - /* do the most common case first */ - if (decimals >= 0) { - if (PyArray_ISINTEGER(a)) { - if (out) { - if (PyArray_CopyAnyInto(out, a) < 0) return NULL; - Py_INCREF(out); - return (PyObject *)out; - } - else { - Py_INCREF(a); - return (PyObject *)a; - } - } - if (decimals == 0) { - if (out) { - return PyObject_CallFunction(n_ops.rint, "OO", - a, out); - } - return PyObject_CallFunction(n_ops.rint, "O", a); - } - op1 = n_ops.multiply; - op2 = n_ops.true_divide; - } - else { - op1 = n_ops.true_divide; - op2 = n_ops.multiply; - decimals = -decimals; - } - if (!out) { - if (PyArray_ISINTEGER(a)) { - ret_int = 1; - my_descr = PyArray_DescrFromType(NPY_DOUBLE); - } - else { - Py_INCREF(a->descr); - my_descr = a->descr; - } - out = (PyArrayObject *)PyArray_Empty(a->nd, a->dimensions, - my_descr, - PyArray_ISFORTRAN(a)); - if (out == NULL) return NULL; - } - else Py_INCREF(out); - f = PyFloat_FromDouble(power_of_ten(decimals)); - if (f==NULL) return NULL; - ret = PyObject_CallFunction(op1, "OOO", a, f, out); - if (ret==NULL) goto finish; - tmp = PyObject_CallFunction(n_ops.rint, "OO", ret, ret); - if (tmp == NULL) {Py_DECREF(ret); ret=NULL; goto finish;} - Py_DECREF(tmp); - tmp = PyObject_CallFunction(op2, "OOO", ret, f, ret); - if (tmp == NULL) {Py_DECREF(ret); ret=NULL; goto finish;} - Py_DECREF(tmp); - - finish: - Py_DECREF(f); - Py_DECREF(out); - if (ret_int) { - Py_INCREF(a->descr); - tmp = PyArray_CastToType((PyArrayObject *)ret, - a->descr, PyArray_ISFORTRAN(a)); - Py_DECREF(ret); - return tmp; - } - return ret; - -} - - -/*MULTIARRAY_API - Flatten -*/ -static PyObject * -PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) -{ - PyObject *ret; - intp size; - - if (order == PyArray_ANYORDER) - order = PyArray_ISFORTRAN(a); - - size = PyArray_SIZE(a); - Py_INCREF(a->descr); - ret = PyArray_NewFromDescr(a->ob_type, - a->descr, - 1, &size, - NULL, - NULL, - 0, (PyObject *)a); - - if (ret== NULL) return NULL; - if (_flat_copyinto(ret, (PyObject *)a, order) < 0) { - Py_DECREF(ret); - return NULL; - } - return ret; -} - - -/* For back-ward compatability * - - / * Not recommended */ - -/*MULTIARRAY_API - Reshape an array -*/ -static PyObject * -PyArray_Reshape(PyArrayObject *self, PyObject *shape) -{ - PyObject *ret; - PyArray_Dims newdims; - - if (!PyArray_IntpConverter(shape, &newdims)) return NULL; - ret = PyArray_Newshape(self, &newdims, PyArray_CORDER); - PyDimMem_FREE(newdims.ptr); - return ret; -} - -/* inserts 0 for strides where dimension will be 1 */ -static int -_check_ones(PyArrayObject *self, int newnd, intp* newdims, intp *strides) -{ - int nd; - intp *dims; - Bool done=FALSE; - int j, k; - - nd = self->nd; - dims = self->dimensions; - - for (k=0, j=0; !done && (jstrides[j]; - j++; k++; - } - else if ((kstrides). 
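PyArray_Round above implements decimal rounding by scaling, rounding to nearest, and unscaling; for negative decimals the multiply and divide swap roles. A scalar sketch of the same arithmetic (round_to_decimals is a hypothetical name; pow stands in for the table-assisted power_of_ten helper):

#include <math.h>

static double
round_to_decimals(double x, int decimals)
{
    double f = pow(10.0, (double)decimals);   /* 10**decimals */
    return rint(x * f) / f;                   /* scale, round, unscale */
}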
- * - * If some output dimensions have length 1, the strides assigned to - * them are arbitrary. In the current implementation, they are the - * stride of the next-fastest index. - */ -static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, intp* newdims, - intp *newstrides, int fortran) -{ - int oldnd; - intp olddims[MAX_DIMS]; - intp oldstrides[MAX_DIMS]; - int oi, oj, ok, ni, nj, nk; - int np, op; - - oldnd = 0; - for (oi=0; oind; oi++) { - if (self->dimensions[oi]!=1) { - olddims[oldnd] = self->dimensions[oi]; - oldstrides[oldnd] = self->strides[oi]; - oldnd++; - } - } - - /* - fprintf(stderr, "_attempt_nocopy_reshape( ("); - for (oi=0; oi ("); - for (ni=0; nini;nk--) - newstrides[nk-1]=newstrides[nk]*newdims[nk]; - } - - ni = nj++; - oi = oj++; - - } - - /* - fprintf(stderr, "success: _attempt_nocopy_reshape ("); - for (oi=0; oi ("); - for (ni=0; niptr; - n = newshape->len; - s_known = 1; - i_unknown = -1; - - for(i=0; i= 0) { - if ((s_known == 0) || (s_original % s_known != 0)) { - PyErr_SetString(PyExc_ValueError, msg); - return -1; - } - dimensions[i_unknown] = s_original/s_known; - } else { - if (s_original != s_known) { - PyErr_SetString(PyExc_ValueError, msg); - return -1; - } - } - return 0; -} - -/* Returns a new array - with the new shape from the data - in the old array --- order-perspective depends on fortran argument. - copy-only-if-necessary -*/ - -/*MULTIARRAY_API - New shape for an array -*/ -static PyObject * -PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, - NPY_ORDER fortran) -{ - intp i; - intp *dimensions = newdims->ptr; - PyArrayObject *ret; - int n = newdims->len; - Bool same, incref=TRUE; - intp *strides = NULL; - intp newstrides[MAX_DIMS]; - int flags; - - if (fortran == PyArray_ANYORDER) - fortran = PyArray_ISFORTRAN(self); - - /* Quick check to make sure anything actually needs to be done */ - if (n == self->nd) { - same = TRUE; - i=0; - while(same && iflags; - - if (strides==NULL) { /* we are really re-shaping not just adding ones - to the shape somewhere */ - - /* fix any -1 dimensions and check new-dimensions against - old size */ - if (_fix_unknown_dimension(newdims, PyArray_SIZE(self)) < 0) - return NULL; - - /* sometimes we have to create a new copy of the array - in order to get the right orientation and - because we can't just re-use the buffer with the - data in the order it is in. - */ - if (!(PyArray_ISONESEGMENT(self)) || - (((PyArray_CHKFLAGS(self, NPY_CONTIGUOUS) && - fortran == NPY_FORTRANORDER) - || (PyArray_CHKFLAGS(self, NPY_FORTRAN) && - fortran == NPY_CORDER)) && (self->nd > 1))) { - - int success=0; - success = _attempt_nocopy_reshape(self,n,dimensions, - newstrides,fortran); - if (success) { - /* no need to copy the array after all */ - strides = newstrides; - flags = self->flags; - } else { - PyObject *new; - new = PyArray_NewCopy(self, fortran); - if (new == NULL) return NULL; - incref = FALSE; - self = (PyArrayObject *)new; - flags = self->flags; - } - } - - /* We always have to interpret the contiguous buffer correctly - */ - - /* Make sure the flags argument is set. 
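_fix_unknown_dimension above implements the reshape rule that at most one requested dimension may be -1. A standalone sketch of that rule (fix_unknown is a hypothetical name; returns -1 on an incompatible request):

/* Replace a single -1 entry in dims[] so that the product of the new
   shape equals `total`; reject shapes that cannot match. */
static int
fix_unknown(long *dims, int n, long total)
{
    long known = 1;
    int i, unknown = -1;

    for (i = 0; i < n; i++) {
        if (dims[i] < 0) {
            if (unknown >= 0)
                return -1;                /* more than one unknown dimension */
            unknown = i;
        } else {
            known *= dims[i];
        }
    }
    if (unknown >= 0) {
        if (known == 0 || total % known != 0)
            return -1;                    /* total size cannot be divided evenly */
        dims[unknown] = total / known;
    } else if (known != total) {
        return -1;                        /* total sizes must agree */
    }
    return 0;
}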
- */ - if (n > 1) { - if (fortran == NPY_FORTRANORDER) { - flags &= ~NPY_CONTIGUOUS; - flags |= NPY_FORTRAN; - } - else { - flags &= ~NPY_FORTRAN; - flags |= NPY_CONTIGUOUS; - } - } - } - else if (n > 0) { - /* replace any 0-valued strides with - appropriate value to preserve contiguousness - */ - if (fortran == PyArray_FORTRANORDER) { - if (strides[0] == 0) - strides[0] = self->descr->elsize; - for (i=1; idescr->elsize; - for (i=n-2; i>-1; i--) { - if (strides[i] == 0) - strides[i] = strides[i+1] * \ - dimensions[i+1]; - } - } - } - - Py_INCREF(self->descr); - ret = (PyAO *)PyArray_NewFromDescr(self->ob_type, - self->descr, - n, dimensions, - strides, - self->data, - flags, (PyObject *)self); - - if (ret== NULL) goto fail; - - if (incref) Py_INCREF(self); - ret->base = (PyObject *)self; - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - - return (PyObject *)ret; - - fail: - if (!incref) {Py_DECREF(self);} - return NULL; -} - - - -/* return a new view of the array object with all of its unit-length - dimensions squeezed out if needed, otherwise - return the same array. -*/ - -/*MULTIARRAY_API*/ -static PyObject * -PyArray_Squeeze(PyArrayObject *self) -{ - int nd = self->nd; - int newnd = nd; - intp dimensions[MAX_DIMS]; - intp strides[MAX_DIMS]; - int i,j; - PyObject *ret; - - if (nd == 0) { - Py_INCREF(self); - return (PyObject *)self; - } - for (j=0, i=0; idimensions[i] == 1) { - newnd -= 1; - } - else { - dimensions[j] = self->dimensions[i]; - strides[j++] = self->strides[i]; - } - } - - Py_INCREF(self->descr); - ret = PyArray_NewFromDescr(self->ob_type, - self->descr, - newnd, dimensions, - strides, self->data, - self->flags, - (PyObject *)self); - if (ret == NULL) return NULL; - PyArray_FLAGS(ret) &= ~OWNDATA; - PyArray_BASE(ret) = (PyObject *)self; - Py_INCREF(self); - return (PyObject *)ret; -} - - -/*MULTIARRAY_API - Mean -*/ -static PyObject * -PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *obj1=NULL, *obj2=NULL; - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - obj1 = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, - rtype, out); - obj2 = PyFloat_FromDouble((double) PyArray_DIM(new,axis)); - Py_DECREF(new); - if (obj1 == NULL || obj2 == NULL) { - Py_XDECREF(obj1); - Py_XDECREF(obj2); - return NULL; - } - if (!out) { - ret = PyNumber_Divide(obj1, obj2); - } - else { - ret = PyObject_CallFunction(n_ops.divide, "OOO", out, obj2, out); - } - Py_DECREF(obj1); - Py_DECREF(obj2); - return ret; -} - -/* Set variance to 1 to by-pass square-root calculation and return variance */ -/*MULTIARRAY_API - Std -*/ -static PyObject * -PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, - int variance) -{ - PyObject *obj1=NULL, *obj2=NULL, *new=NULL; - PyObject *ret=NULL, *newshape=NULL; - int i, n; - intp val; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - /* Compute and reshape mean */ - obj1 = PyArray_EnsureArray(PyArray_Mean((PyAO *)new, axis, rtype, NULL)); - if (obj1 == NULL) {Py_DECREF(new); return NULL;} - n = PyArray_NDIM(new); - newshape = PyTuple_New(n); - if (newshape == NULL) {Py_DECREF(obj1); Py_DECREF(new); return NULL;} - for (i=0; iob_type == ret->ob_type) return ret; - obj1 = PyArray_EnsureArray(ret); - if (obj1 == NULL) return NULL; - ret = PyArray_View((PyAO *)obj1, NULL, self->ob_type); - Py_DECREF(obj1); - if (out) { - if (PyArray_CopyAnyInto(out, (PyArrayObject *)ret) < 0) { - Py_DECREF(ret); - return NULL; - } - Py_DECREF(ret); - 
Py_INCREF(out); - return (PyObject *)out; - } - return ret; -} - - -/*MULTIARRAY_API - Sum -*/ -static PyObject * -PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*MULTIARRAY_API - Prod -*/ -static PyObject * -PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.multiply, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*MULTIARRAY_API - CumSum -*/ -static PyObject * -PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.add, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*MULTIARRAY_API - CumProd -*/ -static PyObject * -PyArray_CumProd(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - ret = PyArray_GenericAccumulateFunction((PyAO *)new, - n_ops.multiply, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*MULTIARRAY_API - Any -*/ -static PyObject * -PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - ret = PyArray_GenericReduceFunction((PyAO *)new, - n_ops.logical_or, axis, - PyArray_BOOL, out); - Py_DECREF(new); - return ret; -} - -/*MULTIARRAY_API - All -*/ -static PyObject * -PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - - ret = PyArray_GenericReduceFunction((PyAO *)new, - n_ops.logical_and, axis, - PyArray_BOOL, out); - Py_DECREF(new); - return ret; -} - - -/*MULTIARRAY_API - Compress -*/ -static PyObject * -PyArray_Compress(PyArrayObject *self, PyObject *condition, int axis, - PyArrayObject *out) -{ - PyArrayObject *cond; - PyObject *res, *ret; - - cond = (PyAO *)PyArray_FROM_O(condition); - if (cond == NULL) return NULL; - - if (cond->nd != 1) { - Py_DECREF(cond); - PyErr_SetString(PyExc_ValueError, - "condition must be 1-d array"); - return NULL; - } - - res = PyArray_Nonzero(cond); - Py_DECREF(cond); - if (res == NULL) return res; - ret = PyArray_TakeFrom(self, PyTuple_GET_ITEM(res, 0), axis, - out, NPY_RAISE); - Py_DECREF(res); - return ret; -} - -/*MULTIARRAY_API - Nonzero -*/ -static PyObject * -PyArray_Nonzero(PyArrayObject *self) -{ - int n=self->nd, j; - intp count=0, i, size; - PyArrayIterObject *it=NULL; - PyObject *ret=NULL, *item; - intp *dptr[MAX_DIMS]; - - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it==NULL) return NULL; - - size = it->size; - for (i=0; idescr->f->nonzero(it->dataptr, self)) count++; - PyArray_ITER_NEXT(it); - } - - PyArray_ITER_RESET(it); - ret = PyTuple_New(n); - if (ret == NULL) goto fail; - for (j=0; job_type, 1, &count, - PyArray_INTP, NULL, NULL, 0, 0, - (PyObject *)self); - if (item == NULL) goto fail; - PyTuple_SET_ITEM(ret, j, item); - dptr[j] = (intp *)PyArray_DATA(item); - } - if (n==1) { - for (i=0; idescr->f->nonzero(it->dataptr, self)) - *(dptr[0])++ = i; - PyArray_ITER_NEXT(it); - } - } - 
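PyArray_Std above builds its result out of reductions (mean, squared deviations, another mean, optional square root). A flat-array sketch of the same computation for doubles (std_or_var is a hypothetical name; the divisor n, i.e. ddof = 0, is an assumption of this sketch):

#include <math.h>
#include <stddef.h>

static double
std_or_var(const double *x, size_t n, int variance)
{
    size_t i;
    double mean = 0.0, acc = 0.0;

    for (i = 0; i < n; i++)
        mean += x[i];
    mean /= (double)n;
    for (i = 0; i < n; i++) {
        double d = x[i] - mean;
        acc += d * d;                    /* squared deviation */
    }
    acc /= (double)n;
    return variance ? acc : sqrt(acc);   /* variance==1 skips the sqrt */
}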
else { - /* reset contiguous so that coordinates gets updated */ - it->contiguous = 0; - for (i=0; idescr->f->nonzero(it->dataptr, self)) - for (j=0; jcoordinates[j]; - PyArray_ITER_NEXT(it); - } - } - - Py_DECREF(it); - return ret; - - fail: - Py_XDECREF(ret); - Py_XDECREF(it); - return NULL; - -} - -static PyObject * -_GenericBinaryOutFunction(PyArrayObject *m1, PyObject *m2, PyArrayObject *out, - PyObject *op) -{ - if (out == NULL) - return PyObject_CallFunction(op, "OO", m1, m2); - else - return PyObject_CallFunction(op, "OOO", m1, m2, out); -} - -static PyObject * -_slow_array_clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *out) -{ - PyObject *res1=NULL, *res2=NULL; - - if (max != NULL) { - res1 = _GenericBinaryOutFunction(self, max, out, n_ops.minimum); - if (res1 == NULL) return NULL; - } - else { - res1 = (PyObject *)self; - Py_INCREF(res1); - } - - if (min != NULL) { - res2 = _GenericBinaryOutFunction((PyArrayObject *)res1, - min, out, n_ops.maximum); - if (res2 == NULL) {Py_XDECREF(res1); return NULL;} - } - else { - res2 = res1; - Py_INCREF(res2); - } - Py_DECREF(res1); - return res2; -} - -/*MULTIARRAY_API - Clip -*/ -static PyObject * -PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *out) -{ - PyArray_FastClipFunc *func; - int outgood=0, ingood=0; - PyArrayObject *maxa=NULL; - PyArrayObject *mina=NULL; - PyArrayObject *newout=NULL, *newin=NULL; - PyArray_Descr *indescr, *newdescr; - char *max_data, *min_data; - PyObject *zero; - - if ((max == NULL) && (min == NULL)) { - PyErr_SetString(PyExc_ValueError, "array_clip: must set either max "\ - "or min"); - return NULL; - } - - func = self->descr->f->fastclip; - if (func == NULL || (min != NULL && !PyArray_CheckAnyScalar(min)) || - (max != NULL && !PyArray_CheckAnyScalar(max))) - return _slow_array_clip(self, min, max, out); - - /* Use the fast scalar clip function */ - - /* First we need to figure out the correct type */ - indescr = NULL; - if (min != NULL) { - indescr = PyArray_DescrFromObject(min, NULL); - if (indescr == NULL) return NULL; - } - if (max != NULL) { - newdescr = PyArray_DescrFromObject(max, indescr); - Py_XDECREF(indescr); - if (newdescr == NULL) return NULL; - } - else { - newdescr = indescr; /* Steal the reference */ - } - - - /* Use the scalar descriptor only if it is of a bigger - KIND than the input array (and then find the - type that matches both). - */ - if (PyArray_ScalarKind(newdescr->type_num, NULL) > - PyArray_ScalarKind(self->descr->type_num, NULL)) { - indescr = _array_small_type(newdescr, self->descr); - func = indescr->f->fastclip; - } - else { - indescr = self->descr; - Py_INCREF(indescr); - } - Py_DECREF(newdescr); - - if (!PyDataType_ISNOTSWAPPED(indescr)) { - PyArray_Descr *descr2; - descr2 = PyArray_DescrNewByteorder(indescr, '='); - Py_DECREF(indescr); - if (descr2 == NULL) goto fail; - indescr = descr2; - } - - /* Convert max to an array */ - if (max != NULL) { - maxa = (NPY_AO *)PyArray_FromAny(max, indescr, 0, 0, - NPY_DEFAULT, NULL); - if (maxa == NULL) return NULL; - } - else { - /* Side-effect of PyArray_FromAny */ - Py_DECREF(indescr); - } - - - /* If we are unsigned, then make sure min is not <0 */ - /* This is to match the behavior of - _slow_array_clip - - We allow min and max to go beyond the limits - for other data-types in which case they - are interpreted as their modular counterparts. 
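PyArray_Nonzero above makes two passes over the data: one to count the nonzero elements (so the index arrays can be allocated exactly), one to record their positions. The same idea for a flat double array (both function names are hypothetical):

#include <stddef.h>

static size_t
count_nonzero(const double *x, size_t n)
{
    size_t i, c = 0;
    for (i = 0; i < n; i++)
        if (x[i] != 0.0) c++;
    return c;
}

/* idx must have count_nonzero(x, n) slots. */
static void
fill_nonzero(const double *x, size_t n, size_t *idx)
{
    size_t i;
    for (i = 0; i < n; i++)
        if (x[i] != 0.0) *idx++ = i;
}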
- */ - if (min != NULL) { - if (PyArray_ISUNSIGNED(self)) { - int cmp; - zero = PyInt_FromLong(0); - cmp = PyObject_RichCompareBool(min, zero, Py_LT); - if (cmp == -1) { Py_DECREF(zero); goto fail;} - if (cmp == 1) { - min = zero; - } - else { - Py_DECREF(zero); - Py_INCREF(min); - } - } - else { - Py_INCREF(min); - } - - /* Convert min to an array */ - Py_INCREF(indescr); - mina = (NPY_AO *)PyArray_FromAny(min, indescr, 0, 0, - NPY_DEFAULT, NULL); - Py_DECREF(min); - if (mina == NULL) goto fail; - } - - - /* Check to see if input is single-segment, aligned, - and in native byteorder */ - if (PyArray_ISONESEGMENT(self) && PyArray_CHKFLAGS(self, ALIGNED) && - PyArray_ISNOTSWAPPED(self) && (self->descr == indescr)) - ingood = 1; - - if (!ingood) { - int flags; - if (PyArray_ISFORTRAN(self)) flags = NPY_FARRAY; - else flags = NPY_CARRAY; - Py_INCREF(indescr); - newin = (NPY_AO *)PyArray_FromArray(self, indescr, flags); - if (newin == NULL) goto fail; - } - else { - newin = self; - Py_INCREF(newin); - } - - /* At this point, newin is a single-segment, aligned, and correct - byte-order array of the correct type - - if ingood == 0, then it is a copy, otherwise, - it is the original input. - */ - - /* If we have already made a copy of the data, then use - that as the output array - */ - if (out == NULL && !ingood) { - out = newin; - } - - /* Now, we know newin is a usable array for fastclip, - we need to make sure the output array is available - and usable */ - if (out == NULL) { - Py_INCREF(indescr); - out = (NPY_AO*)PyArray_NewFromDescr(self->ob_type, - indescr, self->nd, - self->dimensions, - NULL, NULL, - PyArray_ISFORTRAN(self), - NULL); - if (out == NULL) goto fail; - outgood = 1; - } - else Py_INCREF(out); - /* Input is good at this point */ - if (out == newin) { - outgood = 1; - } - if (!outgood && PyArray_ISONESEGMENT(out) && - PyArray_CHKFLAGS(out, ALIGNED) && PyArray_ISNOTSWAPPED(out) && - PyArray_EquivTypes(out->descr, indescr)) { - outgood = 1; - } - - /* Do we still not have a suitable output array? */ - /* Create one, now */ - if (!outgood) { - int oflags; - if (PyArray_ISFORTRAN(out)) - oflags = NPY_FARRAY; - else - oflags = NPY_CARRAY; - oflags |= NPY_UPDATEIFCOPY | NPY_FORCECAST; - Py_INCREF(indescr); - newout = (NPY_AO*)PyArray_FromArray(out, indescr, oflags); - if (newout == NULL) goto fail; - } - else { - newout = out; - Py_INCREF(newout); - } - - /* make sure the shape of the output array is the same */ - if (!PyArray_SAMESHAPE(newin, newout)) { - PyErr_SetString(PyExc_ValueError, "clip: Output array must have the" - "same shape as the input."); - goto fail; - } - - if (newout->data != newin->data) { - memcpy(newout->data, newin->data, PyArray_NBYTES(newin)); - } - - /* Now we can call the fast-clip function */ - - min_data = max_data = NULL; - if (mina != NULL) - min_data = mina->data; - if (maxa != NULL) - max_data = maxa->data; - - func(newin->data, PyArray_SIZE(newin), min_data, max_data, - newout->data); - - /* Clean up temporary variables */ - Py_XDECREF(mina); - Py_XDECREF(maxa); - Py_DECREF(newin); - /* Copy back into out if out was not already a nice array. 
*/ - Py_DECREF(newout); - return (PyObject *)out; - - fail: - Py_XDECREF(maxa); - Py_XDECREF(mina); - Py_XDECREF(newin); - PyArray_XDECREF_ERR(newout); - return NULL; -} - - -/*MULTIARRAY_API - Conjugate -*/ -static PyObject * -PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) -{ - if (PyArray_ISCOMPLEX(self)) { - if (out == NULL) { - return PyArray_GenericUnaryFunction(self, - n_ops.conjugate); - } - else { - return PyArray_GenericBinaryFunction(self, - (PyObject *)out, - n_ops.conjugate); - } - } - else { - PyArrayObject *ret; - if (out) { - if (PyArray_CopyAnyInto(out, self)< 0) - return NULL; - ret = out; - } - else ret = self; - Py_INCREF(ret); - return (PyObject *)ret; - } -} - -/*MULTIARRAY_API - Trace -*/ -static PyObject * -PyArray_Trace(PyArrayObject *self, int offset, int axis1, int axis2, - int rtype, PyArrayObject *out) -{ - PyObject *diag=NULL, *ret=NULL; - - diag = PyArray_Diagonal(self, offset, axis1, axis2); - if (diag == NULL) return NULL; - ret = PyArray_GenericReduceFunction((PyAO *)diag, n_ops.add, -1, rtype, out); - Py_DECREF(diag); - return ret; -} - -/*MULTIARRAY_API - Diagonal -*/ -static PyObject * -PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) -{ - int n = self->nd; - PyObject *new; - PyArray_Dims newaxes; - intp dims[MAX_DIMS]; - int i, pos; - - newaxes.ptr = dims; - if (n < 2) { - PyErr_SetString(PyExc_ValueError, - "array.ndim must be >= 2"); - return NULL; - } - if (axis1 < 0) axis1 += n; - if (axis2 < 0) axis2 += n; - if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || \ - (axis2 < 0) || (axis2 >= n)) { - PyErr_Format(PyExc_ValueError, "axis1(=%d) and axis2(=%d) "\ - "must be different and within range (nd=%d)", - axis1, axis2, n); - return NULL; - } - - newaxes.len = n; - /* insert at the end */ - newaxes.ptr[n-2] = axis1; - newaxes.ptr[n-1] = axis2; - pos = 0; - for (i=0; idimensions[0]; - n2 = self->dimensions[1]; - step = n2+1; - if (offset < 0) { - start = -n2 * offset; - stop = MIN(n2, n1+offset)*(n2+1) - n2*offset; - } - else { - start = offset; - stop = MIN(n1, n2-offset)*(n2+1) + offset; - } - - /* count = ceil((stop-start)/step) */ - count = ((stop-start) / step) + (((stop-start) % step) != 0); - - indices = PyArray_New(&PyArray_Type, 1, &count, - PyArray_INTP, NULL, NULL, 0, 0, NULL); - if (indices == NULL) { - Py_DECREF(self); return NULL; - } - dptr = (intp *)PyArray_DATA(indices); - for (n1=start; n1descr; - - mydiagonal = PyList_New(0); - if (mydiagonal == NULL) {Py_DECREF(self); return NULL;} - n1 = self->dimensions[0]; - for (i=0; i 3)) { - PyErr_SetString(PyExc_ValueError, - "C arrays of only 1-3 dimensions available"); - Py_XDECREF(typedescr); - return -1; - } - if ((ap = (PyArrayObject*)PyArray_FromAny(*op, typedescr, nd, nd, - CARRAY, NULL)) == NULL) - return -1; - switch(nd) { - case 1: - *((char **)ptr) = ap->data; - break; - case 2: - n = ap->dimensions[0]; - ptr2 = (char **)_pya_malloc(n * sizeof(char *)); - if (!ptr2) goto fail; - for (i=0; idata + i*ap->strides[0]; - } - *((char ***)ptr) = ptr2; - break; - case 3: - n = ap->dimensions[0]; - m = ap->dimensions[1]; - ptr3 = (char ***)_pya_malloc(n*(m+1) * sizeof(char *)); - if (!ptr3) goto fail; - for (i=0; idata + i*ap->strides[0] + \ - j*ap->strides[1]; - } - } - *((char ****)ptr) = ptr3; - } - memcpy(dims, ap->dimensions, nd*sizeof(intp)); - *op = (PyObject *)ap; - return 0; - - fail: - PyErr_SetString(PyExc_MemoryError, "no memory"); - return -1; -} - -/* Deprecated --- Use PyArray_AsCArray instead */ - -/*MULTIARRAY_API - Convert to a 1D 
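For the 2-d case, PyArray_Diagonal above turns the offset into a flat start/stop/step index range and then takes those elements. A sketch of just that index computation (diag_indices is a hypothetical name; out must hold ceil((stop-start)/step) entries, the same count computed above):

#include <stddef.h>

static size_t
diag_indices(long n1, long n2, long offset, long *out)
{
    long step = n2 + 1, start, stop, i;
    size_t count = 0;

    if (offset < 0) {
        start = -n2 * offset;
        stop  = (n2 < n1 + offset ? n2 : n1 + offset) * (n2 + 1) - n2 * offset;
    } else {
        start = offset;
        stop  = (n1 < n2 - offset ? n1 : n2 - offset) * (n2 + 1) + offset;
    }
    for (i = start; i < stop; i += step)
        out[count++] = i;             /* flat index of each diagonal element */
    return count;
}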
C-array -*/ -static int -PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) -{ - intp newd1; - PyArray_Descr *descr; - - descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) - return -1; - *d1 = (int) newd1; - return 0; -} - -/*MULTIARRAY_API - Convert to a 2D C-array -*/ -static int -PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) -{ - intp newdims[2]; - PyArray_Descr *descr; - - descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) - return -1; - - *d1 = (int ) newdims[0]; - *d2 = (int ) newdims[1]; - return 0; -} - -/* End Deprecated */ - -/*MULTIARRAY_API - Free pointers created if As2D is called -*/ -static int -PyArray_Free(PyObject *op, void *ptr) -{ - PyArrayObject *ap = (PyArrayObject *)op; - - if ((ap->nd < 1) || (ap->nd > 3)) - return -1; - if (ap->nd >= 2) { - _pya_free(ptr); - } - Py_DECREF(ap); - return 0; -} - - -static PyObject * -_swap_and_concat(PyObject *op, int axis, int n) -{ - PyObject *newtup=NULL; - PyObject *otmp, *arr; - int i; - - newtup = PyTuple_New(n); - if (newtup==NULL) return NULL; - for (i=0; i= MAX_DIMS) { - otmp = PyArray_Ravel(mps[i],0); - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)otmp; - } - if (mps[i]->ob_type != subtype) { - prior2 = PyArray_GetPriority((PyObject *)(mps[i]), 0.0); - if (prior2 > prior1) { - prior1 = prior2; - subtype = mps[i]->ob_type; - } - } - } - - new_dim = 0; - for(i=0; ind; - else { - if (nd != mps[i]->nd) { - PyErr_SetString(PyExc_ValueError, - "arrays must have same "\ - "number of dimensions"); - goto fail; - } - if (!PyArray_CompareLists(mps[0]->dimensions+1, - mps[i]->dimensions+1, - nd-1)) { - PyErr_SetString(PyExc_ValueError, - "array dimensions must "\ - "agree except for d_0"); - goto fail; - } - } - if (nd == 0) { - PyErr_SetString(PyExc_ValueError, - "0-d arrays can't be concatenated"); - goto fail; - } - new_dim += mps[i]->dimensions[0]; - } - - tmp = mps[0]->dimensions[0]; - mps[0]->dimensions[0] = new_dim; - Py_INCREF(mps[0]->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, - mps[0]->descr, nd, - mps[0]->dimensions, - NULL, NULL, 0, - (PyObject *)ret); - mps[0]->dimensions[0] = tmp; - - if (ret == NULL) goto fail; - - data = ret->data; - for(i=0; idata, numbytes); - data += numbytes; - } - - PyArray_INCREF(ret); - for(i=0; ind; - if (n <= 1) { - Py_INCREF(ap); - return (PyObject *)ap; - } - - if (a1 < 0) a1 += n; - if (a2 < 0) a2 += n; - if ((a1 < 0) || (a1 >= n)) { - PyErr_SetString(PyExc_ValueError, - "bad axis1 argument to swapaxes"); - return NULL; - } - if ((a2 < 0) || (a2 >= n)) { - PyErr_SetString(PyExc_ValueError, - "bad axis2 argument to swapaxes"); - return NULL; - } - new_axes.ptr = dims; - new_axes.len = n; - - for (i=0; ind; - for (i=0; ilen; - axes = permute->ptr; - if (n != ap->nd) { - PyErr_SetString(PyExc_ValueError, - "axes don't match array"); - return NULL; - } - for (i=0; ind+axis; - if (axis < 0 || axis >= ap->nd) { - PyErr_SetString(PyExc_ValueError, - "invalid axis for this array"); - return NULL; - } - if (reverse_permutation[axis] != -1) { - PyErr_SetString(PyExc_ValueError, - "repeated axis in transpose"); - return NULL; - } - reverse_permutation[axis] = i; - permutation[i] = axis; - } - for (i=0; idata. 
*/ - Py_INCREF(ap->descr); - ret = (PyArrayObject *)\ - PyArray_NewFromDescr(ap->ob_type, - ap->descr, - n, ap->dimensions, - NULL, ap->data, ap->flags, - (PyObject *)ap); - if (ret == NULL) return NULL; - - /* point at true owner of memory: */ - ret->base = (PyObject *)ap; - Py_INCREF(ap); - - /* fix the dimensions and strides of the return-array */ - for(i=0; idimensions[i] = ap->dimensions[permutation[i]]; - ret->strides[i] = ap->strides[permutation[i]]; - } - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - - return (PyObject *)ret; -} - -/*MULTIARRAY_API - Repeat the array. -*/ -static PyObject * -PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) -{ - intp *counts; - intp n, n_outer, i, j, k, chunk, total; - intp tmp; - int nd; - PyArrayObject *repeats=NULL; - PyObject *ap=NULL; - PyArrayObject *ret=NULL; - char *new_data, *old_data; - - repeats = (PyAO *)PyArray_ContiguousFromAny(op, PyArray_INTP, 0, 1); - if (repeats == NULL) return NULL; - nd = repeats->nd; - counts = (intp *)repeats->data; - - if ((ap=_check_axis(aop, &axis, CARRAY))==NULL) { - Py_DECREF(repeats); - return NULL; - } - - aop = (PyAO *)ap; - - if (nd == 1) - n = repeats->dimensions[0]; - else /* nd == 0 */ - n = aop->dimensions[axis]; - - if (aop->dimensions[axis] != n) { - PyErr_SetString(PyExc_ValueError, - "a.shape[axis] != len(repeats)"); - goto fail; - } - - - if (nd == 0) - total = counts[0]*n; - else { - - total = 0; - for(j=0; jdimensions[axis] = total; - Py_INCREF(aop->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(aop->ob_type, - aop->descr, - aop->nd, - aop->dimensions, - NULL, NULL, 0, - (PyObject *)aop); - aop->dimensions[axis] = n; - - if (ret == NULL) goto fail; - - new_data = ret->data; - old_data = aop->data; - - chunk = aop->descr->elsize; - for(i=axis+1; ind; i++) { - chunk *= aop->dimensions[i]; - } - - n_outer = 1; - for(i=0; idimensions[i]; - - for(i=0; idescr->elsize; - byteorder = arr->descr->byteorder; - ptr = arr->data; - if (elsize > 1 && \ - (byteorder == PyArray_LITTLE || \ - (byteorder == PyArray_NATIVE && - PyArray_ISNBO(PyArray_LITTLE)))) - ptr += elsize-1; - - return ((*ptr & bitmask) != 0); -} - - -/*OBJECT_API*/ -static NPY_SCALARKIND -PyArray_ScalarKind(int typenum, PyArrayObject **arr) -{ - if (PyTypeNum_ISSIGNED(typenum)) { - if (arr && _signbit_set(*arr)) return PyArray_INTNEG_SCALAR; - else return PyArray_INTPOS_SCALAR; - } - if (PyTypeNum_ISFLOAT(typenum)) return PyArray_FLOAT_SCALAR; - if (PyTypeNum_ISUNSIGNED(typenum)) return PyArray_INTPOS_SCALAR; - if (PyTypeNum_ISCOMPLEX(typenum)) return PyArray_COMPLEX_SCALAR; - if (PyTypeNum_ISBOOL(typenum)) return PyArray_BOOL_SCALAR; - - if (PyTypeNum_ISUSERDEF(typenum)) { - NPY_SCALARKIND retval; - PyArray_Descr* descr; - descr = PyArray_DescrFromType(typenum); - if (descr->f->scalarkind) - retval = descr->f->scalarkind((arr ? 
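The inner copy loop of PyArray_Repeat is garbled in this hunk; from the surrounding setup (chunk size, counts, n_outer) it copies each source chunk counts[j] times into the output. A sketch under that assumption, covering a single outer slice (repeat_chunks is a hypothetical name):

#include <string.h>

static void
repeat_chunks(char *dst, const char *src,
              long nchunks, long chunk, const long *counts)
{
    long j, k;
    for (j = 0; j < nchunks; j++) {
        for (k = 0; k < counts[j]; k++) {
            memcpy(dst, src, (size_t)chunk);   /* emit this chunk again */
            dst += chunk;
        }
        src += chunk;
    }
}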
*arr : NULL)); - else - retval = PyArray_NOSCALAR; - Py_DECREF(descr); - return retval; - } - return PyArray_OBJECT_SCALAR; -} - -/*OBJECT_API*/ -static int -PyArray_CanCoerceScalar(int thistype, int neededtype, - NPY_SCALARKIND scalar) -{ - PyArray_Descr* from; - int *castlist; - - if (scalar == PyArray_NOSCALAR) { - return PyArray_CanCastSafely(thistype, neededtype); - } - from = PyArray_DescrFromType(thistype); - if (from->f->cancastscalarkindto && - (castlist = from->f->cancastscalarkindto[scalar])) { - while (*castlist != PyArray_NOTYPE) - if (*castlist++ == neededtype) return 1; - } - switch(scalar) { - case PyArray_BOOL_SCALAR: - case PyArray_OBJECT_SCALAR: - return PyArray_CanCastSafely(thistype, neededtype); - default: - if (PyTypeNum_ISUSERDEF(neededtype)) return FALSE; - switch(scalar) { - case PyArray_INTPOS_SCALAR: - return (neededtype >= PyArray_BYTE); - case PyArray_INTNEG_SCALAR: - return (neededtype >= PyArray_BYTE) && \ - !(PyTypeNum_ISUNSIGNED(neededtype)); - case PyArray_FLOAT_SCALAR: - return (neededtype >= PyArray_FLOAT); - case PyArray_COMPLEX_SCALAR: - return (neededtype >= PyArray_CFLOAT); - default: - return 1; /* should never get here... */ - } - } -} - - -/*OBJECT_API*/ -static PyArrayObject ** -PyArray_ConvertToCommonType(PyObject *op, int *retn) -{ - int i, n, allscalars=0; - PyArrayObject **mps=NULL; - PyObject *otmp; - PyArray_Descr *intype=NULL, *stype=NULL; - PyArray_Descr *newtype=NULL; - NPY_SCALARKIND scalarkind=NPY_NOSCALAR, intypekind=NPY_NOSCALAR; - - *retn = n = PySequence_Length(op); - if (PyErr_Occurred()) {*retn = 0; return NULL;} - - mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *)); - if (mps == NULL) { - *retn = 0; - return (void*)PyErr_NoMemory(); - } - - if (PyArray_Check(op)) { - for (i=0; itype_num, - NULL); - } - else { - newtype = PyArray_DescrFromObject(otmp, stype); - Py_XDECREF(stype); - stype = newtype; - scalarkind = PyArray_ScalarKind(newtype->type_num, - NULL); - mps[i] = (PyArrayObject *)Py_None; - Py_INCREF(Py_None); - } - Py_XDECREF(otmp); - } - if (intype==NULL) { /* all scalars */ - allscalars = 1; - intype = stype; - Py_INCREF(intype); - for (i=0; itype_num, - intype->type_num, - scalarkind)) { - newtype = _array_small_type(intype, stype); - Py_XDECREF(intype); - intype = newtype; - } - for (i=0; ind < mps[i]->nd) { - PyErr_SetString(PyExc_ValueError, - "too many dimensions"); - goto fail; - } - if (!PyArray_CompareLists(ap->dimensions+(ap->nd-mps[i]->nd), - mps[i]->dimensions, mps[i]->nd)) { - PyErr_SetString(PyExc_ValueError, - "array dimensions must agree"); - goto fail; - } - sizes[i] = PyArray_NBYTES(mps[i]); - } - - if (!ret) { - Py_INCREF(mps[0]->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(ap->ob_type, - mps[0]->descr, - ap->nd, - ap->dimensions, - NULL, NULL, 0, - (PyObject *)ap); - } - else { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; - - if (PyArray_SIZE(ret) != PyArray_SIZE(ap)) { - PyErr_SetString(PyExc_TypeError, - "invalid shape for output array."); - ret = NULL; - goto fail; - } - if (clipmode == NPY_RAISE) { - /* we need to make sure and get a copy - so the input array is not changed - before the error is called - */ - flags |= NPY_ENSURECOPY; - } - Py_INCREF(mps[0]->descr); - obj = (PyArrayObject *)PyArray_FromArray(ret, mps[0]->descr, - flags); - if (obj != ret) copyret = 1; - ret = obj; - } - - if (ret == NULL) goto fail; - elsize = ret->descr->elsize; - m = PyArray_SIZE(ret); - self_data = (intp *)ap->data; - ret_data = ret->data; - - for 
(i=0; i= n) { - switch(clipmode) { - case NPY_RAISE: - PyErr_SetString(PyExc_ValueError, - "invalid entry in choice "\ - "array"); - goto fail; - case NPY_WRAP: - if (mi < 0) { - while(mi<0) mi += n; - } - else { - while(mi>=n) mi -= n; - } - break; - case NPY_CLIP: - if (mi < 0) mi=0; - else if (mi>=n) mi=n-1; - break; - } - } - offset = i*elsize; - if (offset >= sizes[mi]) {offset = offset % sizes[mi]; } - memmove(ret_data, mps[mi]->data+offset, elsize); - ret_data += elsize; self_data++; - } - - PyArray_INCREF(ret); - for(i=0; ibase; - Py_INCREF(obj); - Py_DECREF(ret); - ret = (PyArrayObject *)obj; - } - return (PyObject *)ret; - - fail: - for(i=0; idescr) - - sort = op->descr->f->sort[which]; - size = it->size; - N = op->dimensions[axis]; - elsize = op->descr->elsize; - astride = op->strides[axis]; - - needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) \ - || swap; - - if (needcopy) { - char *buffer; - buffer = PyDataMem_NEW(N*elsize); - while (size--) { - _unaligned_strided_byte_copy(buffer, (intp) elsize, it->dataptr, - astride, N, elsize); - if (swap) _strided_byte_swap(buffer, (intp) elsize, N, elsize); - if (sort(buffer, N, op) < 0) { - PyDataMem_FREE(buffer); goto fail; - } - if (swap) _strided_byte_swap(buffer, (intp) elsize, N, elsize); - - _unaligned_strided_byte_copy(it->dataptr, astride, buffer, - (intp) elsize, N, elsize); - PyArray_ITER_NEXT(it); - } - PyDataMem_FREE(buffer); - } - else { - while (size--) { - if (sort(it->dataptr, N, op) < 0) goto fail; - PyArray_ITER_NEXT(it); - } - } - - NPY_END_THREADS_DESCR(op->descr) - - Py_DECREF(it); - return 0; - - fail: - END_THREADS - - Py_DECREF(it); - return 0; -} - -static PyObject* -_new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - - PyArrayIterObject *it=NULL; - PyArrayIterObject *rit=NULL; - PyObject *ret; - int needcopy=0, i; - intp N, size; - int elsize, swap; - intp astride, rstride, *iptr; - PyArray_ArgSortFunc *argsort; - BEGIN_THREADS_DEF - - ret = PyArray_New(op->ob_type, op->nd, - op->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) return NULL; - - it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis); - rit = (PyArrayIterObject *)PyArray_IterAllButAxis(ret, &axis); - if (rit == NULL || it == NULL) goto fail; - - swap = !PyArray_ISNOTSWAPPED(op); - - NPY_BEGIN_THREADS_DESCR(op->descr) - - argsort = op->descr->f->argsort[which]; - size = it->size; - N = op->dimensions[axis]; - elsize = op->descr->elsize; - astride = op->strides[axis]; - rstride = PyArray_STRIDE(ret,axis); - - needcopy = swap || !(op->flags & ALIGNED) || (astride != (intp) elsize) || \ - (rstride != sizeof(intp)); - - if (needcopy) { - char *valbuffer, *indbuffer; - valbuffer = PyDataMem_NEW(N*elsize); - indbuffer = PyDataMem_NEW(N*sizeof(intp)); - while (size--) { - _unaligned_strided_byte_copy(valbuffer, (intp) elsize, it->dataptr, - astride, N, elsize); - if (swap) _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); - iptr = (intp *)indbuffer; - for (i=0; idataptr, rstride, indbuffer, - sizeof(intp), N, sizeof(intp)); - PyArray_ITER_NEXT(it); - PyArray_ITER_NEXT(rit); - } - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - } - else { - while (size--) { - iptr = (intp *)rit->dataptr; - for (i=0; idataptr, (intp *)rit->dataptr, - N, op) < 0) goto fail; - PyArray_ITER_NEXT(it); - PyArray_ITER_NEXT(rit); - } - } - - NPY_END_THREADS_DESCR(op->descr) - - Py_DECREF(it); - Py_DECREF(rit); - return ret; - - fail: - - END_THREADS - - Py_DECREF(ret); - 
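The clip-mode switch in PyArray_Choose above is a self-contained policy: RAISE rejects an out-of-range index, WRAP folds it modularly, CLIP pins it to [0, n-1]. The same logic extracted into one function (fix_index and the MODE_* names are hypothetical stand-ins for NPY_RAISE/NPY_WRAP/NPY_CLIP):

typedef enum { MODE_RAISE, MODE_WRAP, MODE_CLIP } clip_mode;

/* Returns 0 with *mi adjusted into [0, n), or -1 if the caller should
   raise "invalid entry in choice array". */
static int
fix_index(long *mi, long n, clip_mode mode)
{
    if (*mi >= 0 && *mi < n)
        return 0;
    switch (mode) {
    case MODE_RAISE:
        return -1;
    case MODE_WRAP:
        while (*mi < 0)   *mi += n;
        while (*mi >= n)  *mi -= n;
        return 0;
    case MODE_CLIP:
        if (*mi < 0)       *mi = 0;
        else if (*mi >= n) *mi = n - 1;
        return 0;
    }
    return -1;
}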
Py_XDECREF(it); - Py_XDECREF(rit); - return NULL; -} - - -/* Be sure to save this global_compare when necessary */ - -static PyArrayObject *global_obj; - -static int -qsortCompare (const void *a, const void *b) -{ - return global_obj->descr->f->compare(a,b,global_obj); -} - -/* Consumes reference to ap (op gets it) - op contains a version of the array with axes swapped if - local variable axis is not the last dimension. - orign must be defined locally. -*/ - -#define SWAPAXES(op, ap) { \ - orign = (ap)->nd-1; \ - if (axis != orign) { \ - (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \ - Py_DECREF((ap)); \ - if ((op) == NULL) return NULL; \ - } \ - else (op) = (ap); \ - } - -/* Consumes reference to ap (op gets it) - origin must be previously defined locally. - SWAPAXES must have been called previously. - op contains the swapped version of the array. -*/ -#define SWAPBACK(op, ap) { \ - if (axis != orign) { \ - (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \ - Py_DECREF((ap)); \ - if ((op) == NULL) return NULL; \ - } \ - else (op) = (ap); \ - } - -/* These swap axes in-place if necessary */ -#define SWAPINTP(a,b) {intp c; c=(a); (a) = (b); (b) = c;} -#define SWAPAXES2(ap) { \ - orign = (ap)->nd-1; \ - if (axis != orign) { \ - SWAPINTP(ap->dimensions[axis], ap->dimensions[orign]); \ - SWAPINTP(ap->strides[axis], ap->strides[orign]); \ - PyArray_UpdateFlags(ap, CONTIGUOUS | FORTRAN); \ - } \ - } - -#define SWAPBACK2(ap) { \ - if (axis != orign) { \ - SWAPINTP(ap->dimensions[axis], ap->dimensions[orign]); \ - SWAPINTP(ap->strides[axis], ap->strides[orign]); \ - PyArray_UpdateFlags(ap, CONTIGUOUS | FORTRAN); \ - } \ - } - -/*MULTIARRAY_API - Sort an array in-place -*/ -static int -PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *ap=NULL, *store_arr=NULL; - char *ip; - int i, n, m, elsize, orign; - - n = op->nd; - if ((n==0) || (PyArray_SIZE(op)==1)) return 0; - - if (axis < 0) axis += n; - if ((axis < 0) || (axis >= n)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); - return -1; - } - if (!PyArray_ISWRITEABLE(op)) { - PyErr_SetString(PyExc_RuntimeError, - "attempted sort on unwriteable array."); - return -1; - } - - /* Determine if we should use type-specific algorithm or not */ - if (op->descr->f->sort[which] != NULL) { - return _new_sort(op, axis, which); - } - - if ((which != PyArray_QUICKSORT) || \ - op->descr->f->compare == NULL) { - PyErr_SetString(PyExc_TypeError, - "desired sort not supported for this type"); - return -1; - } - - SWAPAXES2(op); - - ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op, - NULL, 1, 0, - DEFAULT | UPDATEIFCOPY, NULL); - if (ap == NULL) goto fail; - - elsize = ap->descr->elsize; - m = ap->dimensions[ap->nd-1]; - if (m == 0) goto finish; - - n = PyArray_SIZE(ap)/m; - - /* Store global -- allows re-entry -- restore before leaving*/ - store_arr = global_obj; - global_obj = ap; - - for (ip=ap->data, i=0; idescr->elsize; - const intp *ipa = ip1; - const intp *ipb = ip2; - return global_obj->descr->f->compare(global_data + (isize * *ipa), - global_data + (isize * *ipb), - global_obj); -} - -/*MULTIARRAY_API - ArgSort an array -*/ -static PyObject * -PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *ap=NULL, *ret=NULL, *store, *op2; - intp *ip; - intp i, j, n, m, orign; - int argsort_elsize; - char *store_ptr; - - n = op->nd; - if ((n==0) || (PyArray_SIZE(op)==1)) { - ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd, - op->dimensions, - PyArray_INTP, - NULL, 
NULL, 0, 0, - (PyObject *)op); - if (ret == NULL) return NULL; - *((intp *)ret->data) = 0; - return (PyObject *)ret; - } - - /* Creates new reference op2 */ - if ((op2=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL; - - /* Determine if we should use new algorithm or not */ - if (op2->descr->f->argsort[which] != NULL) { - ret = (PyArrayObject *)_new_argsort(op2, axis, which); - Py_DECREF(op2); - return (PyObject *)ret; - } - - if ((which != PyArray_QUICKSORT) || op2->descr->f->compare == NULL) { - PyErr_SetString(PyExc_TypeError, - "requested sort not available for type"); - Py_DECREF(op2); - op = NULL; - goto fail; - } - - /* ap will contain the reference to op2 */ - SWAPAXES(ap, op2); - - op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap, - PyArray_NOTYPE, - 1, 0); - - Py_DECREF(ap); - if (op == NULL) return NULL; - - ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd, - op->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) goto fail; - - - ip = (intp *)ret->data; - argsort_elsize = op->descr->elsize; - m = op->dimensions[op->nd-1]; - if (m == 0) goto finish; - - n = PyArray_SIZE(op)/m; - store_ptr = global_data; - global_data = op->data; - store = global_obj; - global_obj = op; - for (i=0; i 0 in lexsort"); - return NULL; - } - mps = (PyArrayObject **) _pya_malloc(n*sizeof(PyArrayObject)); - if (mps==NULL) return PyErr_NoMemory(); - its = (PyArrayIterObject **) _pya_malloc(n*sizeof(PyArrayIterObject)); - if (its == NULL) {_pya_free(mps); return PyErr_NoMemory();} - for (i=0; i0) { - if ((mps[i]->nd != mps[0]->nd) || \ - (!PyArray_CompareLists(mps[i]->dimensions, - mps[0]->dimensions, - mps[0]->nd))) { - PyErr_SetString(PyExc_ValueError, - "all keys need to be the same shape"); - goto fail; - } - } - if (!mps[i]->descr->f->argsort[PyArray_MERGESORT]) { - PyErr_Format(PyExc_TypeError, - "merge sort not available for item %d", i); - goto fail; - } - if (!object && - PyDataType_FLAGCHK(mps[i]->descr, NPY_NEEDS_PYAPI)) - object = 1; - its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis \ - ((PyObject *)mps[i], &axis); - if (its[i]==NULL) goto fail; - } - - /* Now we can check the axis */ - nd = mps[0]->nd; - if ((nd==0) || (PyArray_SIZE(mps[0])==1)) { - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, - mps[0]->dimensions, - PyArray_INTP, - NULL, NULL, 0, 0, NULL); - - if (ret == NULL) goto fail; - *((intp *)(ret->data)) = 0; - goto finish; - } - if (axis < 0) axis += nd; - if ((axis < 0) || (axis >= nd)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); - goto fail; - } - - /* Now do the sorting */ - - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, - mps[0]->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, NULL); - if (ret == NULL) goto fail; - - rit = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ret, &axis); - if (rit == NULL) goto fail; - - if (!object) {NPY_BEGIN_THREADS} - - size = rit->size; - N = mps[0]->dimensions[axis]; - rstride = PyArray_STRIDE(ret,axis); - - maxelsize = mps[0]->descr->elsize; - needcopy = (rstride != sizeof(intp)); - for (j=0; jflags & ALIGNED) || \ - (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize); - if (mps[j]->descr->elsize > maxelsize) - maxelsize = mps[j]->descr->elsize; - } - - if (needcopy) { - char *valbuffer, *indbuffer; - int *swaps; - valbuffer = PyDataMem_NEW(N*maxelsize); - indbuffer = PyDataMem_NEW(N*sizeof(intp)); - swaps = malloc(n*sizeof(int)); - for (j=0; jdescr->elsize; - astride = mps[j]->strides[axis]; - argsort = 
mps[j]->descr->f->argsort[PyArray_MERGESORT]; - _unaligned_strided_byte_copy(valbuffer, (intp) elsize, - its[j]->dataptr, astride, N, elsize); - if (swaps[j]) - _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); - if (argsort(valbuffer, (intp *)indbuffer, N, mps[j]) < 0) { - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - free(swaps); - goto fail; - } - PyArray_ITER_NEXT(its[j]); - } - _unaligned_strided_byte_copy(rit->dataptr, rstride, indbuffer, - sizeof(intp), N, sizeof(intp)); - PyArray_ITER_NEXT(rit); - } - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - free(swaps); - } - else { - while (size--) { - iptr = (intp *)rit->dataptr; - for (i=0; idescr->f->argsort[PyArray_MERGESORT]; - if (argsort(its[j]->dataptr, (intp *)rit->dataptr, - N, mps[j]) < 0) goto fail; - PyArray_ITER_NEXT(its[j]); - } - PyArray_ITER_NEXT(rit); - } - } - - if (!object) {NPY_END_THREADS} - - finish: - for (i=0; i= keys. - * - * For each key use bisection to find the first index i s.t. key <= arr[i]. - * When there is no such index i, set i = len(arr). Return the results in ret. - * All arrays are assumed contiguous on entry and both arr and key must be of - * the same comparable type. - * - * @param arr contiguous sorted array to be searched. - * @param key contiguous array of keys. - * @param ret contiguous array of intp for returned indices. - * @return void - */ -static void -local_search_left(PyArrayObject *arr, PyArrayObject *key, PyArrayObject *ret) -{ - PyArray_CompareFunc *compare = key->descr->f->compare; - intp nelts = arr->dimensions[arr->nd - 1]; - intp nkeys = PyArray_SIZE(key); - char *parr = arr->data; - char *pkey = key->data; - intp *pret = (intp *)ret->data; - int elsize = arr->descr->elsize; - intp i; - - for(i = 0; i < nkeys; ++i) { - intp imin = 0; - intp imax = nelts; - while (imin < imax) { - intp imid = imin + ((imax - imin) >> 2); - if (compare(parr + elsize*imid, pkey, key) < 0) - imin = imid + 1; - else - imax = imid; - } - *pret = imin; - pret += 1; - pkey += elsize; - } -} - - -/** @brief Use bisection of sorted array to find first entries > keys. - * - * For each key use bisection to find the first index i s.t. key < arr[i]. - * When there is no such index i, set i = len(arr). Return the results in ret. - * All arrays are assumed contiguous on entry and both arr and key must be of - * the same comparable type. - * - * @param arr contiguous sorted array to be searched. - * @param key contiguous array of keys. - * @param ret contiguous array of intp for returned indices. 
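 * For example (illustrative values): with sorted arr = {1, 3, 3, 5} and
 * key 3, the "left" search above returns index 1 (first element >= 3),
 * while this "right" variant returns index 3 (first element > 3); a key
 * of 7 yields len(arr) = 4 in both cases.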
- * @return void - */ -static void -local_search_right(PyArrayObject *arr, PyArrayObject *key, PyArrayObject *ret) -{ - PyArray_CompareFunc *compare = key->descr->f->compare; - intp nelts = arr->dimensions[arr->nd - 1]; - intp nkeys = PyArray_SIZE(key); - char *parr = arr->data; - char *pkey = key->data; - intp *pret = (intp *)ret->data; - int elsize = arr->descr->elsize; - intp i; - - for(i = 0; i < nkeys; ++i) { - intp imin = 0; - intp imax = nelts; - while (imin < imax) { - intp imid = imin + ((imax - imin) >> 2); - if (compare(parr + elsize*imid, pkey, key) <= 0) - imin = imid + 1; - else - imax = imid; - } - *pret = imin; - pret += 1; - pkey += elsize; - } -} - - -/*MULTIARRAY_API - Convert object to searchsorted side -*/ -static int -PyArray_SearchsideConverter(PyObject *obj, void *addr) -{ - NPY_SEARCHSIDE *side = (NPY_SEARCHSIDE *)addr; - char *str = PyString_AsString(obj); - - if (!str || strlen(str) < 1) { - PyErr_SetString(PyExc_ValueError, - "expected nonempty string for keyword 'side'"); - return PY_FAIL; - } - - if (str[0] == 'l' || str[0] == 'L') - *side = NPY_SEARCHLEFT; - else if (str[0] == 'r' || str[0] == 'R') - *side = NPY_SEARCHRIGHT; - else { - PyErr_Format(PyExc_ValueError, - "'%s' is an invalid value for keyword 'side'", str); - return PY_FAIL; - } - return PY_SUCCEED; -} - - -/*MULTIARRAY_API - Numeric.searchsorted(a,v) -*/ -static PyObject * -PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE side) -{ - PyArrayObject *ap1=NULL; - PyArrayObject *ap2=NULL; - PyArrayObject *ret=NULL; - int typenum = 0; - - NPY_BEGIN_THREADS_DEF - - typenum = PyArray_ObjectType((PyObject *)op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - /* need ap1 as contiguous array and of right type */ - ap1 = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)op1, - typenum, - 1, 1); - if (ap1 == NULL) - return NULL; - - /* need ap2 as contiguous array and of right type */ - ap2 = (PyArrayObject *)PyArray_ContiguousFromAny(op2, typenum, - 0, 0); - if (ap2 == NULL) - goto fail; - - /* ret is a contiguous array of intp type to hold returned indices */ - ret = (PyArrayObject *)PyArray_New(ap2->ob_type, ap2->nd, - ap2->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, (PyObject *)ap2); - if (ret == NULL) - goto fail; - - /* check that comparison function exists */ - if (ap2->descr->f->compare == NULL) { - PyErr_SetString(PyExc_TypeError, - "compare not supported for type"); - goto fail; - } - - if (side == NPY_SEARCHLEFT) { - NPY_BEGIN_THREADS_DESCR(ap2->descr) - local_search_left(ap1, ap2, ret); - NPY_END_THREADS_DESCR(ap2->descr) - } - else if (side == NPY_SEARCHRIGHT) { - NPY_BEGIN_THREADS_DESCR(ap2->descr) - local_search_right(ap1, ap2, ret); - NPY_END_THREADS_DESCR(ap2->descr) - } - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -/* - Make a new empty array, of the passed size, of a type that takes the - priority of ap1 and ap2 into account. -*/ -static PyArrayObject * -new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, - int nd, intp dimensions[], int typenum) -{ - PyArrayObject *ret; - PyTypeObject *subtype; - double prior1, prior2; - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ - if (ap2->ob_type != ap1->ob_type) { - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - - subtype = (prior2 > prior1 ? 
ap2->ob_type : ap1->ob_type); - } else { - prior1 = prior2 = 0.0; - subtype = ap1->ob_type; - } - - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? ap2 : ap1)); - return ret; -} - -/* Could perhaps be redone to not make contiguous arrays - */ - -/*MULTIARRAY_API - Numeric.innerproduct(a,v) -*/ -static PyObject * -PyArray_InnerProduct(PyObject *op1, PyObject *op2) -{ - PyArrayObject *ap1, *ap2, *ret=NULL; - PyArrayIterObject *it1, *it2; - intp i, j, l; - int typenum, nd, axis; - intp is1, is2, os; - char *op; - intp dimensions[MAX_DIMS]; - PyArray_DotFunc *dot; - PyArray_Descr *typec; - - NPY_BEGIN_THREADS_DEF - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - typec = PyArray_DescrFromType(typenum); - Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - BEHAVED, NULL); - if (ap1 == NULL) {Py_DECREF(typec); return NULL;} - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - BEHAVED, NULL); - if (ap2 == NULL) goto fail; - - if (ap1->nd == 0 || ap2->nd == 0) { - ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)ret->ob_type->tp_as_number->\ - nb_multiply((PyObject *)ap1, (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - } - - l = ap1->dimensions[ap1->nd-1]; - - if (ap2->dimensions[ap2->nd-1] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - - nd = ap1->nd+ap2->nd-2; - j = 0; - for(i=0; ind-1; i++) { - dimensions[j++] = ap1->dimensions[i]; - } - for(i=0; ind-1; i++) { - dimensions[j++] = ap2->dimensions[i]; - } - - - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ - ret = new_array_for_sum(ap1, ap2, nd, dimensions, typenum); - if (ret == NULL) goto fail; - - dot = (ret->descr->f->dotfunc); - - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "dot not available for this type"); - goto fail; - } - - is1 = ap1->strides[ap1->nd-1]; - is2 = ap2->strides[ap2->nd-1]; - op = ret->data; os = ret->descr->elsize; - - axis = ap1->nd-1; - it1 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap1, &axis); - axis = ap2->nd-1; - it2 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap2, &axis); - - NPY_BEGIN_THREADS_DESCR(ap2->descr) - while(1) { - while(it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) break; - PyArray_ITER_RESET(it2); - } - NPY_END_THREADS_DESCR(ap2->descr) - Py_DECREF(it1); - Py_DECREF(it2); - - if (PyErr_Occurred()) goto fail; - - - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -/* just like inner product but does the swapaxes stuff on the fly */ -/*MULTIARRAY_API - Numeric.matrixproduct(a,v) -*/ -static PyObject * -PyArray_MatrixProduct(PyObject *op1, PyObject *op2) -{ - PyArrayObject *ap1, *ap2, *ret=NULL; - PyArrayIterObject *it1, *it2; - intp i, j, l; - int typenum, nd, axis, matchDim; - intp is1, is2, os; - char *op; - intp dimensions[MAX_DIMS]; - PyArray_DotFunc *dot; - PyArray_Descr *typec; - - NPY_BEGIN_THREADS_DEF - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - typec = PyArray_DescrFromType(typenum); - Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - BEHAVED, 
NULL); - if (ap1 == NULL) {Py_DECREF(typec); return NULL;} - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - BEHAVED, NULL); - if (ap2 == NULL) goto fail; - - if (ap1->nd == 0 || ap2->nd == 0) { - ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)ret->ob_type->tp_as_number->\ - nb_multiply((PyObject *)ap1, (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - } - - l = ap1->dimensions[ap1->nd-1]; - if (ap2->nd > 1) { - matchDim = ap2->nd - 2; - } - else { - matchDim = 0; - } - - if (ap2->dimensions[matchDim] != l) { - PyErr_SetString(PyExc_ValueError, "objects are not aligned"); - goto fail; - } - - nd = ap1->nd+ap2->nd-2; - if (nd > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "dot: too many dimensions in result"); - goto fail; - } - j = 0; - for(i=0; ind-1; i++) { - dimensions[j++] = ap1->dimensions[i]; - } - for(i=0; ind-2; i++) { - dimensions[j++] = ap2->dimensions[i]; - } - if(ap2->nd > 1) { - dimensions[j++] = ap2->dimensions[ap2->nd-1]; - } - /* - fprintf(stderr, "nd=%d dimensions=", nd); - for(i=0; istrides[ap1->nd-1]; is2 = ap2->strides[matchDim]; - - /* Choose which subtype to return */ - ret = new_array_for_sum(ap1, ap2, nd, dimensions, typenum); - if (ret == NULL) goto fail; - - /* Ensure that multiarray.dot(,<0xM>) -> zeros((N,M)) */ - if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) { - memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); - } - else { /* Ensure that multiarray.dot([],[]) -> 0 */ - memset(PyArray_DATA(ret), 0, PyArray_ITEMSIZE(ret)); - } - - - dot = ret->descr->f->dotfunc; - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "dot not available for this type"); - goto fail; - } - - op = ret->data; os = ret->descr->elsize; - - axis = ap1->nd-1; - it1 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap1, &axis); - it2 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap2, &matchDim); - - NPY_BEGIN_THREADS_DESCR(ap2->descr) - while(1) { - while(it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) break; - PyArray_ITER_RESET(it2); - } - NPY_END_THREADS_DESCR(ap2->descr) - Py_DECREF(it1); - Py_DECREF(it2); - if (PyErr_Occurred()) goto fail; /* only for OBJECT arrays */ - - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -/*MULTIARRAY_API - Fast Copy and Transpose -*/ -static PyObject * -PyArray_CopyAndTranspose(PyObject *op) -{ - PyObject *ret, *arr; - int nd; - intp dims[2]; - intp i,j; - int elsize, str2; - char *iptr; - char *optr; - - /* make sure it is well-behaved */ - arr = PyArray_FromAny(op, NULL, 0, 0, CARRAY, NULL); - nd = PyArray_NDIM(arr); - if (nd == 1) { /* we will give in to old behavior */ - ret = PyArray_Copy((PyArrayObject *)arr); - Py_DECREF(arr); - return ret; - } - else if (nd != 2) { - Py_DECREF(arr); - PyErr_SetString(PyExc_ValueError, - "only 2-d arrays are allowed"); - return NULL; - } - - /* Now construct output array */ - dims[0] = PyArray_DIM(arr,1); - dims[1] = PyArray_DIM(arr,0); - elsize = PyArray_ITEMSIZE(arr); - - Py_INCREF(PyArray_DESCR(arr)); - ret = PyArray_NewFromDescr(arr->ob_type, - PyArray_DESCR(arr), - 2, dims, - NULL, NULL, 0, arr); - - if (ret == NULL) { - Py_DECREF(arr); - return NULL; - } - /* do 2-d loop */ - NPY_BEGIN_ALLOW_THREADS - optr = PyArray_DATA(ret); - str2 = elsize*dims[0]; - for (i=0; 
idimensions[0]; - n2 = ap2->dimensions[0]; - - if (n1 < n2) { - ret = ap1; ap1 = ap2; ap2 = ret; - ret = NULL; i = n1;n1=n2;n2=i; - } - length = n1; - n = n2; - switch(mode) { - case 0: - length = length-n+1; - n_left = n_right = 0; - break; - case 1: - n_left = (intp)(n/2); - n_right = n-n_left-1; - break; - case 2: - n_right = n-1; - n_left = n-1; - length = length+n-1; - break; - default: - PyErr_SetString(PyExc_ValueError, - "mode must be 0, 1, or 2"); - goto fail; - } - - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ - ret = new_array_for_sum(ap1, ap2, 1, &length, typenum); - if (ret == NULL) goto fail; - - dot = ret->descr->f->dotfunc; - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "function not available for this data type"); - goto fail; - } - - NPY_BEGIN_THREADS_DESCR(ret->descr) - - is1 = ap1->strides[0]; is2 = ap2->strides[0]; - op = ret->data; os = ret->descr->elsize; - - ip1 = ap1->data; ip2 = ap2->data+n_left*is2; - n = n-n_left; - for(i=0; idescr) - - if (PyErr_Occurred()) goto fail; - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -/*MULTIARRAY_API - ArgMin -*/ -static PyObject * -PyArray_ArgMin(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyObject *obj, *new, *ret; - - if (PyArray_ISFLEXIBLE(ap)) { - PyErr_SetString(PyExc_TypeError, - "argmax is unsupported for this type"); - return NULL; - } - else if (PyArray_ISUNSIGNED(ap)) - obj = PyInt_FromLong((long) -1); - - else if (PyArray_TYPE(ap)==PyArray_BOOL) - obj = PyInt_FromLong((long) 1); - - else - obj = PyInt_FromLong((long) 0); - - new = PyArray_EnsureAnyArray(PyNumber_Subtract(obj, (PyObject *)ap)); - Py_DECREF(obj); - if (new == NULL) return NULL; - ret = PyArray_ArgMax((PyArrayObject *)new, axis, out); - Py_DECREF(new); - return ret; -} - -/*MULTIARRAY_API - Max -*/ -static PyObject * -PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyArrayObject *arr; - PyObject *ret; - - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) - return NULL; - ret = PyArray_GenericReduceFunction(arr, n_ops.maximum, axis, - arr->descr->type_num, out); - Py_DECREF(arr); - return ret; -} - -/*MULTIARRAY_API - Min -*/ -static PyObject * -PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyArrayObject *arr; - PyObject *ret; - - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) - return NULL; - ret = PyArray_GenericReduceFunction(arr, n_ops.minimum, axis, - arr->descr->type_num, out); - Py_DECREF(arr); - return ret; -} - -/*MULTIARRAY_API - Ptp -*/ -static PyObject * -PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyArrayObject *arr; - PyObject *ret; - PyObject *obj1=NULL, *obj2=NULL; - - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) - return NULL; - obj1 = PyArray_Max(arr, axis, out); - if (obj1 == NULL) goto fail; - obj2 = PyArray_Min(arr, axis, NULL); - if (obj2 == NULL) goto fail; - Py_DECREF(arr); - if (out) { - ret = PyObject_CallFunction(n_ops.subtract, "OOO", out, obj2, out); - } - else { - ret = PyNumber_Subtract(obj1, obj2); - } - Py_DECREF(obj1); - Py_DECREF(obj2); - return ret; - - fail: - Py_XDECREF(arr); - Py_XDECREF(obj1); - Py_XDECREF(obj2); - return NULL; -} - - -/*MULTIARRAY_API - ArgMax -*/ -static PyObject * -PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) -{ - PyArrayObject *ap=NULL, *rp=NULL; - PyArray_ArgFunc* arg_func; - char *ip; - intp 
*rptr; - intp i, n, m; - int elsize; - int copyret=0; - - NPY_BEGIN_THREADS_DEF - - if ((ap=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL; - - /* We need to permute the array so that axis is placed at the end. - And all other dimensions are shifted left. - */ - if (axis != ap->nd-1) { - PyArray_Dims newaxes; - intp dims[MAX_DIMS]; - int i; - newaxes.ptr = dims; - newaxes.len = ap->nd; - for (i=0; ind-1; i++) dims[i] = i+1; - dims[ap->nd-1] = axis; - op = (PyAO *)PyArray_Transpose(ap, &newaxes); - Py_DECREF(ap); - if (op == NULL) return NULL; - } - else { - op = ap; - } - - /* Will get native-byte order contiguous copy. - */ - ap = (PyArrayObject *)\ - PyArray_ContiguousFromAny((PyObject *)op, - op->descr->type_num, 1, 0); - - Py_DECREF(op); - if (ap == NULL) return NULL; - - arg_func = ap->descr->f->argmax; - if (arg_func == NULL) { - PyErr_SetString(PyExc_TypeError, "data type not ordered"); - goto fail; - } - - elsize = ap->descr->elsize; - m = ap->dimensions[ap->nd-1]; - if (m == 0) { - PyErr_SetString(MultiArrayError, - "attempt to get argmax/argmin "\ - "of an empty sequence"); - goto fail; - } - - if (!out) { - rp = (PyArrayObject *)PyArray_New(ap->ob_type, ap->nd-1, - ap->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, - (PyObject *)ap); - if (rp == NULL) goto fail; - } - else { - if (PyArray_SIZE(out) != \ - PyArray_MultiplyList(ap->dimensions, ap->nd-1)) { - PyErr_SetString(PyExc_TypeError, - "invalid shape for output array."); - } - rp = (PyArrayObject *)\ - PyArray_FromArray(out, - PyArray_DescrFromType(PyArray_INTP), - NPY_CARRAY | NPY_UPDATEIFCOPY); - if (rp == NULL) goto fail; - if (rp != out) copyret = 1; - } - - NPY_BEGIN_THREADS_DESCR(ap->descr) - n = PyArray_SIZE(ap)/m; - rptr = (intp *)rp->data; - for (ip = ap->data, i=0; idescr) - - Py_DECREF(ap); - if (copyret) { - PyArrayObject *obj; - obj = (PyArrayObject *)rp->base; - Py_INCREF(obj); - Py_DECREF(rp); - rp = obj; - } - return (PyObject *)rp; - - fail: - Py_DECREF(ap); - Py_XDECREF(rp); - return NULL; -} - - -/*MULTIARRAY_API - Take -*/ -static PyObject * -PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, - PyArrayObject *ret, NPY_CLIPMODE clipmode) -{ - PyArrayObject *self, *indices; - intp nd, i, j, n, m, max_item, tmp, chunk; - intp shape[MAX_DIMS]; - char *src, *dest; - int copyret=0; - - indices = NULL; - self = (PyAO *)_check_axis(self0, &axis, CARRAY); - if (self == NULL) return NULL; - - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - PyArray_INTP, - 1, 0); - if (indices == NULL) goto fail; - - n = m = chunk = 1; - nd = self->nd + indices->nd - 1; - for (i=0; i< nd; i++) { - if (i < axis) { - shape[i] = self->dimensions[i]; - n *= shape[i]; - } else { - if (i < axis+indices->nd) { - shape[i] = indices->dimensions[i-axis]; - m *= shape[i]; - } else { - shape[i] = self->dimensions[i-indices->nd+1]; - chunk *= shape[i]; - } - } - } - Py_INCREF(self->descr); - if (!ret) { - ret = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, - self->descr, - nd, shape, - NULL, NULL, 0, - (PyObject *)self); - - if (ret == NULL) goto fail; - } - else { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; - - if ((ret->nd != nd) || - !PyArray_CompareLists(ret->dimensions, shape, nd)) { - PyErr_SetString(PyExc_ValueError, - "bad shape in output array"); - ret = NULL; - Py_DECREF(self->descr); - goto fail; - } - - if (clipmode == NPY_RAISE) { - /* we need to make sure and get a copy - so the input array is not changed - before the error is called - */ - flags |= 
NPY_ENSURECOPY; - } - obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, - flags); - if (obj != ret) copyret = 1; - ret = obj; - } - - max_item = self->dimensions[axis]; - chunk = chunk * ret->descr->elsize; - src = self->data; - dest = ret->data; - - switch(clipmode) { - case NPY_RAISE: - for(i=0; idata))[j]; - if (tmp < 0) tmp = tmp+max_item; - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of range "\ - "for array"); - goto fail; - } - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; - } - src += chunk*max_item; - } - break; - case NPY_WRAP: - for(i=0; idata))[j]; - if (tmp < 0) while (tmp < 0) tmp += max_item; - else if (tmp >= max_item) - while (tmp >= max_item) - tmp -= max_item; - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; - } - src += chunk*max_item; - } - break; - case NPY_CLIP: - for(i=0; idata))[j]; - if (tmp < 0) - tmp = 0; - else if (tmp >= max_item) - tmp = max_item-1; - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; - } - src += chunk*max_item; - } - break; - } - - PyArray_INCREF(ret); - - Py_XDECREF(indices); - Py_XDECREF(self); - if (copyret) { - PyObject *obj; - obj = ret->base; - Py_INCREF(obj); - Py_DECREF(ret); - ret = (PyArrayObject *)obj; - } - - return (PyObject *)ret; - - - fail: - PyArray_XDECREF_ERR(ret); - Py_XDECREF(indices); - Py_XDECREF(self); - return NULL; -} - -/*MULTIARRAY_API - Put values into an array -*/ -static PyObject * -PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, - NPY_CLIPMODE clipmode) -{ - PyArrayObject *indices, *values; - int i, chunk, ni, max_item, nv, tmp; - char *src, *dest; - int copied = 0; - - indices = NULL; - values = NULL; - - if (!PyArray_Check(self)) { - PyErr_SetString(PyExc_TypeError, - "put: first argument must be an array"); - return NULL; - } - if (!PyArray_ISCONTIGUOUS(self)) { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; - if (clipmode == NPY_RAISE) { - flags |= NPY_ENSURECOPY; - } - Py_INCREF(self->descr); - obj = (PyArrayObject *)PyArray_FromArray(self, - self->descr, flags); - if (obj != self) copied = 1; - self = obj; - } - max_item = PyArray_SIZE(self); - dest = self->data; - chunk = self->descr->elsize; - - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - PyArray_INTP, 0, 0); - if (indices == NULL) goto fail; - ni = PyArray_SIZE(indices); - - Py_INCREF(self->descr); - values = (PyArrayObject *)PyArray_FromAny(values0, self->descr, 0, 0, - DEFAULT | FORCECAST, NULL); - if (values == NULL) goto fail; - nv = PyArray_SIZE(values); - if (nv <= 0) goto finish; - if (PyDataType_REFCHK(self->descr)) { - switch(clipmode) { - case NPY_RAISE: - for(i=0; idata + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = tmp+max_item; - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); - goto fail; - } - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_WRAP: - for(i=0; idata + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) while(tmp < 0) tmp+=max_item; - else if (tmp >= max_item) - while(tmp >= max_item) - tmp -= max_item; - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_CLIP: - for(i=0; idata + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = 
0; - else if (tmp >= max_item) - tmp = max_item - 1; - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); - } - break; - } - } - else { - switch(clipmode) { - case NPY_RAISE: - for(i=0; idata + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = tmp+max_item; - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); - goto fail; - } - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_WRAP: - for(i=0; idata + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) while(tmp < 0) tmp+=max_item; - else if (tmp >= max_item) - while(tmp >= max_item) - tmp -= max_item; - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_CLIP: - for(i=0; idata + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = 0; - else if (tmp >= max_item) - tmp = max_item - 1; - memmove(dest + tmp * chunk, src, chunk); - } - break; - } - } - - finish: - Py_XDECREF(values); - Py_XDECREF(indices); - if (copied) { - Py_DECREF(self); - } - Py_INCREF(Py_None); - return Py_None; - - fail: - Py_XDECREF(indices); - Py_XDECREF(values); - if (copied) { - PyArray_XDECREF_ERR(self); - } - return NULL; -} - -static PyObject * -array_putmask(PyObject *module, PyObject *args, PyObject *kwds) -{ - PyObject *mask, *values; - PyObject *array; - - static char *kwlist[] = {"arr", "mask", "values", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!OO:putmask", kwlist, - &PyArray_Type, - &array, &mask, &values)) - return NULL; - - return PyArray_PutMask((PyArrayObject *)array, values, mask); -} - -/*MULTIARRAY_API - Put values into an array according to a mask. -*/ -static PyObject * -PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) -{ - PyArray_FastPutmaskFunc *func; - PyArrayObject *mask, *values; - int i, chunk, ni, max_item, nv, tmp; - char *src, *dest; - int copied=0; - - mask = NULL; - values = NULL; - - if (!PyArray_Check(self)) { - PyErr_SetString(PyExc_TypeError, - "putmask: first argument must "\ - "be an array"); - return NULL; - } - if (!PyArray_ISCONTIGUOUS(self)) { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; - Py_INCREF(self->descr); - obj = (PyArrayObject *)PyArray_FromArray(self, - self->descr, flags); - if (obj != self) copied = 1; - self = obj; - } - - max_item = PyArray_SIZE(self); - dest = self->data; - chunk = self->descr->elsize; - - mask = (PyArrayObject *)\ - PyArray_FROM_OTF(mask0, PyArray_BOOL, CARRAY | FORCECAST); - if (mask == NULL) goto fail; - ni = PyArray_SIZE(mask); - if (ni != max_item) { - PyErr_SetString(PyExc_ValueError, - "putmask: mask and data must be "\ - "the same size"); - goto fail; - } - Py_INCREF(self->descr); - values = (PyArrayObject *)\ - PyArray_FromAny(values0, self->descr, 0, 0, NPY_CARRAY, NULL); - if (values == NULL) goto fail; - nv = PyArray_SIZE(values); /* zero if null array */ - if (nv <= 0) { - Py_XDECREF(values); - Py_XDECREF(mask); - Py_INCREF(Py_None); - return Py_None; - } - if (PyDataType_REFCHK(self->descr)) { - for(i=0; idata))[i]; - if (tmp) { - src = values->data + chunk * (i % nv); - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+i*chunk, self->descr); - memmove(dest + i * chunk, src, chunk); - } - } - } - else { - func = self->descr->f->fastputmask; - if (func == NULL) { - for(i=0; idata))[i]; - if (tmp) { - src = values->data + chunk * (i % nv); - memmove(dest + i * 
chunk, src, chunk); - } - } - } - else { - func(dest, mask->data, ni, values->data, nv); - } - } - - Py_XDECREF(values); - Py_XDECREF(mask); - if (copied) { - Py_DECREF(self); - } - Py_INCREF(Py_None); - return Py_None; - - fail: - Py_XDECREF(mask); - Py_XDECREF(values); - if (copied) { - PyArray_XDECREF_ERR(self); - } - return NULL; -} - - -/* This conversion function can be used with the "O&" argument for - PyArg_ParseTuple. It will immediately return an object of array type - or will convert to a CARRAY any other object. - - If you use PyArray_Converter, you must DECREF the array when finished - as you get a new reference to it. -*/ - -/*MULTIARRAY_API - Useful to pass as converter function for O& processing in - PyArgs_ParseTuple. -*/ -static int -PyArray_Converter(PyObject *object, PyObject **address) -{ - if (PyArray_Check(object)) { - *address = object; - Py_INCREF(object); - return PY_SUCCEED; - } - else { - *address = PyArray_FromAny(object, NULL, 0, 0, CARRAY, NULL); - if (*address == NULL) return PY_FAIL; - return PY_SUCCEED; - } -} - -/*MULTIARRAY_API - Useful to pass as converter function for O& processing in - PyArgs_ParseTuple for output arrays -*/ -static int -PyArray_OutputConverter(PyObject *object, PyArrayObject **address) -{ - if (object == NULL || object == Py_None) { - *address = NULL; - return PY_SUCCEED; - } - if (PyArray_Check(object)) { - *address = (PyArrayObject *)object; - return PY_SUCCEED; - } - else { - PyErr_SetString(PyExc_TypeError, - "output must be an array"); - *address = NULL; - return PY_FAIL; - } -} - - -/*MULTIARRAY_API - Convert an object to true / false -*/ -static int -PyArray_BoolConverter(PyObject *object, Bool *val) -{ - if (PyObject_IsTrue(object)) - *val=TRUE; - else *val=FALSE; - if (PyErr_Occurred()) - return PY_FAIL; - return PY_SUCCEED; -} - -/*MULTIARRAY_API - Convert an object to FORTRAN / C / ANY -*/ -static int -PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) -{ - char *str; - if (object == NULL || object == Py_None) { - *val = PyArray_ANYORDER; - } - else if (!PyString_Check(object) || PyString_GET_SIZE(object) < 1) { - if (PyObject_IsTrue(object)) - *val = PyArray_FORTRANORDER; - else - *val = PyArray_CORDER; - if (PyErr_Occurred()) - return PY_FAIL; - return PY_SUCCEED; - } - else { - str = PyString_AS_STRING(object); - if (str[0] == 'C' || str[0] == 'c') { - *val = PyArray_CORDER; - } - else if (str[0] == 'F' || str[0] == 'f') { - *val = PyArray_FORTRANORDER; - } - else if (str[0] == 'A' || str[0] == 'a') { - *val = PyArray_ANYORDER; - } - else { - PyErr_SetString(PyExc_TypeError, - "order not understood"); - return PY_FAIL; - } - } - return PY_SUCCEED; -} - -/*MULTIARRAY_API - Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP -*/ -static int -PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) -{ - if (object == NULL || object == Py_None) { - *val = NPY_RAISE; - } - else if (PyString_Check(object)) { - char *str; - str = PyString_AS_STRING(object); - if (str[0] == 'C' || str[0] == 'c') { - *val = NPY_CLIP; - } - else if (str[0] == 'W' || str[0] == 'w') { - *val = NPY_WRAP; - } - else if (str[0] == 'R' || str[0] == 'r') { - *val = NPY_RAISE; - } - else { - PyErr_SetString(PyExc_TypeError, - "clipmode not understood"); - return PY_FAIL; - } - } - else { - int number; - number = PyInt_AsLong(object); - if (number == -1 && PyErr_Occurred()) goto fail; - if (number <= (int) NPY_RAISE && - number >= (int) NPY_CLIP) - *val = (NPY_CLIPMODE) number; - else goto fail; - } - return PY_SUCCEED; - - fail: - 
PyErr_SetString(PyExc_TypeError, - "clipmode not understood"); - return PY_FAIL; -} - - - -/*MULTIARRAY_API - Typestr converter -*/ -static int -PyArray_TypestrConvert(int itemsize, int gentype) -{ - register int newtype = gentype; - - if (gentype == PyArray_GENBOOLLTR) { - if (itemsize == 1) - newtype = PyArray_BOOL; - else - newtype = PyArray_NOTYPE; - } - else if (gentype == PyArray_SIGNEDLTR) { - switch(itemsize) { - case 1: - newtype = PyArray_INT8; - break; - case 2: - newtype = PyArray_INT16; - break; - case 4: - newtype = PyArray_INT32; - break; - case 8: - newtype = PyArray_INT64; - break; -#ifdef PyArray_INT128 - case 16: - newtype = PyArray_INT128; - break; -#endif - default: - newtype = PyArray_NOTYPE; - } - } - - else if (gentype == PyArray_UNSIGNEDLTR) { - switch(itemsize) { - case 1: - newtype = PyArray_UINT8; - break; - case 2: - newtype = PyArray_UINT16; - break; - case 4: - newtype = PyArray_UINT32; - break; - case 8: - newtype = PyArray_UINT64; - break; -#ifdef PyArray_INT128 - case 16: - newtype = PyArray_UINT128; - break; -#endif - default: - newtype = PyArray_NOTYPE; - break; - } - } - else if (gentype == PyArray_FLOATINGLTR) { - switch(itemsize) { - case 4: - newtype = PyArray_FLOAT32; - break; - case 8: - newtype = PyArray_FLOAT64; - break; -#ifdef PyArray_FLOAT80 - case 10: - newtype = PyArray_FLOAT80; - break; -#endif -#ifdef PyArray_FLOAT96 - case 12: - newtype = PyArray_FLOAT96; - break; -#endif -#ifdef PyArray_FLOAT128 - case 16: - newtype = PyArray_FLOAT128; - break; -#endif - default: - newtype = PyArray_NOTYPE; - } - } - - else if (gentype == PyArray_COMPLEXLTR) { - switch(itemsize) { - case 8: - newtype = PyArray_COMPLEX64; - break; - case 16: - newtype = PyArray_COMPLEX128; - break; -#ifdef PyArray_FLOAT80 - case 20: - newtype = PyArray_COMPLEX160; - break; -#endif -#ifdef PyArray_FLOAT96 - case 24: - newtype = PyArray_COMPLEX192; - break; -#endif -#ifdef PyArray_FLOAT128 - case 32: - newtype = PyArray_COMPLEX256; - break; -#endif - default: - newtype = PyArray_NOTYPE; - } - } - - return newtype; -} - - -/* this function takes a Python object which exposes the (single-segment) - buffer interface and returns a pointer to the data segment - - You should increment the reference count by one of buf->base - if you will hang on to a reference - - You only get a borrowed reference to the object. Do not free the - memory... -*/ - - -/*MULTIARRAY_API - Get buffer chunk from object -*/ -static int -PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) -{ - Py_ssize_t buflen; - - buf->ptr = NULL; - buf->flags = BEHAVED; - buf->base = NULL; - - if (obj == Py_None) - return PY_SUCCEED; - - if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) { - PyErr_Clear(); - buf->flags &= ~WRITEABLE; - if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr), - &buflen) < 0) - return PY_FAIL; - } - buf->len = (intp) buflen; - - /* Point to the base of the buffer object if present */ - if (PyBuffer_Check(obj)) buf->base = ((PyArray_Chunk *)obj)->base; - if (buf->base == NULL) buf->base = obj; - - return PY_SUCCEED; -} - - - -/* This function takes a Python sequence object and allocates and - fills in an intp array with the converted values. 
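   A minimal usage sketch (hypothetical METH_VARARGS wrapper; the function
   name, "dummy" and "args" are assumed for illustration and are not part
   of this module):

       static PyObject *
       example_shape(PyObject *dummy, PyObject *args)
       {
           PyArray_Dims shape = {NULL, 0};
           PyObject *res;
           if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, &shape))
               return NULL;
           res = PyInt_FromLong((long) shape.len);
           PyDimMem_FREE(shape.ptr);
           return res;
       }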
- - **Remember to free the pointer seq.ptr when done using - PyDimMem_FREE(seq.ptr)** -*/ - -/*MULTIARRAY_API - Get intp chunk from sequence -*/ -static int -PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) -{ - int len; - int nd; - - seq->ptr = NULL; - seq->len = 0; - if (obj == Py_None) return PY_SUCCEED; - len = PySequence_Size(obj); - if (len == -1) { /* Check to see if it is a number */ - if (PyNumber_Check(obj)) len = 1; - } - if (len < 0) { - PyErr_SetString(PyExc_TypeError, - "expected sequence object with len >= 0"); - return PY_FAIL; - } - if (len > MAX_DIMS) { - PyErr_Format(PyExc_ValueError, "sequence too large; " \ - "must be smaller than %d", MAX_DIMS); - return PY_FAIL; - } - if (len > 0) { - seq->ptr = PyDimMem_NEW(len); - if (seq->ptr == NULL) { - PyErr_NoMemory(); - return PY_FAIL; - } - } - seq->len = len; - nd = PyArray_IntpFromSequence(obj, (intp *)seq->ptr, len); - if (nd == -1 || nd != len) { - PyDimMem_FREE(seq->ptr); - seq->ptr=NULL; - return PY_FAIL; - } - return PY_SUCCEED; -} - - -/* A tuple type would be either (generic typeobject, typesize) - or (fixed-length data-type, shape) - - or (inheriting data-type, new-data-type) - The new data-type must have the same itemsize as the inheriting data-type - unless the latter is 0 - - Thus (int32, {'real':(int16,0),'imag',(int16,2)}) - - is one way to specify a descriptor that will give - a['real'] and a['imag'] to an int32 array. -*/ - -/* leave type reference alone */ -static PyArray_Descr * -_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag) -{ - PyArray_Descr *new; - PyArray_Descr *conv; - - *errflag = 0; - if (!PyArray_DescrConverter(newobj, &conv)) { - return NULL; - } - *errflag = 1; - new = PyArray_DescrNew(type); - if (new == NULL) goto fail; - - if (new->elsize && new->elsize != conv->elsize) { - PyErr_SetString(PyExc_ValueError, - "mismatch in size of old "\ - "and new data-descriptor"); - goto fail; - } - new->elsize = conv->elsize; - if (conv->names) { - new->fields = conv->fields; - Py_XINCREF(new->fields); - new->names = conv->names; - Py_XINCREF(new->names); - } - new->hasobject = conv->hasobject; - Py_DECREF(conv); - *errflag = 0; - return new; - - fail: - Py_DECREF(conv); - return NULL; - -} - -static PyArray_Descr * -_convert_from_tuple(PyObject *obj) -{ - PyArray_Descr *type, *res; - PyObject *val; - int errflag; - - if (PyTuple_GET_SIZE(obj) != 2) return NULL; - - if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) - return NULL; - val = PyTuple_GET_ITEM(obj,1); - /* try to interpret next item as a type */ - res = _use_inherit(type, val, &errflag); - if (res || errflag) { - Py_DECREF(type); - if (res) return res; - else return NULL; - } - PyErr_Clear(); - /* We get here if res was NULL but errflag wasn't set - --- i.e. the conversion to a data-descr failed in _use_inherit - */ - - if (type->elsize == 0) { /* interpret next item as a typesize */ - int itemsize; - itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - if (error_converting(itemsize)) { - PyErr_SetString(PyExc_ValueError, - "invalid itemsize in generic type "\ - "tuple"); - goto fail; - } - PyArray_DESCR_REPLACE(type); - if (type->type_num == PyArray_UNICODE) - type->elsize = itemsize << 2; - else - type->elsize = itemsize; - } - else { - /* interpret next item as shape (if it's a tuple) - and reset the type to PyArray_VOID with - a new fields attribute. 
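      For example (illustrative), a spec of ('f4', (2, 2)) produces a VOID
      descriptor whose elsize is 4 * 4 = 16 bytes and whose subarray base
      is the float32 descriptor, with the shape tuple stored on the
      subarray.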
- */ - PyArray_Dims shape={NULL,-1}; - PyArray_Descr *newdescr; - if (!(PyArray_IntpConverter(val, &shape)) || - (shape.len > MAX_DIMS)) { - PyDimMem_FREE(shape.ptr); - PyErr_SetString(PyExc_ValueError, - "invalid shape in fixed-type tuple."); - goto fail; - } - /* If (type, 1) was given, it is equivalent to type... - or (type, ()) was given it is equivalent to type... */ - if ((shape.len == 1 && shape.ptr[0] == 1 && PyNumber_Check(val)) || \ - (shape.len == 0 && PyTuple_Check(val))) { - PyDimMem_FREE(shape.ptr); - return type; - } - newdescr = PyArray_DescrNewFromType(PyArray_VOID); - if (newdescr == NULL) {PyDimMem_FREE(shape.ptr); goto fail;} - newdescr->elsize = type->elsize; - newdescr->elsize *= PyArray_MultiplyList(shape.ptr, - shape.len); - PyDimMem_FREE(shape.ptr); - newdescr->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); - newdescr->subarray->base = type; - newdescr->hasobject = type->hasobject; - Py_INCREF(val); - newdescr->subarray->shape = val; - Py_XDECREF(newdescr->fields); - Py_XDECREF(newdescr->names); - newdescr->fields = NULL; - newdescr->names = NULL; - type = newdescr; - } - return type; - - fail: - Py_XDECREF(type); - return NULL; -} - -/* obj is a list. Each item is a tuple with - - (field-name, data-type (either a list or a string), and an optional - shape parameter). -*/ -static PyArray_Descr * -_convert_from_array_descr(PyObject *obj, int align) -{ - int n, i, totalsize; - int ret; - PyObject *fields, *item, *newobj; - PyObject *name, *tup, *title; - PyObject *nameslist; - PyArray_Descr *new; - PyArray_Descr *conv; - int dtypeflags=0; - int maxalign = 0; - - - n = PyList_GET_SIZE(obj); - nameslist = PyTuple_New(n); - if (!nameslist) return NULL; - totalsize = 0; - fields = PyDict_New(); - for (i=0; ihasobject & NPY_FROM_FIELDS); - tup = PyTuple_New((title == NULL ? 2 : 3)); - PyTuple_SET_ITEM(tup, 0, (PyObject *)conv); - if (align) { - int _align; - _align = conv->alignment; - if (_align > 1) totalsize = \ - ((totalsize + _align - 1)/_align)*_align; - maxalign = MAX(maxalign, _align); - } - PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); - - /* Title can be "meta-data". Only insert it - into the fields dictionary if it is a string - */ - if (title != NULL) { - Py_INCREF(title); - PyTuple_SET_ITEM(tup, 2, title); - if (PyString_Check(title) || PyUnicode_Check(title)) - PyDict_SetItem(fields, title, tup); - } - PyDict_SetItem(fields, name, tup); - totalsize += conv->elsize; - Py_DECREF(tup); - } - new = PyArray_DescrNewFromType(PyArray_VOID); - new->fields = fields; - new->names = nameslist; - new->elsize = totalsize; - new->hasobject=dtypeflags; - if (maxalign > 1) { - totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; - } - if (align) new->alignment = maxalign; - return new; - - fail: - Py_DECREF(fields); - Py_DECREF(nameslist); - return NULL; - -} - -/* a list specifying a data-type can just be - a list of formats. The names for the fields - will default to f0, f1, f2, and so forth. 
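   For example (illustrative), ['i4', 'f8'] describes two fields, 'f0'
   (int32 at offset 0) and 'f1' (float64 at offset 4), with a total
   itemsize of 12 bytes when no alignment is requested.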
-*/ - -static PyArray_Descr * -_convert_from_list(PyObject *obj, int align) -{ - int n, i; - int totalsize; - PyObject *fields; - PyArray_Descr *conv=NULL; - PyArray_Descr *new; - PyObject *key, *tup; - PyObject *nameslist=NULL; - int ret; - int maxalign=0; - int dtypeflags=0; - - n = PyList_GET_SIZE(obj); - /* Ignore any empty string at end which _internal._commastring - can produce */ - key = PyList_GET_ITEM(obj, n-1); - if (PyString_Check(key) && PyString_GET_SIZE(key) == 0) n = n-1; - /* End ignore code.*/ - totalsize = 0; - if (n==0) return NULL; - nameslist = PyTuple_New(n); - if (!nameslist) return NULL; - fields = PyDict_New(); - for (i=0; ihasobject & NPY_FROM_FIELDS); - PyTuple_SET_ITEM(tup, 0, (PyObject *)conv); - if (align) { - int _align; - _align = conv->alignment; - if (_align > 1) totalsize = \ - ((totalsize + _align - 1)/_align)*_align; - maxalign = MAX(maxalign, _align); - } - PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); - PyDict_SetItem(fields, key, tup); - Py_DECREF(tup); - PyTuple_SET_ITEM(nameslist, i, key); - totalsize += conv->elsize; - } - new = PyArray_DescrNewFromType(PyArray_VOID); - new->fields = fields; - new->names = nameslist; - new->hasobject=dtypeflags; - if (maxalign > 1) { - totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; - } - if (align) new->alignment = maxalign; - new->elsize = totalsize; - return new; - - fail: - Py_DECREF(nameslist); - Py_DECREF(fields); - return NULL; -} - - -/* comma-separated string */ -/* this is the format developed by the numarray records module */ -/* and implemented by the format parser in that module */ -/* this is an alternative implementation found in the _internal.py - file patterned after that one -- the approach is to try to convert - to a list (with tuples if any repeat information is present) - and then call the _convert_from_list) -*/ - -static PyArray_Descr * -_convert_from_commastring(PyObject *obj, int align) -{ - PyObject *listobj; - PyArray_Descr *res; - PyObject *_numpy_internal; - - if (!PyString_Check(obj)) return NULL; - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - listobj = PyObject_CallMethod(_numpy_internal, "_commastring", - "O", obj); - Py_DECREF(_numpy_internal); - if (!listobj) return NULL; - if (!PyList_Check(listobj) || PyList_GET_SIZE(listobj)<1) { - PyErr_SetString(PyExc_RuntimeError, "_commastring is " \ - "not returning a list with len >= 1"); - return NULL; - } - if (PyList_GET_SIZE(listobj) == 1) { - if (PyArray_DescrConverter(PyList_GET_ITEM(listobj, 0), - &res) == NPY_FAIL) { - res = NULL; - } - } - else { - res = _convert_from_list(listobj, align); - } - Py_DECREF(listobj); - if (!res && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "invalid data-type"); - return NULL; - } - return res; -} - - - -/* a dictionary specifying a data-type - must have at least two and up to four - keys These must all be sequences of the same length. - - "names" --- field names - "formats" --- the data-type descriptors for the field. - - Optional: - - "offsets" --- integers indicating the offset into the - record of the start of the field. - if not given, then "consecutive offsets" - will be assumed and placed in the dictionary. - - "titles" --- Allows the use of an additional key - for the fields dictionary.(if these are strings - or unicode objects) or - this can also be meta-data to - be passed around with the field description. 
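   As an illustrative example, {'names': ['a', 'b'], 'formats': ['i4', 'f8'],
   'offsets': [0, 4]} describes the same 12-byte layout as the list form
   above, but with the field names 'a' and 'b' instead of the default
   'f0' and 'f1'.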
- - Attribute-lookup-based field names merely has to query the fields - dictionary of the data-descriptor. Any result present can be used - to return the correct field. - - So, the notion of what is a name and what is a title is really quite - arbitrary. - - What does distinguish a title, however, is that if it is not None, - it will be placed at the end of the tuple inserted into the - fields dictionary.and can therefore be used to carry meta-data around. - - If the dictionary does not have "names" and "formats" entries, - then it will be checked for conformity and used directly. -*/ - -static PyArray_Descr * -_use_fields_dict(PyObject *obj, int align) -{ - PyObject *_numpy_internal; - PyArray_Descr *res; - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", - "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; -} - -static PyArray_Descr * -_convert_from_dict(PyObject *obj, int align) -{ - PyArray_Descr *new; - PyObject *fields=NULL; - PyObject *names, *offsets, *descrs, *titles; - int n, i; - int totalsize; - int maxalign=0; - int dtypeflags=0; - - fields = PyDict_New(); - if (fields == NULL) return (PyArray_Descr *)PyErr_NoMemory(); - - names = PyDict_GetItemString(obj, "names"); - descrs = PyDict_GetItemString(obj, "formats"); - - if (!names || !descrs) { - Py_DECREF(fields); - return _use_fields_dict(obj, align); - } - n = PyObject_Length(names); - offsets = PyDict_GetItemString(obj, "offsets"); - titles = PyDict_GetItemString(obj, "titles"); - if ((n > PyObject_Length(descrs)) || \ - (offsets && (n > PyObject_Length(offsets))) || \ - (titles && (n > PyObject_Length(titles)))) { - PyErr_SetString(PyExc_ValueError, - "all items in the dictionary must have" \ - " the same length."); - goto fail; - } - - totalsize = 0; - for(i=0; ialignment; - maxalign = MAX(maxalign,_align); - } - if (offsets) { - long offset; - off = PyObject_GetItem(offsets, index); - offset = PyInt_AsLong(off); - PyTuple_SET_ITEM(tup, 1, off); - if (offset < totalsize) { - PyErr_SetString(PyExc_ValueError, - "invalid offset (must be "\ - "ordered)"); - ret = PY_FAIL; - } - if (offset > totalsize) totalsize = offset; - } - else { - if (align && _align > 1) { - totalsize = ((totalsize + _align - 1) \ - /_align)*_align; - } - PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize)); - } - if (len == 3) PyTuple_SET_ITEM(tup, 2, item); - name = PyObject_GetItem(names, index); - Py_DECREF(index); - if (!(PyString_Check(name) || PyUnicode_Check(name))) { - PyErr_SetString(PyExc_ValueError, - "field names must be strings"); - ret = PY_FAIL; - } - - /* Insert into dictionary */ - if (PyDict_GetItem(fields, name) != NULL) { - PyErr_SetString(PyExc_ValueError, - "name already used as a name or "\ - "title"); - ret = PY_FAIL; - } - PyDict_SetItem(fields, name, tup); - Py_DECREF(name); - if (len == 3) { - if ((PyString_Check(item) || PyUnicode_Check(item)) && - PyDict_GetItem(fields, item) != NULL) { - PyErr_SetString(PyExc_ValueError, - "title already used as a "\ - "name or title."); - ret=PY_FAIL; - } - else { - PyDict_SetItem(fields, item, tup); - } - } - Py_DECREF(tup); - if ((ret == PY_FAIL) || (newdescr->elsize == 0)) goto fail; - dtypeflags |= (newdescr->hasobject & NPY_FROM_FIELDS); - totalsize += newdescr->elsize; - } - - new = PyArray_DescrNewFromType(PyArray_VOID); - if (new == NULL) goto fail; - if (maxalign > 1) - totalsize = ((totalsize + maxalign - 1)/maxalign)*maxalign; - if 
(align) new->alignment = maxalign; - new->elsize = totalsize; - if (!PyTuple_Check(names)) { - names = PySequence_Tuple(names); - } - else { - Py_INCREF(names); - } - new->names = names; - new->fields = fields; - new->hasobject = dtypeflags; - return new; - - fail: - Py_XDECREF(fields); - return NULL; -} - -#define _chk_byteorder(arg) (arg == '>' || arg == '<' || \ - arg == '|' || arg == '=') - -static int -_check_for_commastring(char *type, int len) -{ - int i; - - /* Check for ints at start of string */ - if ((type[0] >= '0' && type[0] <= '9') || - ((len > 1) && _chk_byteorder(type[0]) && - (type[1] >= '0' && type[1] <= '9'))) - return 1; - - /* Check for empty tuple */ - if (((len > 1) && (type[0] == '(' && type[1] == ')')) || - ((len > 3) && _chk_byteorder(type[0]) && - (type[1] == '(' && type[2] == ')'))) - return 1; - - /* Check for presence of commas */ - for (i=1;i= size computed from fields - - The .fields attribute must return a convertible dictionary if - present. Result inherits from PyArray_VOID. -*/ - - -/*MULTIARRAY_API - Get type-descriptor from an object forcing alignment if possible - None goes to DEFAULT type. -*/ -static int -PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) -{ - if PyDict_Check(obj) { - *at = _convert_from_dict(obj, 1); - } - else if PyString_Check(obj) { - *at = _convert_from_commastring(obj, 1); - } - else if PyList_Check(obj) { - *at = _convert_from_array_descr(obj, 1); - } - else { - return PyArray_DescrConverter(obj, at); - } - if (*at == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); - } - return PY_FAIL; - } - return PY_SUCCEED; -} - -/*MULTIARRAY_API - Get type-descriptor from an object forcing alignment if possible - None goes to NULL. -*/ -static int -PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) -{ - if PyDict_Check(obj) { - *at = _convert_from_dict(obj, 1); - } - else if PyString_Check(obj) { - *at = _convert_from_commastring(obj, 1); - } - else if PyList_Check(obj) { - *at = _convert_from_array_descr(obj, 1); - } - else { - return PyArray_DescrConverter2(obj, at); - } - if (*at == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); - } - return PY_FAIL; - } - return PY_SUCCEED; -} - - -/*MULTIARRAY_API - Get typenum from an object -- None goes to NULL -*/ -static int -PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) -{ - if (obj == Py_None) { - *at = NULL; - return PY_SUCCEED; - } - else return PyArray_DescrConverter(obj, at); -} - -/* This function takes a Python object representing a type and converts it - to a the correct PyArray_Descr * structure to describe the type. - - Many objects can be used to represent a data-type which in NumPy is - quite a flexible concept. - - This is the central code that converts Python objects to - Type-descriptor objects that are used throughout numpy. 
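   Examples of accepted inputs (illustrative, drawn from the cases handled
   below): None (the default type), Python scalar types such as float
   (DOUBLE), typecode strings such as '>i4' or 'c8', tuples such as
   ('f4', (2, 2)), lists of field tuples, and dictionaries with
   'names'/'formats' keys.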
-*/ - -/* new reference in *at */ -/*MULTIARRAY_API - Get typenum from an object -- None goes to PyArray_DEFAULT -*/ -static int -PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) -{ - char *type; - int check_num=PyArray_NOTYPE+10; - int len; - PyObject *item; - int elsize = 0; - char endian = '='; - - *at=NULL; - - /* default */ - if (obj == Py_None) { - *at = PyArray_DescrFromType(PyArray_DEFAULT); - return PY_SUCCEED; - } - - if (PyArray_DescrCheck(obj)) { - *at = (PyArray_Descr *)obj; - Py_INCREF(*at); - return PY_SUCCEED; - } - - if (PyType_Check(obj)) { - if (PyType_IsSubtype((PyTypeObject *)obj, - &PyGenericArrType_Type)) { - *at = PyArray_DescrFromTypeObject(obj); - if (*at) return PY_SUCCEED; - else return PY_FAIL; - } - check_num = PyArray_OBJECT; - if (obj == (PyObject *)(&PyInt_Type)) - check_num = PyArray_LONG; - else if (obj == (PyObject *)(&PyLong_Type)) - check_num = PyArray_LONGLONG; - else if (obj == (PyObject *)(&PyFloat_Type)) - check_num = PyArray_DOUBLE; - else if (obj == (PyObject *)(&PyComplex_Type)) - check_num = PyArray_CDOUBLE; - else if (obj == (PyObject *)(&PyBool_Type)) - check_num = PyArray_BOOL; - else if (obj == (PyObject *)(&PyString_Type)) - check_num = PyArray_STRING; - else if (obj == (PyObject *)(&PyUnicode_Type)) - check_num = PyArray_UNICODE; - else if (obj == (PyObject *)(&PyBuffer_Type)) - check_num = PyArray_VOID; - else { - *at = _arraydescr_fromobj(obj); - if (*at) return PY_SUCCEED; - } - goto finish; - } - - /* or a typecode string */ - - if (PyString_Check(obj)) { - /* Check for a string typecode. */ - type = PyString_AS_STRING(obj); - len = PyString_GET_SIZE(obj); - if (len <= 0) goto fail; - - /* check for commas present - or first (or second) element a digit */ - if (_check_for_commastring(type, len)) { - *at = _convert_from_commastring(obj, 0); - if (*at) return PY_SUCCEED; - return PY_FAIL; - } - check_num = (int) type[0]; - if ((char) check_num == '>' || (char) check_num == '<' || \ - (char) check_num == '|' || (char) check_num == '=') { - if (len <= 1) goto fail; - endian = (char) check_num; - type++; len--; - check_num = (int) type[0]; - if (endian == '|') endian = '='; - } - if (len > 1) { - elsize = atoi(type+1); - if (elsize == 0) { - check_num = PyArray_NOTYPE+10; - } - /* When specifying length of UNICODE - the number of characters is given to match - the STRING interface. Each character can be - more than one byte and itemsize must be - the number of bytes. - */ - else if (check_num == PyArray_UNICODELTR) { - elsize <<= 2; - } - /* Support for generic processing - c4, i4, f8, etc... 
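A short illustration (editorial sketch, not in the patch) of the string branch being handled here, using the same noprefix names as this file; note that for 'U' the count is characters, so the element size comes back multiplied by four:

    PyArray_Descr *d = NULL;
    PyObject *typestr = PyString_FromString("<i4");

    if (PyArray_DescrConverter(typestr, &d)) {       /* PY_SUCCEED != 0 */
        /* d->elsize == 4; d->byteorder is '=' when '<' is already the
           native order, otherwise it is set to '<'. */
        Py_DECREF(d);
    }
    Py_DECREF(typestr);

    typestr = PyString_FromString(">U8");
    if (PyArray_DescrConverter(typestr, &d)) {
        /* 8 unicode characters -> d->elsize == 32 bytes */
        Py_DECREF(d);
    }
    Py_DECREF(typestr);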
- */ - else if ((check_num != PyArray_STRINGLTR) && \ - (check_num != PyArray_VOIDLTR) && \ - (check_num != PyArray_STRINGLTR2)) { - check_num = \ - PyArray_TypestrConvert(elsize, - check_num); - if (check_num == PyArray_NOTYPE) - check_num += 10; - elsize = 0; - } - } - } - /* or a tuple */ - else if (PyTuple_Check(obj)) { - *at = _convert_from_tuple(obj); - if (*at == NULL){ - if (PyErr_Occurred()) return PY_FAIL; - goto fail; - } - return PY_SUCCEED; - } - /* or a list */ - else if (PyList_Check(obj)) { - *at = _convert_from_array_descr(obj,0); - if (*at == NULL) { - if (PyErr_Occurred()) return PY_FAIL; - goto fail; - } - return PY_SUCCEED; - } - /* or a dictionary */ - else if (PyDict_Check(obj)) { - *at = _convert_from_dict(obj,0); - if (*at == NULL) { - if (PyErr_Occurred()) return PY_FAIL; - goto fail; - } - return PY_SUCCEED; - } - else if (PyArray_Check(obj)) goto fail; - else /* goto fail;*/ { - *at = _arraydescr_fromobj(obj); - if (*at) return PY_SUCCEED; - if (PyErr_Occurred()) return PY_FAIL; - goto fail; - } - if (PyErr_Occurred()) goto fail; - - /* - if (check_num == PyArray_NOTYPE) return PY_FAIL; - */ - - finish: - if ((check_num == PyArray_NOTYPE+10) || \ - (*at = PyArray_DescrFromType(check_num))==NULL) { - /* Now check to see if the object is registered - in typeDict */ - if (typeDict != NULL) { - item = PyDict_GetItem(typeDict, obj); - if (item) return PyArray_DescrConverter(item, at); - } - goto fail; - } - - if (((*at)->elsize == 0) && (elsize != 0)) { - PyArray_DESCR_REPLACE(*at); - (*at)->elsize = elsize; - } - if (endian != '=' && PyArray_ISNBO(endian)) endian = '='; - - if (endian != '=' && (*at)->byteorder != '|' && \ - (*at)->byteorder != endian) { - PyArray_DESCR_REPLACE(*at); - (*at)->byteorder = endian; - } - - return PY_SUCCEED; - - fail: - PyErr_SetString(PyExc_TypeError, - "data type not understood"); - *at=NULL; - return PY_FAIL; -} - -/*MULTIARRAY_API - Convert object to endian -*/ -static int -PyArray_ByteorderConverter(PyObject *obj, char *endian) -{ - char *str; - *endian = PyArray_SWAP; - str = PyString_AsString(obj); - if (!str) return PY_FAIL; - if (strlen(str) < 1) { - PyErr_SetString(PyExc_ValueError, - "Byteorder string must be at least length 1"); - return PY_FAIL; - } - *endian = str[0]; - if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE && \ - str[0] != PyArray_NATIVE) { - if (str[0] == 'b' || str[0] == 'B') - *endian = PyArray_BIG; - else if (str[0] == 'l' || str[0] == 'L') - *endian = PyArray_LITTLE; - else if (str[0] == 'n' || str[0] == 'N') - *endian = PyArray_NATIVE; - else if (str[0] == 'i' || str[0] == 'I') - *endian = PyArray_IGNORE; - else if (str[0] == 's' || str[0] == 'S') - *endian = PyArray_SWAP; - else { - PyErr_Format(PyExc_ValueError, - "%s is an unrecognized byteorder", - str); - return PY_FAIL; - } - } - return PY_SUCCEED; -} - -/*MULTIARRAY_API - Convert object to sort kind -*/ -static int -PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) -{ - char *str; - *sortkind = PyArray_QUICKSORT; - str = PyString_AsString(obj); - if (!str) return PY_FAIL; - if (strlen(str) < 1) { - PyErr_SetString(PyExc_ValueError, - "Sort kind string must be at least length 1"); - return PY_FAIL; - } - if (str[0] == 'q' || str[0] == 'Q') - *sortkind = PyArray_QUICKSORT; - else if (str[0] == 'h' || str[0] == 'H') - *sortkind = PyArray_HEAPSORT; - else if (str[0] == 'm' || str[0] == 'M') - *sortkind = PyArray_MERGESORT; - else { - PyErr_Format(PyExc_ValueError, - "%s is an unrecognized kind of sort", - str); - return PY_FAIL; - 
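Both converters just above follow the "O&" protocol of PyArg_ParseTuple (write through the output pointer, return PY_SUCCEED/PY_FAIL), so they are typically consumed as shown in this hedged sketch; the method name example_parse is hypothetical:

    static PyObject *
    example_parse(PyObject *self, PyObject *args, PyObject *kwds)
    {
        char endian = PyArray_NATIVE;
        NPY_SORTKIND kind = PyArray_QUICKSORT;
        static char *kwlist[] = {"byteorder", "kind", NULL};

        /* '<', '>', '=', 'little', 'big', ... and 'quicksort', 'heapsort',
           'mergesort' are all reduced to their first character. */
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
                                         PyArray_ByteorderConverter, &endian,
                                         PyArray_SortkindConverter, &kind)) {
            return NULL;
        }
        return Py_BuildValue("(ci)", endian, (int)kind);
    }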
} - return PY_SUCCEED; -} - - -/* compare the field dictionary for two types - return 1 if the same or 0 if not -*/ - -static int -_equivalent_fields(PyObject *field1, PyObject *field2) { - - int same, val; - - if (field1 == field2) return 1; - if (field1 == NULL || field2 == NULL) return 0; - val = PyObject_Compare(field1, field2); - if (val != 0 || PyErr_Occurred()) same = 0; - else same = 1; - PyErr_Clear(); - return same; -} - -/* This function returns true if the two typecodes are - equivalent (same basic kind and same itemsize). -*/ - -/*MULTIARRAY_API*/ -static unsigned char -PyArray_EquivTypes(PyArray_Descr *typ1, PyArray_Descr *typ2) -{ - register int typenum1=typ1->type_num; - register int typenum2=typ2->type_num; - register int size1=typ1->elsize; - register int size2=typ2->elsize; - - if (size1 != size2) return FALSE; - - if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder)) - return FALSE; - - if (typenum1 == PyArray_VOID || \ - typenum2 == PyArray_VOID) { - return ((typenum1 == typenum2) && - _equivalent_fields(typ1->fields, typ2->fields)); - } - return (typ1->kind == typ2->kind); -} - -/*MULTIARRAY_API*/ -static unsigned char -PyArray_EquivTypenums(int typenum1, int typenum2) -{ - PyArray_Descr *d1, *d2; - Bool ret; - d1 = PyArray_DescrFromType(typenum1); - d2 = PyArray_DescrFromType(typenum2); - ret = PyArray_EquivTypes(d1, d2); - Py_DECREF(d1); - Py_DECREF(d2); - return ret; -} - -/*** END C-API FUNCTIONS **/ - -static PyObject * -_prepend_ones(PyArrayObject *arr, int nd, int ndmin) -{ - intp newdims[MAX_DIMS]; - intp newstrides[MAX_DIMS]; - int i,k,num; - PyObject *ret; - - num = ndmin-nd; - for (i=0; idescr->elsize; - } - for (i=num;idimensions[k]; - newstrides[i] = arr->strides[k]; - } - Py_INCREF(arr->descr); - ret = PyArray_NewFromDescr(arr->ob_type, arr->descr, ndmin, - newdims, newstrides, arr->data, arr->flags, - (PyObject *)arr); - /* steals a reference to arr --- so don't increment - here */ - PyArray_BASE(ret) = (PyObject *)arr; - return ret; -} - - -#define _ARET(x) PyArray_Return((PyArrayObject *)(x)) - -#define STRIDING_OK(op, order) ((order) == PyArray_ANYORDER || \ - ((order) == PyArray_CORDER && \ - PyArray_ISCONTIGUOUS(op)) || \ - ((order) == PyArray_FORTRANORDER && \ - PyArray_ISFORTRAN(op))) - -static PyObject * -_array_fromobject(PyObject *ignored, PyObject *args, PyObject *kws) -{ - PyObject *op, *ret=NULL; - static char *kwd[]= {"object", "dtype", "copy", "order", "subok", - "ndmin", NULL}; - Bool subok=FALSE; - Bool copy=TRUE; - int ndmin=0, nd; - PyArray_Descr *type=NULL; - PyArray_Descr *oldtype=NULL; - NPY_ORDER order=PyArray_ANYORDER; - int flags=0; - - if (PyTuple_GET_SIZE(args) > 2) { - PyErr_SetString(PyExc_ValueError, - "only 2 non-keyword arguments accepted"); - return NULL; - } - - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i", kwd, &op, - PyArray_DescrConverter2, - &type, - PyArray_BoolConverter, ©, - PyArray_OrderConverter, &order, - PyArray_BoolConverter, &subok, - &ndmin)) - return NULL; - - /* fast exit if simple call */ - if ((subok && PyArray_Check(op)) || - (!subok && PyArray_CheckExact(op))) { - if (type==NULL) { - if (!copy && STRIDING_OK(op, order)) { - Py_INCREF(op); - ret = op; - goto finish; - } - else { - ret = PyArray_NewCopy((PyArrayObject*)op, - order); - goto finish; - } - } - /* One more chance */ - oldtype = PyArray_DESCR(op); - if (PyArray_EquivTypes(oldtype, type)) { - if (!copy && STRIDING_OK(op, order)) { - Py_INCREF(op); - ret = op; - goto finish; - } - else { - ret = 
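Since PyArray_EquivTypes above compares itemsize, native/non-native byte order and kind (plus the fields dictionaries for VOID types), here is a small sketch of both entry points (illustrative only, assuming an initialized numpy C-API):

    PyArray_Descr *a = PyArray_DescrFromType(PyArray_INT);
    PyArray_Descr *b = PyArray_DescrNewByteorder(a, PyArray_SWAP);

    if (b != NULL && !PyArray_EquivTypes(a, b)) {
        /* same kind and itemsize, but opposite byte order -> not equivalent */
    }
    Py_DECREF(a);
    Py_XDECREF(b);

    if (PyArray_EquivTypenums(PyArray_INT, PyArray_LONG)) {
        /* true exactly when sizeof(int) == sizeof(long) on this platform */
    }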
PyArray_NewCopy((PyArrayObject*)op, - order); - if (oldtype == type) goto finish; - Py_INCREF(oldtype); - Py_DECREF(PyArray_DESCR(ret)); - PyArray_DESCR(ret) = oldtype; - goto finish; - } - } - } - - if (copy) { - flags = ENSURECOPY; - } - if (order == PyArray_CORDER) { - flags |= CONTIGUOUS; - } - else if ((order == PyArray_FORTRANORDER) || - /* order == PyArray_ANYORDER && */ - (PyArray_Check(op) && PyArray_ISFORTRAN(op))) { - flags |= FORTRAN; - } - if (!subok) { - flags |= ENSUREARRAY; - } - - flags |= NPY_FORCECAST; - - ret = PyArray_CheckFromAny(op, type, 0, 0, flags, NULL); - - finish: - if (!ret || (nd=PyArray_NDIM(ret)) >= ndmin) return ret; - /* create a new array from the same data with ones in the shape */ - /* steals a reference to ret */ - return _prepend_ones((PyArrayObject *)ret, nd, ndmin); -} - -/* accepts NULL type */ -/* steals referenct to type */ -/*MULTIARRAY_API - Empty -*/ -static PyObject * -PyArray_Empty(int nd, intp *dims, PyArray_Descr *type, int fortran) -{ - PyArrayObject *ret; - - if (!type) type = PyArray_DescrFromType(PyArray_DEFAULT); - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - type, nd, dims, - NULL, NULL, - fortran, NULL); - if (ret == NULL) return NULL; - - if (PyDataType_REFCHK(type)) { - PyArray_FillObjectArray(ret, Py_None); - if (PyErr_Occurred()) {Py_DECREF(ret); return NULL;} - } - return (PyObject *)ret; -} - -static PyObject * -array_empty(PyObject *ignored, PyObject *args, PyObject *kwds) -{ - - static char *kwlist[] = {"shape","dtype","order",NULL}; - PyArray_Descr *typecode=NULL; - PyArray_Dims shape = {NULL, 0}; - NPY_ORDER order = PyArray_CORDER; - Bool fortran; - PyObject *ret=NULL; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", - kwlist, PyArray_IntpConverter, - &shape, - PyArray_DescrConverter, - &typecode, - PyArray_OrderConverter, &order)) - goto fail; - - if (order == PyArray_FORTRANORDER) fortran = TRUE; - else fortran = FALSE; - - ret = PyArray_Empty(shape.len, shape.ptr, typecode, fortran); - PyDimMem_FREE(shape.ptr); - return ret; - - fail: - PyDimMem_FREE(shape.ptr); - return ret; -} - -/* This function is needed for supporting Pickles of - numpy scalar objects. 
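The pickle-support helper introduced by this comment boils down to a call to PyArray_Scalar on a raw buffer; a minimal direct use of that API (a sketch, not part of the patch; as used elsewhere in this file, PyArray_Scalar does not steal the descriptor reference):

    double value = 3.5;
    PyArray_Descr *descr = PyArray_DescrFromType(PyArray_DOUBLE);
    PyObject *scalar;

    /* Copies sizeof(double) bytes from &value into a new float64
       array-scalar object (or returns NULL with an error set). */
    scalar = PyArray_Scalar((void *)&value, descr, NULL);
    Py_DECREF(descr);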
-*/ -static PyObject * -array_scalar(PyObject *ignored, PyObject *args, PyObject *kwds) -{ - - static char *kwlist[] = {"dtype","obj", NULL}; - PyArray_Descr *typecode; - PyObject *obj=NULL; - int alloc=0; - void *dptr; - PyObject *ret; - - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|O", - kwlist, &PyArrayDescr_Type, - &typecode, - &obj)) - return NULL; - - if (typecode->elsize == 0) { - PyErr_SetString(PyExc_ValueError, \ - "itemsize cannot be zero"); - return NULL; - } - - if (PyDataType_FLAGCHK(typecode, NPY_ITEM_IS_POINTER)) { - if (obj == NULL) obj = Py_None; - dptr = &obj; - } - else { - if (obj == NULL) { - dptr = _pya_malloc(typecode->elsize); - if (dptr == NULL) { - return PyErr_NoMemory(); - } - memset(dptr, '\0', typecode->elsize); - alloc = 1; - } - else { - if (!PyString_Check(obj)) { - PyErr_SetString(PyExc_TypeError, - "initializing object must "\ - "be a string"); - return NULL; - } - if (PyString_GET_SIZE(obj) < typecode->elsize) { - PyErr_SetString(PyExc_ValueError, - "initialization string is too"\ - " small"); - return NULL; - } - dptr = PyString_AS_STRING(obj); - } - } - - ret = PyArray_Scalar(dptr, typecode, NULL); - - /* free dptr which contains zeros */ - if (alloc) _pya_free(dptr); - return ret; -} - - -/* steal a reference */ -/* accepts NULL type */ -/*MULTIARRAY_API - Zeros -*/ -static PyObject * -PyArray_Zeros(int nd, intp *dims, PyArray_Descr *type, int fortran) -{ - PyArrayObject *ret; - intp n; - - if (!type) type = PyArray_DescrFromType(PyArray_DEFAULT); - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - type, - nd, dims, - NULL, NULL, - fortran, NULL); - if (ret == NULL) return NULL; - - if (PyDataType_REFCHK(type)) { - PyObject *zero = PyInt_FromLong(0); - PyArray_FillObjectArray(ret, zero); - Py_DECREF(zero); - if (PyErr_Occurred()) {Py_DECREF(ret); return NULL;} - } - else { - n = PyArray_NBYTES(ret); - memset(ret->data, 0, n); - } - return (PyObject *)ret; - -} - -static PyObject * -array_zeros(PyObject *ignored, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"shape","dtype","order",NULL}; /* XXX ? */ - PyArray_Descr *typecode=NULL; - PyArray_Dims shape = {NULL, 0}; - NPY_ORDER order = PyArray_CORDER; - Bool fortran = FALSE; - PyObject *ret=NULL; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", - kwlist, PyArray_IntpConverter, - &shape, - PyArray_DescrConverter, - &typecode, - PyArray_OrderConverter, - &order)) - goto fail; - - if (order == PyArray_FORTRANORDER) fortran = TRUE; - else fortran = FALSE; - ret = PyArray_Zeros(shape.len, shape.ptr, typecode, (int) fortran); - PyDimMem_FREE(shape.ptr); - return ret; - - fail: - PyDimMem_FREE(shape.ptr); - return ret; -} - -static PyObject * -array_set_typeDict(PyObject *ignored, PyObject *args) -{ - PyObject *dict; - if (!PyArg_ParseTuple(args, "O", &dict)) return NULL; - Py_XDECREF(typeDict); /* Decrement old reference (if any)*/ - typeDict = dict; - Py_INCREF(dict); /* Create an internal reference to it */ - Py_INCREF(Py_None); - return Py_None; -} - - -/* Reading from a file or a string. - - As much as possible, we try to use the same code for both files and strings, - so the semantics for fromstring and fromfile are the same, especially with - regards to the handling of text representations. 
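Stepping back to the allocation helpers defined just above: PyArray_Zeros and PyArray_Empty accept a NULL descriptor (meaning the default, double) and steal the descriptor reference they are given. A sketch using the file's noprefix names and a hypothetical example_zeros helper:

    static PyObject *
    example_zeros(void)
    {
        intp dims[2] = {3, 4};
        PyArray_Descr *descr = PyArray_DescrFromType(PyArray_FLOAT);

        if (descr == NULL) {
            return NULL;
        }
        /* The reference to descr is stolen, so no Py_DECREF here. */
        return PyArray_Zeros(2, dims, descr, 1 /* fortran order */);
    }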
-*/ - - -typedef int (*next_element)(void **, void *, PyArray_Descr *, void *); -typedef int (*skip_separator)(void **, const char *, void *); - -static int -fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype, - const char *end) -{ - int r = dtype->f->fromstr(*s, dptr, s, dtype); - if (end != NULL && *s > end) { - return -1; - } - return r; -} - -static int -fromfile_next_element(FILE **fp, void *dptr, PyArray_Descr *dtype, - void *stream_data) -{ - /* the NULL argument is for backwards-compatibility */ - return dtype->f->scanfunc(*fp, dptr, NULL, dtype); -} - -/* Remove multiple whitespace from the separator, and add a space to the - beginning and end. This simplifies the separator-skipping code below. -*/ -static char * -swab_separator(char *sep) -{ - int skip_space = 0; - char *s, *start; - s = start = malloc(strlen(sep)+3); - /* add space to front if there isn't one */ - if (*sep != '\0' && !isspace(*sep)) { - *s = ' '; s++; - } - while (*sep != '\0') { - if (isspace(*sep)) { - if (skip_space) { - sep++; - } else { - *s = ' '; - s++; sep++; - skip_space = 1; - } - } else { - *s = *sep; - s++; sep++; - skip_space = 0; - } - } - /* add space to end if there isn't one */ - if (s != start && s[-1] == ' ') { - *s = ' '; - s++; - } - *s = '\0'; - return start; -} - -/* Assuming that the separator is the next bit in the string (file), skip it. - - Single spaces in the separator are matched to arbitrary-long sequences - of whitespace in the input. - - If we can't match the separator, return -2. - If we hit the end of the string (file), return -1. - Otherwise, return 0. -*/ - -static int -fromstr_skip_separator(char **s, const char *sep, const char *end) -{ - char *string = *s; - int result = 0; - while (1) { - char c = *string; - if (c == '\0' || (end != NULL && string >= end)) { - result = -1; - break; - } else if (*sep == '\0') { - /* matched separator */ - result = 0; - break; - } else if (*sep == ' ') { - if (!isspace(c)) { - sep++; - continue; - } - } else if (*sep != c) { - result = -2; - break; - } else { - sep++; - } - string++; - } - *s = string; - return result; -} - -static int -fromfile_skip_separator(FILE **fp, const char *sep, void *stream_data) -{ - int result = 0; - while (1) { - int c = fgetc(*fp); - if (c == EOF) { - result = -1; - break; - } else if (*sep == '\0') { - /* matched separator */ - ungetc(c, *fp); - result = 0; - break; - } else if (*sep == ' ') { - if (!isspace(c)) { - sep++; - ungetc(c, *fp); - } - } else if (*sep != c) { - ungetc(c, *fp); - result = -2; - break; - } else { - sep++; - } - } - return result; -} - -/* Create an array by reading from the given stream, using the passed - next_element and skip_separator functions. -*/ - -#define FROM_BUFFER_SIZE 4096 -static PyArrayObject * -array_from_text(PyArray_Descr *dtype, intp num, char *sep, size_t *nread, - void *stream, next_element next, skip_separator skip_sep, - void *stream_data) -{ - PyArrayObject *r; - intp i; - char *dptr, *clean_sep, *tmp; - int err = 0; - intp thisbuf = 0; - intp size; - intp bytes, totalbytes; - - size = (num >= 0) ? 
num : FROM_BUFFER_SIZE; - - r = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, - dtype, - 1, &size, - NULL, NULL, - 0, NULL); - if (r == NULL) return NULL; - clean_sep = swab_separator(sep); - NPY_BEGIN_ALLOW_THREADS; - totalbytes = bytes = size * dtype->elsize; - dptr = r->data; - for (i=0; num < 0 || i < num; i++) { - if (next(&stream, dptr, dtype, stream_data) < 0) - break; - *nread += 1; - thisbuf += 1; - dptr += dtype->elsize; - if (num < 0 && thisbuf == size) { - totalbytes += bytes; - tmp = PyDataMem_RENEW(r->data, totalbytes); - if (tmp == NULL) { - err = 1; - break; - } - r->data = tmp; - dptr = tmp + (totalbytes - bytes); - thisbuf = 0; - } - if (skip_sep(&stream, clean_sep, stream_data) < 0) - break; - } - if (num < 0) { - tmp = PyDataMem_RENEW(r->data, (*nread)*dtype->elsize); - if (tmp == NULL) err=1; - else { - PyArray_DIM(r,0) = *nread; - r->data = tmp; - } - } - NPY_END_ALLOW_THREADS; - free(clean_sep); - if (err == 1) PyErr_NoMemory(); - if (PyErr_Occurred()) { - Py_DECREF(r); - return NULL; - } - return r; -} -#undef FROM_BUFFER_SIZE - -/*OBJECT_API - - Given a pointer to a string ``data``, a string length ``slen``, and - a ``PyArray_Descr``, return an array corresponding to the data - encoded in that string. - - If the dtype is NULL, the default array type is used (double). - If non-null, the reference is stolen. - - If ``slen`` is < 0, then the end of string is used for text data. - It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs - would be the norm). - - The number of elements to read is given as ``num``; if it is < 0, then - then as many as possible are read. - - If ``sep`` is NULL or empty, then binary data is assumed, else - text data, with ``sep`` as the separator between elements. Whitespace in - the separator matches any length of whitespace in the text, and a match - for whitespace around the separator is added. 
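Before the implementation, a short illustration of the text mode this comment describes (an editorial sketch, not in the patch): a single space inside sep matches any run of whitespace, so ", " tolerates ragged spacing around the commas.

    char text[] = "1, 2,   3,4";
    PyArray_Descr *descr = PyArray_DescrFromType(PyArray_INT);
    PyObject *arr;

    /* The descr reference is stolen; num = -1 means "read everything".
       The result is a 1-d int array with the four elements 1, 2, 3, 4. */
    arr = PyArray_FromString(text, (intp)(sizeof(text) - 1), descr, -1, ", ");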
-*/ -static PyObject * -PyArray_FromString(char *data, intp slen, PyArray_Descr *dtype, - intp num, char *sep) -{ - int itemsize; - PyArrayObject *ret; - Bool binary; - - if (dtype == NULL) - dtype=PyArray_DescrFromType(PyArray_DEFAULT); - - if (PyDataType_FLAGCHK(dtype, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "Cannot create an object array from" \ - " a string"); - Py_DECREF(dtype); - return NULL; - } - - itemsize = dtype->elsize; - if (itemsize == 0) { - PyErr_SetString(PyExc_ValueError, "zero-valued itemsize"); - Py_DECREF(dtype); - return NULL; - } - - binary = ((sep == NULL) || (strlen(sep) == 0)); - - if (binary) { - if (num < 0 ) { - if (slen % itemsize != 0) { - PyErr_SetString(PyExc_ValueError, - "string size must be a "\ - "multiple of element size"); - Py_DECREF(dtype); - return NULL; - } - num = slen/itemsize; - } else { - if (slen < num*itemsize) { - PyErr_SetString(PyExc_ValueError, - "string is smaller than " \ - "requested size"); - Py_DECREF(dtype); - return NULL; - } - } - - ret = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, dtype, - 1, &num, NULL, NULL, - 0, NULL); - if (ret == NULL) return NULL; - memcpy(ret->data, data, num*dtype->elsize); - } else { - /* read from character-based string */ - size_t nread = 0; - char *end; - if (dtype->f->scanfunc == NULL) { - PyErr_SetString(PyExc_ValueError, - "don't know how to read " \ - "character strings with that " \ - "array type"); - Py_DECREF(dtype); - return NULL; - } - if (slen < 0) { - end = NULL; - } else { - end = data + slen; - } - ret = array_from_text(dtype, num, sep, &nread, - data, - (next_element) fromstr_next_element, - (skip_separator) fromstr_skip_separator, - end); - } - return (PyObject *)ret; -} - -static PyObject * -array_fromstring(PyObject *ignored, PyObject *args, PyObject *keywds) -{ - char *data; - Py_ssize_t nin=-1; - char *sep=NULL; - Py_ssize_t s; - static char *kwlist[] = {"string", "dtype", "count", "sep", NULL}; - PyArray_Descr *descr=NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, "s#|O&" - NPY_SSIZE_T_PYFMT "s", kwlist, - &data, &s, - PyArray_DescrConverter, &descr, - &nin, &sep)) { - return NULL; - } - - return PyArray_FromString(data, (intp)s, descr, (intp)nin, sep); -} - - - -static PyArrayObject * -array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, intp num, size_t *nread) -{ - PyArrayObject *r; - intp start, numbytes; - - if (num < 0) { - int fail=0; - start = (intp )ftell(fp); - if (start < 0) fail=1; - if (fseek(fp, 0, SEEK_END) < 0) fail=1; - numbytes = (intp) ftell(fp); - if (numbytes < 0) fail=1; - numbytes -= start; - if (fseek(fp, start, SEEK_SET) < 0) fail=1; - if (fail) { - PyErr_SetString(PyExc_IOError, - "could not seek in file"); - Py_DECREF(dtype); - return NULL; - } - num = numbytes / dtype->elsize; - } - r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtype, - 1, &num, - NULL, NULL, - 0, NULL); - if (r==NULL) return NULL; - NPY_BEGIN_ALLOW_THREADS; - *nread = fread(r->data, dtype->elsize, num, fp); - NPY_END_ALLOW_THREADS; - return r; -} - -/*OBJECT_API - - Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an - array corresponding to the data encoded in that file. - - If the dtype is NULL, the default array type is used (double). - If non-null, the reference is stolen. - - The number of elements to read is given as ``num``; if it is < 0, then - then as many as possible are read. - - If ``sep`` is NULL or empty, then binary data is assumed, else - text data, with ``sep`` as the separator between elements. 
Whitespace in - the separator matches any length of whitespace in the text, and a match - for whitespace around the separator is added. - - For memory-mapped files, use the buffer interface. No more data than - necessary is read by this routine. -*/ -static PyObject * -PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep) -{ - PyArrayObject *ret; - size_t nread = 0; - char *tmp; - - if (PyDataType_REFCHK(dtype)) { - PyErr_SetString(PyExc_ValueError, - "cannot read into object array"); - Py_DECREF(dtype); - return NULL; - } - if (dtype->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "0-sized elements."); - Py_DECREF(dtype); - return NULL; - } - - if ((sep == NULL) || (strlen(sep) == 0)) { - ret = array_fromfile_binary(fp, dtype, num, &nread); - } else { - if (dtype->f->scanfunc == NULL) { - PyErr_SetString(PyExc_ValueError, - "don't know how to read " \ - "character files with that " \ - "array type"); - Py_DECREF(dtype); - return NULL; - } - ret = array_from_text(dtype, num, sep, &nread, - fp, - (next_element) fromfile_next_element, - (skip_separator) fromfile_skip_separator, - NULL); - } - if (((intp) nread) < num) { - fprintf(stderr, "%ld items requested but only %ld read\n", - (long) num, (long) nread); - tmp = PyDataMem_RENEW(ret->data, - nread * ret->descr->elsize); - if (tmp == NULL) { - Py_DECREF(ret); - return PyErr_NoMemory(); - } - ret->data = tmp; - PyArray_DIM(ret,0) = nread; - } - return (PyObject *)ret; -} - -static PyObject * -array_fromfile(PyObject *ignored, PyObject *args, PyObject *keywds) -{ - PyObject *file=NULL, *ret; - FILE *fp; - char *sep=""; - Py_ssize_t nin=-1; - static char *kwlist[] = {"file", "dtype", "count", "sep", NULL}; - PyArray_Descr *type=NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, - "O|O&" NPY_SSIZE_T_PYFMT "s", - kwlist, - &file, - PyArray_DescrConverter, &type, - &nin, &sep)) { - return NULL; - } - - if (type == NULL) type = PyArray_DescrFromType(PyArray_DEFAULT); - - if (PyString_Check(file) || PyUnicode_Check(file)) { - file = PyObject_CallFunction((PyObject *)&PyFile_Type, - "Os", file, "rb"); - if (file==NULL) return NULL; - } - else { - Py_INCREF(file); - } - fp = PyFile_AsFile(file); - if (fp == NULL) { - PyErr_SetString(PyExc_IOError, - "first argument must be an open file"); - Py_DECREF(file); - return NULL; - } - ret = PyArray_FromFile(fp, type, (intp) nin, sep); - Py_DECREF(file); - return ret; -} - - -/* steals a reference to dtype (which cannot be NULL) */ -/*OBJECT_API */ -static PyObject * -PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, intp count) -{ - PyObject *value; - PyObject *iter = PyObject_GetIter(obj); - PyArrayObject *ret = NULL; - intp i, elsize, elcount; - char *item, *new_data; - - if (iter == NULL) goto done; - - elcount = (count < 0) ? 0 : count; - elsize = dtype->elsize; - - /* We would need to alter the memory RENEW code to decrement any - reference counts before throwing away any memory. - */ - if (PyDataType_REFCHK(dtype)) { - PyErr_SetString(PyExc_ValueError, "cannot create "\ - "object arrays from iterator"); - goto done; - } - - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, - &elcount, NULL,NULL, 0, NULL); - dtype = NULL; - if (ret == NULL) goto done; - - for (i = 0; (i < count || count == -1) && - (value = PyIter_Next(iter)); i++) { - - if (i >= elcount) { - /* - Grow ret->data: - this is similar for the strategy for PyListObject, but we use - 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... - */ - elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (elcount <= (intp)((~(size_t)0) / elsize)) - new_data = PyDataMem_RENEW(ret->data, elcount * elsize); - else - new_data = NULL; - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); - Py_DECREF(value); - goto done; - } - ret->data = new_data; - } - ret->dimensions[0] = i+1; - - if (((item = index2ptr(ret, i)) == NULL) || - (ret->descr->f->setitem(value, item, ret) == -1)) { - Py_DECREF(value); - goto done; - } - Py_DECREF(value); - - } - - if (i < count) { - PyErr_SetString(PyExc_ValueError, "iterator too short"); - goto done; - } - - /* - Realloc the data so that don't keep extra memory tied up - (assuming realloc is reasonably good about reusing space...) - */ - if (i==0) i = 1; - new_data = PyDataMem_RENEW(ret->data, i * elsize); - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); - goto done; - } - ret->data = new_data; - - done: - Py_XDECREF(iter); - Py_XDECREF(dtype); - if (PyErr_Occurred()) { - Py_XDECREF(ret); - return NULL; - } - return (PyObject *)ret; -} - -static PyObject * -array_fromiter(PyObject *ignored, PyObject *args, PyObject *keywds) -{ - PyObject *iter; - Py_ssize_t nin=-1; - static char *kwlist[] = {"iter", "dtype", "count", NULL}; - PyArray_Descr *descr=NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, - "OO&|" NPY_SSIZE_T_PYFMT, - kwlist, - &iter, - PyArray_DescrConverter, &descr, - &nin)) { - return NULL; - } - - return PyArray_FromIter(iter, descr, (intp)nin); -} - - -/*OBJECT_API*/ -static PyObject * -PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, - intp count, intp offset) -{ - PyArrayObject *ret; - char *data; - Py_ssize_t ts; - intp s, n; - int itemsize; - int write=1; - - - if (PyDataType_REFCHK(type)) { - PyErr_SetString(PyExc_ValueError, - "cannot create an OBJECT array from memory"\ - " buffer"); - Py_DECREF(type); - return NULL; - } - if (type->elsize == 0) { - PyErr_SetString(PyExc_ValueError, - "itemsize cannot be zero in type"); - Py_DECREF(type); - return NULL; - } - - if (buf->ob_type->tp_as_buffer == NULL || \ - (buf->ob_type->tp_as_buffer->bf_getwritebuffer == NULL && \ - buf->ob_type->tp_as_buffer->bf_getreadbuffer == NULL)) { - PyObject *newbuf; - newbuf = PyObject_GetAttrString(buf, "__buffer__"); - if (newbuf == NULL) {Py_DECREF(type); return NULL;} - buf = newbuf; - } - else {Py_INCREF(buf);} - - if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts)==-1) { - write = 0; - PyErr_Clear(); - if (PyObject_AsReadBuffer(buf, (void *)&data, &ts)==-1) { - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - } - - if ((offset < 0) || (offset >= ts)) { - PyErr_Format(PyExc_ValueError, - "offset must be positive and smaller than %" - INTP_FMT, (intp)ts); - } - - data += offset; - s = (intp)ts - offset; - n = (intp)count; - itemsize = type->elsize; - - if (n < 0 ) { - if (s % itemsize != 0) { - PyErr_SetString(PyExc_ValueError, - "buffer size must be a multiple"\ - " of element size"); - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - n = s/itemsize; - } else { - if (s < n*itemsize) { - PyErr_SetString(PyExc_ValueError, - "buffer is smaller than requested"\ - " size"); - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - } - - if ((ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - type, - 1, &n, - NULL, data, - DEFAULT, - NULL)) == NULL) { - Py_DECREF(buf); - return NULL; - } - - if (!write) ret->flags &= ~WRITEABLE; - - /* Store a reference for decref on deallocation */ - ret->base = buf; - 
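An illustrative caller of the routine being defined here (a sketch, not part of the patch): wrapping the bytes of a Python string, which only exposes a read buffer, yields a non-writeable array that keeps the buffer alive through ->base.

    PyObject *buf = PyString_FromString("abcdef");
    PyArray_Descr *descr = PyArray_DescrFromType(PyArray_USHORT);
    PyObject *arr;

    /* descr is stolen; count = -1 infers 3 elements from the 6 bytes,
       offset = 0.  WRITEABLE is cleared because only a read buffer
       is available. */
    arr = PyArray_FromBuffer(buf, descr, -1, 0);
    Py_DECREF(buf);            /* the array holds its own reference */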
PyArray_UpdateFlags(ret, ALIGNED); - return (PyObject *)ret; -} - -static PyObject * -array_frombuffer(PyObject *ignored, PyObject *args, PyObject *keywds) -{ - PyObject *obj=NULL; - Py_ssize_t nin=-1, offset=0; - static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL}; - PyArray_Descr *type=NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&" - NPY_SSIZE_T_PYFMT - NPY_SSIZE_T_PYFMT, kwlist, - &obj, - PyArray_DescrConverter, &type, - &nin, &offset)) { - return NULL; - } - if (type==NULL) - type = PyArray_DescrFromType(PyArray_DEFAULT); - - return PyArray_FromBuffer(obj, type, (intp)nin, (intp)offset); -} - -static PyObject * -array_concatenate(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyObject *a0; - int axis=0; - static char *kwlist[] = {"seq", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, - &a0, - PyArray_AxisConverter, &axis)) - return NULL; - return PyArray_Concatenate(a0, axis); -} - -static PyObject *array_innerproduct(PyObject *dummy, PyObject *args) { - PyObject *b0, *a0; - - if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) return NULL; - - return _ARET(PyArray_InnerProduct(a0, b0)); -} - -static PyObject *array_matrixproduct(PyObject *dummy, PyObject *args) { - PyObject *v, *a; - - if (!PyArg_ParseTuple(args, "OO", &a, &v)) return NULL; - - return _ARET(PyArray_MatrixProduct(a, v)); -} - -static PyObject *array_fastCopyAndTranspose(PyObject *dummy, PyObject *args) { - PyObject *a0; - - if (!PyArg_ParseTuple(args, "O", &a0)) return NULL; - - return _ARET(PyArray_CopyAndTranspose(a0)); -} - -static PyObject *array_correlate(PyObject *dummy, PyObject *args, PyObject *kwds) { - PyObject *shape, *a0; - int mode=0; - static char *kwlist[] = {"a", "v", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, - &a0, &shape, &mode)) return NULL; - - return PyArray_Correlate(a0, shape, mode); -} - - -/*MULTIARRAY_API - Arange, -*/ -static PyObject * -PyArray_Arange(double start, double stop, double step, int type_num) -{ - intp length; - PyObject *range; - PyArray_ArrFuncs *funcs; - PyObject *obj; - int ret; - - length = (intp ) ceil((stop - start)/step); - - if (length <= 0) { - length = 0; - return PyArray_New(&PyArray_Type, 1, &length, type_num, - NULL, NULL, 0, 0, NULL); - } - - range = PyArray_New(&PyArray_Type, 1, &length, type_num, - NULL, NULL, 0, 0, NULL); - if (range == NULL) return NULL; - - funcs = PyArray_DESCR(range)->f; - - /* place start in the buffer and the next value in the second position */ - /* if length > 2, then call the inner loop, otherwise stop */ - - obj = PyFloat_FromDouble(start); - ret = funcs->setitem(obj, PyArray_DATA(range), (PyArrayObject *)range); - Py_DECREF(obj); - if (ret < 0) goto fail; - if (length == 1) return range; - - obj = PyFloat_FromDouble(start + step); - ret = funcs->setitem(obj, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), - (PyArrayObject *)range); - Py_DECREF(obj); - if (ret < 0) goto fail; - if (length == 2) return range; - - if (!funcs->fill) { - PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); - Py_DECREF(range); - return NULL; - } - funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) goto fail; - - return range; - - fail: - Py_DECREF(range); - return NULL; -} - -/* the formula is - len = (intp) ceil((start - stop) / step); -*/ -static intp -_calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx) -{ - intp len; - PyObject *val; - double value; - - *next = 
PyNumber_Subtract(stop, start); - if (!(*next)) { - if (PyTuple_Check(stop)) { - PyErr_Clear(); - PyErr_SetString(PyExc_TypeError, - "arange: scalar arguments expected "\ - "instead of a tuple."); - } - return -1; - } - val = PyNumber_TrueDivide(*next, step); - Py_DECREF(*next); *next=NULL; - if (!val) return -1; - if (cmplx && PyComplex_Check(val)) { - value = PyComplex_RealAsDouble(val); - if (error_converting(value)) {Py_DECREF(val); return -1;} - len = (intp) ceil(value); - value = PyComplex_ImagAsDouble(val); - Py_DECREF(val); - if (error_converting(value)) return -1; - len = MIN(len, (intp) ceil(value)); - } - else { - value = PyFloat_AsDouble(val); - Py_DECREF(val); - if (error_converting(value)) return -1; - len = (intp) ceil(value); - } - - if (len > 0) { - *next = PyNumber_Add(start, step); - if (!next) return -1; - } - return len; -} - -/* this doesn't change the references */ -/*MULTIARRAY_API - ArangeObj, -*/ -static PyObject * -PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr *dtype) -{ - PyObject *range; - PyArray_ArrFuncs *funcs; - PyObject *next; - intp length; - PyArray_Descr *native=NULL; - int swap; - - if (!dtype) { - PyArray_Descr *deftype; - PyArray_Descr *newtype; - /* intentionally made to be PyArray_LONG default */ - deftype = PyArray_DescrFromType(PyArray_LONG); - newtype = PyArray_DescrFromObject(start, deftype); - Py_DECREF(deftype); - deftype = newtype; - if (stop && stop != Py_None) { - newtype = PyArray_DescrFromObject(stop, deftype); - Py_DECREF(deftype); - deftype = newtype; - } - if (step && step != Py_None) { - newtype = PyArray_DescrFromObject(step, deftype); - Py_DECREF(deftype); - deftype = newtype; - } - dtype = deftype; - } - else Py_INCREF(dtype); - - if (!step || step == Py_None) { - step = PyInt_FromLong(1); - } - else Py_XINCREF(step); - - if (!stop || stop == Py_None) { - stop = start; - start = PyInt_FromLong(0); - } - else Py_INCREF(start); - - /* calculate the length and next = start + step*/ - length = _calc_length(start, stop, step, &next, - PyTypeNum_ISCOMPLEX(dtype->type_num)); - - if (PyErr_Occurred()) {Py_DECREF(dtype); goto fail;} - if (length <= 0) { - length = 0; - range = PyArray_SimpleNewFromDescr(1, &length, dtype); - Py_DECREF(step); Py_DECREF(start); return range; - } - - /* If dtype is not in native byte-order then get native-byte - order version. And then swap on the way out. 
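As implemented above, the output length is ceil((stop - start)/step); a sketch of the plain-C entry point producing the equivalent of arange(0.0, 1.0, 0.25) (illustrative only):

    /* length = ceil((1.0 - 0.0) / 0.25) = 4 -> [0.0, 0.25, 0.5, 0.75] */
    PyObject *r = PyArray_Arange(0.0, 1.0, 0.25, PyArray_DOUBLE);
    if (r == NULL) {
        /* error already set (e.g. no fill function for the type) */
    }

PyArray_ArangeObj, being defined here, generalizes this to arbitrary Python scalars and non-native dtypes by filling in native byte order and byte-swapping on the way out.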
- */ - if (!PyArray_ISNBO(dtype->byteorder)) { - native = PyArray_DescrNewByteorder(dtype, PyArray_NATBYTE); - swap = 1; - } - else { - native = dtype; - swap = 0; - } - - range = PyArray_SimpleNewFromDescr(1, &length, native); - if (range == NULL) goto fail; - - funcs = PyArray_DESCR(range)->f; - - /* place start in the buffer and the next value in the second position */ - /* if length > 2, then call the inner loop, otherwise stop */ - - if (funcs->setitem(start, PyArray_DATA(range), (PyArrayObject *)range) < 0) - goto fail; - if (length == 1) goto finish; - if (funcs->setitem(next, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), - (PyArrayObject *)range) < 0) goto fail; - if (length == 2) goto finish; - - if (!funcs->fill) { - PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); - Py_DECREF(range); - goto fail; - } - funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) goto fail; - - finish: - if (swap) { - PyObject *new; - new = PyArray_Byteswap((PyArrayObject *)range, 1); - Py_DECREF(new); - Py_DECREF(PyArray_DESCR(range)); - PyArray_DESCR(range) = dtype; /* steals the reference */ - } - - Py_DECREF(start); - Py_DECREF(step); - Py_DECREF(next); - return range; - - fail: - Py_DECREF(start); - Py_DECREF(step); - Py_XDECREF(next); - return NULL; -} - -static PyObject * -array_arange(PyObject *ignored, PyObject *args, PyObject *kws) { - PyObject *o_start=NULL, *o_stop=NULL, *o_step=NULL; - static char *kwd[]= {"start", "stop", "step", "dtype", NULL}; - PyArray_Descr *typecode=NULL; - - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&", kwd, &o_start, - &o_stop, &o_step, - PyArray_DescrConverter2, - &typecode)) - return NULL; - - return PyArray_ArangeObj(o_start, o_stop, o_step, typecode); -} - -/* - Included at the very first so not auto-grabbed and thus not - labeled. 
-*/ -static unsigned int -PyArray_GetNDArrayCVersion(void) -{ - return (unsigned int)NPY_VERSION; -} - -static PyObject * -array__get_ndarray_c_version(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) return NULL; - - return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() ); -} - -static PyObject * -array__reconstruct(PyObject *dummy, PyObject *args) -{ - - PyObject *ret; - PyTypeObject *subtype; - PyArray_Dims shape = {NULL, 0}; - PyArray_Descr *dtype=NULL; - if (!PyArg_ParseTuple(args, "O!O&O&", &PyType_Type, &subtype, - PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &dtype)) - goto fail; - - if (!PyType_IsSubtype(subtype, &PyArray_Type)) { - PyErr_SetString(PyExc_TypeError, - "_reconstruct: First argument must be " \ - "a sub-type of ndarray"); - goto fail; - } - - ret = PyArray_NewFromDescr(subtype, dtype, - (int)shape.len, shape.ptr, - NULL, NULL, 0, NULL); - if (shape.ptr) PyDimMem_FREE(shape.ptr); - return ret; - - fail: - Py_XDECREF(dtype); - if (shape.ptr) PyDimMem_FREE(shape.ptr); - return NULL; -} - -static PyObject * -array_set_string_function(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyObject *op=NULL; - int repr=1; - static char *kwlist[] = {"f", "repr", NULL}; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi", kwlist, - &op, &repr)) return NULL; - - /* reset the array_repr function to built-in */ - if (op == Py_None) op = NULL; - if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); - return NULL; - } - PyArray_SetStringFunction(op, repr); - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_set_ops_function(PyObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *oldops=NULL; - - if ((oldops = PyArray_GetNumericOps())==NULL) return NULL; - - /* Should probably ensure that objects are at least callable */ - /* Leave this to the caller for now --- error will be raised - later when use is attempted - */ - if (kwds && PyArray_SetNumericOps(kwds) == -1) { - Py_DECREF(oldops); - PyErr_SetString(PyExc_ValueError, - "one or more objects not callable"); - return NULL; - } - return oldops; -} - - -/*MULTIARRAY_API - Where -*/ -static PyObject * -PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) -{ - PyArrayObject *arr; - PyObject *tup=NULL, *obj=NULL; - PyObject *ret=NULL, *zero=NULL; - - - arr = (PyArrayObject *)PyArray_FromAny(condition, NULL, 0, 0, 0, NULL); - if (arr == NULL) return NULL; - - if ((x==NULL) && (y==NULL)) { - ret = PyArray_Nonzero(arr); - Py_DECREF(arr); - return ret; - } - - if ((x==NULL) || (y==NULL)) { - Py_DECREF(arr); - PyErr_SetString(PyExc_ValueError, "either both or neither " - "of x and y should be given"); - return NULL; - } - - - zero = PyInt_FromLong((long) 0); - - obj = PyArray_EnsureAnyArray(PyArray_GenericBinaryFunction(arr, zero, - n_ops.not_equal)); - Py_DECREF(zero); - Py_DECREF(arr); - if (obj == NULL) return NULL; - - tup = Py_BuildValue("(OO)", y, x); - if (tup == NULL) {Py_DECREF(obj); return NULL;} - - ret = PyArray_Choose((PyAO *)obj, tup, NULL, NPY_RAISE); - - Py_DECREF(obj); - Py_DECREF(tup); - return ret; -} - -static PyObject * -array_where(PyObject *ignored, PyObject *args) -{ - PyObject *obj=NULL, *x=NULL, *y=NULL; - - if (!PyArg_ParseTuple(args, "O|OO", &obj, &x, &y)) return NULL; - - return PyArray_Where(obj, x, y); -} - -static PyObject * -array_lexsort(PyObject *ignored, PyObject *args, PyObject *kwds) -{ - int 
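PyArray_Where above is effectively choose(condition != 0, (y, x)) when both branches are given, and nonzero(condition) when neither is; a sketch of the two-branch form from C (illustrative only):

    PyObject *cond = Py_BuildValue("[iii]", 1, 0, 1);
    PyObject *x = PyFloat_FromDouble(1.0);
    PyObject *y = PyFloat_FromDouble(-1.0);

    /* Equivalent of where([1, 0, 1], 1.0, -1.0) -> array([ 1., -1.,  1.]) */
    PyObject *res = PyArray_Where(cond, x, y);

    Py_DECREF(cond);
    Py_DECREF(x);
    Py_DECREF(y);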
axis=-1; - PyObject *obj; - static char *kwlist[] = {"keys", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|i", kwlist, - &obj, &axis)) return NULL; - - return _ARET(PyArray_LexSort(obj, axis)); -} - -#undef _ARET - -static PyObject * -array_can_cast_safely(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyArray_Descr *d1=NULL; - PyArray_Descr *d2=NULL; - Bool ret; - PyObject *retobj; - static char *kwlist[] = {"from", "to", NULL}; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&", kwlist, - PyArray_DescrConverter, &d1, - PyArray_DescrConverter, &d2)) - return NULL; - if (d1 == NULL || d2 == NULL) { - PyErr_SetString(PyExc_TypeError, - "did not understand one of the types; " \ - "'None' not accepted"); - return NULL; - } - - ret = PyArray_CanCastTo(d1, d2); - retobj = (ret ? Py_True : Py_False); - Py_INCREF(retobj); - return retobj; -} - -static PyObject * -new_buffer(PyObject *dummy, PyObject *args) -{ - int size; - - if(!PyArg_ParseTuple(args, "i", &size)) - return NULL; - - return PyBuffer_New(size); -} - -static PyObject * -buffer_buffer(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyObject *obj; - Py_ssize_t offset=0, size=Py_END_OF_BUFFER, n; - void *unused; - static char *kwlist[] = {"object", "offset", "size", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|" NPY_SSIZE_T_PYFMT \ - NPY_SSIZE_T_PYFMT, kwlist, - &obj, &offset, &size)) - return NULL; - - - if (PyObject_AsWriteBuffer(obj, &unused, &n) < 0) { - PyErr_Clear(); - return PyBuffer_FromObject(obj, offset, size); - } - else - return PyBuffer_FromReadWriteObject(obj, offset, size); -} - -#ifndef _MSC_VER -#include -#include -jmp_buf _NPY_SIGSEGV_BUF; -static void -_SigSegv_Handler(int signum) -{ - longjmp(_NPY_SIGSEGV_BUF, signum); -} -#endif - -#define _test_code() { \ - test = *((char*)memptr); \ - if (!ro) { \ - *((char *)memptr) = '\0'; \ - *((char *)memptr) = test; \ - } \ - test = *((char*)memptr+size-1); \ - if (!ro) { \ - *((char *)memptr+size-1) = '\0'; \ - *((char *)memptr+size-1) = test; \ - } \ - } - -static PyObject * -as_buffer(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyObject *mem; - Py_ssize_t size; - Bool ro=FALSE, check=TRUE; - void *memptr; - static char *kwlist[] = {"mem", "size", "readonly", "check", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O" \ - NPY_SSIZE_T_PYFMT "|O&O&", kwlist, - &mem, &size, PyArray_BoolConverter, - &ro, PyArray_BoolConverter, - &check)) return NULL; - memptr = PyLong_AsVoidPtr(mem); - if (memptr == NULL) return NULL; - - if (check) { - /* Try to dereference the start and end of the memory region */ - /* Catch segfault and report error if it occurs */ - char test; - int err=0; -#ifdef _MSC_VER - __try { - _test_code(); - } - __except(1) { - err = 1; - } -#else - PyOS_sighandler_t _npy_sig_save; - _npy_sig_save = PyOS_setsig(SIGSEGV, _SigSegv_Handler); - - if (setjmp(_NPY_SIGSEGV_BUF) == 0) { - _test_code(); - } - else { - err = 1; - } - PyOS_setsig(SIGSEGV, _npy_sig_save); -#endif - if (err) { - PyErr_SetString(PyExc_ValueError, - "cannot use memory location as " \ - "a buffer."); - return NULL; - } - } - - - if (ro) { - return PyBuffer_FromMemory(memptr, size); - } - return PyBuffer_FromReadWriteMemory(memptr, size); -} - -#undef _test_code - -static PyObject * -format_longfloat(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyObject *obj; - unsigned int precision; - longdouble x; - static char *kwlist[] = {"x", "precision", NULL}; - static char repr[100]; - - if (!PyArg_ParseTupleAndKeywords(args, 
kwds, "OI", kwlist, - &obj, &precision)) { - return NULL; - } - if (!PyArray_IsScalar(obj, LongDouble)) { - PyErr_SetString(PyExc_TypeError, "not a longfloat"); - return NULL; - } - x = ((PyLongDoubleScalarObject *)obj)->obval; - if (precision > 70) { - precision = 70; - } - format_longdouble(repr, 100, x, precision); - return PyString_FromString(repr); -} - -static PyObject * -compare_chararrays(PyObject *dummy, PyObject *args, PyObject *kwds) -{ - PyObject *array; - PyObject *other; - PyArrayObject *newarr, *newoth; - int cmp_op; - Bool rstrip; - char *cmp_str; - Py_ssize_t strlen; - PyObject *res=NULL; - static char msg[] = \ - "comparision must be '==', '!=', '<', '>', '<=', '>='"; - - static char *kwlist[] = {"a1", "a2", "cmp", "rstrip", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOs#O&", kwlist, - &array, &other, - &cmp_str, &strlen, - PyArray_BoolConverter, &rstrip)) - return NULL; - - if (strlen < 1 || strlen > 2) goto err; - if (strlen > 1) { - if (cmp_str[1] != '=') goto err; - if (cmp_str[0] == '=') cmp_op = Py_EQ; - else if (cmp_str[0] == '!') cmp_op = Py_NE; - else if (cmp_str[0] == '<') cmp_op = Py_LE; - else if (cmp_str[0] == '>') cmp_op = Py_GE; - else goto err; - } - else { - if (cmp_str[0] == '<') cmp_op = Py_LT; - else if (cmp_str[0] == '>') cmp_op = Py_GT; - else goto err; - } - - newarr = (PyArrayObject *)PyArray_FROM_O(array); - if (newarr == NULL) return NULL; - newoth = (PyArrayObject *)PyArray_FROM_O(other); - if (newoth == NULL) { - Py_DECREF(newarr); - return NULL; - } - - if (PyArray_ISSTRING(newarr) && PyArray_ISSTRING(newoth)) { - res = _strings_richcompare(newarr, newoth, cmp_op, rstrip != 0); - } - else { - PyErr_SetString(PyExc_TypeError, - "comparison of non-string arrays"); - } - - Py_DECREF(newarr); - Py_DECREF(newoth); - return res; - - err: - PyErr_SetString(PyExc_ValueError, msg); - return NULL; -} - - -#ifndef NPY_NO_SIGNAL - -SIGJMP_BUF _NPY_SIGINT_BUF; - -/*MULTIARRAY_API - */ -static void -_PyArray_SigintHandler(int signum) -{ - PyOS_setsig(signum, SIG_IGN); - SIGLONGJMP(_NPY_SIGINT_BUF, signum); -} - -/*MULTIARRAY_API - */ -static void* -_PyArray_GetSigintBuf(void) -{ - return (void *)&_NPY_SIGINT_BUF; -} - -#else - -static void -_PyArray_SigintHandler(int signum) -{ - return; -} - -static void* -_PyArray_GetSigintBuf(void) -{ - return NULL; -} - -#endif - - -static PyObject * -test_interrupt(PyObject *self, PyObject *args) -{ - int kind=0; - int a = 0; - - if (!PyArg_ParseTuple(args, "|i", &kind)) return NULL; - - if (kind) { - Py_BEGIN_ALLOW_THREADS - while (a>=0) { - if ((a % 1000 == 0) && - PyOS_InterruptOccurred()) break; - a+=1; - } - Py_END_ALLOW_THREADS - } - else { - - NPY_SIGINT_ON - - while(a>=0) { - a += 1; - } - - NPY_SIGINT_OFF - } - - return PyInt_FromLong(a); -} - -static struct PyMethodDef array_module_methods[] = { - {"_get_ndarray_c_version", (PyCFunction)array__get_ndarray_c_version, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"_reconstruct", (PyCFunction)array__reconstruct, - METH_VARARGS, NULL}, - {"set_string_function", (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_numeric_ops", (PyCFunction)array_set_ops_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_typeDict", (PyCFunction)array_set_typeDict, - METH_VARARGS, NULL}, - - {"array", (PyCFunction)_array_fromobject, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"arange", (PyCFunction)array_arange, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"zeros", (PyCFunction)array_zeros, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"empty", 
(PyCFunction)array_empty, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"scalar", (PyCFunction)array_scalar, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"where", (PyCFunction)array_where, - METH_VARARGS, NULL}, - {"lexsort", (PyCFunction)array_lexsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"putmask", (PyCFunction)array_putmask, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromstring",(PyCFunction)array_fromstring, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"fromiter",(PyCFunction)array_fromiter, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"concatenate", (PyCFunction)array_concatenate, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"inner", (PyCFunction)array_innerproduct, - METH_VARARGS, NULL}, - {"dot", (PyCFunction)array_matrixproduct, - METH_VARARGS, NULL}, - {"_fastCopyAndTranspose", (PyCFunction)array_fastCopyAndTranspose, - METH_VARARGS, NULL}, - {"correlate", (PyCFunction)array_correlate, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"frombuffer", (PyCFunction)array_frombuffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromfile", (PyCFunction)array_fromfile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"can_cast", (PyCFunction)array_can_cast_safely, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"newbuffer", (PyCFunction)new_buffer, - METH_VARARGS, NULL}, - {"getbuffer", (PyCFunction)buffer_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"int_asbuffer", (PyCFunction)as_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"format_longfloat", (PyCFunction)format_longfloat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compare_chararrays", (PyCFunction)compare_chararrays, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"test_interrupt", (PyCFunction)test_interrupt, - METH_VARARGS, NULL}, - {NULL, NULL, 0} /* sentinel */ -}; - -#include "__multiarray_api.c" - -/* Establish scalar-type hierarchy */ - -/* For dual inheritance we need to make sure that the objects being - inherited from have the tp->mro object initialized. This is - not necessarily true for the basic type objects of Python (it is - checked for single inheritance but not dual in PyType_Ready). - - Thus, we call PyType_Ready on the standard Python Types, here. 
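Concretely, the dual inheritance set up below makes, for example, the double array scalar a subtype of the builtin float; a small check of that relationship (editorial sketch, assuming the multiarray module has been initialized):

    /* DUAL_INHERIT(Double, Float, Floating) puts PyFloat_Type into the
       bases of PyDoubleArrType_Type, so builtin-float checks such as
       PyFloat_Check succeed for float64 scalars. */
    if (PyType_IsSubtype(&PyDoubleArrType_Type, &PyFloat_Type)) {
        /* numpy double scalars can be used wherever a float is expected */
    }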
-*/ -static int -setup_scalartypes(PyObject *dict) -{ - - initialize_numeric_types(); - - if (PyType_Ready(&PyBool_Type) < 0) return -1; - if (PyType_Ready(&PyInt_Type) < 0) return -1; - if (PyType_Ready(&PyFloat_Type) < 0) return -1; - if (PyType_Ready(&PyComplex_Type) < 0) return -1; - if (PyType_Ready(&PyString_Type) < 0) return -1; - if (PyType_Ready(&PyUnicode_Type) < 0) return -1; - -#define SINGLE_INHERIT(child, parent) \ - Py##child##ArrType_Type.tp_base = &Py##parent##ArrType_Type; \ - if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \ - PyErr_Print(); \ - PyErr_Format(PyExc_SystemError, \ - "could not initialize Py%sArrType_Type", \ - #child); \ - return -1; \ - } - - if (PyType_Ready(&PyGenericArrType_Type) < 0) - return -1; - - SINGLE_INHERIT(Number, Generic); - SINGLE_INHERIT(Integer, Number); - SINGLE_INHERIT(Inexact, Number); - SINGLE_INHERIT(SignedInteger, Integer); - SINGLE_INHERIT(UnsignedInteger, Integer); - SINGLE_INHERIT(Floating, Inexact); - SINGLE_INHERIT(ComplexFloating, Inexact); - SINGLE_INHERIT(Flexible, Generic); - SINGLE_INHERIT(Character, Flexible); - -#define DUAL_INHERIT(child, parent1, parent2) \ - Py##child##ArrType_Type.tp_base = &Py##parent2##ArrType_Type; \ - Py##child##ArrType_Type.tp_bases = \ - Py_BuildValue("(OO)", &Py##parent2##ArrType_Type, \ - &Py##parent1##_Type); \ - if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \ - PyErr_Print(); \ - PyErr_Format(PyExc_SystemError, \ - "could not initialize Py%sArrType_Type", \ - #child); \ - return -1; \ - } \ - Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; - -#define DUAL_INHERIT2(child, parent1, parent2) \ - Py##child##ArrType_Type.tp_base = &Py##parent1##_Type; \ - Py##child##ArrType_Type.tp_bases = \ - Py_BuildValue("(OO)", &Py##parent1##_Type, \ - &Py##parent2##ArrType_Type); \ - Py##child##ArrType_Type.tp_richcompare = \ - Py##parent1##_Type.tp_richcompare; \ - Py##child##ArrType_Type.tp_compare = \ - Py##parent1##_Type.tp_compare; \ - Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; \ - if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \ - PyErr_Print(); \ - PyErr_Format(PyExc_SystemError, \ - "could not initialize Py%sArrType_Type", \ - #child); \ - return -1; \ - } - - SINGLE_INHERIT(Bool, Generic); - SINGLE_INHERIT(Byte, SignedInteger); - SINGLE_INHERIT(Short, SignedInteger); -#if SIZEOF_INT == SIZEOF_LONG - DUAL_INHERIT(Int, Int, SignedInteger); -#else - SINGLE_INHERIT(Int, SignedInteger); -#endif - DUAL_INHERIT(Long, Int, SignedInteger); -#if SIZEOF_LONGLONG == SIZEOF_LONG - DUAL_INHERIT(LongLong, Int, SignedInteger); -#else - SINGLE_INHERIT(LongLong, SignedInteger); -#endif - - /* fprintf(stderr, "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, PySignedIntegerArrType_Type.tp_free); - */ - SINGLE_INHERIT(UByte, UnsignedInteger); - SINGLE_INHERIT(UShort, UnsignedInteger); - SINGLE_INHERIT(UInt, UnsignedInteger); - SINGLE_INHERIT(ULong, UnsignedInteger); - SINGLE_INHERIT(ULongLong, UnsignedInteger); - - SINGLE_INHERIT(Float, Floating); - DUAL_INHERIT(Double, Float, Floating); - SINGLE_INHERIT(LongDouble, Floating); - - SINGLE_INHERIT(CFloat, ComplexFloating); - DUAL_INHERIT(CDouble, Complex, ComplexFloating); - SINGLE_INHERIT(CLongDouble, ComplexFloating); - - DUAL_INHERIT2(String, String, Character); - DUAL_INHERIT2(Unicode, Unicode, Character); - - SINGLE_INHERIT(Void, Flexible); - - SINGLE_INHERIT(Object, Generic); - - return 0; - -#undef SINGLE_INHERIT -#undef DUAL_INHERIT - - 
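For reference, the _ARRAY_API CObject published by the module initialization further below is what a client extension loads with the standard import_array() boilerplate; a sketch of that client side (not part of this patch; the module name "client" is hypothetical):

    #include "Python.h"
    #include "numpy/arrayobject.h"

    static PyMethodDef client_methods[] = {
        {NULL, NULL, 0, NULL}       /* sentinel */
    };

    PyMODINIT_FUNC
    initclient(void)
    {
        PyObject *m = Py_InitModule("client", client_methods);
        if (m == NULL) {
            return;
        }
        /* Fetches multiarray._ARRAY_API and fills in the PyArray_*
           function-pointer table for this extension. */
        import_array();
    }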
/* Clean up string and unicode array types so they act more like - strings -- get their tables from the standard types. - */ -} - -/* place a flag dictionary in d */ - -static void -set_flaginfo(PyObject *d) -{ - PyObject *s; - PyObject *newd; - - newd = PyDict_New(); - -#define _addnew(val, one) \ - PyDict_SetItemString(newd, #val, s=PyInt_FromLong(val)); \ - Py_DECREF(s); \ - PyDict_SetItemString(newd, #one, s=PyInt_FromLong(val)); \ - Py_DECREF(s) - -#define _addone(val) \ - PyDict_SetItemString(newd, #val, s=PyInt_FromLong(val)); \ - Py_DECREF(s) - - _addnew(OWNDATA, O); - _addnew(FORTRAN, F); - _addnew(CONTIGUOUS, C); - _addnew(ALIGNED, A); - _addnew(UPDATEIFCOPY, U); - _addnew(WRITEABLE, W); - _addone(C_CONTIGUOUS); - _addone(F_CONTIGUOUS); - -#undef _addone -#undef _addnew - - PyDict_SetItemString(d, "_flagdict", newd); - Py_DECREF(newd); - return; -} - - -/* Initialization function for the module */ - -PyMODINIT_FUNC initmultiarray(void) { - PyObject *m, *d, *s; - PyObject *c_api; - - /* Create the module and add the functions */ - m = Py_InitModule("multiarray", array_module_methods); - if (!m) goto err; - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - if (!d) goto err; - - PyArray_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArray_Type) < 0) - return; - - if (setup_scalartypes(d) < 0) goto err; - - PyArrayIter_Type.tp_iter = PyObject_SelfIter; - PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; - PyArrayMultiIter_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArrayIter_Type) < 0) - return; - - if (PyType_Ready(&PyArrayMapIter_Type) < 0) - return; - - if (PyType_Ready(&PyArrayMultiIter_Type) < 0) - return; - - PyArrayDescr_Type.tp_hash = (hashfunc)_Py_HashPointer; - if (PyType_Ready(&PyArrayDescr_Type) < 0) - return; - - if (PyType_Ready(&PyArrayFlags_Type) < 0) - return; - - c_api = PyCObject_FromVoidPtr((void *)PyArray_API, NULL); - PyDict_SetItemString(d, "_ARRAY_API", c_api); - Py_DECREF(c_api); - if (PyErr_Occurred()) goto err; - - MultiArrayError = PyString_FromString ("multiarray.error"); - PyDict_SetItemString (d, "error", MultiArrayError); - - s = PyString_FromString("3.0"); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - -#define ADDCONST(NAME) \ - s = PyInt_FromLong(NPY_##NAME); \ - PyDict_SetItemString(d, #NAME, s); \ - Py_DECREF(s) - - - ADDCONST(ALLOW_THREADS); - ADDCONST(BUFSIZE); - ADDCONST(CLIP); - - ADDCONST(ITEM_HASOBJECT); - ADDCONST(LIST_PICKLE); - ADDCONST(ITEM_IS_POINTER); - ADDCONST(NEEDS_INIT); - ADDCONST(NEEDS_PYAPI); - ADDCONST(USE_GETITEM); - ADDCONST(USE_SETITEM); - - ADDCONST(RAISE); - ADDCONST(WRAP); - ADDCONST(MAXDIMS); -#undef ADDCONST - - Py_INCREF(&PyArray_Type); - PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type); - Py_INCREF(&PyArrayIter_Type); - PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type); - Py_INCREF(&PyArrayMultiIter_Type); - PyDict_SetItemString(d, "broadcast", - (PyObject *)&PyArrayMultiIter_Type); - Py_INCREF(&PyArrayDescr_Type); - PyDict_SetItemString(d, "dtype", (PyObject *)&PyArrayDescr_Type); - - Py_INCREF(&PyArrayFlags_Type); - PyDict_SetItemString(d, "flagsobj", (PyObject *)&PyArrayFlags_Type); - - set_flaginfo(d); - - if (set_typeinfo(d) != 0) goto err; - return; - - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - return; -} diff --git a/numpy/core/src/scalarmathmodule.c.src b/numpy/core/src/scalarmathmodule.c.src deleted file mode 100644 index 8cb77e0b4..000000000 --- 
a/numpy/core/src/scalarmathmodule.c.src +++ /dev/null @@ -1,1249 +0,0 @@ -/* -*- c -*- */ - -/* The purpose of this module is to add faster math for array scalars - that does not go through the ufunc machinery - - but still supports error-modes. -*/ - -#include "Python.h" -#include "numpy/noprefix.h" -#include "numpy/ufuncobject.h" -#include "numpy/arrayscalars.h" - - -/** numarray adapted routines.... **/ - -static int ulonglong_overflow(ulonglong a, ulonglong b) -{ - ulonglong ah, al, bh, bl, w, x, y, z; - -#if SIZEOF_LONGLONG == 64 - ah = (a >> 32); - al = (a & 0xFFFFFFFFL); - bh = (b >> 32); - bl = (b & 0xFFFFFFFFL); -#elif SIZEOF_LONGLONG == 128 - ah = (a >> 64); - al = (a & 0xFFFFFFFFFFFFFFFFL); - bh = (b >> 64); - bl = (b & 0xFFFFFFFFFFFFFFFFL); -#else - ah = al = bh = bl = 0; -#endif - - /* 128-bit product: z*2**64 + (x+y)*2**32 + w */ - w = al*bl; - x = bh*al; - y = ah*bl; - z = ah*bh; - - /* *c = ((x + y)<<32) + w; */ -#if SIZEOF_LONGLONG == 64 - return z || (x>>32) || (y>>32) || - (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 32); -#elif SIZEOF_LONGLONG == 128 - return z || (x>>64) || (y>>64) || - (((x & 0xFFFFFFFFFFFFFFFFL) + (y & 0xFFFFFFFFFFFFFFFFL) + (w >> 64)) >> 64); -#else - return 0; -#endif - -} - -static int slonglong_overflow(longlong a0, longlong b0) -{ - ulonglong a, b; - ulonglong ah, al, bh, bl, w, x, y, z; - - /* Convert to non-negative quantities */ - if (a0 < 0) { a = -a0; } else { a = a0; } - if (b0 < 0) { b = -b0; } else { b = b0; } - - -#if SIZEOF_LONGLONG == 64 - ah = (a >> 32); - al = (a & 0xFFFFFFFFL); - bh = (b >> 32); - bl = (b & 0xFFFFFFFFL); -#elif SIZEOF_LONGLONG == 128 - ah = (a >> 64); - al = (a & 0xFFFFFFFFFFFFFFFFL); - bh = (b >> 64); - bl = (b & 0xFFFFFFFFFFFFFFFFL); -#else - ah = al = bh = bl = 0; -#endif - - w = al*bl; - x = bh*al; - y = ah*bl; - z = ah*bh; - - /* - ulonglong c = ((x + y)<<32) + w; - if ((a0 < 0) ^ (b0 < 0)) - *c = -c; - else - *c = c - */ - -#if SIZEOF_LONGLONG == 64 - return z || (x>>31) || (y>>31) || - (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 31); -#elif SIZEOF_LONGLONG == 128 - return z || (x>>63) || (y>>63) || - (((x & 0xFFFFFFFFFFFFFFFFL) + (y & 0xFFFFFFFFFFFFFFFFL) + (w >> 64)) >> 63); -#else - return 0; -#endif -} -/** end direct numarray code **/ - - -/* Basic operations: - - BINARY: - - add, subtract, multiply, divide, remainder, divmod, power, - floor_divide, true_divide - - lshift, rshift, and, or, xor (integers only) - - UNARY: - - negative, positive, absolute, nonzero, invert, int, long, float, oct, hex - -*/ - -/**begin repeat - #name=byte,short,int,long,longlong# -**/ -static void -@name@_ctype_add(@name@ a, @name@ b, @name@ *out) { - *out = a + b; - if ((*out^a) >= 0 || (*out^b) >= 0) - return; - generate_overflow_error(); - return; -} -static void -@name@_ctype_subtract(@name@ a, @name@ b, @name@ *out) { - *out = a - b; - if ((*out^a) >= 0 || (*out^~b) >= 0) - return; - generate_overflow_error(); - return; -} -/**end repeat**/ -/**begin repeat - #name=ubyte,ushort,uint,ulong,ulonglong# -**/ -static void -@name@_ctype_add(@name@ a, @name@ b, @name@ *out) { - *out = a + b; - if (*out >= a && *out >= b) - return; - generate_overflow_error(); - return; -} -static void -@name@_ctype_subtract(@name@ a, @name@ b, @name@ *out) { - *out = a - b; - if (a >= b) return; - generate_overflow_error(); - return; -} -/**end repeat**/ - -#ifndef SIZEOF_BYTE -#define SIZEOF_BYTE 1 -#endif - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong# - #big=(int,uint)*2,(longlong,ulonglong)*2# 
- #NAME=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG# - #SIZENAME=BYTE*2,SHORT*2,INT*2,LONG*2# - #SIZE=INT*4,LONGLONG*4# - #neg=(1,0)*4# -**/ -#if SIZEOF_@SIZE@ > SIZEOF_@SIZENAME@ -static void -@name@_ctype_multiply(@name@ a, @name@ b, @name@ *out) { - @big@ temp; - temp = ((@big@) a) * ((@big@) b); - *out = (@name@) temp; -#if @neg@ - if (temp > MAX_@NAME@ || temp < MIN_@NAME@) -#else - if (temp > MAX_@NAME@) -#endif - generate_overflow_error(); - return; -} -#endif -/**end repeat**/ - -/**begin repeat - #name=int,uint,long,ulong,longlong,ulonglong# - #SIZE=INT*2,LONG*2,LONGLONG*2# - #char=(s,u)*3# -**/ -#if SIZEOF_LONGLONG == SIZEOF_@SIZE@ -static void -@name@_ctype_multiply(@name@ a, @name@ b, @name@ *out) { - *out = a * b; - if (@char@longlong_overflow(a, b)) - generate_overflow_error(); - return; -} -#endif -/**end repeat**/ - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #neg=(1,0)*5# -**/ -static void -@name@_ctype_divide(@name@ a, @name@ b, @name@ *out) { - if (b == 0) { - generate_divbyzero_error(); - *out = 0; - } -#if @neg@ - else if (b == -1 && a < 0 && a == -a) { - generate_overflow_error(); - *out = a / b; - } -#endif - else { -#if @neg@ - @name@ tmp; - tmp = a / b; - if (((a > 0) != (b > 0)) && (a % b != 0)) tmp--; - *out = tmp; -#else - *out = a / b; -#endif - } -} -#define @name@_ctype_floor_divide @name@_ctype_divide -static void -@name@_ctype_remainder(@name@ a, @name@ b, @name@ *out) { - if (a == 0 || b == 0) { - if (b == 0) generate_divbyzero_error(); - *out = 0; - return; - } -#if @neg@ - else if ((a > 0) == (b > 0)) { - *out = a % b; - } - else { /* handled like Python does */ - *out = a % b; - if (*out) *out += b; - } -#else - *out = a % b; -#endif -} -/**end repeat**/ - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #otyp=float*4, double*6# -**/ -#define @name@_ctype_true_divide(a, b, out) \ - *(out) = ((@otyp@) (a)) / ((@otyp@) (b)); -/**end repeat**/ - -/* b will always be positive in this call */ -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #upc=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -**/ -static void -@name@_ctype_power(@name@ a, @name@ b, @name@ *out) { - @name@ temp, ix, mult; - /* code from Python's intobject.c, with overflow checking removed. */ - temp = a; - ix = 1; - while (b > 0) { - if (b & 1) { - @name@_ctype_multiply(ix, temp, &mult); - ix = mult; - if (temp == 0) - break; /* Avoid ix / 0 */ - } - b >>= 1; /* Shift exponent down by 1 bit */ - if (b==0) break; - /* Square the value of temp */ - @name@_ctype_multiply(temp, temp, &mult); - temp = mult; - } - *out = ix; -} -/**end repeat**/ - - - -/* QUESTION: Should we check for overflow / underflow in (l,r)shift? 
*/ - -/**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*5# - #oper=and*10, xor*10, or*10, lshift*10, rshift*10# - #op=&*10, ^*10, |*10, <<*10, >>*10# -**/ -#define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2) -/**end repeat**/ - -/**begin repeat - #name=float, double, longdouble# -**/ -static @name@ (*_basic_@name@_floor)(@name@); -static @name@ (*_basic_@name@_sqrt)(@name@); -static @name@ (*_basic_@name@_fmod)(@name@, @name@); -#define @name@_ctype_add(a, b, outp) *(outp) = a + b -#define @name@_ctype_subtract(a, b, outp) *(outp) = a - b -#define @name@_ctype_multiply(a, b, outp) *(outp) = a * b -#define @name@_ctype_divide(a, b, outp) *(outp) = a / b -#define @name@_ctype_true_divide @name@_ctype_divide -#define @name@_ctype_floor_divide(a, b, outp) \ - *(outp) = _basic_@name@_floor((a) / (b)) -/**end repeat**/ - -/**begin repeat - #name=cfloat, cdouble, clongdouble# - #rtype=float, double, longdouble# - #c=f,,l# -**/ -#define @name@_ctype_add(a, b, outp) do{ \ - (outp)->real = (a).real + (b).real; \ - (outp)->imag = (a).imag + (b).imag; \ - }while(0) -#define @name@_ctype_subtract(a, b, outp) do{ \ - (outp)->real = (a).real - (b).real; \ - (outp)->imag = (a).imag - (b).imag; \ - }while(0) -#define @name@_ctype_multiply(a, b, outp) do{ \ - (outp)->real = (a).real * (b).real - (a).imag * (b).imag; \ - (outp)->imag = (a).real * (b).imag + (a).imag * (b).real; \ - }while(0) -#define @name@_ctype_divide(a, b, outp) do{ \ - @rtype@ d = (b).real*(b).real + (b).imag*(b).imag; \ - (outp)->real = ((a).real*(b).real + (a).imag*(b).imag)/d; \ - (outp)->imag = ((a).imag*(b).real - (a).real*(b).imag)/d; \ - }while(0) -#define @name@_ctype_true_divide @name@_ctype_divide -#define @name@_ctype_floor_divide(a, b, outp) do { \ - (outp)->real = _basic_@rtype@_floor \ - (((a).real*(b).real + (a).imag*(b).imag) / \ - ((b).real*(b).real + (b).imag*(b).imag)); \ - (outp)->imag = 0; \ - }while(0) -/**end repeat**/ - -/**begin repeat - #name=float,double,longdouble# -**/ -static void -@name@_ctype_remainder(@name@ a, @name@ b, @name@ *out) { - @name@ mod; - mod = _basic_@name@_fmod(a, b); - if (mod && (((b < 0) != (mod < 0)))) mod += b; - *out = mod; -} -/**end repeat**/ - - - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# -**/ -#define @name@_ctype_divmod(a, b, out, out2) { \ - @name@_ctype_floor_divide(a, b, out); \ - @name@_ctype_remainder(a, b, out2); \ - } -/**end repeat**/ - -/**begin repeat - #name= float, double, longdouble# -**/ -static @name@ (*_basic_@name@_pow)(@name@ a, @name@ b); -static void -@name@_ctype_power(@name@ a, @name@ b, @name@ *out) { - *out = _basic_@name@_pow(a, b); -} -/**end repeat**/ - -/**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# - #uns=(1,0)*5,0*3# -**/ -static void -@name@_ctype_negative(@name@ a, @name@ *out) -{ -#if @uns@ - generate_overflow_error(); -#endif - *out = -a; -} -/**end repeat**/ - - -/**begin repeat - #name= cfloat, cdouble, clongdouble# -**/ -static void -@name@_ctype_negative(@name@ a, @name@ *out) -{ - out->real = -a.real; - out->imag = -a.imag; -} -/**end repeat**/ - -/**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -**/ -static void -@name@_ctype_positive(@name@ a, @name@ *out) -{ - *out = a; -} -/**end repeat**/ - -/* Get the nc_powf, nc_pow, and nc_powl 
functions from - the data area of the power ufunc in umathmodule. -*/ - -/**begin repeat - #name=cfloat, cdouble, clongdouble# -**/ -static void -@name@_ctype_positive(@name@ a, @name@ *out) -{ - out->real = a.real; - out->imag = a.imag; -} -static void (*_basic_@name@_pow)(@name@ *, @name@ *, @name@ *); -static void -@name@_ctype_power(@name@ a, @name@ b, @name@ *out) -{ - _basic_@name@_pow(&a, &b, out); -} -/**end repeat**/ - - -/**begin repeat - #name=ubyte, ushort, uint, ulong, ulonglong# -**/ -#define @name@_ctype_absolute @name@_ctype_positive -/**end repeat**/ - - -/**begin repeat - #name=byte, short, int, long, longlong, float, double, longdouble# -**/ -static void -@name@_ctype_absolute(@name@ a, @name@ *out) -{ - *out = (a < 0 ? -a : a); -} -/**end repeat**/ - -/**begin repeat - #name= cfloat, cdouble, clongdouble# - #rname= float, double, longdouble# -**/ -static void -@name@_ctype_absolute(@name@ a, @rname@ *out) -{ - *out = _basic_@rname@_sqrt(a.real*a.real + a.imag*a.imag); -} -/**end repeat**/ - -/**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# -**/ -#define @name@_ctype_invert(a, out) *(out) = ~a; -/**end repeat**/ - -/*** END OF BASIC CODE **/ - - -/* The general strategy for commutative binary operators is to - - 1) Convert the types to the common type if both are scalars (0 return) - 2) If both are not scalars use ufunc machinery (-2 return) - 3) If both are scalars but cannot be cast to the right type - return NotImplmented (-1 return) - - 4) Perform the function on the C-type. - 5) If an error condition occurred, check to see - what the current error-handling is and handle the error. - - 6) Construct and return the output scalar. -*/ - - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #Name=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - #NAME=BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# -**/ - -static int -_@name@_convert_to_ctype(PyObject *a, @name@ *arg1) -{ - PyObject *temp; - - if (PyArray_IsScalar(a, @Name@)) { - *arg1 = PyArrayScalar_VAL(a, @Name@); - return 0; - } - else if (PyArray_IsScalar(a, Generic)) { - PyArray_Descr *descr1; - int ret; - if (!PyArray_IsScalar(a, Number)) return -1; - descr1 = PyArray_DescrFromTypeObject((PyObject *)(a->ob_type)); - if (PyArray_CanCastSafely(descr1->type_num, PyArray_@NAME@)) { - PyArray_CastScalarDirect(a, descr1, arg1, PyArray_@NAME@); - ret = 0; - } - else ret = -1; - Py_DECREF(descr1); - return ret; - } - else if ((temp = PyArray_ScalarFromObject(a)) != NULL) { - int retval; - retval = _@name@_convert_to_ctype(temp, arg1); - Py_DECREF(temp); - return retval; - } - return -2; -} - -static int -_@name@_convert2_to_ctypes(PyObject *a, @name@ *arg1, - PyObject *b, @name@ *arg2) -{ - int ret; - ret = _@name@_convert_to_ctype(a, arg1); - if (ret < 0) return ret; - ret = _@name@_convert_to_ctype(b, arg2); - if (ret < 0) return ret; - return 0; -} - -/**end repeat**/ - -/**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*13, (float, double, longdouble, cfloat, cdouble, clongdouble)*6, (float, double, longdouble)*2# - #Name=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong)*13, (Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*6, (Float, Double, 
LongDouble)*2# - #oper=add*10, subtract*10, multiply*10, divide*10, remainder*10, divmod*10, floor_divide*10, lshift*10, rshift*10, and*10, or*10, xor*10, true_divide*10, add*6, subtract*6, multiply*6, divide*6, floor_divide*6, true_divide*6, divmod*3, remainder*3# - #fperr=1*70,0*50,1*52# - #twoout=0*50,1*10,0*106,1*3,0*3# - #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*12, float*4, double*6, (float, double, longdouble, cfloat, cdouble, clongdouble)*6, (float, double, longdouble)*2# - #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong)*12, Float*4, Double*6, (Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*6, (Float, Double, LongDouble)*2# -**/ - -static PyObject * -@name@_@oper@(PyObject *a, PyObject *b) -{ - PyObject *ret; - @name@ arg1, arg2; - @otyp@ out; -#if @twoout@ - @otyp@ out2; - PyObject *obj; -#endif - -#if @fperr@ - int retstatus; - int first; -#endif - - switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: /* one of them can't be cast safely - must be mixed-types*/ - return PyArray_Type.tp_as_number->nb_@oper@(a,b); - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); - } - -#if @fperr@ - PyUFunc_clearfperr(); -#endif - - /* here we do the actual calculation with arg1 and arg2 */ - /* as a function call. */ -#if @twoout@ - @name@_ctype_@oper@(arg1, arg2, &out, &out2); -#else - @name@_ctype_@oper@(arg1, arg2, &out); -#endif - -#if @fperr@ - /* Check status flag. If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); - if (retstatus) { - int bufsize, errmask; - PyObject *errobj; - if (PyUFunc_GetPyValues("@name@_scalars", &bufsize, &errmask, - &errobj) < 0) - return NULL; - first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) - return NULL; - } -#endif - - -#if @twoout@ - ret = PyTuple_New(2); - if (ret==NULL) return NULL; - obj = PyArrayScalar_New(@OName@); - if (obj == NULL) {Py_DECREF(ret); return NULL;} - PyArrayScalar_ASSIGN(obj, @OName@, out); - PyTuple_SET_ITEM(ret, 0, obj); - obj = PyArrayScalar_New(@OName@); - if (obj == NULL) {Py_DECREF(ret); return NULL;} - PyArrayScalar_ASSIGN(obj, @OName@, out2); - PyTuple_SET_ITEM(ret, 1, obj); -#else - ret = PyArrayScalar_New(@OName@); - if (ret==NULL) return NULL; - PyArrayScalar_ASSIGN(ret, @OName@, out); -#endif - return ret; -} -/**end repeat**/ - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float, double, longdouble, cfloat, cdouble, clongdouble# - #Name=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - #otyp=float*4, double*6, float, double, longdouble, cfloat, cdouble, clongdouble# - #OName=Float*4, Double*6, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - #isint=(1,0)*5,0*6# - #cmplx=0*13,1*3# -**/ - -static PyObject * -@name@_power(PyObject *a, PyObject *b, PyObject *c) -{ - PyObject *ret; - @name@ arg1, arg2; - int retstatus; - int first; - -#if @cmplx@ - @name@ out = {0,0}; - @otyp@ out1; - out1.real = out.imag = 0; -#else - @name@ out = 0; - @otyp@ out1=0; -#endif - - switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: /* can't cast both safely - mixed-types? 
*/ - return PyArray_Type.tp_as_number->nb_power(a,b,NULL); - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_power(a,b,NULL); - } - - PyUFunc_clearfperr(); - - /* here we do the actual calculation with arg1 and arg2 */ - /* as a function call. */ -#if @cmplx@ - if (arg2.real == 0 && arg1.real == 0) { - out1.real = out.real = 1; - out1.imag = out.imag = 0; - } -#else - if (arg2 == 0) { - out1 = out = 1; - } -#endif -#if @isint@ - else if (arg2 < 0) { - @name@_ctype_power(arg1, -arg2, &out); - out1 = (@otyp@) (1.0 / out); - } -#endif - else { - @name@_ctype_power(arg1, arg2, &out); - } - - /* Check status flag. If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); - if (retstatus) { - int bufsize, errmask; - PyObject *errobj; - if (PyUFunc_GetPyValues("@name@_scalars", &bufsize, &errmask, - &errobj) < 0) - return NULL; - first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) - return NULL; - } - -#if @isint@ - if (arg2 < 0) { - ret = PyArrayScalar_New(@OName@); - if (ret==NULL) return NULL; - PyArrayScalar_ASSIGN(ret, @OName@, out1); - } - else { - ret = PyArrayScalar_New(@Name@); - if (ret==NULL) return NULL; - PyArrayScalar_ASSIGN(ret, @Name@, out); - } -#else - ret = PyArrayScalar_New(@Name@); - if (ret==NULL) return NULL; - PyArrayScalar_ASSIGN(ret, @Name@, out); -#endif - - return ret; -} -/**end repeat**/ - - -/**begin repeat - #name=(cfloat,cdouble,clongdouble)*2# - #oper=divmod*3,remainder*3# -**/ -#define @name@_@oper@ NULL -/**end repeat**/ - -/**begin repeat - #name=(float,double,longdouble,cfloat,cdouble,clongdouble)*5# - #oper=lshift*6, rshift*6, and*6, or*6, xor*6# -**/ -#define @name@_@oper@ NULL -/**end repeat**/ - - -/**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*3, byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,float,double,longdouble,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*2, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, Float, Double, LongDouble, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong# - #oper=negative*16, positive*16, absolute*16, invert*10# -**/ -static PyObject * -@name@_@oper@(PyObject *a) -{ - @name@ arg1; - @otyp@ out; - PyObject *ret; - - switch(_@name@_convert_to_ctype(a, &arg1)) { - case 0: - break; - case -1: /* can't cast both safely use different add function */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_@oper@(a); - } - - /* here we do the actual calculation with arg1 and arg2 */ - /* make it a function call. 
*/ - - @name@_ctype_@oper@(arg1, &out); - - ret = PyArrayScalar_New(@OName@); - PyArrayScalar_ASSIGN(ret, @OName@, out); - - return ret; -} -/**end repeat**/ - -/**begin repeat - #name=float,double,longdouble,cfloat,cdouble,clongdouble# -**/ -#define @name@_invert NULL -/**end repeat**/ - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #simp=1*13,0*3# -**/ -static int -@name@_nonzero(PyObject *a) -{ - int ret; - @name@ arg1; - - if (_@name@_convert_to_ctype(a, &arg1) < 0) { - if (PyErr_Occurred()) return -1; - return PyGenericArrType_Type.tp_as_number->nb_nonzero(a); - } - - /* here we do the actual calculation with arg1 and arg2 */ - /* make it a function call. */ - -#if @simp@ - ret = (arg1 != 0); -#else - ret = ((arg1.real != 0) || (arg1.imag != 0)); -#endif - - return ret; -} -/**end repeat**/ - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #Name=Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble# - #cmplx=,,,,,,,,,,,,,.real,.real,.real# - #sign=(signed,unsigned)*5,,,,,,# - #ctype=long*8,PY_LONG_LONG*2,double*6# - #realtyp=0*10,1*6# - #func=(PyLong_FromLong,PyLong_FromUnsignedLong)*4,PyLong_FromLongLong,PyLong_FromUnsignedLongLong,PyLong_FromDouble*6# -**/ -static PyObject * -@name@_int(PyObject *obj) -{ - @sign@ @ctype@ x= PyArrayScalar_VAL(obj, @Name@)@cmplx@; -#if @realtyp@ - double ix; - modf(x, &ix); - x = ix; -#endif - if(LONG_MIN < x && x < LONG_MAX) - return PyInt_FromLong(x); - return @func@(x); -} -/**end repeat**/ - -/**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble)*2# - #cmplx=(,,,,,,,,,,,,,.real,.real,.real)*2# - #which=long*16,float*16# - #func=(PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,PyLong_FromDouble*6,PyFloat_FromDouble*16# -**/ -static PyObject * -@name@_@which@(PyObject *obj) -{ - return @func@((PyArrayScalar_VAL(obj, @Name@))@cmplx@); -} -/**end repeat**/ - - -/**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - #oper=oct*16, hex*16# - #kind=(int*5, long*5, int, long*2, int, long*2)*2# - #cap=(Int*5, Long*5, Int, Long*2, Int, Long*2)*2# -**/ -static PyObject * -@name@_@oper@(PyObject *obj) -{ - PyObject *pyint; - pyint = @name@_@kind@(obj); - if (pyint == NULL) return NULL; - return Py@cap@_Type.tp_as_number->nb_@oper@(pyint); -} -/**end repeat**/ - - -/**begin repeat - #oper=le,ge,lt,gt,eq,ne# - #op=<=,>=,<,>,==,!=# -**/ -#define def_cmp_@oper@(arg1, arg2) (arg1 @op@ arg2) -#define cmplx_cmp_@oper@(arg1, arg2) ((arg1.real == arg2.real) ? 
\ - arg1.imag @op@ arg2.imag : \ - arg1.real @op@ arg2.real) -/**end repeat**/ - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #simp=def*13,cmplx*3# -**/ -static PyObject* -@name@_richcompare(PyObject *self, PyObject *other, int cmp_op) -{ - @name@ arg1, arg2; - int out=0; - - switch(_@name@_convert2_to_ctypes(self, &arg1, other, &arg2)) { - case 0: - break; - case -1: /* can't cast both safely use different add function */ - case -2: /* use ufunc */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); - } - - /* here we do the actual calculation with arg1 and arg2 */ - switch (cmp_op) { - case Py_EQ: - out = @simp@_cmp_eq(arg1, arg2); - break; - case Py_NE: - out = @simp@_cmp_ne(arg1, arg2); - break; - case Py_LE: - out = @simp@_cmp_le(arg1, arg2); - break; - case Py_GE: - out = @simp@_cmp_ge(arg1, arg2); - break; - case Py_LT: - out = @simp@_cmp_lt(arg1, arg2); - break; - case Py_GT: - out = @simp@_cmp_gt(arg1, arg2); - break; - } - - if (out) { - PyArrayScalar_RETURN_TRUE; - } - else { - PyArrayScalar_RETURN_FALSE; - } -} -/**end repeat**/ - - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# -**/ -static PyNumberMethods @name@_as_number = { - (binaryfunc)@name@_add, /*nb_add*/ - (binaryfunc)@name@_subtract, /*nb_subtract*/ - (binaryfunc)@name@_multiply, /*nb_multiply*/ - (binaryfunc)@name@_divide, /*nb_divide*/ - (binaryfunc)@name@_remainder, /*nb_remainder*/ - (binaryfunc)@name@_divmod, /*nb_divmod*/ - (ternaryfunc)@name@_power, /*nb_power*/ - (unaryfunc)@name@_negative, - (unaryfunc)@name@_positive, /*nb_pos*/ - (unaryfunc)@name@_absolute, /*nb_abs*/ - (inquiry)@name@_nonzero, /*nb_nonzero*/ - (unaryfunc)@name@_invert, /*nb_invert*/ - (binaryfunc)@name@_lshift, /*nb_lshift*/ - (binaryfunc)@name@_rshift, /*nb_rshift*/ - (binaryfunc)@name@_and, /*nb_and*/ - (binaryfunc)@name@_xor, /*nb_xor*/ - (binaryfunc)@name@_or, /*nb_or*/ - 0, /*nb_coerce*/ - (unaryfunc)@name@_int, /*nb_int*/ - (unaryfunc)@name@_long, /*nb_long*/ - (unaryfunc)@name@_float, /*nb_float*/ - (unaryfunc)@name@_oct, /*nb_oct*/ - (unaryfunc)@name@_hex, /*nb_hex*/ - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - 0, /*inplace_divide*/ - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)@name@_floor_divide, /*nb_floor_divide*/ - (binaryfunc)@name@_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ -#if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)NULL, /*nb_index*/ -#endif -}; -/**end repeat**/ - -static void *saved_tables_arrtype[9]; - -static void -add_scalarmath(void) -{ - /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #NAME=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - **/ -#if PY_VERSION_HEX >= 0x02050000 - @name@_as_number.nb_index = Py@NAME@ArrType_Type.tp_as_number->nb_index; -#endif - Py@NAME@ArrType_Type.tp_as_number = &(@name@_as_number); - Py@NAME@ArrType_Type.tp_richcompare = @name@_richcompare; - /**end repeat**/ - - saved_tables_arrtype[0] = PyLongArrType_Type.tp_as_number; - saved_tables_arrtype[1] = 
PyLongArrType_Type.tp_compare; - saved_tables_arrtype[2] = PyLongArrType_Type.tp_richcompare; - saved_tables_arrtype[3] = PyDoubleArrType_Type.tp_as_number; - saved_tables_arrtype[4] = PyDoubleArrType_Type.tp_compare; - saved_tables_arrtype[5] = PyDoubleArrType_Type.tp_richcompare; - saved_tables_arrtype[6] = PyCDoubleArrType_Type.tp_as_number; - saved_tables_arrtype[7] = PyCDoubleArrType_Type.tp_compare; - saved_tables_arrtype[8] = PyCDoubleArrType_Type.tp_richcompare; -} - -static int -get_functions(void) -{ - PyObject *mm, *obj; - void **funcdata; - char *signatures; - int i, j; - int ret = -1; - - /* Get the nc_pow functions */ - /* Get the pow functions */ - mm = PyImport_ImportModule("numpy.core.umath"); - if (mm == NULL) return -1; - - obj = PyObject_GetAttrString(mm, "power"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=3; j++;} - _basic_float_pow = funcdata[j]; - _basic_double_pow = funcdata[j+1]; - _basic_longdouble_pow = funcdata[j+2]; - _basic_cfloat_pow = funcdata[j+3]; - _basic_cdouble_pow = funcdata[j+4]; - _basic_clongdouble_pow = funcdata[j+5]; - Py_DECREF(obj); - - /* Get the floor functions */ - obj = PyObject_GetAttrString(mm, "floor"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=2; j++;} - _basic_float_floor = funcdata[j]; - _basic_double_floor = funcdata[j+1]; - _basic_longdouble_floor = funcdata[j+2]; - Py_DECREF(obj); - - /* Get the sqrt functions */ - obj = PyObject_GetAttrString(mm, "sqrt"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=2; j++;} - _basic_float_sqrt = funcdata[j]; - _basic_double_sqrt = funcdata[j+1]; - _basic_longdouble_sqrt = funcdata[j+2]; - Py_DECREF(obj); - - /* Get the fmod functions */ - obj = PyObject_GetAttrString(mm, "fmod"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=3; j++;} - _basic_float_fmod = funcdata[j]; - _basic_double_fmod = funcdata[j+1]; - _basic_longdouble_fmod = funcdata[j+2]; - Py_DECREF(obj); - return - - ret = 0; - fail: - Py_DECREF(mm); - return ret; -} - -static void *saved_tables[9]; - -char doc_alterpyscalars[] = ""; - -static PyObject * -alter_pyscalars(PyObject *dummy, PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); - if (obj == (PyObject *)(&PyInt_Type)) { - PyInt_Type.tp_as_number = PyLongArrType_Type.tp_as_number; - PyInt_Type.tp_compare = PyLongArrType_Type.tp_compare; - PyInt_Type.tp_richcompare = PyLongArrType_Type.tp_richcompare; - } - else if (obj == (PyObject *)(&PyFloat_Type)) { - PyFloat_Type.tp_as_number = PyDoubleArrType_Type.tp_as_number; - PyFloat_Type.tp_compare = PyDoubleArrType_Type.tp_compare; - PyFloat_Type.tp_richcompare = PyDoubleArrType_Type.tp_richcompare; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyComplex_Type.tp_as_number = PyCDoubleArrType_Type.tp_as_number; - PyComplex_Type.tp_compare = PyCDoubleArrType_Type.tp_compare; - PyComplex_Type.tp_richcompare = \ - PyCDoubleArrType_Type.tp_richcompare; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must 
be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -char doc_restorepyscalars[] = ""; -static PyObject * -restore_pyscalars(PyObject *dummy, PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); - if (obj == (PyObject *)(&PyInt_Type)) { - PyInt_Type.tp_as_number = saved_tables[0]; - PyInt_Type.tp_compare = saved_tables[1]; - PyInt_Type.tp_richcompare = saved_tables[2]; - } - else if (obj == (PyObject *)(&PyFloat_Type)) { - PyFloat_Type.tp_as_number = saved_tables[3]; - PyFloat_Type.tp_compare = saved_tables[4]; - PyFloat_Type.tp_richcompare = saved_tables[5]; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyComplex_Type.tp_as_number = saved_tables[6]; - PyComplex_Type.tp_compare = saved_tables[7]; - PyComplex_Type.tp_richcompare = saved_tables[8]; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -char doc_usepythonmath[] = ""; -static PyObject * -use_pythonmath(PyObject *dummy, PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); - if (obj == (PyObject *)(&PyInt_Type)) { - PyLongArrType_Type.tp_as_number = saved_tables[0]; - PyLongArrType_Type.tp_compare = saved_tables[1]; - PyLongArrType_Type.tp_richcompare = saved_tables[2]; - } - else if (obj == (PyObject *)(&PyFloat_Type)) { - PyDoubleArrType_Type.tp_as_number = saved_tables[3]; - PyDoubleArrType_Type.tp_compare = saved_tables[4]; - PyDoubleArrType_Type.tp_richcompare = saved_tables[5]; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyCDoubleArrType_Type.tp_as_number = saved_tables[6]; - PyCDoubleArrType_Type.tp_compare = saved_tables[7]; - PyCDoubleArrType_Type.tp_richcompare = saved_tables[8]; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -char doc_usescalarmath[] = ""; -static PyObject * -use_scalarmath(PyObject *dummy, PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); - if (obj == (PyObject *)(&PyInt_Type)) { - PyLongArrType_Type.tp_as_number = saved_tables_arrtype[0]; - PyLongArrType_Type.tp_compare = saved_tables_arrtype[1]; - PyLongArrType_Type.tp_richcompare = saved_tables_arrtype[2]; - } - else if (obj == (PyObject *)(&PyFloat_Type)) { - PyDoubleArrType_Type.tp_as_number = saved_tables_arrtype[3]; - PyDoubleArrType_Type.tp_compare = saved_tables_arrtype[4]; - PyDoubleArrType_Type.tp_richcompare = saved_tables_arrtype[5]; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyCDoubleArrType_Type.tp_as_number = saved_tables_arrtype[6]; - PyCDoubleArrType_Type.tp_compare = saved_tables_arrtype[7]; - PyCDoubleArrType_Type.tp_richcompare = saved_tables_arrtype[8]; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -static struct PyMethodDef methods[] = { - {"alter_pythonmath", (PyCFunction) alter_pyscalars, - METH_VARARGS, doc_alterpyscalars}, - {"restore_pythonmath", (PyCFunction) restore_pyscalars, - METH_VARARGS, doc_restorepyscalars}, - {"use_pythonmath", (PyCFunction) use_pythonmath, - METH_VARARGS, doc_usepythonmath}, - {"use_scalarmath", (PyCFunction) use_scalarmath, - METH_VARARGS, doc_usescalarmath}, 
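    /* Summary of the slot swapping performed by the entry points above
       (their doc strings are empty):

         alter_pythonmath   -- for each builtin type object passed in
                               (int, float or complex), point its
                               tp_as_number / tp_compare / tp_richcompare
                               slots at the matching array-scalar type's
                               current tables.
         restore_pythonmath -- restore the builtin's slots from
                               saved_tables[], captured in initscalarmath().
         use_pythonmath     -- point the matching array-scalar type's slots
                               at the builtin tables saved in saved_tables[].
         use_scalarmath     -- restore the array-scalar type's slots from
                               saved_tables_arrtype[], captured in
                               add_scalarmath().
     */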
- {NULL, NULL, 0} -}; - -PyMODINIT_FUNC initscalarmath(void) { - - Py_InitModule("scalarmath", methods); - - import_array(); - import_umath(); - - if (get_functions() < 0) return; - - add_scalarmath(); - - saved_tables[0] = PyInt_Type.tp_as_number; - saved_tables[1] = PyInt_Type.tp_compare; - saved_tables[2] = PyInt_Type.tp_richcompare; - saved_tables[3] = PyFloat_Type.tp_as_number; - saved_tables[4] = PyFloat_Type.tp_compare; - saved_tables[5] = PyFloat_Type.tp_richcompare; - saved_tables[6] = PyComplex_Type.tp_as_number; - saved_tables[7] = PyComplex_Type.tp_compare; - saved_tables[8] = PyComplex_Type.tp_richcompare; - - return; -} diff --git a/numpy/core/src/scalartypes.inc.src b/numpy/core/src/scalartypes.inc.src deleted file mode 100644 index 3a0ddbcce..000000000 --- a/numpy/core/src/scalartypes.inc.src +++ /dev/null @@ -1,2779 +0,0 @@ -/* -*- c -*- */ - -#ifndef _MULTIARRAYMODULE -#define _MULTIARRAYMODULE -#endif -#include "numpy/arrayscalars.h" - -static PyBoolScalarObject _PyArrayScalar_BoolValues[2] = { - {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, - {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, -}; - -/* Inheritance established later when tp_bases is set (or tp_base for - single inheritance) */ - -/**begin repeat - -#name=number, integer, signedinteger, unsignedinteger, inexact, floating, complexfloating, flexible, -character# -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, ComplexFloating, Flexible, Character# -*/ - -static PyTypeObject Py@NAME@ArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.@name@", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ -}; -/**end repeat**/ - -static void * -scalar_value(PyObject *scalar, PyArray_Descr *descr) -{ - int type_num; - int align; - intp memloc; - if (descr == NULL) { - descr = PyArray_DescrFromScalar(scalar); - type_num = descr->type_num; - Py_DECREF(descr); - } else { - type_num = descr->type_num; - } - switch (type_num) { -#define CASE(ut,lt) case NPY_##ut: return &(((Py##lt##ScalarObject *)scalar)->obval) - CASE(BOOL, Bool); - CASE(BYTE, Byte); - CASE(UBYTE, UByte); - CASE(SHORT, Short); - CASE(USHORT, UShort); - CASE(INT, Int); - CASE(UINT, UInt); - CASE(LONG, Long); - CASE(ULONG, ULong); - CASE(LONGLONG, LongLong); - CASE(ULONGLONG, ULongLong); - CASE(FLOAT, Float); - CASE(DOUBLE, Double); - CASE(LONGDOUBLE, LongDouble); - CASE(CFLOAT, CFloat); - CASE(CDOUBLE, CDouble); - CASE(CLONGDOUBLE, CLongDouble); - CASE(OBJECT, Object); -#undef CASE - case NPY_STRING: return (void *)PyString_AS_STRING(scalar); - case NPY_UNICODE: return (void *)PyUnicode_AS_DATA(scalar); - case NPY_VOID: return ((PyVoidScalarObject *)scalar)->obval; - } - - /* Must be a user-defined type --- check to see which - scalar it inherits from. 
*/ - -#define _CHK(cls) (PyObject_IsInstance(scalar, \ - (PyObject *)&Py##cls##ArrType_Type)) -#define _OBJ(lt) &(((Py##lt##ScalarObject *)scalar)->obval) -#define _IFCASE(cls) if _CHK(cls) return _OBJ(cls) - - if _CHK(Number) { - if _CHK(Integer) { - if _CHK(SignedInteger) { - _IFCASE(Byte); - _IFCASE(Short); - _IFCASE(Int); - _IFCASE(Long); - _IFCASE(LongLong); - } - else { /* Unsigned Integer */ - _IFCASE(UByte); - _IFCASE(UShort); - _IFCASE(UInt); - _IFCASE(ULong); - _IFCASE(ULongLong); - } - } - else { /* Inexact */ - if _CHK(Floating) { - _IFCASE(Float); - _IFCASE(Double); - _IFCASE(LongDouble); - } - else { /*ComplexFloating */ - _IFCASE(CFloat); - _IFCASE(CDouble); - _IFCASE(CLongDouble); - } - } - } - else if _CHK(Bool) return _OBJ(Bool); - else if _CHK(Flexible) { - if _CHK(String) return (void *)PyString_AS_STRING(scalar); - if _CHK(Unicode) return (void *)PyUnicode_AS_DATA(scalar); - if _CHK(Void) return ((PyVoidScalarObject *)scalar)->obval; - } - else _IFCASE(Object); - - - /* Use the alignment flag to figure out where the data begins - after a PyObject_HEAD - */ - memloc = (intp)scalar; - memloc += sizeof(PyObject); - /* now round-up to the nearest alignment value - */ - align = descr->alignment; - if (align > 1) memloc = ((memloc + align - 1)/align)*align; - return (void *)memloc; -#undef _IFCASE -#undef _OBJ -#undef _CHK -} - -/* no error checking is performed -- ctypeptr must be same type as scalar */ -/* in case of flexible type, the data is not copied - into ctypeptr which is expected to be a pointer to pointer */ -/*OBJECT_API - Convert to c-type -*/ -static void -PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) -{ - PyArray_Descr *typecode; - void *newptr; - typecode = PyArray_DescrFromScalar(scalar); - newptr = scalar_value(scalar, typecode); - - if (PyTypeNum_ISEXTENDED(typecode->type_num)) { - void **ct = (void **)ctypeptr; - *ct = newptr; - } else { - memcpy(ctypeptr, newptr, typecode->elsize); - } - Py_DECREF(typecode); - return; -} - -/* The output buffer must be large-enough to receive the value */ -/* Even for flexible types which is different from ScalarAsCtype - where only a reference for flexible types is returned -*/ - -/* This may not work right on narrow builds for NumPy unicode scalars. 
- */ - -/*OBJECT_API - Cast Scalar to c-type -*/ -static int -PyArray_CastScalarToCtype(PyObject *scalar, void *ctypeptr, - PyArray_Descr *outcode) -{ - PyArray_Descr* descr; - PyArray_VectorUnaryFunc* castfunc; - - descr = PyArray_DescrFromScalar(scalar); - castfunc = PyArray_GetCastFunc(descr, outcode->type_num); - if (castfunc == NULL) return -1; - if (PyTypeNum_ISEXTENDED(descr->type_num) || - PyTypeNum_ISEXTENDED(outcode->type_num)) { - PyArrayObject *ain, *aout; - - ain = (PyArrayObject *)PyArray_FromScalar(scalar, NULL); - if (ain == NULL) {Py_DECREF(descr); return -1;} - aout = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, - outcode, - 0, NULL, - NULL, ctypeptr, - CARRAY, NULL); - if (aout == NULL) {Py_DECREF(ain); return -1;} - castfunc(ain->data, aout->data, 1, ain, aout); - Py_DECREF(ain); - Py_DECREF(aout); - } - else { - castfunc(scalar_value(scalar, descr), ctypeptr, 1, NULL, NULL); - } - Py_DECREF(descr); - return 0; -} - -/*OBJECT_API - Cast Scalar to c-type -*/ -static int -PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr *indescr, - void *ctypeptr, int outtype) -{ - PyArray_VectorUnaryFunc* castfunc; - void *ptr; - castfunc = PyArray_GetCastFunc(indescr, outtype); - if (castfunc == NULL) return -1; - ptr = scalar_value(scalar, indescr); - castfunc(ptr, ctypeptr, 1, NULL, NULL); - return 0; -} - -/* 0-dim array from array-scalar object */ -/* always contains a copy of the data - unless outcode is NULL, it is of void type and the referrer does - not own it either. -*/ - -/* steals reference to outcode */ -/*OBJECT_API - Get 0-dim array from scalar -*/ -static PyObject * -PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) -{ - PyArray_Descr *typecode; - PyObject *r; - char *memptr; - PyObject *ret; - - /* convert to 0-dim array of scalar typecode */ - typecode = PyArray_DescrFromScalar(scalar); - if ((typecode->type_num == PyArray_VOID) && - !(((PyVoidScalarObject *)scalar)->flags & OWNDATA) && - outcode == NULL) { - r = PyArray_NewFromDescr(&PyArray_Type, - typecode, - 0, NULL, NULL, - ((PyVoidScalarObject *)scalar)->obval, - ((PyVoidScalarObject *)scalar)->flags, - NULL); - PyArray_BASE(r) = (PyObject *)scalar; - Py_INCREF(scalar); - return r; - } - r = PyArray_NewFromDescr(&PyArray_Type, - typecode, - 0, NULL, - NULL, NULL, 0, NULL); - if (r==NULL) {Py_XDECREF(outcode); return NULL;} - - if (PyDataType_FLAGCHK(typecode, NPY_USE_SETITEM)) { - if (typecode->f->setitem(scalar, PyArray_DATA(r), r) < 0) { - Py_XDECREF(outcode); Py_DECREF(r); - return NULL; - } - goto finish; - } - - memptr = scalar_value(scalar, typecode); - -#ifndef Py_UNICODE_WIDE - if (typecode->type_num == PyArray_UNICODE) { - PyUCS2Buffer_AsUCS4((Py_UNICODE *)memptr, - (PyArray_UCS4 *)PyArray_DATA(r), - PyUnicode_GET_SIZE(scalar), - PyArray_ITEMSIZE(r) >> 2); - } else -#endif - { - memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r)); - if (PyDataType_FLAGCHK(typecode, NPY_ITEM_HASOBJECT)) { - Py_INCREF(*((PyObject **)memptr)); - } - } - - finish: - if (outcode == NULL) return r; - - if (outcode->type_num == typecode->type_num) { - if (!PyTypeNum_ISEXTENDED(typecode->type_num) || - (outcode->elsize == typecode->elsize)) - return r; - } - - /* cast if necessary to desired output typecode */ - ret = PyArray_CastToType((PyArrayObject *)r, outcode, 0); - Py_DECREF(r); - return ret; -} - -/*OBJECT_API - Get an Array Scalar From a Python Object - Returns NULL if unsuccessful but error is only - set if another error occurred. Currently only Numeric-like - object supported. 
- */ -static PyObject * -PyArray_ScalarFromObject(PyObject *object) -{ - PyObject *ret=NULL; - if (PyArray_IsZeroDim(object)) { - return PyArray_ToScalar(PyArray_DATA(object), object); - } - if (PyInt_Check(object)) { - ret = PyArrayScalar_New(Long); - if (ret == NULL) return NULL; - PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object); - } - else if (PyFloat_Check(object)) { - ret = PyArrayScalar_New(Double); - if (ret == NULL) return NULL; - PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object); - } - else if (PyComplex_Check(object)) { - ret = PyArrayScalar_New(CDouble); - if (ret == NULL) return NULL; - PyArrayScalar_VAL(ret, CDouble).real = \ - ((PyComplexObject *)object)->cval.real; - PyArrayScalar_VAL(ret, CDouble).imag = \ - ((PyComplexObject *)object)->cval.imag; - } - else if (PyLong_Check(object)) { - longlong val; - val = PyLong_AsLongLong(object); - if (val==-1 && PyErr_Occurred()) { - PyErr_Clear(); - return NULL; - } - ret = PyArrayScalar_New(LongLong); - if (ret == NULL) return NULL; - PyArrayScalar_VAL(ret, LongLong) = val; - } - else if (PyBool_Check(object)) { - if (object == Py_True) { - PyArrayScalar_RETURN_TRUE; - } - else { - PyArrayScalar_RETURN_FALSE; - } - } - return ret; -} - - -static PyObject * -gentype_alloc(PyTypeObject *type, Py_ssize_t nitems) -{ - PyObject *obj; - const size_t size = _PyObject_VAR_SIZE(type, nitems+1); - - obj = (PyObject *)_pya_malloc(size); - memset(obj, 0, size); - if (type->tp_itemsize == 0) - PyObject_INIT(obj, type); - else - (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems); - return obj; -} - -static void -gentype_dealloc(PyObject *v) -{ - v->ob_type->tp_free(v); -} - - -static PyObject * -gentype_power(PyObject *m1, PyObject *m2, PyObject *m3) -{ - PyObject *arr, *ret, *arg2; - char *msg="unsupported operand type(s) for ** or pow()"; - - if (!PyArray_IsScalar(m1,Generic)) { - if (PyArray_Check(m1)) { - ret = m1->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); - } - else { - if (!PyArray_IsScalar(m2,Generic)) { - PyErr_SetString(PyExc_TypeError, msg); - return NULL; - } - arr = PyArray_FromScalar(m2, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(m1, arr, - Py_None); - Py_DECREF(arr); - } - return ret; - } - if (!PyArray_IsScalar(m2, Generic)) { - if (PyArray_Check(m2)) { - ret = m2->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); - } - else { - if (!PyArray_IsScalar(m1, Generic)) { - PyErr_SetString(PyExc_TypeError, msg); - return NULL; - } - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(arr, m2, - Py_None); - Py_DECREF(arr); - } - return ret; - } - arr=arg2=NULL; - arr = PyArray_FromScalar(m1, NULL); - arg2 = PyArray_FromScalar(m2, NULL); - if (arr == NULL || arg2 == NULL) { - Py_XDECREF(arr); Py_XDECREF(arg2); return NULL; - } - ret = arr->ob_type->tp_as_number->nb_power(arr, arg2, Py_None); - Py_DECREF(arr); - Py_DECREF(arg2); - return ret; -} - -static PyObject * -gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, - char *str) -{ - PyObject *arr, *meth, *ret; - - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; - meth = PyObject_GetAttrString(arr, str); - if (meth == NULL) {Py_DECREF(arr); return NULL;} - if (kwds == NULL) - ret = PyObject_CallObject(meth, args); - else - ret = PyObject_Call(meth, args, kwds); - Py_DECREF(meth); - Py_DECREF(arr); - if (ret && PyArray_Check(ret)) - return PyArray_Return((PyArrayObject *)ret); - else - return ret; -} - -/**begin 
repeat - -#name=add, subtract, divide, remainder, divmod, lshift, rshift, and, xor, or, floor_divide, true_divide# -#PYNAME=Add, Subtract, Divide, Remainder, Divmod, Lshift, Rshift, And, Xor, Or, FloorDivide, TrueDivide# -*/ - -static PyObject * -gentype_@name@(PyObject *m1, PyObject *m2) -{ - return PyArray_Type.tp_as_number->nb_@name@(m1, m2); -} -/**end repeat**/ - - -static PyObject * -gentype_multiply(PyObject *m1, PyObject *m2) -{ - PyObject *ret=NULL; - long repeat; - - if (!PyArray_IsScalar(m1, Generic) && - ((m1->ob_type->tp_as_number == NULL) || - (m1->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m2 to an int and try sequence - repeat */ - repeat = PyInt_AsLong(m2); - if (repeat == -1 && PyErr_Occurred()) return NULL; - ret = PySequence_Repeat(m1, (int) repeat); - } - else if (!PyArray_IsScalar(m2, Generic) && - ((m2->ob_type->tp_as_number == NULL) || - (m2->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m1 to an int and try sequence - repeat */ - repeat = PyInt_AsLong(m1); - if (repeat == -1 && PyErr_Occurred()) return NULL; - ret = PySequence_Repeat(m2, (int) repeat); - } - if (ret==NULL) { - PyErr_Clear(); /* no effect if not set */ - ret = PyArray_Type.tp_as_number->nb_multiply(m1, m2); - } - return ret; -} - -/**begin repeat - -#name=positive, negative, absolute, invert, int, long, float, oct, hex# -*/ - -static PyObject * -gentype_@name@(PyObject *m1) -{ - PyObject *arr, *ret; - - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_@name@(arr); - Py_DECREF(arr); - return ret; -} -/**end repeat**/ - -static int -gentype_nonzero_number(PyObject *m1) -{ - PyObject *arr; - int ret; - - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return -1; - ret = arr->ob_type->tp_as_number->nb_nonzero(arr); - Py_DECREF(arr); - return ret; -} - -static PyObject * -gentype_str(PyObject *self) -{ - PyArrayObject *arr; - PyObject *ret; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; - ret = PyObject_Str((PyObject *)arr); - Py_DECREF(arr); - return ret; -} - - -static PyObject * -gentype_repr(PyObject *self) -{ - PyArrayObject *arr; - PyObject *ret; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; - ret = PyObject_Str((PyObject *)arr); - Py_DECREF(arr); - return ret; -} - -static void -format_longdouble(char *buf, size_t buflen, longdouble val, - unsigned int precision) -{ - char *cp; - - PyOS_snprintf(buf, buflen, "%.*" LONGDOUBLE_FMT, precision, val); - cp = buf; - if (*cp == '-') - cp++; - for (; *cp != '\0'; cp++) { - if (!isdigit(Py_CHARMASK(*cp))) - break; - } - if (*cp == '\0') { - *cp++ = '.'; - *cp++ = '0'; - *cp++ = '\0'; - } -} - -/* over-ride repr and str of array-scalar strings and unicode to - remove NULL bytes and then call the corresponding functions - of string and unicode. 
- */ - -/**begin repeat -#name=string*2,unicode*2# -#form=(repr,str)*2# -#Name=String*2,Unicode*2# -#NAME=STRING*2,UNICODE*2# -#extra=AndSize*2,,# -#type=char*2, Py_UNICODE*2# -*/ -static PyObject * -@name@type_@form@(PyObject *self) -{ - const @type@ *dptr, *ip; - int len; - PyObject *new; - PyObject *ret; - - ip = dptr = Py@Name@_AS_@NAME@(self); - len = Py@Name@_GET_SIZE(self); - dptr += len-1; - while(len > 0 && *dptr-- == 0) len--; - new = Py@Name@_From@Name@@extra@(ip, len); - if (new == NULL) return PyString_FromString(""); - ret = Py@Name@_Type.tp_@form@(new); - Py_DECREF(new); - return ret; -} -/**end repeat**/ - - - -#if SIZEOF_LONGDOUBLE == SIZEOF_DOUBLE -#define PREC_REPR 17 -#define PREC_STR 17 -#else -#define PREC_REPR 21 -#define PREC_STR 21 -#endif - -static PyObject * -longdoubletype_repr(PyObject *self) -{ - static char buf[100]; - format_longdouble(buf, sizeof(buf), - ((PyLongDoubleScalarObject *)self)->obval, PREC_REPR); - return PyString_FromString(buf); -} - -static PyObject * -clongdoubletype_repr(PyObject *self) -{ - static char buf1[100]; - static char buf2[100]; - static char buf3[202]; - clongdouble x; - x = ((PyCLongDoubleScalarObject *)self)->obval; - format_longdouble(buf1, sizeof(buf1), x.real, PREC_REPR); - format_longdouble(buf2, sizeof(buf2), x.imag, PREC_REPR); - - snprintf(buf3, sizeof(buf3), "(%s+%sj)", buf1, buf2); - return PyString_FromString(buf3); -} - -#define longdoubletype_str longdoubletype_repr -#define clongdoubletype_str clongdoubletype_repr - -/** Could improve this with a PyLong_FromLongDouble(longdouble ldval) - but this would need some more work... -**/ - -/**begin repeat - -#name=(int, long, hex, oct, float)*2# -#KIND=(Long*4, Float)*2# -#char=,,,,,c*5# -#CHAR=,,,,,C*5# -#POST=,,,,,.real*5# -*/ -static PyObject * -@char@longdoubletype_@name@(PyObject *self) -{ - double dval; - PyObject *obj, *ret; - - dval = (double)(((Py@CHAR@LongDoubleScalarObject *)self)->obval)@POST@; - obj = Py@KIND@_FromDouble(dval); - ret = obj->ob_type->tp_as_number->nb_@name@(obj); - Py_DECREF(obj); - return ret; -} -/**end repeat**/ - - -static PyNumberMethods gentype_as_number = { - (binaryfunc)gentype_add, /*nb_add*/ - (binaryfunc)gentype_subtract, /*nb_subtract*/ - (binaryfunc)gentype_multiply, /*nb_multiply*/ - (binaryfunc)gentype_divide, /*nb_divide*/ - (binaryfunc)gentype_remainder, /*nb_remainder*/ - (binaryfunc)gentype_divmod, /*nb_divmod*/ - (ternaryfunc)gentype_power, /*nb_power*/ - (unaryfunc)gentype_negative, - (unaryfunc)gentype_positive, /*nb_pos*/ - (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ - (inquiry)gentype_nonzero_number, /*nb_nonzero*/ - (unaryfunc)gentype_invert, /*nb_invert*/ - (binaryfunc)gentype_lshift, /*nb_lshift*/ - (binaryfunc)gentype_rshift, /*nb_rshift*/ - (binaryfunc)gentype_and, /*nb_and*/ - (binaryfunc)gentype_xor, /*nb_xor*/ - (binaryfunc)gentype_or, /*nb_or*/ - 0, /*nb_coerce*/ - (unaryfunc)gentype_int, /*nb_int*/ - (unaryfunc)gentype_long, /*nb_long*/ - (unaryfunc)gentype_float, /*nb_float*/ - (unaryfunc)gentype_oct, /*nb_oct*/ - (unaryfunc)gentype_hex, /*nb_hex*/ - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - 0, /*inplace_divide*/ - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ - (binaryfunc)gentype_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ -#if PY_VERSION_HEX >= 
0x02050000 - (unaryfunc)NULL, /* nb_index */ -#endif -}; - - -static PyObject * -gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) -{ - - PyObject *arr, *ret; - - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_richcompare(arr, other, cmp_op); - Py_DECREF(arr); - return ret; -} - -static PyObject * -gentype_ndim_get(PyObject *self) -{ - return PyInt_FromLong(0); -} - -static PyObject * -gentype_flags_get(PyObject *self) -{ - return PyArray_NewFlagsObject(NULL); -} - -static PyObject * -voidtype_flags_get(PyVoidScalarObject *self) -{ - PyObject *flagobj; - flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; - ((PyArrayFlagsObject *)flagobj)->arr = NULL; - ((PyArrayFlagsObject *)flagobj)->flags = self->flags; - return flagobj; -} - -static PyObject * -voidtype_dtypedescr_get(PyVoidScalarObject *self) -{ - Py_INCREF(self->descr); - return (PyObject *)self->descr; -} - - -static PyObject * -gentype_data_get(PyObject *self) -{ - return PyBuffer_FromObject(self, 0, Py_END_OF_BUFFER); -} - - -static PyObject * -gentype_itemsize_get(PyObject *self) -{ - PyArray_Descr *typecode; - PyObject *ret; - int elsize; - - typecode = PyArray_DescrFromScalar(self); - elsize = typecode->elsize; -#ifndef Py_UNICODE_WIDE - if (typecode->type_num == NPY_UNICODE) { - elsize >>= 1; - } -#endif - ret = PyInt_FromLong((long) elsize); - Py_DECREF(typecode); - return ret; -} - -static PyObject * -gentype_size_get(PyObject *self) -{ - return PyInt_FromLong(1); -} - -static void -gentype_struct_free(void *ptr, void *arg) -{ - PyArrayInterface *arrif = (PyArrayInterface *)ptr; - Py_DECREF((PyObject *)arg); - Py_XDECREF(arrif->descr); - _pya_free(arrif->shape); - _pya_free(arrif); -} - -static PyObject * -gentype_struct_get(PyObject *self) -{ - PyArrayObject *arr; - PyArrayInterface *inter; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - inter->two = 2; - inter->nd = 0; - inter->flags = arr->flags; - inter->flags &= ~(UPDATEIFCOPY | OWNDATA); - inter->flags |= NPY_NOTSWAPPED; - inter->typekind = arr->descr->kind; - inter->itemsize = arr->descr->elsize; - inter->strides = NULL; - inter->shape = NULL; - inter->data = arr->data; - inter->descr = NULL; - - return PyCObject_FromVoidPtrAndDesc(inter, arr, gentype_struct_free); -} - -static PyObject * -gentype_priority_get(PyObject *self) -{ - return PyFloat_FromDouble(NPY_SCALAR_PRIORITY); -} - -static PyObject * -gentype_shape_get(PyObject *self) -{ - return PyTuple_New(0); -} - - -static PyObject * -gentype_interface_get(PyObject *self) -{ - PyArrayObject *arr; - PyObject *inter; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; - inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - Py_DECREF(arr); - return inter; -} - - - -static PyObject * -gentype_typedescr_get(PyObject *self) -{ - return (PyObject *)PyArray_DescrFromScalar(self); -} - - -static PyObject * -gentype_base_get(PyObject *self) -{ - Py_INCREF(Py_None); - return Py_None; -} - - -static PyArray_Descr * -_realdescr_fromcomplexscalar(PyObject *self, int *typenum) -{ - if (PyArray_IsScalar(self, CDouble)) { - *typenum = PyArray_CDOUBLE; - return PyArray_DescrFromType(PyArray_DOUBLE); - } - if (PyArray_IsScalar(self, CFloat)) { - *typenum = PyArray_CFLOAT; - return PyArray_DescrFromType(PyArray_FLOAT); - } - if (PyArray_IsScalar(self, CLongDouble)) { - *typenum = 
PyArray_CLONGDOUBLE; - return PyArray_DescrFromType(PyArray_LONGDOUBLE); - } - return NULL; -} - -static PyObject * -gentype_real_get(PyObject *self) -{ - PyArray_Descr *typecode; - PyObject *ret; - int typenum; - - if (PyArray_IsScalar(self, ComplexFloating)) { - void *ptr; - typecode = _realdescr_fromcomplexscalar(self, &typenum); - ptr = scalar_value(self, NULL); - ret = PyArray_Scalar(ptr, typecode, NULL); - Py_DECREF(typecode); - return ret; - } - else if (PyArray_IsScalar(self, Object)) { - PyObject *obj = ((PyObjectScalarObject *)self)->obval; - ret = PyObject_GetAttrString(obj, "real"); - if (ret != NULL) return ret; - PyErr_Clear(); - } - Py_INCREF(self); - return (PyObject *)self; -} - -static PyObject * -gentype_imag_get(PyObject *self) -{ - PyArray_Descr *typecode=NULL; - PyObject *ret; - int typenum; - - if (PyArray_IsScalar(self, ComplexFloating)) { - char *ptr; - typecode = _realdescr_fromcomplexscalar(self, &typenum); - ptr = (char *)scalar_value(self, NULL); - ret = PyArray_Scalar(ptr + typecode->elsize, - typecode, NULL); - } - else if (PyArray_IsScalar(self, Object)) { - PyObject *obj = ((PyObjectScalarObject *)self)->obval; - PyArray_Descr *newtype; - ret = PyObject_GetAttrString(obj, "imag"); - if (ret == NULL) { - PyErr_Clear(); - obj = PyInt_FromLong(0); - newtype = PyArray_DescrFromType(PyArray_OBJECT); - ret = PyArray_Scalar((char *)&obj, newtype, NULL); - Py_DECREF(newtype); - Py_DECREF(obj); - } - } - else { - char *temp; - int elsize; - typecode = PyArray_DescrFromScalar(self); - elsize = typecode->elsize; - temp = PyDataMem_NEW(elsize); - memset(temp, '\0', elsize); - ret = PyArray_Scalar(temp, typecode, NULL); - PyDataMem_FREE(temp); - } - - Py_XDECREF(typecode); - return ret; -} - -static PyObject * -gentype_flat_get(PyObject *self) -{ - PyObject *ret, *arr; - - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; - ret = PyArray_IterNew(arr); - Py_DECREF(arr); - return ret; -} - - -static PyObject * -gentype_transpose_get(PyObject *self) -{ - Py_INCREF(self); - return self; -} - - -static PyGetSetDef gentype_getsets[] = { - {"ndim", - (getter)gentype_ndim_get, - (setter) 0, - "number of array dimensions"}, - {"flags", - (getter)gentype_flags_get, - (setter)0, - "integer value of flags"}, - {"shape", - (getter)gentype_shape_get, - (setter)0, - "tuple of array dimensions"}, - {"strides", - (getter)gentype_shape_get, - (setter) 0, - "tuple of bytes steps in each dimension"}, - {"data", - (getter)gentype_data_get, - (setter) 0, - "pointer to start of data"}, - {"itemsize", - (getter)gentype_itemsize_get, - (setter)0, - "length of one element in bytes"}, - {"size", - (getter)gentype_size_get, - (setter)0, - "number of elements in the gentype"}, - {"nbytes", - (getter)gentype_itemsize_get, - (setter)0, - "length of item in bytes"}, - {"base", - (getter)gentype_base_get, - (setter)0, - "base object"}, - {"dtype", - (getter)gentype_typedescr_get, - NULL, - "get array data-descriptor"}, - {"real", - (getter)gentype_real_get, - (setter)0, - "real part of scalar"}, - {"imag", - (getter)gentype_imag_get, - (setter)0, - "imaginary part of scalar"}, - {"flat", - (getter)gentype_flat_get, - (setter)0, - "a 1-d view of scalar"}, - {"T", - (getter)gentype_transpose_get, - (setter)0, - "transpose"}, - {"__array_interface__", - (getter)gentype_interface_get, - NULL, - "Array protocol: Python side"}, - {"__array_struct__", - (getter)gentype_struct_get, - NULL, - "Array protocol: struct"}, - {"__array_priority__", - (getter)gentype_priority_get, - NULL, - "Array 
priority."}, - {NULL, NULL, NULL, NULL} /* Sentinel */ -}; - - -/* 0-dim array from scalar object */ - -static char doc_getarray[] = "sc.__array__(|type) return 0-dim array"; - -static PyObject * -gentype_getarray(PyObject *scalar, PyObject *args) -{ - PyArray_Descr *outcode=NULL; - PyObject *ret; - - if (!PyArg_ParseTuple(args, "|O&", &PyArray_DescrConverter, - &outcode)) return NULL; - ret = PyArray_FromScalar(scalar, outcode); - return ret; -} - -static char doc_sc_wraparray[] = "sc.__array_wrap__(obj) return scalar from array"; - -static PyObject * -gentype_wraparray(PyObject *scalar, PyObject *args) -{ - PyObject *arr; - - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument."); - return NULL; - } - arr = PyTuple_GET_ITEM(args, 0); - if (!PyArray_Check(arr)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; - } - - return PyArray_Scalar(PyArray_DATA(arr), PyArray_DESCR(arr), arr); -} - - -/**begin repeat - -#name=tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, transpose, newbyteorder# -*/ - -static PyObject * -gentype_@name@(PyObject *self, PyObject *args) -{ - return gentype_generic_method(self, args, NULL, "@name@"); -} -/**end repeat**/ - -static PyObject * -gentype_itemset(PyObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_ValueError, "array-scalars are immutable"); - return NULL; -} - -static PyObject * -gentype_squeeze(PyObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) return NULL; - Py_INCREF(self); - return self; -} - -static Py_ssize_t -gentype_getreadbuf(PyObject *, Py_ssize_t, void **); - -static PyObject * -gentype_byteswap(PyObject *self, PyObject *args) -{ - Bool inplace=FALSE; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) - return NULL; - - if (inplace) { - PyErr_SetString(PyExc_ValueError, - "cannot byteswap a scalar in-place"); - return NULL; - } - else { - /* get the data, copyswap it and pass it to a new Array scalar - */ - char *data; - int numbytes; - PyArray_Descr *descr; - PyObject *new; - char *newmem; - - numbytes = gentype_getreadbuf(self, 0, (void **)&data); - descr = PyArray_DescrFromScalar(self); - newmem = _pya_malloc(descr->elsize); - if (newmem == NULL) {Py_DECREF(descr); return PyErr_NoMemory();} - else memcpy(newmem, data, descr->elsize); - byte_swap_vector(newmem, 1, descr->elsize); - new = PyArray_Scalar(newmem, descr, NULL); - _pya_free(newmem); - Py_DECREF(descr); - return new; - } -} - - -/**begin repeat - -#name=take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, round, argmax, argmin, max, min, ptp, any, all, resize, reshape, choose# -*/ - -static PyObject * -gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds) -{ - return gentype_generic_method(self, args, kwds, "@name@"); -} -/**end repeat**/ - -static PyObject * -voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *ret; - - ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); - if (!ret) return ret; - if (PyArray_IsScalar(ret, Generic) && \ - (!PyArray_IsScalar(ret, Void))) { - PyArray_Descr *new; - void *ptr; - if (!PyArray_ISNBO(self->descr->byteorder)) { - new = PyArray_DescrFromScalar(ret); - ptr = scalar_value(ret, new); - byte_swap_vector(ptr, 1, new->elsize); - Py_DECREF(new); - } - } - return ret; -} - -static PyObject * 
-gentype_setfield(PyObject *self, PyObject *args, PyObject *kwds) -{ - - PyErr_SetString(PyExc_TypeError, - "Can't set fields in a non-void array scalar."); - return NULL; -} - -static PyObject * -voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_Descr *typecode; - int offset = 0; - PyObject *value, *src; - int mysize; - char *dptr; - static char *kwlist[] = {"value", "dtype", "offset", 0}; - - if ((self->flags & WRITEABLE) != WRITEABLE) { - PyErr_SetString(PyExc_RuntimeError, - "Can't write to memory"); - return NULL; - } - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, - &value, - PyArray_DescrConverter, - &typecode, &offset)) return NULL; - - mysize = self->ob_size; - - if (offset < 0 || (offset + typecode->elsize) > mysize) { - PyErr_Format(PyExc_ValueError, - "Need 0 <= offset <= %d for requested type " \ - "but received offset = %d", - mysize-typecode->elsize, offset); - Py_DECREF(typecode); - return NULL; - } - - dptr = self->obval + offset; - - if (typecode->type_num == PyArray_OBJECT) { - PyObject **temp; - Py_INCREF(value); - temp = (PyObject **)dptr; - Py_XDECREF(*temp); - memcpy(temp, &value, sizeof(PyObject *)); - } - else { - /* Copy data from value to correct place in dptr */ - src = PyArray_FromAny(value, typecode, 0, 0, CARRAY, NULL); - if (src == NULL) return NULL; - typecode->f->copyswap(dptr, PyArray_DATA(src), - !PyArray_ISNBO(self->descr->byteorder), - src); - Py_DECREF(src); - } - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -gentype_reduce(PyObject *self, PyObject *args) -{ - PyObject *ret=NULL, *obj=NULL, *mod=NULL; - const char *buffer; - Py_ssize_t buflen; - - /* Return a tuple of (callable object, arguments) */ - - ret = PyTuple_New(2); - if (ret == NULL) return NULL; - if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) { - Py_DECREF(ret); return NULL; - } - mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) return NULL; - obj = PyObject_GetAttrString(mod, "scalar"); - Py_DECREF(mod); - if (obj == NULL) return NULL; - PyTuple_SET_ITEM(ret, 0, obj); - obj = PyObject_GetAttrString((PyObject *)self, "dtype"); - if (PyArray_IsScalar(self, Object)) { - mod = ((PyObjectScalarObject *)self)->obval; - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("NO", obj, mod)); - } - else { -#ifndef Py_UNICODE_WIDE - /* We need to expand the buffer so that we always write - UCS4 to disk for pickle of unicode scalars. - - This could be in a unicode_reduce function, but - that would require re-factoring. 
- */ - int alloc=0; - char *tmp; - int newlen; - - if (PyArray_IsScalar(self, Unicode)) { - tmp = _pya_malloc(buflen*2); - if (tmp == NULL) { - Py_DECREF(ret); - return PyErr_NoMemory(); - } - alloc = 1; - newlen = PyUCS2Buffer_AsUCS4((Py_UNICODE *)buffer, - (PyArray_UCS4 *)tmp, - buflen / 2, buflen / 2); - buflen = newlen*4; - buffer = tmp; - } -#endif - mod = PyString_FromStringAndSize(buffer, buflen); - if (mod == NULL) { - Py_DECREF(ret); -#ifndef Py_UNICODE_WIDE - ret = NULL; - goto fail; -#else - return NULL; -#endif - } - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("NN", obj, mod)); -#ifndef Py_UNICODE_WIDE - fail: - if (alloc) _pya_free((char *)buffer); -#endif - } - return ret; -} - -/* ignores everything */ -static PyObject * -gentype_setstate(PyObject *self, PyObject *args) -{ - Py_INCREF(Py_None); - return (Py_None); -} - -static PyObject * -gentype_dump(PyObject *self, PyObject *args) -{ - PyObject *file=NULL; - int ret; - - if (!PyArg_ParseTuple(args, "O", &file)) - return NULL; - ret = PyArray_Dump(self, file, 2); - if (ret < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -gentype_dumps(PyObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) - return NULL; - return PyArray_Dumps(self, 2); -} - - -/* setting flags cannot be done for scalars */ -static PyObject * -gentype_setflags(PyObject *self, PyObject *args, PyObject *kwds) -{ - Py_INCREF(Py_None); - return Py_None; -} - -/* need to fill in doc-strings for these methods on import -- copy from - array docstrings -*/ -static PyMethodDef gentype_methods[] = { - {"tolist", (PyCFunction)gentype_tolist, 1, NULL}, - {"item", (PyCFunction)gentype_item, METH_VARARGS, NULL}, - {"itemset", (PyCFunction)gentype_itemset, METH_VARARGS, NULL}, - {"tofile", (PyCFunction)gentype_tofile, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"tostring", (PyCFunction)gentype_tostring, METH_VARARGS, NULL}, - {"byteswap", (PyCFunction)gentype_byteswap,1, NULL}, - {"astype", (PyCFunction)gentype_astype, 1, NULL}, - {"getfield", (PyCFunction)gentype_getfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setfield", (PyCFunction)gentype_setfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"copy", (PyCFunction)gentype_copy, 1, NULL}, - {"resize", (PyCFunction)gentype_resize, - METH_VARARGS|METH_KEYWORDS, NULL}, - - {"__array__", (PyCFunction)gentype_getarray, 1, doc_getarray}, - {"__array_wrap__", (PyCFunction)gentype_wraparray, 1, doc_sc_wraparray}, - - /* for the copy module */ - {"__copy__", (PyCFunction)gentype_copy, 1, NULL}, - {"__deepcopy__", (PyCFunction)gentype___deepcopy__, 1, NULL}, - - - {"__reduce__", (PyCFunction) gentype_reduce, 1, NULL}, - /* For consistency does nothing */ - {"__setstate__", (PyCFunction) gentype_setstate, 1, NULL}, - - {"dumps", (PyCFunction) gentype_dumps, 1, NULL}, - {"dump", (PyCFunction) gentype_dump, 1, NULL}, - - /* Methods for array */ - {"fill", (PyCFunction)gentype_fill, - METH_VARARGS, NULL}, - {"transpose", (PyCFunction)gentype_transpose, - METH_VARARGS, NULL}, - {"take", (PyCFunction)gentype_take, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"put", (PyCFunction)gentype_put, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"repeat", (PyCFunction)gentype_repeat, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"choose", (PyCFunction)gentype_choose, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"sort", (PyCFunction)gentype_sort, - METH_VARARGS, NULL}, - {"argsort", (PyCFunction)gentype_argsort, - METH_VARARGS, NULL}, - {"searchsorted", (PyCFunction)gentype_searchsorted, - METH_VARARGS, NULL}, - {"argmax", 
(PyCFunction)gentype_argmax, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"argmin", (PyCFunction)gentype_argmin, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"reshape", (PyCFunction)gentype_reshape, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"squeeze", (PyCFunction)gentype_squeeze, - METH_VARARGS, NULL}, - {"view", (PyCFunction)gentype_view, - METH_VARARGS, NULL}, - {"swapaxes", (PyCFunction)gentype_swapaxes, - METH_VARARGS, NULL}, - {"max", (PyCFunction)gentype_max, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"min", (PyCFunction)gentype_min, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"ptp", (PyCFunction)gentype_ptp, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"mean", (PyCFunction)gentype_mean, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"trace", (PyCFunction)gentype_trace, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"diagonal", (PyCFunction)gentype_diagonal, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"clip", (PyCFunction)gentype_clip, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"conj", (PyCFunction)gentype_conj, - METH_VARARGS, NULL}, - {"conjugate", (PyCFunction)gentype_conjugate, - METH_VARARGS, NULL}, - {"nonzero", (PyCFunction)gentype_nonzero, - METH_VARARGS, NULL}, - {"std", (PyCFunction)gentype_std, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"var", (PyCFunction)gentype_var, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"sum", (PyCFunction)gentype_sum, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"cumsum", (PyCFunction)gentype_cumsum, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"prod", (PyCFunction)gentype_prod, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"cumprod", (PyCFunction)gentype_cumprod, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"all", (PyCFunction)gentype_all, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"any", (PyCFunction)gentype_any, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"compress", (PyCFunction)gentype_compress, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"flatten", (PyCFunction)gentype_flatten, - METH_VARARGS, NULL}, - {"ravel", (PyCFunction)gentype_ravel, - METH_VARARGS, NULL}, - {"round", (PyCFunction)gentype_round, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"setflags", (PyCFunction)gentype_setflags, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"newbyteorder", (PyCFunction)gentype_newbyteorder, - METH_VARARGS, NULL}, - {NULL, NULL} /* sentinel */ -}; - - -static PyGetSetDef voidtype_getsets[] = { - {"flags", - (getter)voidtype_flags_get, - (setter)0, - "integer value of flags"}, - {"dtype", - (getter)voidtype_dtypedescr_get, - (setter)0, - "dtype object"}, - {NULL, NULL} -}; - -static PyMethodDef voidtype_methods[] = { - {"getfield", (PyCFunction)voidtype_getfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setfield", (PyCFunction)voidtype_setfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL} -}; - -/************* As_mapping functions for void array scalar ************/ - -static Py_ssize_t -voidtype_length(PyVoidScalarObject *self) -{ - if (!self->descr->names) { - return 0; - } - else { /* return the number of fields */ - return (Py_ssize_t) PyTuple_GET_SIZE(self->descr->names); - } -} - -static PyObject * -voidtype_item(PyVoidScalarObject *self, Py_ssize_t n) -{ - intp m; - PyObject *flist=NULL, *fieldinfo; - - if (!(PyDescr_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return NULL; - } - flist = self->descr->names; - m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; - if (n < 0 || n >= m) { - PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); - return NULL; - } - fieldinfo = PyDict_GetItem(self->descr->fields, - PyTuple_GET_ITEM(flist, n)); - return 
voidtype_getfield(self, fieldinfo, NULL); -} - - -/* get field by name or number */ -static PyObject * -voidtype_subscript(PyVoidScalarObject *self, PyObject *ind) -{ - intp n; - PyObject *fieldinfo; - - if (!(PyDescr_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return NULL; - } - - if (PyString_Check(ind) || PyUnicode_Check(ind)) { - /* look up in fields */ - fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; - return voidtype_getfield(self, fieldinfo, NULL); - } - - /* try to convert it to a number */ - n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; - - return voidtype_item(self, (Py_ssize_t)n); - - fail: - PyErr_SetString(PyExc_IndexError, "invalid index"); - return NULL; - -} - -static int -voidtype_ass_item(PyVoidScalarObject *self, Py_ssize_t n, PyObject *val) -{ - intp m; - PyObject *flist=NULL, *fieldinfo, *newtup; - PyObject *res; - - if (!(PyDescr_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return -1; - } - - flist = self->descr->names; - m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; - if (n < 0 || n >= m) goto fail; - fieldinfo = PyDict_GetItem(self->descr->fields, - PyTuple_GET_ITEM(flist, n)); - newtup = Py_BuildValue("(OOO)", val, - PyTuple_GET_ITEM(fieldinfo, 0), - PyTuple_GET_ITEM(fieldinfo, 1)); - res = voidtype_setfield(self, newtup, NULL); - Py_DECREF(newtup); - if (!res) return -1; - Py_DECREF(res); - return 0; - - fail: - PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); - return -1; - -} - -static int -voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val) -{ - intp n; - char *msg = "invalid index"; - PyObject *fieldinfo, *newtup; - PyObject *res; - - if (!PyDescr_HASFIELDS(self->descr)) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return -1; - } - - if (PyString_Check(ind) || PyUnicode_Check(ind)) { - /* look up in fields */ - fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; - newtup = Py_BuildValue("(OOO)", val, - PyTuple_GET_ITEM(fieldinfo, 0), - PyTuple_GET_ITEM(fieldinfo, 1)); - res = voidtype_setfield(self, newtup, NULL); - Py_DECREF(newtup); - if (!res) return -1; - Py_DECREF(res); - return 0; - } - - /* try to convert it to a number */ - n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; - return voidtype_ass_item(self, (Py_ssize_t)n, val); - - fail: - PyErr_SetString(PyExc_IndexError, msg); - return -1; -} - -static PyMappingMethods voidtype_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*mp_length*/ -#else - (inquiry)voidtype_length, /*mp_length*/ -#endif - (binaryfunc)voidtype_subscript, /*mp_subscript*/ - (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ -}; - - -static PySequenceMethods voidtype_as_sequence = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (ssizeargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (ssizeobjargproc)voidtype_ass_item /*sq_ass_item*/ -#else - (inquiry)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (intargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (intobjargproc)voidtype_ass_item /*sq_ass_item*/ -#endif -}; - - - -static Py_ssize_t -gentype_getreadbuf(PyObject *self, Py_ssize_t segment, void **ptrptr) -{ - int numbytes; - PyArray_Descr *outcode; - - if (segment != 0) { - 
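Field access on a void scalar above resolves a field name or position to a (dtype, offset) pair from descr->fields and then reads that slice of the packed record buffer. A minimal sketch of the read step, with a hypothetical field_info table standing in for descr->fields:

#include <stddef.h>
#include <string.h>

struct field_info {        /* illustrative stand-in for one descr->fields entry */
    const char *name;
    size_t offset;
    size_t size;
};

/* Copy one field's bytes out of a packed record of record_size bytes. */
static int
read_field(const char *record, size_t record_size,
           const struct field_info *f, void *out)
{
    if (f->offset + f->size > record_size)
        return -1;          /* caller raises an IndexError/ValueError */
    memcpy(out, record + f->offset, f->size);
    return 0;
}

voidtype_setfield above is the mirror image: it checks the same 0 <= offset <= size - elsize bound before writing into the buffer.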
PyErr_SetString(PyExc_SystemError, - "Accessing non-existent array segment"); - return -1; - } - - outcode = PyArray_DescrFromScalar(self); - numbytes = outcode->elsize; - *ptrptr = (void *)scalar_value(self, outcode); - -#ifndef Py_UNICODE_WIDE - if (outcode->type_num == NPY_UNICODE) { - numbytes >>= 1; - } -#endif - Py_DECREF(outcode); - return numbytes; -} - -static Py_ssize_t -gentype_getsegcount(PyObject *self, Py_ssize_t *lenp) -{ - PyArray_Descr *outcode; - - outcode = PyArray_DescrFromScalar(self); - if (lenp) { - *lenp = outcode->elsize; -#ifndef Py_UNICODE_WIDE - if (outcode->type_num == NPY_UNICODE) { - *lenp >>= 1; - } -#endif - } - Py_DECREF(outcode); - return 1; -} - -static Py_ssize_t -gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr) -{ - if (PyArray_IsScalar(self, String) || \ - PyArray_IsScalar(self, Unicode)) - return gentype_getreadbuf(self, segment, (void **)ptrptr); - else { - PyErr_SetString(PyExc_TypeError, - "Non-character array cannot be interpreted "\ - "as character buffer."); - return -1; - } -} - - -static PyBufferProcs gentype_as_buffer = { - gentype_getreadbuf, /*bf_getreadbuffer*/ - NULL, /*bf_getwritebuffer*/ - gentype_getsegcount, /*bf_getsegcount*/ - gentype_getcharbuf, /*bf_getcharbuffer*/ -}; - - -#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES -#define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES - -static PyTypeObject PyGenericArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.generic", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ -}; - -static void -void_dealloc(PyVoidScalarObject *v) -{ - if (v->flags & OWNDATA) - PyDataMem_FREE(v->obval); - Py_XDECREF(v->descr); - Py_XDECREF(v->base); - v->ob_type->tp_free(v); -} - -static void -object_arrtype_dealloc(PyObject *v) -{ - Py_XDECREF(((PyObjectScalarObject *)v)->obval); - v->ob_type->tp_free(v); -} - -/* string and unicode inherit from Python Type first and so GET_ITEM is different to get to the Python Type. - */ -/* ok is a work-around for a bug in complex_new that doesn't allocate - memory from the sub-types memory allocator. 
-*/ - -#define _WORK(num) \ - if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) { \ - PyTypeObject *sup; \ - /* We are inheriting from a Python type as well so \ - give it first dibs on conversion */ \ - sup = (PyTypeObject *)PyTuple_GET_ITEM(type->tp_bases, num); \ - robj = sup->tp_new(type, args, kwds); \ - if (robj != NULL) goto finish; \ - if (PyTuple_GET_SIZE(args)!=1) return NULL; \ - PyErr_Clear(); \ - /* now do default conversion */ \ - } - -#define _WORK1 _WORK(1) -#define _WORKz _WORK(0) -#define _WORK0 - -/**begin repeat1 -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, object# -#TYPE=BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, OBJECT# -#work=0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,1# -#default=0*16,1*2,2# -*/ -static PyObject * -@name@_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) -{ - PyObject *obj=NULL; - PyObject *robj; - PyObject *arr; - PyArray_Descr *typecode=NULL; - int itemsize; - void *dest, *src; - - _WORK@work@ - - if (!PyArg_ParseTuple(args, "|O", &obj)) return NULL; - - typecode = PyArray_DescrFromType(PyArray_@TYPE@); - Py_INCREF(typecode); - if (obj == NULL) { -#if @default@ == 0 - char *mem; - mem = malloc(sizeof(@name@)); - memset(mem, 0, sizeof(@name@)); - robj = PyArray_Scalar(mem, typecode, NULL); - free(mem); -#elif @default@ == 1 - robj = PyArray_Scalar(NULL, typecode, NULL); -#elif @default@ == 2 - obj = Py_None; - robj = PyArray_Scalar(&obj, typecode, NULL); -#endif - goto finish; - } - - arr = PyArray_FromAny(obj, typecode, 0, 0, FORCECAST, NULL); - if ((arr==NULL) || (PyArray_NDIM(arr) > 0)) return arr; - robj = PyArray_Return((PyArrayObject *)arr); - - finish: - if ((robj==NULL) || (robj->ob_type == type)) return robj; - /* Need to allocate new type and copy data-area over */ - if (type->tp_itemsize) { - itemsize = PyString_GET_SIZE(robj); - } - else itemsize = 0; - obj = type->tp_alloc(type, itemsize); - if (obj == NULL) {Py_DECREF(robj); return NULL;} - if (typecode==NULL) - typecode = PyArray_DescrFromType(PyArray_@TYPE@); - dest = scalar_value(obj, typecode); - src = scalar_value(robj, typecode); - Py_DECREF(typecode); -#if @default@ == 0 - *((npy_@name@ *)dest) = *((npy_@name@ *)src); -#elif @default@ == 1 - if (itemsize == 0) { - itemsize = ((PyUnicodeObject *)robj)->length << 2; - } - memcpy(dest, src, itemsize); -#elif @default@ == 2 - memcpy(dest, src, sizeof(void *)); - Py_INCREF(*((PyObject **)dest)); -#endif - Py_DECREF(robj); - return obj; -} -/**end repeat**/ - -#undef _WORK1 -#undef _WORKz -#undef _WORK0 -#undef _WORK - -/* bool->tp_new only returns Py_True or Py_False */ -static PyObject * -bool_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) -{ - PyObject *obj=NULL; - PyObject *arr; - - if (!PyArg_ParseTuple(args, "|O", &obj)) return NULL; - if (obj == NULL) - PyArrayScalar_RETURN_FALSE; - if (obj == Py_False) - PyArrayScalar_RETURN_FALSE; - if (obj == Py_True) - PyArrayScalar_RETURN_TRUE; - arr = PyArray_FROM_OTF(obj, PyArray_BOOL, FORCECAST); - if (arr && 0 == PyArray_NDIM(arr)) { - Bool val = *((Bool *)PyArray_DATA(arr)); - Py_DECREF(arr); - PyArrayScalar_RETURN_BOOL_FROM_LONG(val); - } - return PyArray_Return((PyArrayObject *)arr); -} - -static PyObject * -bool_arrtype_and(PyObject *a, PyObject *b) -{ - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) - 
PyArrayScalar_RETURN_BOOL_FROM_LONG - ((a == PyArrayScalar_True)&(b == PyArrayScalar_True)); - return PyGenericArrType_Type.tp_as_number->nb_and(a, b); -} - -static PyObject * -bool_arrtype_or(PyObject *a, PyObject *b) -{ - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) - PyArrayScalar_RETURN_BOOL_FROM_LONG - ((a == PyArrayScalar_True)|(b == PyArrayScalar_True)); - return PyGenericArrType_Type.tp_as_number->nb_or(a, b); -} - -static PyObject * -bool_arrtype_xor(PyObject *a, PyObject *b) -{ - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) - PyArrayScalar_RETURN_BOOL_FROM_LONG - ((a == PyArrayScalar_True)^(b == PyArrayScalar_True)); - return PyGenericArrType_Type.tp_as_number->nb_xor(a, b); -} - -static int -bool_arrtype_nonzero(PyObject *a) -{ - return a == PyArrayScalar_True; -} - -#if PY_VERSION_HEX >= 0x02050000 -/**begin repeat -#name=byte, short, int, long, ubyte, ushort, longlong, uint, ulong, ulonglong# -#Name=Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, ULongLong# -#type=PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, PyLong_FromUnsignedLongLong# -*/ -static PyNumberMethods @name@_arrtype_as_number; -static PyObject * -@name@_index(PyObject *self) -{ - return @type@(PyArrayScalar_VAL(self, @Name@)); -} -/**end repeat**/ -static PyObject * -bool_index(PyObject *a) -{ - return PyInt_FromLong(PyArrayScalar_VAL(a, Bool)); -} -#endif - -/* Arithmetic methods -- only so we can override &, |, ^. */ -static PyNumberMethods bool_arrtype_as_number = { - 0, /* nb_add */ - 0, /* nb_subtract */ - 0, /* nb_multiply */ - 0, /* nb_divide */ - 0, /* nb_remainder */ - 0, /* nb_divmod */ - 0, /* nb_power */ - 0, /* nb_negative */ - 0, /* nb_positive */ - 0, /* nb_absolute */ - (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ - 0, /* nb_invert */ - 0, /* nb_lshift */ - 0, /* nb_rshift */ - (binaryfunc)bool_arrtype_and, /* nb_and */ - (binaryfunc)bool_arrtype_xor, /* nb_xor */ - (binaryfunc)bool_arrtype_or, /* nb_or */ -}; - -static PyObject * -void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) -{ - PyObject *obj, *arr; - ulonglong memu=1; - PyObject *new=NULL; - char *destptr; - - if (!PyArg_ParseTuple(args, "O", &obj)) return NULL; - /* For a VOID scalar first see if obj is an integer or long - and create new memory of that size (filled with 0) for the scalar - */ - - if (PyLong_Check(obj) || PyInt_Check(obj) || \ - PyArray_IsScalar(obj, Integer) || - (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && \ - PyArray_ISINTEGER(obj))) { - new = obj->ob_type->tp_as_number->nb_long(obj); - } - if (new && PyLong_Check(new)) { - PyObject *ret; - memu = PyLong_AsUnsignedLongLong(new); - Py_DECREF(new); - if (PyErr_Occurred() || (memu > MAX_INT)) { - PyErr_Clear(); - PyErr_Format(PyExc_OverflowError, - "size must be smaller than %d", - (int) MAX_INT); - return NULL; - } - destptr = PyDataMem_NEW((int) memu); - if (destptr == NULL) return PyErr_NoMemory(); - ret = type->tp_alloc(type, 0); - if (ret == NULL) { - PyDataMem_FREE(destptr); - return PyErr_NoMemory(); - } - ((PyVoidScalarObject *)ret)->obval = destptr; - ((PyVoidScalarObject *)ret)->ob_size = (int) memu; - ((PyVoidScalarObject *)ret)->descr = \ - PyArray_DescrNewFromType(PyArray_VOID); - ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu; - ((PyVoidScalarObject *)ret)->flags = BEHAVED | OWNDATA; - ((PyVoidScalarObject *)ret)->base = NULL; - memset(destptr, '\0', (size_t) memu); - return ret; - } - - arr = PyArray_FROM_OTF(obj, PyArray_VOID, FORCECAST); - return 
PyArray_Return((PyArrayObject *)arr); -} - - -/**************** Define Hash functions ********************/ - -/**begin repeat -#lname=bool,ubyte,ushort# -#name=Bool,UByte, UShort# - */ -static long -@lname@_arrtype_hash(PyObject *obj) -{ - return (long)(((Py@name@ScalarObject *)obj)->obval); -} -/**end repeat**/ - -/**begin repeat -#lname=byte,short,uint,ulong# -#name=Byte,Short,UInt,ULong# - */ -static long -@lname@_arrtype_hash(PyObject *obj) -{ - long x = (long)(((Py@name@ScalarObject *)obj)->obval); - if (x == -1) x=-2; - return x; -} -/**end repeat**/ - -#if SIZEOF_INT != SIZEOF_LONG -static long -int_arrtype_hash(PyObject *obj) -{ - long x = (long)(((PyIntScalarObject *)obj)->obval); - if (x == -1) x=-2; - return x; -} -#endif - -/**begin repeat -#char=,u# -#Char=,U# -#ext=&& (x >= LONG_MIN),# -*/ -#if SIZEOF_LONG != SIZEOF_LONGLONG -/* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */ -static long -@char@longlong_arrtype_hash(PyObject *obj) -{ - long y; - @char@longlong x = (((Py@Char@LongLongScalarObject *)obj)->obval); - - if ((x <= LONG_MAX)@ext@) { - y = (long) x; - } - else { - union Mask { - long hashvals[2]; - @char@longlong v; - } both; - - both.v = x; - y = both.hashvals[0] + (1000003)*both.hashvals[1]; - } - if (y == -1) y = -2; - return y; -} -#endif -/**end repeat**/ - -#if SIZEOF_LONG==SIZEOF_LONGLONG -static long -ulonglong_arrtype_hash(PyObject *obj) -{ - long x = (long)(((PyULongLongScalarObject *)obj)->obval); - if (x == -1) x=-2; - return x; -} -#endif - - - -/* Wrong thing to do for longdouble, but....*/ -/**begin repeat -#lname=float, longdouble# -#name=Float, LongDouble# - */ -static long -@lname@_arrtype_hash(PyObject *obj) -{ - return _Py_HashDouble((double) ((Py@name@ScalarObject *)obj)->obval); -} - -/* borrowed from complex_hash */ -static long -c@lname@_arrtype_hash(PyObject *obj) -{ - long hashreal, hashimag, combined; - hashreal = _Py_HashDouble((double) \ - (((PyC@name@ScalarObject *)obj)->obval).real); - - if (hashreal == -1) return -1; - hashimag = _Py_HashDouble((double) \ - (((PyC@name@ScalarObject *)obj)->obval).imag); - if (hashimag == -1) return -1; - - combined = hashreal + 1000003 * hashimag; - if (combined == -1) combined = -2; - return combined; -} -/**end repeat**/ - -static long -object_arrtype_hash(PyObject *obj) -{ - return PyObject_Hash(((PyObjectScalarObject *)obj)->obval); -} - -/* just hash the pointer */ -static long -void_arrtype_hash(PyObject *obj) -{ - return _Py_HashPointer((void *)(((PyVoidScalarObject *)obj)->obval)); -} - -/*object arrtype getattro and setattro */ -static PyObject * -object_arrtype_getattro(PyObjectScalarObject *obj, PyObject *attr) { - PyObject *res; - - /* first look in object and then hand off to generic type */ - - res = PyObject_GenericGetAttr(obj->obval, attr); - if (res) return res; - PyErr_Clear(); - return PyObject_GenericGetAttr((PyObject *)obj, attr); -} - -static int -object_arrtype_setattro(PyObjectScalarObject *obj, PyObject *attr, PyObject *val) { - int res; - /* first look in object and then hand off to generic type */ - - res = PyObject_GenericSetAttr(obj->obval, attr, val); - if (res >= 0) return res; - PyErr_Clear(); - return PyObject_GenericSetAttr((PyObject *)obj, attr, val); -} - -static PyObject * -object_arrtype_concat(PyObjectScalarObject *self, PyObject *other) -{ - return PySequence_Concat(self->obval, other); -} - -static Py_ssize_t -object_arrtype_length(PyObjectScalarObject *self) -{ - return PyObject_Length(self->obval); -} - -static PyObject * 
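The hash functions above follow two CPython conventions: a hash of -1 is reserved for signalling an error, so -1 is remapped to -2, and a value wider than a long (or a complex value with two components) is folded into one long with the multiplier 1000003. A standalone sketch of that folding:

/* Combine two halves the way complex_hash and the long long hashes
   above do, keeping -1 free as the error value. */
static long
fold_hash(long lo, long hi)
{
    long y = lo + 1000003L * hi;
    if (y == -1)
        y = -2;
    return y;
}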
-object_arrtype_repeat(PyObjectScalarObject *self, Py_ssize_t count) -{ - return PySequence_Repeat(self->obval, count); -} - -static PyObject * -object_arrtype_subscript(PyObjectScalarObject *self, PyObject *key) -{ - return PyObject_GetItem(self->obval, key); -} - -static int -object_arrtype_ass_subscript(PyObjectScalarObject *self, PyObject *key, - PyObject *value) -{ - return PyObject_SetItem(self->obval, key, value); -} - -static int -object_arrtype_contains(PyObjectScalarObject *self, PyObject *ob) -{ - return PySequence_Contains(self->obval, ob); -} - -static PyObject * -object_arrtype_inplace_concat(PyObjectScalarObject *self, PyObject *o) -{ - return PySequence_InPlaceConcat(self->obval, o); -} - -static PyObject * -object_arrtype_inplace_repeat(PyObjectScalarObject *self, Py_ssize_t count) -{ - return PySequence_InPlaceRepeat(self->obval, count); -} - -static PySequenceMethods object_arrtype_as_sequence = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ -#else - (inquiry)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (intargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ -#endif -}; - -static PyMappingMethods object_arrtype_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, - (binaryfunc)object_arrtype_subscript, - (objobjargproc)object_arrtype_ass_subscript, -#else - (inquiry)object_arrtype_length, - (binaryfunc)object_arrtype_subscript, - (objobjargproc)object_arrtype_ass_subscript, -#endif -}; - -static Py_ssize_t -object_arrtype_getsegcount(PyObjectScalarObject *self, Py_ssize_t *lenp) -{ - Py_ssize_t newlen; - int cnt; - PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - - if (pb == NULL || \ - pb->bf_getsegcount == NULL || \ - (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) - return 0; - - if (lenp) - *lenp = newlen; - - return cnt; -} - -static Py_ssize_t -object_arrtype_getreadbuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr) -{ - PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - - if (pb == NULL || \ - pb->bf_getreadbuffer == NULL || - pb->bf_getsegcount == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a readable buffer object"); - return -1; - } - - return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr); -} - -static Py_ssize_t -object_arrtype_getwritebuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr) -{ - PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - - if (pb == NULL || \ - pb->bf_getwritebuffer == NULL || - pb->bf_getsegcount == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a writeable buffer object"); - return -1; - } - - return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr); -} - -static Py_ssize_t -object_arrtype_getcharbuf(PyObjectScalarObject *self, Py_ssize_t segment, - constchar **ptrptr) -{ - PyBufferProcs *pb = 
self->obval->ob_type->tp_as_buffer; - - if (pb == NULL || \ - pb->bf_getcharbuffer == NULL || - pb->bf_getsegcount == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a character buffer object"); - return -1; - } - - return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr); -} - -static PyBufferProcs object_arrtype_as_buffer = { -#if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)object_arrtype_getreadbuf, - (writebufferproc)object_arrtype_getwritebuf, - (segcountproc)object_arrtype_getsegcount, - (charbufferproc)object_arrtype_getcharbuf, -#else - (getreadbufferproc)object_arrtype_getreadbuf, - (getwritebufferproc)object_arrtype_getwritebuf, - (getsegcountproc)object_arrtype_getsegcount, - (getcharbufferproc)object_arrtype_getcharbuf, -#endif -}; - -static PyObject * -object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds) -{ - return PyObject_Call(obj->obval, args, kwds); -} - -static PyTypeObject PyObjectArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.object_", /*tp_name*/ - sizeof(PyObjectScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - (destructor)object_arrtype_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &object_arrtype_as_sequence, /* tp_as_sequence */ - &object_arrtype_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)object_arrtype_call, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)object_arrtype_getattro, /* tp_getattro */ - (setattrofunc)object_arrtype_setattro, /* tp_setattro */ - &object_arrtype_as_buffer, /* tp_as_buffer */ - 0, /* tp_flags */ -}; - -/**begin repeat -#name=bool, string, unicode, void# -#NAME=Bool, String, Unicode, Void# -#ex=_,_,_,# -*/ -static PyTypeObject Py@NAME@ArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.@name@@ex@", /*tp_name*/ - sizeof(Py@NAME@ScalarObject), /*tp_basicsize*/ -}; -/**end repeat**/ - -/**begin repeat -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble# -#name=int*5, uint*5, float*3# -#CNAME=(CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# -*/ -#if BITSOF_@CNAME@ == 8 -#define _THIS_SIZE "8" -#elif BITSOF_@CNAME@ == 16 -#define _THIS_SIZE "16" -#elif BITSOF_@CNAME@ == 32 -#define _THIS_SIZE "32" -#elif BITSOF_@CNAME@ == 64 -#define _THIS_SIZE "64" -#elif BITSOF_@CNAME@ == 80 -#define _THIS_SIZE "80" -#elif BITSOF_@CNAME@ == 96 -#define _THIS_SIZE "96" -#elif BITSOF_@CNAME@ == 128 -#define _THIS_SIZE "128" -#elif BITSOF_@CNAME@ == 256 -#define _THIS_SIZE "256" -#endif -static PyTypeObject Py@NAME@ArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.@name@" _THIS_SIZE, /*tp_name*/ - sizeof(Py@NAME@ScalarObject), /*tp_basicsize*/ -}; - -#undef _THIS_SIZE -/**end repeat**/ - -/**begin repeat -#NAME=CFloat, CDouble, CLongDouble# -#name=complex*3# -#CNAME=FLOAT, DOUBLE, LONGDOUBLE# -*/ -#if BITSOF_@CNAME@ == 16 -#define _THIS_SIZE2 "16" -#define _THIS_SIZE1 "32" -#elif BITSOF_@CNAME@ == 32 -#define _THIS_SIZE2 "32" -#define _THIS_SIZE1 "64" -#elif BITSOF_@CNAME@ == 64 -#define _THIS_SIZE2 "64" -#define _THIS_SIZE1 "128" -#elif BITSOF_@CNAME@ == 80 -#define _THIS_SIZE2 "80" -#define _THIS_SIZE1 "160" -#elif BITSOF_@CNAME@ == 96 -#define _THIS_SIZE2 "96" -#define _THIS_SIZE1 "192" -#elif BITSOF_@CNAME@ == 128 -#define _THIS_SIZE2 "128" -#define _THIS_SIZE1 "256" -#elif BITSOF_@CNAME@ == 256 -#define _THIS_SIZE2 "256" -#define _THIS_SIZE1 "512" 
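Each fixed-width scalar type above takes the trailing digits of its Python name from the bit width of the underlying C type, and a complex type is named after the combined width of its two floating-point components, so two 64-bit doubles yield "numpy.complex128". The real code resolves this with the preprocessor via BITSOF_@CNAME@; a small runtime sketch of the same arithmetic, assuming nothing beyond CHAR_BIT:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    int fbits = (int)(sizeof(double) * CHAR_BIT);   /* typically 64 */
    printf("numpy.float%d\n", fbits);               /* e.g. numpy.float64   */
    printf("numpy.complex%d\n", 2 * fbits);         /* e.g. numpy.complex128 */
    return 0;
}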
-#endif -static PyTypeObject Py@NAME@ArrType_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.@name@" _THIS_SIZE1, /*tp_name*/ - sizeof(Py@NAME@ScalarObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - 0, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash */ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /*tp_flags*/ - "Composed of two " _THIS_SIZE2 " bit floats", /* tp_doc */ -}; -#undef _THIS_SIZE1 -#undef _THIS_SIZE2 - -/**end repeat**/ - - - -static PyNumberMethods longdoubletype_as_number; -static PyNumberMethods clongdoubletype_as_number; - - -static void -initialize_numeric_types(void) -{ - PyGenericArrType_Type.tp_dealloc = (destructor)gentype_dealloc; - PyGenericArrType_Type.tp_as_number = &gentype_as_number; - PyGenericArrType_Type.tp_as_buffer = &gentype_as_buffer; - PyGenericArrType_Type.tp_flags = BASEFLAGS; - PyGenericArrType_Type.tp_methods = gentype_methods; - PyGenericArrType_Type.tp_getset = gentype_getsets; - PyGenericArrType_Type.tp_new = NULL; - PyGenericArrType_Type.tp_alloc = gentype_alloc; - PyGenericArrType_Type.tp_free = _pya_free; - PyGenericArrType_Type.tp_repr = gentype_repr; - PyGenericArrType_Type.tp_str = gentype_str; - PyGenericArrType_Type.tp_richcompare = gentype_richcompare; - - PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number; -#if PY_VERSION_HEX >= 0x02050000 - /* need to add dummy versions with filled-in nb_index - in-order for PyType_Ready to fill in .__index__() method - */ - /**begin repeat -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong# -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong# - */ - Py@NAME@ArrType_Type.tp_as_number = &@name@_arrtype_as_number; - Py@NAME@ArrType_Type.tp_as_number->nb_index = (unaryfunc)@name@_index; - - /**end repeat**/ - PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index; -#endif - - PyStringArrType_Type.tp_alloc = NULL; - PyStringArrType_Type.tp_free = NULL; - - PyStringArrType_Type.tp_repr = stringtype_repr; - PyStringArrType_Type.tp_str = stringtype_str; - - PyUnicodeArrType_Type.tp_repr = unicodetype_repr; - PyUnicodeArrType_Type.tp_str = unicodetype_str; - - PyVoidArrType_Type.tp_methods = voidtype_methods; - PyVoidArrType_Type.tp_getset = voidtype_getsets; - PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping; - PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; - - /**begin repeat -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, -ComplexFloating, Flexible, Character# - */ - Py@NAME@ArrType_Type.tp_flags = BASEFLAGS; - /**end repeat**/ - - /**begin repeat -#name=bool, byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, void, object# -#NAME=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, String, Unicode, Void, Object# - */ - Py@NAME@ArrType_Type.tp_flags = BASEFLAGS; - Py@NAME@ArrType_Type.tp_new = @name@_arrtype_new; - Py@NAME@ArrType_Type.tp_richcompare = gentype_richcompare; - /**end repeat**/ - - /**begin repeat -#name=bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, float, longdouble, cfloat, clongdouble, void, object# -#NAME=Bool, Byte, Short, UByte, UShort, UInt, ULong, 
ULongLong, Float, LongDouble, CFloat, CLongDouble, Void, Object# - */ - Py@NAME@ArrType_Type.tp_hash = @name@_arrtype_hash; - /**end repeat**/ - -#if SIZEOF_INT != SIZEOF_LONG - /* We won't be inheriting from Python Int type. */ - PyIntArrType_Type.tp_hash = int_arrtype_hash; -#endif - -#if SIZEOF_LONG != SIZEOF_LONGLONG - /* We won't be inheriting from Python Int type. */ - PyLongLongArrType_Type.tp_hash = longlong_arrtype_hash; -#endif - - /* These need to be coded specially because getitem does not - return a normal Python type - */ - PyLongDoubleArrType_Type.tp_as_number = &longdoubletype_as_number; - PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number; - - /**begin repeat -#name=int, long, hex, oct, float, repr, str# -#kind=tp_as_number->nb*5, tp*2# - */ - PyLongDoubleArrType_Type.@kind@_@name@ = longdoubletype_@name@; - PyCLongDoubleArrType_Type.@kind@_@name@ = clongdoubletype_@name@; - /**end repeat**/ - - PyStringArrType_Type.tp_itemsize = sizeof(char); - PyVoidArrType_Type.tp_dealloc = (destructor) void_dealloc; - - PyArrayIter_Type.tp_iter = PyObject_SelfIter; - PyArrayMapIter_Type.tp_iter = PyObject_SelfIter; -} - - -/* the order of this table is important */ -static PyTypeObject *typeobjects[] = { - &PyBoolArrType_Type, - &PyByteArrType_Type, - &PyUByteArrType_Type, - &PyShortArrType_Type, - &PyUShortArrType_Type, - &PyIntArrType_Type, - &PyUIntArrType_Type, - &PyLongArrType_Type, - &PyULongArrType_Type, - &PyLongLongArrType_Type, - &PyULongLongArrType_Type, - &PyFloatArrType_Type, - &PyDoubleArrType_Type, - &PyLongDoubleArrType_Type, - &PyCFloatArrType_Type, - &PyCDoubleArrType_Type, - &PyCLongDoubleArrType_Type, - &PyObjectArrType_Type, - &PyStringArrType_Type, - &PyUnicodeArrType_Type, - &PyVoidArrType_Type -}; - -static int -_typenum_fromtypeobj(PyObject *type, int user) -{ - int typenum, i; - - typenum = PyArray_NOTYPE; - i = 0; - while(i < PyArray_NTYPES) { - if (type == (PyObject *)typeobjects[i]) { - typenum = i; - break; - } - i++; - } - - if (!user) return typenum; - - /* Search any registered types */ - i = 0; - while (i < PyArray_NUMUSERTYPES) { - if (type == (PyObject *)(userdescrs[i]->typeobj)) { - typenum = i + PyArray_USERDEF; - break; - } - i++; - } - return typenum; -} - -static PyArray_Descr * -_descr_from_subtype(PyObject *type) -{ - PyObject *mro; - mro = ((PyTypeObject *)type)->tp_mro; - if (PyTuple_GET_SIZE(mro) < 2) { - return PyArray_DescrFromType(PyArray_OBJECT); - } - return PyArray_DescrFromTypeObject(PyTuple_GET_ITEM(mro, 1)); -} - -/*New reference */ -/*OBJECT_API - */ -static PyArray_Descr * -PyArray_DescrFromTypeObject(PyObject *type) -{ - int typenum; - PyArray_Descr *new, *conv=NULL; - - /* if it's a builtin type, then use the typenumber */ - typenum = _typenum_fromtypeobj(type,1); - if (typenum != PyArray_NOTYPE) { - new = PyArray_DescrFromType(typenum); - return new; - } - - /* Check the generic types */ - if ((type == (PyObject *) &PyNumberArrType_Type) || \ - (type == (PyObject *) &PyInexactArrType_Type) || \ - (type == (PyObject *) &PyFloatingArrType_Type)) - typenum = PyArray_DOUBLE; - else if (type == (PyObject *)&PyComplexFloatingArrType_Type) - typenum = PyArray_CDOUBLE; - else if ((type == (PyObject *)&PyIntegerArrType_Type) || \ - (type == (PyObject *)&PySignedIntegerArrType_Type)) - typenum = PyArray_LONG; - else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) - typenum = PyArray_ULONG; - else if (type == (PyObject *) &PyCharacterArrType_Type) - typenum = PyArray_STRING; - else if ((type == (PyObject *) 
&PyGenericArrType_Type) || \ - (type == (PyObject *) &PyFlexibleArrType_Type)) - typenum = PyArray_VOID; - - if (typenum != PyArray_NOTYPE) { - return PyArray_DescrFromType(typenum); - } - - /* Otherwise --- type is a sub-type of an array scalar - not corresponding to a registered data-type object. - */ - - /* Do special thing for VOID sub-types - */ - if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { - new = PyArray_DescrNewFromType(PyArray_VOID); - - conv = _arraydescr_fromobj(type); - if (conv) { - new->fields = conv->fields; - Py_INCREF(new->fields); - new->names = conv->names; - Py_INCREF(new->names); - new->elsize = conv->elsize; - new->subarray = conv->subarray; - conv->subarray = NULL; - Py_DECREF(conv); - } - Py_XDECREF(new->typeobj); - new->typeobj = (PyTypeObject *)type; - Py_INCREF(type); - return new; - } - return _descr_from_subtype(type); -} - -/*OBJECT_API - Return the tuple of ordered field names from a dictionary. -*/ -static PyObject * -PyArray_FieldNames(PyObject *fields) -{ - PyObject *tup; - PyObject *ret; - PyObject *_numpy_internal; - - if (!PyDict_Check(fields)) { - PyErr_SetString(PyExc_TypeError, - "Fields must be a dictionary"); - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "O", fields); - Py_DECREF(_numpy_internal); - if (tup == NULL) return NULL; - ret = PyTuple_GET_ITEM(tup, 0); - ret = PySequence_Tuple(ret); - Py_DECREF(tup); - return ret; -} - -/* New reference */ -/*OBJECT_API - Return descr object from array scalar. -*/ -static PyArray_Descr * -PyArray_DescrFromScalar(PyObject *sc) -{ - int type_num; - PyArray_Descr *descr; - - if (PyArray_IsScalar(sc, Void)) { - descr = ((PyVoidScalarObject *)sc)->descr; - Py_INCREF(descr); - return descr; - } - descr = PyArray_DescrFromTypeObject((PyObject *)sc->ob_type); - if (descr->elsize == 0) { - PyArray_DESCR_REPLACE(descr); - type_num = descr->type_num; - if (type_num == PyArray_STRING) - descr->elsize = PyString_GET_SIZE(sc); - else if (type_num == PyArray_UNICODE) { - descr->elsize = PyUnicode_GET_DATA_SIZE(sc); -#ifndef Py_UNICODE_WIDE - descr->elsize <<= 1; -#endif - } - else { - descr->elsize = - ((PyVoidScalarObject *)sc)->ob_size; - descr->fields = PyObject_GetAttrString(sc, "fields"); - if (!descr->fields || !PyDict_Check(descr->fields) || - (descr->fields == Py_None)) { - Py_XDECREF(descr->fields); - descr->fields = NULL; - } - if (descr->fields) - descr->names = PyArray_FieldNames(descr->fields); - PyErr_Clear(); - } - } - return descr; -} - -/* New reference */ -/*OBJECT_API - Get a typeobject from a type-number -- can return NULL. -*/ -static PyObject * -PyArray_TypeObjectFromType(int type) -{ - PyArray_Descr *descr; - PyObject *obj; - - descr = PyArray_DescrFromType(type); - if (descr == NULL) return NULL; - obj = (PyObject *)descr->typeobj; - Py_XINCREF(obj); - Py_DECREF(descr); - return obj; -} diff --git a/numpy/core/src/ucsnarrow.c b/numpy/core/src/ucsnarrow.c deleted file mode 100644 index 9c4a45e9e..000000000 --- a/numpy/core/src/ucsnarrow.c +++ /dev/null @@ -1,108 +0,0 @@ -/* Functions only needed on narrow builds of Python - for converting back and forth between the NumPy Unicode data-type - (always 4-byte) - and the Python Unicode scalar (2-bytes on a narrow build). -*/ - -/* the ucs2 buffer must be large enough to hold 2*ucs4length characters - due to the use of surrogate pairs. 
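A code point above U+FFFF occupies two UCS2 units, which is what forces the 2*ucs4length sizing; a standalone sketch of the surrogate-pair encoding that the conversion routine below applies per character:

#include <stdint.h>

/* Encode one UCS4 code point as one or two UTF-16 units. */
static int
ucs4_to_utf16(uint32_t chr, uint16_t out[2])
{
    if (chr > 0xFFFF) {
        chr -= 0x10000;
        out[0] = (uint16_t)(0xD800 + (chr >> 10));    /* high surrogate */
        out[1] = (uint16_t)(0xDC00 + (chr & 0x3FF));  /* low surrogate  */
        return 2;
    }
    out[0] = (uint16_t)chr;
    return 1;
}

Decoding reverses this: (high - 0xD800) << 10 recovers the top bits, and adding low + 0x2400 folds in -0xDC00 + 0x10000, which is the shortcut PyUCS2Buffer_AsUCS4 below relies on.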
- - The return value is the number of ucs2 bytes used-up which - is ucs4length + number of surrogate pairs found. - - values above 0xffff are converted to surrogate pairs. -*/ -static int -PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, PyArray_UCS4 *ucs4, int ucs4length) -{ - register int i; - int numucs2 = 0; - PyArray_UCS4 chr; - for (i=0; i 0xffff) { - numucs2++; - chr -= 0x10000L; - *ucs2++ = 0xD800 + (Py_UNICODE) (chr >> 10); - *ucs2++ = 0xDC00 + (Py_UNICODE) (chr & 0x03FF); - } - else { - *ucs2++ = (Py_UNICODE) chr; - } - numucs2++; - } - return numucs2; -} - - -/* This converts a UCS2 buffer of the given length to UCS4 buffer. - It converts up to ucs4len characters of UCS2 - - It returns the number of characters converted which can - be less than ucs2len if there are surrogate pairs in ucs2. - - The return value is the actual size of the used part of the ucs4 buffer. -*/ - -static int -PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, PyArray_UCS4 *ucs4, int ucs2len, int ucs4len) -{ - register int i; - register PyArray_UCS4 chr; - register Py_UNICODE ch; - register int numchars=0; - - for (i=0; (i < ucs2len) && (numchars < ucs4len); i++) { - ch = *ucs2++; - if (ch >= 0xd800 && ch <= 0xdfff) { - /* surrogate pair */ - chr = ((PyArray_UCS4)(ch-0xd800)) << 10; - chr += *ucs2++ + 0x2400; /* -0xdc00 + 0x10000 */ - i++; - } - else { - chr = (PyArray_UCS4) ch; - } - *ucs4++ = chr; - numchars++; - } - return numchars; -} - - -static PyObject * -MyPyUnicode_New(int length) -{ - PyUnicodeObject *unicode; - unicode = PyObject_New(PyUnicodeObject, &PyUnicode_Type); - if (unicode == NULL) return NULL; - unicode->str = PyMem_NEW(Py_UNICODE, length+1); - if (!unicode->str) { - _Py_ForgetReference((PyObject *)unicode); - PyObject_Del(unicode); - return PyErr_NoMemory(); - } - unicode->str[0] = 0; - unicode->str[length] = 0; - unicode->length = length; - unicode->hash = -1; - unicode->defenc = NULL; - return (PyObject *)unicode; -} - -static int -MyPyUnicode_Resize(PyUnicodeObject *uni, int length) -{ - void *oldstr; - - oldstr = uni->str; - PyMem_RESIZE(uni->str, Py_UNICODE, length+1); - if (!uni->str) { - uni->str = oldstr; - PyErr_NoMemory(); - return -1; - } - uni->str[length] = 0; - uni->length = length; - return 0; -} diff --git a/numpy/core/src/ufuncobject.c b/numpy/core/src/ufuncobject.c deleted file mode 100644 index 088f1029d..000000000 --- a/numpy/core/src/ufuncobject.c +++ /dev/null @@ -1,3893 +0,0 @@ -/* - Python Universal Functions Object -- Math for all types, plus fast - arrays math - - Full description - - This supports mathematical (and Boolean) functions on arrays and other python - objects. Math on large arrays of basic C types is rather efficient. - - Travis E. Oliphant 2005, 2006 oliphant@ee.byu.edu (oliphant.travis@ieee.org) - Brigham Young University - - based on the - - Original Implementation: - Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - - with inspiration and code from - Numarray - Space Science Telescope Institute - J. 
Todd Miller - Perry Greenfield - Rick White - -*/ - - -typedef double (DoubleBinaryFunc)(double x, double y); -typedef float (FloatBinaryFunc)(float x, float y); -typedef longdouble (LongdoubleBinaryFunc)(longdouble x, longdouble y); - -typedef void (CdoubleBinaryFunc)(cdouble *x, cdouble *y, cdouble *res); -typedef void (CfloatBinaryFunc)(cfloat *x, cfloat *y, cfloat *res); -typedef void (ClongdoubleBinaryFunc)(clongdouble *x, clongdouble *y, \ - clongdouble *res); - -#define USE_USE_DEFAULTS 1 - -/*UFUNC_API*/ -static void -PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i, n=dimensions[0]; - register intp is1=steps[0],is2=steps[1],os=steps[2]; - char *ip1=args[0], *ip2=args[1], *op=args[2]; - - for(i=0; inin, nout=data->nout; - int ntot; - PyObject *tocall = data->callable; - char *ptrs[NPY_MAXARGS]; - PyObject *arglist, *result; - PyObject *in, **op; - - ntot = nin+nout; - - for (j=0; j < ntot; j++) ptrs[j] = args[j]; - for(i=0; i> UFUNC_SHIFT_##NAME, \ - errobj, str, retstatus, first) < 0) \ - return -1; \ - }} - -/*UFUNC_API*/ -static int -PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int *first) -{ - int handle; - if (errmask && retstatus) { - HANDLEIT(DIVIDEBYZERO, "divide by zero"); - HANDLEIT(OVERFLOW, "overflow"); - HANDLEIT(UNDERFLOW, "underflow"); - HANDLEIT(INVALID, "invalid value"); - } - return 0; -} - -#undef HANDLEIT - - -/*UFUNC_API*/ -static int -PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) -{ - int retstatus; - - /* 1. check hardware flag --- this is platform dependent code */ - retstatus = PyUFunc_getfperr(); - return PyUFunc_handlefperr(errmask, errobj, retstatus, first); -} - - -/* Checking the status flag clears it */ -/*UFUNC_API*/ -static void -PyUFunc_clearfperr() -{ - PyUFunc_getfperr(); -} - - -#define NO_UFUNCLOOP 0 -#define ZERO_EL_REDUCELOOP 0 -#define ONE_UFUNCLOOP 1 -#define ONE_EL_REDUCELOOP 1 -#define NOBUFFER_UFUNCLOOP 2 -#define NOBUFFER_REDUCELOOP 2 -#define BUFFER_UFUNCLOOP 3 -#define BUFFER_REDUCELOOP 3 - - -static char -_lowest_type(char intype) -{ - switch(intype) { - /* case PyArray_BYTE */ - case PyArray_SHORT: - case PyArray_INT: - case PyArray_LONG: - case PyArray_LONGLONG: - return PyArray_BYTE; - /* case PyArray_UBYTE */ - case PyArray_USHORT: - case PyArray_UINT: - case PyArray_ULONG: - case PyArray_ULONGLONG: - return PyArray_UBYTE; - /* case PyArray_FLOAT:*/ - case PyArray_DOUBLE: - case PyArray_LONGDOUBLE: - return PyArray_FLOAT; - /* case PyArray_CFLOAT:*/ - case PyArray_CDOUBLE: - case PyArray_CLONGDOUBLE: - return PyArray_CFLOAT; - default: - return intype; - } -} - -static char *_types_msg = "function not supported for these types, " \ - "and can't coerce safely to supported types"; - -/* Called for non-NULL user-defined functions. - The object should be a CObject pointing to a linked-list of functions - storing the function, data, and signature of all user-defined functions. - There must be a match with the input argument types or an error - will occur. 
-*/ -static int -_find_matching_userloop(PyObject *obj, int *arg_types, - PyArray_SCALARKIND *scalars, - PyUFuncGenericFunction *function, void **data, - int nargs, int nin) -{ - PyUFunc_Loop1d *funcdata; - int i; - funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); - while (funcdata != NULL) { - for (i=0; iarg_types[i], - scalars[i])) - break; - } - if (i==nin) { /* match found */ - *function = funcdata->func; - *data = funcdata->data; - /* Make sure actual arg_types supported - by the loop are used */ - for (i=0; iarg_types[i]; - } - return 0; - } - funcdata = funcdata->next; - } - PyErr_SetString(PyExc_TypeError, _types_msg); - return -1; -} - -/* if only one type is specified then it is the "first" output data-type - and the first signature matching this output data-type is returned. - - if a tuple of types is specified then an exact match to the signature - is searched and it much match exactly or an error occurs -*/ -static int -extract_specified_loop(PyUFuncObject *self, int *arg_types, - PyUFuncGenericFunction *function, void **data, - PyObject *type_tup, int userdef) -{ - Py_ssize_t n=1; - int *rtypenums; - static char msg[] = "loop written to specified type(s) not found"; - PyArray_Descr *dtype; - int nargs; - int i, j; - int strtype=0; - - nargs = self->nargs; - - if (PyTuple_Check(type_tup)) { - n = PyTuple_GET_SIZE(type_tup); - if (n != 1 && n != nargs) { - PyErr_Format(PyExc_ValueError, - "a type-tuple must be specified " \ - "of length 1 or %d for %s", nargs, - self->name ? self->name : "(unknown)"); - return -1; - } - } - else if PyString_Check(type_tup) { - Py_ssize_t slen; - char *thestr; - slen = PyString_GET_SIZE(type_tup); - thestr = PyString_AS_STRING(type_tup); - for (i=0; i < slen-2; i++) { - if (thestr[i] == '-' && thestr[i+1] == '>') - break; - } - if (i < slen-2) { - strtype = 1; - n = slen-2; - if (i != self->nin || - slen-2-i != self->nout) { - PyErr_Format(PyExc_ValueError, - "a type-string for %s, " \ - "requires %d typecode(s) before " \ - "and %d after the -> sign", - self->name ? 
self->name : "(unknown)", - self->nin, self->nout); - return -1; - } - } - } - rtypenums = (int *)_pya_malloc(n*sizeof(int)); - if (rtypenums==NULL) { - PyErr_NoMemory(); - return -1; - } - - if (strtype) { - char *ptr; - ptr = PyString_AS_STRING(type_tup); - i = 0; - while (i < n) { - if (*ptr == '-' || *ptr == '>') { - ptr++; - continue; - } - dtype = PyArray_DescrFromType((int) *ptr); - if (dtype == NULL) goto fail; - rtypenums[i] = dtype->type_num; - Py_DECREF(dtype); - ptr++; i++; - } - } - else if (PyTuple_Check(type_tup)) { - for (i=0; itype_num; - Py_DECREF(dtype); - } - } - else { - if (PyArray_DescrConverter(type_tup, &dtype) == NPY_FAIL) { - goto fail; - } - rtypenums[0] = dtype->type_num; - Py_DECREF(dtype); - } - - if (userdef > 0) { /* search in the user-defined functions */ - PyObject *key, *obj; - PyUFunc_Loop1d *funcdata; - obj = NULL; - key = PyInt_FromLong((long) userdef); - if (key == NULL) goto fail; - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj == NULL) { - PyErr_SetString(PyExc_TypeError, - "user-defined type used in ufunc" \ - " with no registered loops"); - goto fail; - } - /* extract the correct function - data and argtypes - */ - funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); - while (funcdata != NULL) { - if (n != 1) { - for (i=0; iarg_types[i]) - break; - } - } - else if (rtypenums[0] == funcdata->arg_types[self->nin]) { - i = nargs; - } - else i = -1; - if (i == nargs) { - *function = funcdata->func; - *data = funcdata->data; - for (i=0; iarg_types[i]; - } - Py_DECREF(obj); - goto finish; - } - funcdata = funcdata->next; - } - PyErr_SetString(PyExc_TypeError, msg); - goto fail; - } - - /* look for match in self->functions */ - - for (j=0; jntypes; j++) { - if (n != 1) { - for (i=0; itypes[j*nargs + i]) - break; - } - } - else if (rtypenums[0] == self->types[j*nargs+self->nin]) { - i = nargs; - } - else i = -1; - if (i == nargs) { - *function = self->functions[j]; - *data = self->data[j]; - for (i=0; itypes[j*nargs+i]; - } - goto finish; - } - } - PyErr_SetString(PyExc_TypeError, msg); - - - fail: - _pya_free(rtypenums); - return -1; - - finish: - _pya_free(rtypenums); - return 0; - -} - - -/* Called to determine coercion - Can change arg_types. 
-*/ - -static int -select_types(PyUFuncObject *self, int *arg_types, - PyUFuncGenericFunction *function, void **data, - PyArray_SCALARKIND *scalars, - PyObject *typetup) -{ - int i, j; - char start_type; - int userdef=-1; - - if (self->userloops) { - for (i=0; inin; i++) { - if (PyTypeNum_ISUSERDEF(arg_types[i])) { - userdef = arg_types[i]; - break; - } - } - } - - if (typetup != NULL) - return extract_specified_loop(self, arg_types, function, data, - typetup, userdef); - - if (userdef > 0) { - PyObject *key, *obj; - int ret; - obj = NULL; - key = PyInt_FromLong((long) userdef); - if (key == NULL) return -1; - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj == NULL) { - PyErr_SetString(PyExc_TypeError, - "user-defined type used in ufunc" \ - " with no registered loops"); - return -1; - } - /* extract the correct function - data and argtypes - */ - ret = _find_matching_userloop(obj, arg_types, scalars, - function, data, self->nargs, - self->nin); - return ret; - } - - start_type = arg_types[0]; - /* If the first argument is a scalar we need to place - the start type as the lowest type in the class - */ - if (scalars[0] != PyArray_NOSCALAR) { - start_type = _lowest_type(start_type); - } - - i = 0; - while (intypes && start_type > self->types[i*self->nargs]) - i++; - - for(;intypes; i++) { - for(j=0; jnin; j++) { - if (!PyArray_CanCoerceScalar(arg_types[j], - self->types[i*self->nargs+j], - scalars[j])) - break; - } - if (j == self->nin) break; - } - if(i>=self->ntypes) { - PyErr_SetString(PyExc_TypeError, _types_msg); - return -1; - } - for(j=0; jnargs; j++) - arg_types[j] = self->types[i*self->nargs+j]; - - if (self->data) - *data = self->data[i]; - else - *data = NULL; - *function = self->functions[i]; - - return 0; -} - -#if USE_USE_DEFAULTS==1 -static int PyUFunc_NUM_NODEFAULTS=0; -#endif -static PyObject *PyUFunc_PYVALS_NAME=NULL; - - -static int -_extract_pyvals(PyObject *ref, char *name, int *bufsize, - int *errmask, PyObject **errobj) -{ - PyObject *retval; - - *errobj = NULL; - if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { - PyErr_Format(PyExc_TypeError, "%s must be a length 3 list.", - UFUNC_PYVALS_NAME); - return -1; - } - - *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); - if ((*bufsize == -1) && PyErr_Occurred()) return -1; - if ((*bufsize < PyArray_MIN_BUFSIZE) || \ - (*bufsize > PyArray_MAX_BUFSIZE) || \ - (*bufsize % 16 != 0)) { - PyErr_Format(PyExc_ValueError, - "buffer size (%d) is not in range " - "(%"INTP_FMT" - %"INTP_FMT") or not a multiple of 16", - *bufsize, (intp) PyArray_MIN_BUFSIZE, - (intp) PyArray_MAX_BUFSIZE); - return -1; - } - - *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); - if (*errmask < 0) { - if (PyErr_Occurred()) return -1; - PyErr_Format(PyExc_ValueError, \ - "invalid error mask (%d)", - *errmask); - return -1; - } - - retval = PyList_GET_ITEM(ref, 2); - if (retval != Py_None && !PyCallable_Check(retval)) { - PyObject *temp; - temp = PyObject_GetAttrString(retval, "write"); - if (temp == NULL || !PyCallable_Check(temp)) { - PyErr_SetString(PyExc_TypeError, - "python object must be callable or have " \ - "a callable write method"); - Py_XDECREF(temp); - return -1; - } - Py_DECREF(temp); - } - - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - retval); - if (*errobj == NULL) return -1; - - return 0; -} - - - -/*UFUNC_API*/ -static int -PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject **errobj) -{ - PyObject *thedict; - PyObject *ref=NULL; - -#if USE_USE_DEFAULTS==1 - if 
(PyUFunc_NUM_NODEFAULTS != 0) { -#endif - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = \ - PyString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - ref = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME); -#if USE_USE_DEFAULTS==1 - } -#endif - if (ref == NULL) { - *errmask = UFUNC_ERR_DEFAULT; - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - Py_None); - *bufsize = PyArray_BUFSIZE; - return 0; - } - return _extract_pyvals(ref, name, bufsize, errmask, errobj); -} - -/* Create copies for any arrays that are less than loop->bufsize - in total size and are mis-behaved or in need - of casting. -*/ - -static int -_create_copies(PyUFuncLoopObject *loop, int *arg_types, PyArrayObject **mps) -{ - int nin = loop->ufunc->nin; - int i; - intp size; - PyObject *new; - PyArray_Descr *ntype; - PyArray_Descr *atype; - - for (i=0; idescr; - atype = PyArray_DescrFromType(arg_types[i]); - if (PyArray_EquivTypes(atype, ntype)) { - arg_types[i] = ntype->type_num; - } - Py_DECREF(atype); - } - if (size < loop->bufsize) { - if (!(PyArray_ISBEHAVED_RO(mps[i])) || \ - PyArray_TYPE(mps[i]) != arg_types[i]) { - ntype = PyArray_DescrFromType(arg_types[i]); - new = PyArray_FromAny((PyObject *)mps[i], - ntype, 0, 0, - FORCECAST | ALIGNED, NULL); - if (new == NULL) return -1; - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)new; - } - } - } - - return 0; -} - -#define _GETATTR_(str, rstr) if (strcmp(name, #str) == 0) { \ - return PyObject_HasAttrString(op, "__" #rstr "__");} - -static int -_has_reflected_op(PyObject *op, char *name) -{ - _GETATTR_(add, radd) - _GETATTR_(subtract, rsub) - _GETATTR_(multiply, rmul) - _GETATTR_(divide, rdiv) - _GETATTR_(true_divide, rtruediv) - _GETATTR_(floor_divide, rfloordiv) - _GETATTR_(remainder, rmod) - _GETATTR_(power, rpow) - _GETATTR_(left_shift, rrlshift) - _GETATTR_(right_shift, rrshift) - _GETATTR_(bitwise_and, rand) - _GETATTR_(bitwise_xor, rxor) - _GETATTR_(bitwise_or, ror) - return 0; -} - -#undef _GETATTR_ - -static Py_ssize_t -construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps, - PyObject *typetup) -{ - Py_ssize_t nargs; - int i; - int arg_types[NPY_MAXARGS]; - PyArray_SCALARKIND scalars[NPY_MAXARGS]; - PyArray_SCALARKIND maxarrkind, maxsckind, new; - PyUFuncObject *self=loop->ufunc; - Bool allscalars=TRUE; - PyTypeObject *subtype=&PyArray_Type; - PyObject *context=NULL; - PyObject *obj; - int flexible=0; - int object=0; - - /* Check number of arguments */ - nargs = PyTuple_Size(args); - if ((nargs < self->nin) || (nargs > self->nargs)) { - PyErr_SetString(PyExc_ValueError, - "invalid number of arguments"); - return -1; - } - - /* Get each input argument */ - maxarrkind = PyArray_NOSCALAR; - maxsckind = PyArray_NOSCALAR; - for (i=0; inin; i++) { - obj = PyTuple_GET_ITEM(args,i); - if (!PyArray_Check(obj) && !PyArray_IsScalar(obj, Generic)) { - context = Py_BuildValue("OOi", self, args, i); - } - else context = NULL; - mps[i] = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, context); - Py_XDECREF(context); - if (mps[i] == NULL) return -1; - arg_types[i] = PyArray_TYPE(mps[i]); - if (!flexible && PyTypeNum_ISFLEXIBLE(arg_types[i])) { - flexible = 1; - } - if (!object && PyTypeNum_ISOBJECT(arg_types[i])) { - object = 1; - } - /* - fprintf(stderr, "array %d has reference %d\n", i, - (mps[i])->ob_refcnt); - */ - - /* Scalars are 0-dimensional arrays - at this point - */ - - /* We need to keep track of whether or not scalars - are 
mixed with arrays of different kinds. - */ - - if (mps[i]->nd > 0) { - scalars[i] = PyArray_NOSCALAR; - allscalars=FALSE; - new = PyArray_ScalarKind(arg_types[i], NULL); - maxarrkind = NPY_MAX(new, maxarrkind); - } - else { - scalars[i] = PyArray_ScalarKind(arg_types[i], &(mps[i])); - maxsckind = NPY_MAX(scalars[i], maxsckind); - } - } - - if (flexible && !object) { - loop->notimplemented = 1; - return nargs; - } - - /* If everything is a scalar, or scalars mixed with arrays of - different kinds of lesser types then use normal coercion rules */ - if (allscalars || (maxsckind > maxarrkind)) { - for (i=0; inin; i++) { - scalars[i] = PyArray_NOSCALAR; - } - } - - /* Select an appropriate function for these argument types. */ - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, typetup) == -1) - return -1; - - /* FAIL with NotImplemented if the other object has - the __r__ method and has __array_priority__ as - an attribute (signalling it can handle ndarray's) - and is not already an ndarray - */ - if ((arg_types[1] == PyArray_OBJECT) && \ - (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { - PyObject *_obj = PyTuple_GET_ITEM(args, 1); - if (!PyArray_CheckExact(_obj) && \ - PyObject_HasAttrString(_obj, "__array_priority__") && \ - _has_reflected_op(_obj, loop->ufunc->name)) { - loop->notimplemented = 1; - return nargs; - } - } - - /* Create copies for some of the arrays if they are small - enough and not already contiguous */ - if (_create_copies(loop, arg_types, mps) < 0) return -1; - - /* Create Iterators for the Inputs */ - for (i=0; inin; i++) { - loop->iters[i] = (PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)mps[i]); - if (loop->iters[i] == NULL) return -1; - } - - /* Broadcast the result */ - loop->numiter = self->nin; - if (PyArray_Broadcast((PyArrayMultiIterObject *)loop) < 0) - return -1; - - /* Get any return arguments */ - for (i=self->nin; ind != loop->nd || - !PyArray_CompareLists(mps[i]->dimensions, - loop->dimensions, loop->nd)) { - PyErr_SetString(PyExc_ValueError, - "invalid return array shape"); - Py_DECREF(mps[i]); - mps[i] = NULL; - return -1; - } - if (!PyArray_ISWRITEABLE(mps[i])) { - PyErr_SetString(PyExc_ValueError, - "return array is not writeable"); - Py_DECREF(mps[i]); - mps[i] = NULL; - return -1; - } - } - - /* construct any missing return arrays and make output iterators */ - - for (i=self->nin; inargs; i++) { - PyArray_Descr *ntype; - - if (mps[i] == NULL) { - mps[i] = (PyArrayObject *)PyArray_New(subtype, - loop->nd, - loop->dimensions, - arg_types[i], - NULL, NULL, - 0, 0, NULL); - if (mps[i] == NULL) return -1; - } - - /* reset types for outputs that are equivalent - -- no sense casting uselessly - */ - else { - if (mps[i]->descr->type_num != arg_types[i]) { - PyArray_Descr *atype; - ntype = mps[i]->descr; - atype = PyArray_DescrFromType(arg_types[i]); - if (PyArray_EquivTypes(atype, ntype)) { - arg_types[i] = ntype->type_num; - } - Py_DECREF(atype); - } - - /* still not the same -- or will we have to use buffers?*/ - if (mps[i]->descr->type_num != arg_types[i] || - !PyArray_ISBEHAVED_RO(mps[i])) { - if (loop->size < loop->bufsize) { - PyObject *new; - /* Copy the array to a temporary copy - and set the UPDATEIFCOPY flag - */ - ntype = PyArray_DescrFromType(arg_types[i]); - new = PyArray_FromAny((PyObject *)mps[i], - ntype, 0, 0, - FORCECAST | ALIGNED | - UPDATEIFCOPY, NULL); - if (new == NULL) return -1; - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)new; - } - } - } - - loop->iters[i] = (PyArrayIterObject 
*) \ - PyArray_IterNew((PyObject *)mps[i]); - if (loop->iters[i] == NULL) return -1; - } - - - /* If any of different type, or misaligned or swapped - then must use buffers */ - - loop->bufcnt = 0; - loop->obj = 0; - - /* Determine looping method needed */ - loop->meth = NO_UFUNCLOOP; - - if (loop->size == 0) return nargs; - - - for (i=0; inargs; i++) { - loop->needbuffer[i] = 0; - if (arg_types[i] != mps[i]->descr->type_num || - !PyArray_ISBEHAVED_RO(mps[i])) { - loop->meth = BUFFER_UFUNCLOOP; - loop->needbuffer[i] = 1; - } - if (!loop->obj && ((mps[i]->descr->type_num == PyArray_OBJECT) || - (arg_types[i] == PyArray_OBJECT))) { - loop->obj = 1; - } - } - - if (loop->meth == NO_UFUNCLOOP) { - - loop->meth = ONE_UFUNCLOOP; - - /* All correct type and BEHAVED */ - /* Check for non-uniform stridedness */ - - for (i=0; inargs; i++) { - if (!(loop->iters[i]->contiguous)) { - /* may still have uniform stride - if (broadcated result) <= 1-d */ - if (mps[i]->nd != 0 && \ - (loop->iters[i]->nd_m1 > 0)) { - loop->meth = NOBUFFER_UFUNCLOOP; - break; - } - } - } - if (loop->meth == ONE_UFUNCLOOP) { - for (i=0; inargs; i++) { - loop->bufptr[i] = mps[i]->data; - } - } - } - - loop->numiter = self->nargs; - - /* Fill in steps */ - if (loop->meth != ONE_UFUNCLOOP) { - int ldim; - intp minsum; - intp maxdim; - PyArrayIterObject *it; - intp stride_sum[NPY_MAXDIMS]; - int j; - - /* Fix iterators */ - - /* Optimize axis the iteration takes place over - - The first thought was to have the loop go - over the largest dimension to minimize the number of loops - - However, on processors with slow memory bus and cache, - the slowest loops occur when the memory access occurs for - large strides. - - Thus, choose the axis for which strides of the last iterator is - smallest but non-zero. - */ - - for (i=0; ind; i++) { - stride_sum[i] = 0; - for (j=0; jnumiter; j++) { - stride_sum[i] += loop->iters[j]->strides[i]; - } - } - - ldim = loop->nd - 1; - minsum = stride_sum[loop->nd-1]; - for (i=loop->nd - 2; i>=0; i--) { - if (stride_sum[i] < minsum ) { - ldim = i; - minsum = stride_sum[i]; - } - } - - maxdim = loop->dimensions[ldim]; - loop->size /= maxdim; - loop->bufcnt = maxdim; - loop->lastdim = ldim; - - /* Fix the iterators so the inner loop occurs over the - largest dimensions -- This can be done by - setting the size to 1 in that dimension - (just in the iterators) - */ - - for (i=0; inumiter; i++) { - it = loop->iters[i]; - it->contiguous = 0; - it->size /= (it->dims_m1[ldim]+1); - it->dims_m1[ldim] = 0; - it->backstrides[ldim] = 0; - - /* (won't fix factors because we - don't use PyArray_ITER_GOTO1D - so don't change them) */ - - /* Set the steps to the strides in that dimension */ - loop->steps[i] = it->strides[ldim]; - } - - /* fix up steps where we will be copying data to - buffers and calculate the ninnerloops and leftover - values -- if step size is already zero that is not changed... 
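A standalone sketch of the two calculations described above, with made-up strides, dimensions, and buffer size: the inner-loop axis is the one whose summed strides over all iterators are smallest, and that axis is then split into bufsize-sized pieces plus a leftover piece:

    #include <stdio.h>

    int main(void)
    {
        long strides[2][3] = { { 800, 80, 8 }, { 0, 80, 8 } };  /* two iterators */
        long dims[3] = { 10, 10, 100 };
        long stride_sum[3];
        int nd = 3, numiter = 2, i, j, ldim;
        long maxdim, bufsize = 32, ninnerloops, leftover;

        /* pick the inner-loop axis: smallest summed stride */
        for (i = 0; i < nd; i++) {
            stride_sum[i] = 0;
            for (j = 0; j < numiter; j++)
                stride_sum[i] += strides[j][i];
        }
        ldim = nd - 1;
        for (i = nd - 2; i >= 0; i--)
            if (stride_sum[i] < stride_sum[ldim])
                ldim = i;

        /* split that axis into buffer-sized chunks plus a leftover chunk */
        maxdim = dims[ldim];
        ninnerloops = maxdim / bufsize + 1;
        leftover = maxdim % bufsize;
        printf("axis %d, %ld inner loops, leftover %ld\n",
               ldim, ninnerloops, leftover);    /* axis 2, 4 inner loops, leftover 4 */
        return 0;
    }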
- */ - if (loop->meth == BUFFER_UFUNCLOOP) { - loop->leftover = maxdim % loop->bufsize; - loop->ninnerloops = (maxdim / loop->bufsize) + 1; - for (i=0; inargs; i++) { - if (loop->needbuffer[i] && loop->steps[i]) { - loop->steps[i] = mps[i]->descr->elsize; - } - /* These are changed later if casting is needed */ - } - } - } - else { /* uniformly-strided case ONE_UFUNCLOOP */ - for (i=0; inargs; i++) { - if (PyArray_SIZE(mps[i]) == 1) - loop->steps[i] = 0; - else - loop->steps[i] = mps[i]->strides[mps[i]->nd-1]; - } - } - - - /* Finally, create memory for buffers if we need them */ - - /* buffers for scalars are specially made small -- scalars are - not copied multiple times */ - if (loop->meth == BUFFER_UFUNCLOOP) { - int cnt = 0, cntcast = 0; /* keeps track of bytes to allocate */ - int scnt = 0, scntcast = 0; - char *castptr; - char *bufptr; - int last_was_scalar=0; - int last_cast_was_scalar=0; - int oldbufsize=0; - int oldsize=0; - int scbufsize = 4*sizeof(double); - int memsize; - PyArray_Descr *descr; - - /* compute the element size */ - for (i=0; inargs;i++) { - if (!loop->needbuffer[i]) continue; - if (arg_types[i] != mps[i]->descr->type_num) { - descr = PyArray_DescrFromType(arg_types[i]); - if (loop->steps[i]) - cntcast += descr->elsize; - else - scntcast += descr->elsize; - if (i < self->nin) { - loop->cast[i] = \ - PyArray_GetCastFunc(mps[i]->descr, - arg_types[i]); - } - else { - loop->cast[i] = PyArray_GetCastFunc \ - (descr, mps[i]->descr->type_num); - } - Py_DECREF(descr); - if (!loop->cast[i]) return -1; - } - loop->swap[i] = !(PyArray_ISNOTSWAPPED(mps[i])); - if (loop->steps[i]) - cnt += mps[i]->descr->elsize; - else - scnt += mps[i]->descr->elsize; - } - memsize = loop->bufsize*(cnt+cntcast) + scbufsize*(scnt+scntcast); - loop->buffer[0] = PyDataMem_NEW(memsize); - - /* fprintf(stderr, "Allocated buffer at %p of size %d, cnt=%d, cntcast=%d\n", loop->buffer[0], loop->bufsize * (cnt + cntcast), cnt, cntcast); */ - - if (loop->buffer[0] == NULL) {PyErr_NoMemory(); return -1;} - if (loop->obj) memset(loop->buffer[0], 0, memsize); - castptr = loop->buffer[0] + loop->bufsize*cnt + scbufsize*scnt; - bufptr = loop->buffer[0]; - loop->objfunc = 0; - for (i=0; inargs; i++) { - if (!loop->needbuffer[i]) continue; - loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : \ - loop->bufsize)*oldbufsize; - last_was_scalar = (loop->steps[i] == 0); - bufptr = loop->buffer[i]; - oldbufsize = mps[i]->descr->elsize; - /* fprintf(stderr, "buffer[%d] = %p\n", i, loop->buffer[i]); */ - if (loop->cast[i]) { - PyArray_Descr *descr; - loop->castbuf[i] = castptr + (last_cast_was_scalar ? 
scbufsize : \ - loop->bufsize)*oldsize; - last_cast_was_scalar = last_was_scalar; - /* fprintf(stderr, "castbuf[%d] = %p\n", i, loop->castbuf[i]); */ - descr = PyArray_DescrFromType(arg_types[i]); - oldsize = descr->elsize; - Py_DECREF(descr); - loop->bufptr[i] = loop->castbuf[i]; - castptr = loop->castbuf[i]; - if (loop->steps[i]) - loop->steps[i] = oldsize; - } - else { - loop->bufptr[i] = loop->buffer[i]; - } - if (!loop->objfunc && loop->obj) { - if (arg_types[i] == PyArray_OBJECT) { - loop->objfunc = 1; - } - } - } - } - return nargs; -} - -static void -ufuncreduce_dealloc(PyUFuncReduceObject *self) -{ - if (self->ufunc) { - Py_XDECREF(self->it); - Py_XDECREF(self->rit); - Py_XDECREF(self->ret); - Py_XDECREF(self->errobj); - Py_XDECREF(self->decref); - if (self->buffer) PyDataMem_FREE(self->buffer); - Py_DECREF(self->ufunc); - } - _pya_free(self); -} - -static void -ufuncloop_dealloc(PyUFuncLoopObject *self) -{ - int i; - - if (self->ufunc != NULL) { - for (i=0; iufunc->nargs; i++) - Py_XDECREF(self->iters[i]); - if (self->buffer[0]) PyDataMem_FREE(self->buffer[0]); - Py_XDECREF(self->errobj); - Py_DECREF(self->ufunc); - } - _pya_free(self); -} - -static PyUFuncLoopObject * -construct_loop(PyUFuncObject *self, PyObject *args, PyObject *kwds, PyArrayObject **mps) -{ - PyUFuncLoopObject *loop; - int i; - PyObject *typetup=NULL; - PyObject *extobj=NULL; - char *name; - - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return NULL; - } - if ((loop = _pya_malloc(sizeof(PyUFuncLoopObject)))==NULL) { - PyErr_NoMemory(); return loop; - } - - loop->index = 0; - loop->ufunc = self; - Py_INCREF(self); - loop->buffer[0] = NULL; - for (i=0; inargs; i++) { - loop->iters[i] = NULL; - loop->cast[i] = NULL; - } - loop->errobj = NULL; - loop->notimplemented = 0; - loop->first = 1; - - name = self->name ? 
self->name : ""; - - /* Extract sig= keyword and - extobj= keyword if present - Raise an error if anything else present in the keyword dictionary - */ - if (kwds != NULL) { - PyObject *key, *value; - Py_ssize_t pos=0; - while (PyDict_Next(kwds, &pos, &key, &value)) { - if (!PyString_Check(key)) { - PyErr_SetString(PyExc_TypeError, - "invalid keyword"); - goto fail; - } - if (strncmp(PyString_AS_STRING(key),"extobj",6) == 0) { - extobj = value; - } - else if (strncmp(PyString_AS_STRING(key),"sig",5)==0) { - typetup = value; - } - else { - PyErr_Format(PyExc_TypeError, - "'%s' is an invalid keyword " \ - "to %s", - PyString_AS_STRING(key), name); - goto fail; - } - } - } - - if (extobj == NULL) { - if (PyUFunc_GetPyValues(name, - &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) goto fail; - } - else { - if (_extract_pyvals(extobj, name, - &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) goto fail; - } - - /* Setup the arrays */ - if (construct_arrays(loop, args, mps, typetup) < 0) goto fail; - - PyUFunc_clearfperr(); - - return loop; - - fail: - ufuncloop_dealloc(loop); - return NULL; -} - - -/* - static void - _printbytebuf(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing byte buffer %d\n", bufnum); - for (i=0; ibufcnt; i++) { - fprintf(stderr, " %d\n", *(((byte *)(loop->buffer[bufnum]))+i)); - } - } - - static void - _printlongbuf(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing long buffer %d\n", bufnum); - for (i=0; ibufcnt; i++) { - fprintf(stderr, " %ld\n", *(((long *)(loop->buffer[bufnum]))+i)); - } - } - - static void - _printlongbufptr(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing long buffer %d\n", bufnum); - for (i=0; ibufcnt; i++) { - fprintf(stderr, " %ld\n", *(((long *)(loop->bufptr[bufnum]))+i)); - } - } - - - - static void - _printcastbuf(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing long buffer %d\n", bufnum); - for (i=0; ibufcnt; i++) { - fprintf(stderr, " %ld\n", *(((long *)(loop->castbuf[bufnum]))+i)); - } - } - -*/ - - - - -/* currently generic ufuncs cannot be built for use on flexible arrays. - - The cast functions in the generic loop would need to be fixed to pass - in something besides NULL, NULL. - - Also the underlying ufunc loops would not know the element-size unless - that was passed in as data (which could be arranged). - -*/ - -/* This generic function is called with the ufunc object, the arguments to it, - and an array of (pointers to) PyArrayObjects which are NULL. The - arguments are parsed and placed in mps in construct_loop (construct_arrays) -*/ - -/*UFUNC_API*/ -static int -PyUFunc_GenericFunction(PyUFuncObject *self, PyObject *args, PyObject *kwds, - PyArrayObject **mps) -{ - PyUFuncLoopObject *loop; - int i; - NPY_BEGIN_THREADS_DEF - - if (!(loop = construct_loop(self, args, kwds, mps))) return -1; - if (loop->notimplemented) {ufuncloop_dealloc(loop); return -2;} - - NPY_LOOP_BEGIN_THREADS - - switch(loop->meth) { - case ONE_UFUNCLOOP: - /* Everything is contiguous, notswapped, aligned, - and of the right type. -- Fastest. - Or if not contiguous, then a single-stride - increment moves through the entire array. - */ - /*fprintf(stderr, "ONE...%d\n", loop->size);*/ - loop->function((char **)loop->bufptr, &(loop->size), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - break; - case NOBUFFER_UFUNCLOOP: - /* Everything is notswapped, aligned and of the - right type but not contiguous. 
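All three cases dispatch through loop->function, which has the standard 1-d inner-loop shape: an array of data pointers, the loop length, and a byte stride per argument. A minimal standalone sketch with a made-up double-precision add loop (long stands in for intp here):

    #include <stdio.h>

    typedef long intp;    /* stand-in for NumPy's intp in this sketch */

    /* args[i] is the data pointer and steps[i] the byte stride of
       argument i; dimensions[0] is the number of elements to process. */
    static void double_add(char **args, intp *dimensions, intp *steps, void *func)
    {
        intp i, n = dimensions[0];
        char *in1 = args[0], *in2 = args[1], *out = args[2];
        for (i = 0; i < n; i++) {
            *(double *)out = *(double *)in1 + *(double *)in2;
            in1 += steps[0];
            in2 += steps[1];
            out += steps[2];
        }
        (void)func;
    }

    int main(void)
    {
        double a[3] = {1, 2, 3}, b[3] = {10, 20, 30}, c[3];
        char *args[3] = { (char *)a, (char *)b, (char *)c };
        intp dims[1] = { 3 };
        intp steps[3] = { sizeof(double), sizeof(double), sizeof(double) };
        double_add(args, dims, steps, NULL);
        printf("%g %g %g\n", c[0], c[1], c[2]);   /* 11 22 33 */
        return 0;
    }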
-- Almost as fast. - */ - /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ - while (loop->index < loop->size) { - for (i=0; inargs; i++) - loop->bufptr[i] = loop->iters[i]->dataptr; - - loop->function((char **)loop->bufptr, &(loop->bufcnt), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - /* Adjust loop pointers */ - - for (i=0; inargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: { - PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; - PyArrayIterObject **iters=loop->iters; - int *swap=loop->swap; - char **dptr=loop->dptr; - int mpselsize[NPY_MAXARGS]; - intp laststrides[NPY_MAXARGS]; - int fastmemcpy[NPY_MAXARGS]; - int *needbuffer=loop->needbuffer; - intp index=loop->index, size=loop->size; - int bufsize; - intp bufcnt; - int copysizes[NPY_MAXARGS]; - char **bufptr = loop->bufptr; - char **buffer = loop->buffer; - char **castbuf = loop->castbuf; - intp *steps = loop->steps; - char *tptr[NPY_MAXARGS]; - int ninnerloops = loop->ninnerloops; - Bool pyobject[NPY_MAXARGS]; - int datasize[NPY_MAXARGS]; - int j, k, stopcondition; - char *myptr1, *myptr2; - - - for (i=0; inargs; i++) { - copyswapn[i] = mps[i]->descr->f->copyswapn; - mpselsize[i] = mps[i]->descr->elsize; - pyobject[i] = (loop->obj && \ - (mps[i]->descr->type_num == PyArray_OBJECT)); - laststrides[i] = iters[i]->strides[loop->lastdim]; - if (steps[i] && laststrides[i] != mpselsize[i]) fastmemcpy[i] = 0; - else fastmemcpy[i] = 1; - } - /* Do generic buffered looping here (works for any kind of - arrays -- some need buffers, some don't. - */ - - /* New algorithm: N is the largest dimension. B is the buffer-size. - quotient is loop->ninnerloops-1 - remainder is loop->leftover - - Compute N = quotient * B + remainder. - quotient = N / B # integer math - (store quotient + 1) as the number of innerloops - remainder = N % B # integer remainder - - On the inner-dimension we will have (quotient + 1) loops where - the size of the inner function is B for all but the last when the niter size is - remainder. - - So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is - replaced with... - - for(i=0; isize, - loop->ninnerloops, loop->leftover); - */ - /* - for (i=0; inargs; i++) { - fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, - iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); - } - */ - - stopcondition = ninnerloops; - if (loop->leftover == 0) stopcondition--; - while (index < size) { - bufsize=loop->bufsize; - for (i=0; inargs; i++) { - tptr[i] = loop->iters[i]->dataptr; - if (needbuffer[i]) { - dptr[i] = bufptr[i]; - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - else { - dptr[i] = tptr[i]; - } - } - - /* This is the inner function over the last dimension */ - for (k=1; k<=stopcondition; k++) { - if (k==ninnerloops) { - bufsize = loop->leftover; - for (i=0; inargs;i++) { - if (!needbuffer[i]) continue; - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - } - - for (i=0; inin; i++) { - if (!needbuffer[i]) continue; - if (fastmemcpy[i]) - memcpy(buffer[i], tptr[i], - copysizes[i]); - else { - myptr1 = buffer[i]; - myptr2 = tptr[i]; - for (j=0; jcast[i]) { - /* fprintf(stderr, "casting... 
%d, %p %p\n", i, buffer[i]); */ - loop->cast[i](buffer[i], - castbuf[i], - (intp) datasize[i], - NULL, NULL); - } - } - - bufcnt = (intp) bufsize; - loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); - - for (i=self->nin; inargs; i++) { - if (!needbuffer[i]) continue; - if (loop->cast[i]) { - /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ - loop->cast[i](castbuf[i], - buffer[i], - (intp) datasize[i], - NULL, NULL); - } - if (swap[i]) { - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* copy back to output arrays */ - /* decref what's already there for object arrays */ - if (pyobject[i]) { - myptr1 = tptr[i]; - for (j=0; jnargs; i++) { - tptr[i] += bufsize * laststrides[i]; - if (!needbuffer[i]) dptr[i] = tptr[i]; - } - } - /* end inner function over last dimension */ - - if (loop->objfunc) { /* DECREF castbuf when underlying function used object arrays - and casting was needed to get to object arrays */ - for (i=0; inargs; i++) { - if (loop->cast[i]) { - if (steps[i] == 0) { - Py_XDECREF(*((PyObject **)castbuf[i])); - } - else { - int size = loop->bufsize; - PyObject **objptr = (PyObject **)castbuf[i]; - /* size is loop->bufsize unless there - was only one loop */ - if (ninnerloops == 1) \ - size = loop->leftover; - - for (j=0; jnargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - index++; - } - } - } - - NPY_LOOP_END_THREADS - - ufuncloop_dealloc(loop); - return 0; - - fail: - NPY_LOOP_END_THREADS - - if (loop) ufuncloop_dealloc(loop); - return -1; -} - -static PyArrayObject * -_getidentity(PyUFuncObject *self, int otype, char *str) -{ - PyObject *obj, *arr; - PyArray_Descr *typecode; - - if (self->identity == PyUFunc_None) { - PyErr_Format(PyExc_ValueError, - "zero-size array to ufunc.%s " \ - "without identity", str); - return NULL; - } - if (self->identity == PyUFunc_One) { - obj = PyInt_FromLong((long) 1); - } else { - obj = PyInt_FromLong((long) 0); - } - - typecode = PyArray_DescrFromType(otype); - arr = PyArray_FromAny(obj, typecode, 0, 0, CARRAY, NULL); - Py_DECREF(obj); - return (PyArrayObject *)arr; -} - -static int -_create_reduce_copy(PyUFuncReduceObject *loop, PyArrayObject **arr, int rtype) -{ - intp maxsize; - PyObject *new; - PyArray_Descr *ntype; - - maxsize = PyArray_SIZE(*arr); - - if (maxsize < loop->bufsize) { - if (!(PyArray_ISBEHAVED_RO(*arr)) || \ - PyArray_TYPE(*arr) != rtype) { - ntype = PyArray_DescrFromType(rtype); - new = PyArray_FromAny((PyObject *)(*arr), - ntype, 0, 0, - FORCECAST | ALIGNED, NULL); - if (new == NULL) return -1; - *arr = (PyArrayObject *)new; - loop->decref = new; - } - } - - /* Don't decref *arr before re-assigning - because it was not going to be DECREF'd anyway. - - If a copy is made, then the copy will be removed - on deallocation of the loop structure by setting - loop->decref. 
- */ - - return 0; -} - -static PyUFuncReduceObject * -construct_reduce(PyUFuncObject *self, PyArrayObject **arr, PyArrayObject *out, - int axis, int otype, int operation, intp ind_size, char *str) -{ - PyUFuncReduceObject *loop; - PyArrayObject *idarr; - PyArrayObject *aar; - intp loop_i[MAX_DIMS], outsize=0; - int arg_types[3]; - PyArray_SCALARKIND scalars[3] = {PyArray_NOSCALAR, PyArray_NOSCALAR, - PyArray_NOSCALAR}; - int i, j, nd; - int flags; - /* Reduce type is the type requested of the input - during reduction */ - - nd = (*arr)->nd; - arg_types[0] = otype; - arg_types[1] = otype; - arg_types[2] = otype; - if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject)))==NULL) { - PyErr_NoMemory(); return loop; - } - - loop->retbase=0; - loop->swap = 0; - loop->index = 0; - loop->ufunc = self; - Py_INCREF(self); - loop->cast = NULL; - loop->buffer = NULL; - loop->ret = NULL; - loop->it = NULL; - loop->rit = NULL; - loop->errobj = NULL; - loop->first = 1; - loop->decref=NULL; - loop->N = (*arr)->dimensions[axis]; - loop->instrides = (*arr)->strides[axis]; - - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) goto fail; - - /* output type may change -- if it does - reduction is forced into that type - and we need to select the reduction function again - */ - if (otype != arg_types[2]) { - otype = arg_types[2]; - arg_types[0] = otype; - arg_types[1] = otype; - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) - goto fail; - } - - /* get looping parameters from Python */ - if (PyUFunc_GetPyValues(str, &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) goto fail; - - /* Make copy if misbehaved or not otype for small arrays */ - if (_create_reduce_copy(loop, arr, otype) < 0) goto fail; - aar = *arr; - - if (loop->N == 0) { - loop->meth = ZERO_EL_REDUCELOOP; - } - else if (PyArray_ISBEHAVED_RO(aar) && \ - otype == (aar)->descr->type_num) { - if (loop->N == 1) { - loop->meth = ONE_EL_REDUCELOOP; - } - else { - loop->meth = NOBUFFER_UFUNCLOOP; - loop->steps[1] = (aar)->strides[axis]; - loop->N -= 1; - } - } - else { - loop->meth = BUFFER_UFUNCLOOP; - loop->swap = !(PyArray_ISNOTSWAPPED(aar)); - } - - /* Determine if object arrays are involved */ - if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) - loop->obj = 1; - else - loop->obj = 0; - - if (loop->meth == ZERO_EL_REDUCELOOP) { - idarr = _getidentity(self, otype, str); - if (idarr == NULL) goto fail; - if (idarr->descr->elsize > UFUNC_MAXIDENTITY) { - PyErr_Format(PyExc_RuntimeError, - "UFUNC_MAXIDENTITY (%d)" \ - " is too small (needs to be at least %d)", - UFUNC_MAXIDENTITY, idarr->descr->elsize); - Py_DECREF(idarr); - goto fail; - } - memcpy(loop->idptr, idarr->data, idarr->descr->elsize); - Py_DECREF(idarr); - } - - /* Construct return array */ - flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; - switch(operation) { - case UFUNC_REDUCE: - for (j=0, i=0; idimensions[i]; - - } - if (out == NULL) { - loop->ret = (PyArrayObject *) \ - PyArray_New(aar->ob_type, aar->nd-1, loop_i, - otype, NULL, NULL, 0, 0, - (PyObject *)aar); - } - else { - outsize = PyArray_MultiplyList(loop_i, aar->nd-1); - } - break; - case UFUNC_ACCUMULATE: - if (out == NULL) { - loop->ret = (PyArrayObject *) \ - PyArray_New(aar->ob_type, aar->nd, aar->dimensions, - otype, NULL, NULL, 0, 0, (PyObject *)aar); - } - else { - outsize = PyArray_MultiplyList(aar->dimensions, aar->nd); - } - break; - case UFUNC_REDUCEAT: - memcpy(loop_i, 
aar->dimensions, nd*sizeof(intp)); - /* Index is 1-d array */ - loop_i[axis] = ind_size; - if (out == NULL) { - loop->ret = (PyArrayObject *) \ - PyArray_New(aar->ob_type, aar->nd, loop_i, otype, - NULL, NULL, 0, 0, (PyObject *)aar); - } - else { - outsize = PyArray_MultiplyList(loop_i, aar->nd); - } - if (ind_size == 0) { - loop->meth = ZERO_EL_REDUCELOOP; - return loop; - } - if (loop->meth == ONE_EL_REDUCELOOP) - loop->meth = NOBUFFER_REDUCELOOP; - break; - } - if (out) { - if (PyArray_SIZE(out) != outsize) { - PyErr_SetString(PyExc_ValueError, - "wrong shape for output"); - goto fail; - } - loop->ret = (PyArrayObject *) \ - PyArray_FromArray(out, PyArray_DescrFromType(otype), - flags); - if (loop->ret && loop->ret != out) { - loop->retbase = 1; - } - } - if (loop->ret == NULL) goto fail; - loop->insize = aar->descr->elsize; - loop->outsize = loop->ret->descr->elsize; - loop->bufptr[0] = loop->ret->data; - - if (loop->meth == ZERO_EL_REDUCELOOP) { - loop->size = PyArray_SIZE(loop->ret); - return loop; - } - - loop->it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)aar); - if (loop->it == NULL) return NULL; - - if (loop->meth == ONE_EL_REDUCELOOP) { - loop->size = loop->it->size; - return loop; - } - - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - - loop->it->contiguous = 0; - loop->it->size /= (loop->it->dims_m1[axis]+1); - loop->it->dims_m1[axis] = 0; - loop->it->backstrides[axis] = 0; - - - loop->size = loop->it->size; - - if (operation == UFUNC_REDUCE) { - loop->steps[0] = 0; - } - else { - loop->rit = (PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)(loop->ret)); - if (loop->rit == NULL) return NULL; - - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - - loop->rit->contiguous = 0; - loop->rit->size /= (loop->rit->dims_m1[axis]+1); - loop->rit->dims_m1[axis] = 0; - loop->rit->backstrides[axis] = 0; - - if (operation == UFUNC_ACCUMULATE) - loop->steps[0] = loop->ret->strides[axis]; - else - loop->steps[0] = 0; - } - loop->steps[2] = loop->steps[0]; - loop->bufptr[2] = loop->bufptr[0] + loop->steps[2]; - - - if (loop->meth == BUFFER_UFUNCLOOP) { - int _size; - loop->steps[1] = loop->outsize; - if (otype != aar->descr->type_num) { - _size=loop->bufsize*(loop->outsize + \ - aar->descr->elsize); - loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); - loop->castbuf = loop->buffer + \ - loop->bufsize*aar->descr->elsize; - loop->bufptr[1] = loop->castbuf; - loop->cast = PyArray_GetCastFunc(aar->descr, otype); - if (loop->cast == NULL) goto fail; - } - else { - _size = loop->bufsize * loop->outsize; - loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); - loop->bufptr[1] = loop->buffer; - } - } - - - PyUFunc_clearfperr(); - return loop; - - fail: - ufuncreduce_dealloc(loop); - return NULL; -} - - -/* We have two basic kinds of loops */ -/* One is used when arr is not-swapped and aligned and output type - is the same as input type. - and another using buffers when one of these is not satisfied. - - Zero-length and one-length axes-to-be-reduced are handled separately. 
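Whatever strategy is chosen, the recurrence each reduce loop implements for one output element is the same: seed the output with the first input element, then fold the remaining N-1 elements through the binary inner loop. A standalone sketch with op = add and made-up data:

    #include <stdio.h>

    int main(void)
    {
        double row[5] = { 1, 2, 3, 4, 5 };
        int N = 5, i;
        double out = row[0];          /* copy first element to output */
        for (i = 1; i < N; i++)       /* then N-1 applications of op */
            out = out + row[i];
        printf("reduce(add, row) = %g\n", out);   /* 15 */
        return 0;
    }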
-*/ - -static PyObject * -PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) -{ - PyArrayObject *ret=NULL; - PyUFuncReduceObject *loop; - intp i, n; - char *dptr; - NPY_BEGIN_THREADS_DEF - - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCE, 0, - "reduce"); - if (!loop) return NULL; - - NPY_LOOP_BEGIN_THREADS - switch(loop->meth) { - case ZERO_EL_REDUCELOOP: - /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) Py_INCREF(*((PyObject **)loop->idptr)); - memmove(loop->bufptr[0], loop->idptr, loop->outsize); - loop->bufptr[0] += loop->outsize; - } - break; - case ONE_EL_REDUCELOOP: - /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->index++; - } - break; - case NOBUFFER_UFUNCLOOP: - /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - /* Copy first element to output */ - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - PyArray_ITER_NEXT(loop->it) - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: - /* use buffer for arr */ - /* - For each row to reduce - 1. copy first item over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. 
- */ - /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, - loop->swap); */ - while(loop->index < loop->size) { - loop->inptr = loop->it->dataptr; - /* Copy (cast) First term over to output */ - if (loop->cast) { - /* A little tricky because we need to - cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); - if (loop->obj) - Py_INCREF(*((PyObject **)loop->castbuf)); - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); - } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, NULL); - } - loop->inptr += loop->instrides; - n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ - dptr = loop->buffer; - for (i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, - loop->steps, loop->funcdata); - loop->bufptr[0] += loop->steps[0]*i; - loop->bufptr[2] += loop->steps[2]*i; - UFUNC_CHECK_ERROR(loop); - } - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; - } - } - - NPY_LOOP_END_THREADS - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - - fail: - NPY_LOOP_END_THREADS - - if (loop) ufuncreduce_dealloc(loop); - return NULL; -} - - -static PyObject * -PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) -{ - PyArrayObject *ret=NULL; - PyUFuncReduceObject *loop; - intp i, n; - char *dptr; - NPY_BEGIN_THREADS_DEF - - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_ACCUMULATE, 0, - "accumulate"); - if (!loop) return NULL; - - NPY_LOOP_BEGIN_THREADS - switch(loop->meth) { - case ZERO_EL_REDUCELOOP: /* Accumulate */ - /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->idptr)); - memcpy(loop->bufptr[0], loop->idptr, loop->outsize); - loop->bufptr[0] += loop->outsize; - } - break; - case ONE_EL_REDUCELOOP: /* Accumulate */ - /* fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memcpy(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->index++; - } - break; - case NOBUFFER_UFUNCLOOP: /* Accumulate */ - /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - /* Copy first element to output */ - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memcpy(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->bufptr[2] = loop->bufptr[0] + loop->steps[0]; - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: /* Accumulate */ - /* use buffer for arr */ - 
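For comparison with reduce, the accumulate cases keep every intermediate result along the axis: out[0] = a[0] and out[i] = op(out[i-1], a[i]). A standalone sketch with op = add and made-up data:

    #include <stdio.h>

    int main(void)
    {
        double a[5] = { 1, 2, 3, 4, 5 }, out[5];
        int i;
        out[0] = a[0];
        for (i = 1; i < 5; i++)
            out[i] = out[i - 1] + a[i];   /* running fold */
        for (i = 0; i < 5; i++)
            printf("%g ", out[i]);        /* 1 3 6 10 15 */
        printf("\n");
        return 0;
    }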
/* - For each row to reduce - 1. copy identity over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. - */ - /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, - loop->cast); */ - while(loop->index < loop->size) { - loop->inptr = loop->it->dataptr; - /* Copy (cast) First term over to output */ - if (loop->cast) { - /* A little tricky because we need to - cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); - if (loop->obj) - Py_INCREF(*((PyObject **)loop->castbuf)); - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); - } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, - NULL); - } - loop->inptr += loop->instrides; - n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ - dptr = loop->buffer; - for (i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, - loop->steps, loop->funcdata); - loop->bufptr[0] += loop->steps[0]*i; - loop->bufptr[2] += loop->steps[2]*i; - UFUNC_CHECK_ERROR(loop); - } - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->bufptr[2] = loop->bufptr[0] + loop->steps[0]; - loop->index++; - } - } - - NPY_LOOP_END_THREADS - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - - fail: - NPY_LOOP_END_THREADS - - if (loop) ufuncreduce_dealloc(loop); - return NULL; -} - -/* Reduceat performs a reduce over an axis using the indices as a guide - - op.reduceat(array,indices) computes - op.reduce(array[indices[i]:indices[i+1]] - for i=0..end with an implicit indices[i+1]=len(array) - assumed when i=end-1 - - if indices[i+1] <= indices[i]+1 - then the result is array[indices[i]] for that value - - op.accumulate(array) is the same as - op.reduceat(array,indices)[::2] - where indices is range(len(array)-1) with a zero placed in every other sample - indices = zeros(len(array)*2-1) - indices[1::2] = range(1,len(array)) - - output shape is based on the size of indices -*/ - -static PyObject * -PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind, - PyArrayObject *out, int axis, int otype) -{ - PyArrayObject *ret; - PyUFuncReduceObject *loop; - intp *ptr=(intp *)ind->data; - intp nn=ind->dimensions[0]; - intp mm=arr->dimensions[axis]-1; - intp n, i, j; - char *dptr; - NPY_BEGIN_THREADS_DEF - - /* Check for out-of-bounds values in indices array */ - for (i=0; i mm)) { - PyErr_Format(PyExc_IndexError, - "index out-of-bounds (0, %d)", (int) mm); - return NULL; - } - ptr++; - } - - ptr = (intp *)ind->data; - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCEAT, nn, - "reduceat"); - if (!loop) return NULL; - - NPY_LOOP_BEGIN_THREADS - switch(loop->meth) { - /* zero-length index -- return array immediately */ - case ZERO_EL_REDUCELOOP: - /* fprintf(stderr, "ZERO..\n"); */ - break; - /* NOBUFFER -- behaved array and same type 
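A standalone worked example of the reduceat rule described above, with made-up data and indices; note the second segment, where indices[i+1] <= indices[i] and the result is just a[indices[i]]:

    #include <stdio.h>

    int main(void)
    {
        double a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        int ind[4] = { 0, 4, 1, 5 }, nn = 4, len = 8;
        int i, j;

        for (i = 0; i < nn; i++) {
            int stop = (i == nn - 1) ? len : ind[i + 1];
            double r = a[ind[i]];          /* at least one element */
            for (j = ind[i] + 1; j < stop; j++)
                r += a[j];                 /* op = add */
            printf("result[%d] = %g\n", i, r);
        }
        return 0;                          /* 10, 5, 14, 21 */
    }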
*/ - case NOBUFFER_UFUNCLOOP: /* Reduceat */ - /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - ptr = (intp *)ind->data; - for (i=0; ibufptr[1] = loop->it->dataptr + \ - (*ptr)*loop->instrides; - if (loop->obj) - Py_INCREF(*((PyObject **)loop->bufptr[1])); - memcpy(loop->bufptr[0], loop->bufptr[1], - loop->outsize); - mm = (i==nn-1 ? arr->dimensions[axis]-*ptr : \ - *(ptr+1) - *ptr) - 1; - if (mm > 0) { - loop->bufptr[1] += loop->instrides; - loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &mm, loop->steps, - loop->funcdata); - UFUNC_CHECK_ERROR(loop); - } - loop->bufptr[0] += loop->ret->strides[axis]; - ptr++; - } - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->index++; - } - break; - - /* BUFFER -- misbehaved array or different types */ - case BUFFER_UFUNCLOOP: /* Reduceat */ - /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */ - while(loop->index < loop->size) { - ptr = (intp *)ind->data; - for (i=0; iobj) - Py_INCREF(*((PyObject **)loop->idptr)); - memcpy(loop->bufptr[0], loop->idptr, - loop->outsize); - n = 0; - mm = (i==nn-1 ? arr->dimensions[axis] - *ptr :\ - *(ptr+1) - *ptr); - if (mm < 1) mm = 1; - loop->inptr = loop->it->dataptr + \ - (*ptr)*loop->instrides; - while (n < mm) { - /* Copy up to loop->bufsize elements - to buffer */ - dptr = loop->buffer; - for (j=0; jbufsize; j++, n++) { - if (n == mm) break; - arr->descr->f->copyswap\ - (dptr, - loop->inptr, - loop->swap, NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - j, NULL, NULL); - loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &j, loop->steps, - loop->funcdata); - UFUNC_CHECK_ERROR(loop); - loop->bufptr[0] += j*loop->steps[0]; - } - loop->bufptr[0] += loop->ret->strides[axis]; - ptr++; - } - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->index++; - } - break; - } - - NPY_LOOP_END_THREADS - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - - fail: - NPY_LOOP_END_THREADS - - if (loop) ufuncreduce_dealloc(loop); - return NULL; -} - - -/* This code handles reduce, reduceat, and accumulate - (accumulate and reduce are special cases of the more general reduceat - but they are handled separately for speed) -*/ - -static PyObject * -PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args, - PyObject *kwds, int operation) -{ - int axis=0; - PyArrayObject *mp, *ret = NULL; - PyObject *op, *res=NULL; - PyObject *obj_ind, *context; - PyArrayObject *indices = NULL; - PyArray_Descr *otype=NULL; - PyArrayObject *out=NULL; - static char *kwlist1[] = {"array", "axis", "dtype", "out", NULL}; - static char *kwlist2[] = {"array", "indices", "axis", "dtype", "out", NULL}; - static char *_reduce_type[] = {"reduce", "accumulate", \ - "reduceat", NULL}; - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return NULL; - } - - if (self->nin != 2) { - PyErr_Format(PyExc_ValueError, - "%s only supported for binary functions", - _reduce_type[operation]); - return NULL; - } - if (self->nout != 1) { - PyErr_Format(PyExc_ValueError, - "%s only supported for functions " \ - "returning a single value", - _reduce_type[operation]); - return 
NULL; - } - - if (operation == UFUNC_REDUCEAT) { - PyArray_Descr *indtype; - indtype = PyArray_DescrFromType(PyArray_INTP); - if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO|iO&O&", kwlist2, - &op, &obj_ind, &axis, - PyArray_DescrConverter2, - &otype, - PyArray_OutputConverter, - &out)) return NULL; - indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, - 1, 1, CARRAY, NULL); - if (indices == NULL) return NULL; - } - else { - if(!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O&", kwlist1, - &op, &axis, - PyArray_DescrConverter2, - &otype, - PyArray_OutputConverter, - &out)) return NULL; - } - - /* Ensure input is an array */ - if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) { - context = Py_BuildValue("O(O)i", self, op, 0); - } - else { - context = NULL; - } - mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); - Py_XDECREF(context); - if (mp == NULL) return NULL; - - /* Check to see if input is zero-dimensional */ - if (mp->nd == 0) { - PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", - _reduce_type[operation]); - Py_DECREF(mp); - return NULL; - } - - /* Check to see that type (and otype) is not FLEXIBLE */ - if (PyArray_ISFLEXIBLE(mp) || - (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) { - PyErr_Format(PyExc_TypeError, - "cannot perform %s with flexible type", - _reduce_type[operation]); - Py_DECREF(mp); - return NULL; - } - - if (axis < 0) axis += mp->nd; - if (axis < 0 || axis >= mp->nd) { - PyErr_SetString(PyExc_ValueError, "axis not in array"); - Py_DECREF(mp); - return NULL; - } - - /* If out is specified it determines otype unless otype - already specified. - */ - if (otype == NULL && out != NULL) { - otype = out->descr; - Py_INCREF(otype); - } - - if (otype == NULL) { - /* For integer types --- make sure at - least a long is used for add and multiply - reduction --- to avoid overflow */ - int typenum = PyArray_TYPE(mp); - if ((typenum < NPY_FLOAT) && \ - ((strcmp(self->name,"add")==0) || \ - (strcmp(self->name,"multiply")==0))) { - if (PyTypeNum_ISBOOL(typenum)) - typenum = PyArray_LONG; - else if (mp->descr->elsize < sizeof(long)) { - if (PyTypeNum_ISUNSIGNED(typenum)) - typenum = PyArray_ULONG; - else - typenum = PyArray_LONG; - } - } - otype = PyArray_DescrFromType(typenum); - } - - - switch(operation) { - case UFUNC_REDUCE: - ret = (PyArrayObject *)PyUFunc_Reduce(self, mp, out, axis, - otype->type_num); - break; - case UFUNC_ACCUMULATE: - ret = (PyArrayObject *)PyUFunc_Accumulate(self, mp, out, axis, - otype->type_num); - break; - case UFUNC_REDUCEAT: - ret = (PyArrayObject *)PyUFunc_Reduceat(self, mp, indices, out, - axis, otype->type_num); - Py_DECREF(indices); - break; - } - Py_DECREF(mp); - Py_DECREF(otype); - if (ret==NULL) return NULL; - if (op->ob_type != ret->ob_type) { - res = PyObject_CallMethod(op, "__array_wrap__", "O", ret); - if (res == NULL) PyErr_Clear(); - else if (res == Py_None) Py_DECREF(res); - else { - Py_DECREF(ret); - return res; - } - } - return PyArray_Return(ret); - -} - -/* This function analyzes the input arguments - and determines an appropriate __array_wrap__ function to call - for the outputs. - - If an output argument is provided, then it is wrapped - with its own __array_wrap__ not with the one determined by - the input arguments. - - if the provided output argument is already an array, - the wrapping function is None (which means no wrapping will - be done --- not even PyArray_Return). - - A NULL is placed in output_wrap for outputs that - should just have PyArray_Return called. 
-*/ - -static void -_find_array_wrap(PyObject *args, PyObject **output_wrap, int nin, int nout) -{ - Py_ssize_t nargs; - int i; - int np = 0; - double priority, maxpriority; - PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS]; - PyObject *obj, *wrap = NULL; - - nargs = PyTuple_GET_SIZE(args); - for (i=0; i= 2) { - wrap = wraps[0]; - maxpriority = PyArray_GetPriority(with_wrap[0], - PyArray_SUBTYPE_PRIORITY); - for (i = 1; i < np; ++i) { - priority = \ - PyArray_GetPriority(with_wrap[i], - PyArray_SUBTYPE_PRIORITY); - if (priority > maxpriority) { - maxpriority = priority; - Py_DECREF(wrap); - wrap = wraps[i]; - } else { - Py_DECREF(wraps[i]); - } - } - } - - /* Here wrap is the wrapping function determined from the - input arrays (could be NULL). - - For all the output arrays decide what to do. - - 1) Use the wrap function determined from the input arrays - This is the default if the output array is not - passed in. - - 2) Use the __array_wrap__ method of the output object - passed in. -- this is special cased for - exact ndarray so that no PyArray_Return is - done in that case. - */ - - for (i=0; inargs; i++) mps[i] = NULL; - - errval = PyUFunc_GenericFunction(self, args, kwds, mps); - if (errval < 0) { - for(i=0; inargs; i++) { - PyArray_XDECREF_ERR(mps[i]); - } - if (errval == -1) - return NULL; - else { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - } - - for(i=0; inin; i++) Py_DECREF(mps[i]); - - - /* Use __array_wrap__ on all outputs - if present on one of the input arguments. - If present for multiple inputs: - use __array_wrap__ of input object with largest - __array_priority__ (default = 0.0) - */ - - /* Exception: we should not wrap outputs for items already - passed in as output-arguments. These items should either - be left unwrapped or wrapped by calling their own __array_wrap__ - routine. - - For each output argument, wrap will be either - NULL --- call PyArray_Return() -- default if no output arguments given - None --- array-object passed in don't call PyArray_Return - method --- the __array_wrap__ method to call. 
- */
-    _find_array_wrap(args, wraparr, self->nin, self->nout);
-
-    /* wrap outputs */
-    for (i=0; i<self->nout; i++) {
-        int j=self->nin+i;
-        PyObject *wrap;
-        /* check to see if any UPDATEIFCOPY flags are set
-           which meant that a temporary output was generated
-        */
-        if (mps[j]->flags & UPDATEIFCOPY) {
-            PyObject *old = mps[j]->base;
-            Py_INCREF(old);    /* we want to hang on to this */
-            Py_DECREF(mps[j]); /* should trigger the copy
-                                  back into old */
-            mps[j] = (PyArrayObject *)old;
-        }
-        wrap = wraparr[i];
-        if (wrap != NULL) {
-            if (wrap == Py_None) {
-                Py_DECREF(wrap);
-                retobj[i] = (PyObject *)mps[j];
-                continue;
-            }
-            res = PyObject_CallFunction(wrap, "O(OOi)",
-                                        mps[j], self, args, i);
-            if (res == NULL && \
-                PyErr_ExceptionMatches(PyExc_TypeError)) {
-                PyErr_Clear();
-                res = PyObject_CallFunctionObjArgs(wrap,
-                                                   mps[j],
-                                                   NULL);
-            }
-            Py_DECREF(wrap);
-            if (res == NULL) goto fail;
-            else if (res == Py_None) Py_DECREF(res);
-            else {
-                Py_DECREF(mps[j]);
-                retobj[i] = res;
-                continue;
-            }
-        }
-        /* default behavior */
-        retobj[i] = PyArray_Return(mps[j]);
-    }
-
-    if (self->nout == 1) {
-        return retobj[0];
-    } else {
-        ret = (PyTupleObject *)PyTuple_New(self->nout);
-        for(i=0; i<self->nout; i++) {
-            PyTuple_SET_ITEM(ret, i, retobj[i]);
-        }
-        return (PyObject *)ret;
-    }
- fail:
-    for(i=self->nin; i<self->nargs; i++) Py_XDECREF(mps[i]);
-    return NULL;
-}
-
-static PyObject *
-ufunc_geterr(PyObject *dummy, PyObject *args)
-{
-    PyObject *thedict;
-    PyObject *res;
-
-    if (!PyArg_ParseTuple(args, "")) return NULL;
-
-    if (PyUFunc_PYVALS_NAME == NULL) {
-        PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME);
-    }
-    thedict = PyThreadState_GetDict();
-    if (thedict == NULL) {
-        thedict = PyEval_GetBuiltins();
-    }
-    res = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME);
-    if (res != NULL) {
-        Py_INCREF(res);
-        return res;
-    }
-    /* Construct list of defaults */
-    res = PyList_New(3);
-    if (res == NULL) return NULL;
-    PyList_SET_ITEM(res, 0, PyInt_FromLong(PyArray_BUFSIZE));
-    PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT));
-    PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None);
-    return res;
-}
-
-#if USE_USE_DEFAULTS==1
-/*
-  This is a strategy to buy a little speed up and avoid the dictionary
-  look-up in the default case.  It should work in the presence of
-  threads.  If it is deemed too complicated or it doesn't actually work
-  it could be taken out.
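The state that ufunc_geterr and ufunc_seterr manage here is a per-thread 3-list [bufsize, errmask, errobj], and _extract_pyvals earlier in this file rejects buffer sizes outside a fixed range or not divisible by 16. A standalone sketch of that check; MIN_BUFSIZE and MAX_BUFSIZE below only approximate PyArray_MIN_BUFSIZE and PyArray_MAX_BUFSIZE:

    #include <stdio.h>

    #define MIN_BUFSIZE 5
    #define MAX_BUFSIZE (10 * 1000 * 1000)

    /* bufsize must lie in range and be a multiple of 16 */
    static int check_bufsize(long bufsize)
    {
        return bufsize >= MIN_BUFSIZE && bufsize <= MAX_BUFSIZE &&
               bufsize % 16 == 0;
    }

    int main(void)
    {
        printf("10000 ok? %d\n", check_bufsize(10000));   /* 1 */
        printf("10001 ok? %d\n", check_bufsize(10001));   /* 0: not a multiple of 16 */
        return 0;
    }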
-*/ -static int -ufunc_update_use_defaults(void) -{ - PyObject *errobj=NULL; - int errmask, bufsize; - int res; - - PyUFunc_NUM_NODEFAULTS += 1; - res = PyUFunc_GetPyValues("test", &bufsize, &errmask, - &errobj); - PyUFunc_NUM_NODEFAULTS -= 1; - - if (res < 0) {Py_XDECREF(errobj); return -1;} - - if ((errmask != UFUNC_ERR_DEFAULT) || \ - (bufsize != PyArray_BUFSIZE) || \ - (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { - PyUFunc_NUM_NODEFAULTS += 1; - } - else if (PyUFunc_NUM_NODEFAULTS > 0) { - PyUFunc_NUM_NODEFAULTS -= 1; - } - Py_XDECREF(errobj); - return 0; -} -#endif - -static PyObject * -ufunc_seterr(PyObject *dummy, PyObject *args) -{ - PyObject *thedict; - int res; - PyObject *val; - static char *msg = "Error object must be a list of length 3"; - - if (!PyArg_ParseTuple(args, "O", &val)) return NULL; - - if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - res = PyDict_SetItem(thedict, PyUFunc_PYVALS_NAME, val); - if (res < 0) return NULL; -#if USE_USE_DEFAULTS==1 - if (ufunc_update_use_defaults() < 0) return NULL; -#endif - Py_INCREF(Py_None); - return Py_None; -} - - - -static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; - -static char -doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays"; - -static PyObject * -ufunc_frompyfunc(PyObject *dummy, PyObject *args, PyObject *kwds) { - /* Keywords are ignored for now */ - - PyObject *function, *pyname=NULL; - int nin, nout, i; - PyUFunc_PyFuncData *fdata; - PyUFuncObject *self; - char *fname, *str; - Py_ssize_t fname_len=-1; - int offset[2]; - - if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) return NULL; - - if (!PyCallable_Check(function)) { - PyErr_SetString(PyExc_TypeError, "function must be callable"); - return NULL; - } - - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; - PyObject_Init((PyObject *)self, &PyUFunc_Type); - - self->userloops = NULL; - self->nin = nin; - self->nout = nout; - self->nargs = nin+nout; - self->identity = PyUFunc_None; - self->functions = pyfunc_functions; - - self->ntypes = 1; - self->check_return = 0; - - pyname = PyObject_GetAttrString(function, "__name__"); - if (pyname) - (void) PyString_AsStringAndSize(pyname, &fname, &fname_len); - - if (PyErr_Occurred()) { - fname = "?"; - fname_len = 1; - PyErr_Clear(); - } - Py_XDECREF(pyname); - - - - /* self->ptr holds a pointer for enough memory for - self->data[0] (fdata) - self->data - self->name - self->types - - To be safest, all of these need their memory aligned on void * pointers - Therefore, we may need to allocate extra space. 
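The offset computation that follows rounds each sub-block carved out of self->ptr up to a void * boundary. A standalone sketch of that rounding with example sizes:

    #include <stdio.h>

    /* round n up to the next multiple of sizeof(void *) */
    static size_t round_up(size_t n)
    {
        size_t r = n % sizeof(void *);
        return r ? n + (sizeof(void *) - r) : n;
    }

    int main(void)
    {
        /* with 8-byte pointers: 20 -> 24, 32 stays 32 */
        printf("%zu -> %zu\n", (size_t)20, round_up(20));
        printf("%zu -> %zu\n", (size_t)32, round_up(32));
        return 0;
    }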
- */ - offset[0] = sizeof(PyUFunc_PyFuncData); - i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *)); - if (i) offset[0] += (sizeof(void *) - i); - offset[1] = self->nargs; - i = (self->nargs % sizeof(void *)); - if (i) offset[1] += (sizeof(void *)-i); - - self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + \ - (fname_len+14)); - - if (self->ptr == NULL) return PyErr_NoMemory(); - Py_INCREF(function); - self->obj = function; - fdata = (PyUFunc_PyFuncData *)(self->ptr); - fdata->nin = nin; - fdata->nout = nout; - fdata->callable = function; - - self->data = (void **)(((char *)self->ptr) + offset[0]); - self->data[0] = (void *)fdata; - - self->types = (char *)self->data + sizeof(void *); - for (i=0; inargs; i++) self->types[i] = PyArray_OBJECT; - - str = self->types + offset[1]; - memcpy(str, fname, fname_len); - memcpy(str+fname_len, " (vectorized)", 14); - - self->name = str; - - /* Do a better job someday */ - self->doc = "dynamic ufunc based on a python function"; - - - return (PyObject *)self; -} - -/*UFUNC_API*/ -static int -PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func, - PyUFuncGenericFunction newfunc, - int *signature, - PyUFuncGenericFunction *oldfunc) -{ - int i,j; - int res = -1; - /* Find the location of the matching signature */ - for (i=0; intypes; i++) { - for (j=0; jnargs; j++) { - if (signature[j] != func->types[i*func->nargs+j]) - break; - } - if (j < func->nargs) continue; - - if (oldfunc != NULL) { - *oldfunc = func->functions[i]; - } - func->functions[i] = newfunc; - res = 0; - break; - } - return res; -} - -/*UFUNC_API*/ -static PyObject * -PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data, - char *types, int ntypes, - int nin, int nout, int identity, - char *name, char *doc, int check_return) -{ - PyUFuncObject *self; - - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; - PyObject_Init((PyObject *)self, &PyUFunc_Type); - - self->nin = nin; - self->nout = nout; - self->nargs = nin+nout; - self->identity = identity; - - self->functions = func; - self->data = data; - self->types = types; - self->ntypes = ntypes; - self->check_return = check_return; - self->ptr = NULL; - self->obj = NULL; - self->userloops=NULL; - - if (name == NULL) self->name = "?"; - else self->name = name; - - if (doc == NULL) self->doc = "NULL"; - else self->doc = doc; - - return (PyObject *)self; -} - -/* This is the first-part of the CObject structure. - - I don't think this will change, but if it should, then - this needs to be fixed. The exposed C-API was insufficient - because I needed to replace the pointer and it wouldn't - let me with a destructor set (even though it works fine - with the destructor). 
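ufunc_frompyfunc above packs the Python callable, an all-object type signature, and a generated "<name> (vectorized)" name into one allocation. Its Python face is numpy.frompyfunc; a small usage sketch (the hypot2 helper is only an illustration):

import numpy as np

def hypot2(a, b):
    return (a * a + b * b) ** 0.5

uf = np.frompyfunc(hypot2, 2, 1)        # nin=2, nout=1
print(uf(np.arange(3), 4))              # object-dtype result, as the docstring above notes
print(uf.nin, uf.nout, uf.__name__)     # 2 1 hypot2 (vectorized)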
-*/ - -typedef struct { - PyObject_HEAD - void *c_obj; -} _simple_cobj; - -#define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) - -/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 - */ -static int -cmp_arg_types(int *arg1, int *arg2, int n) -{ - while (n--) { - if (PyArray_EquivTypenums(*arg1, *arg2)) continue; - if (PyArray_CanCastSafely(*arg1, *arg2)) - return -1; - return 1; - } - return 0; -} - -/* This frees the linked-list structure - when the CObject is destroyed (removed - from the internal dictionary) -*/ -static void -_loop1d_list_free(void *ptr) -{ - PyUFunc_Loop1d *funcdata; - if (ptr == NULL) return; - funcdata = (PyUFunc_Loop1d *)ptr; - if (funcdata == NULL) return; - _pya_free(funcdata->arg_types); - _loop1d_list_free(funcdata->next); - _pya_free(funcdata); -} - - -/*UFUNC_API*/ -static int -PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, - int usertype, - PyUFuncGenericFunction function, - int *arg_types, - void *data) -{ - PyArray_Descr *descr; - PyUFunc_Loop1d *funcdata; - PyObject *key, *cobj; - int i; - int *newtypes=NULL; - - descr=PyArray_DescrFromType(usertype); - if ((usertype < PyArray_USERDEF) || (descr==NULL)) { - PyErr_SetString(PyExc_TypeError, - "unknown user-defined type"); - return -1; - } - Py_DECREF(descr); - - if (ufunc->userloops == NULL) { - ufunc->userloops = PyDict_New(); - } - key = PyInt_FromLong((long) usertype); - if (key == NULL) return -1; - funcdata = _pya_malloc(sizeof(PyUFunc_Loop1d)); - if (funcdata == NULL) goto fail; - newtypes = _pya_malloc(sizeof(int)*ufunc->nargs); - if (newtypes == NULL) goto fail; - if (arg_types != NULL) { - for (i=0; inargs; i++) { - newtypes[i] = arg_types[i]; - } - } - else { - for (i=0; inargs; i++) { - newtypes[i] = usertype; - } - } - - funcdata->func = function; - funcdata->arg_types = newtypes; - funcdata->data = data; - funcdata->next = NULL; - - /* Get entry for this user-defined type*/ - cobj = PyDict_GetItem(ufunc->userloops, key); - - /* If it's not there, then make one and return. */ - if (cobj == NULL) { - cobj = PyCObject_FromVoidPtr((void *)funcdata, - _loop1d_list_free); - if (cobj == NULL) goto fail; - PyDict_SetItem(ufunc->userloops, key, cobj); - Py_DECREF(cobj); - Py_DECREF(key); - return 0; - } - else { - PyUFunc_Loop1d *current, *prev=NULL; - int cmp=1; - /* There is already at least 1 loop. Place this one in - lexicographic order. If the next one signature - is exactly like this one, then just replace. - Otherwise insert. - */ - current = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(cobj); - while (current != NULL) { - cmp = cmp_arg_types(current->arg_types, newtypes, - ufunc->nargs); - if (cmp >= 0) break; - prev = current; - current = current->next; - } - if (cmp == 0) { /* just replace it with new function */ - current->func = function; - current->data = data; - _pya_free(newtypes); - _pya_free(funcdata); - } - else { /* insert it before the current one - by hacking the internals of cobject to - replace the function pointer --- - can't use CObject API because destructor is set. 
- */ - funcdata->next = current; - if (prev == NULL) { /* place this at front */ - _SETCPTR(cobj, funcdata); - } - else { - prev->next = funcdata; - } - } - } - Py_DECREF(key); - return 0; - - - fail: - Py_DECREF(key); - _pya_free(funcdata); - _pya_free(newtypes); - if (!PyErr_Occurred()) PyErr_NoMemory(); - return -1; -} - -#undef _SETCPTR - - -static void -ufunc_dealloc(PyUFuncObject *self) -{ - if (self->ptr) _pya_free(self->ptr); - Py_XDECREF(self->userloops); - Py_XDECREF(self->obj); - _pya_free(self); -} - -static PyObject * -ufunc_repr(PyUFuncObject *self) -{ - char buf[100]; - - sprintf(buf, "", self->name); - - return PyString_FromString(buf); -} - - -/* -------------------------------------------------------- */ - -/* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) - where a has b.ndim NewAxis terms appended. - - The result has dimensions a.ndim + b.ndim -*/ - -static PyObject * -ufunc_outer(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - int i; - PyObject *ret; - PyArrayObject *ap1=NULL, *ap2=NULL, *ap_new=NULL; - PyObject *new_args, *tmp; - PyObject *shape1, *shape2, *newshape; - - if(self->nin != 2) { - PyErr_SetString(PyExc_ValueError, - "outer product only supported "\ - "for binary functions"); - return NULL; - } - - if (PySequence_Length(args) != 2) { - PyErr_SetString(PyExc_TypeError, - "exactly two arguments expected"); - return NULL; - } - - tmp = PySequence_GetItem(args, 0); - if (tmp == NULL) return NULL; - ap1 = (PyArrayObject *) \ - PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); - Py_DECREF(tmp); - if (ap1 == NULL) return NULL; - - tmp = PySequence_GetItem(args, 1); - if (tmp == NULL) return NULL; - ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); - Py_DECREF(tmp); - if (ap2 == NULL) {Py_DECREF(ap1); return NULL;} - - /* Construct new shape tuple */ - shape1 = PyTuple_New(ap1->nd); - if (shape1 == NULL) goto fail; - for (i=0; ind; i++) - PyTuple_SET_ITEM(shape1, i, - PyLong_FromLongLong((longlong)ap1-> \ - dimensions[i])); - - shape2 = PyTuple_New(ap2->nd); - for (i=0; ind; i++) - PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); - if (shape2 == NULL) {Py_DECREF(shape1); goto fail;} - newshape = PyNumber_Add(shape1, shape2); - Py_DECREF(shape1); - Py_DECREF(shape2); - if (newshape == NULL) goto fail; - - ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); - Py_DECREF(newshape); - if (ap_new == NULL) goto fail; - - new_args = Py_BuildValue("(OO)", ap_new, ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - Py_DECREF(ap_new); - ret = ufunc_generic_call(self, new_args, kwds); - Py_DECREF(new_args); - return ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ap_new); - return NULL; -} - - -static PyObject * -ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE); -} - -static PyObject * -ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE); -} - -static PyObject * -ufunc_reduceat(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCEAT); -} - - -static struct PyMethodDef ufunc_methods[] = { - {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS}, - {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS}, - {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS}, - {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | 
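ufunc_outer above implements op.outer(a, b) by appending b.ndim length-1 axes to a and calling the ufunc, so the result has a.ndim + b.ndim dimensions; reduce/accumulate/reduceat simply forward to PyUFunc_GenericReduction. A Python illustration of the same semantics:

import numpy as np

a, b = np.arange(3), np.arange(4)
out = np.multiply.outer(a, b)
print(out.shape)                              # (3, 4) = a.ndim + b.ndim axes
print(np.array_equal(out, a[:, None] * b))    # True -- the reshape trick performed above
print(np.add.reduce(np.arange(5)))            # 10
print(np.add.accumulate(np.arange(5)))        # [ 0  1  3  6 10]
print(np.add.reduceat(np.arange(8), [0, 4]))  # [ 6 22]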
METH_KEYWORDS}, - {NULL, NULL} /* sentinel */ -}; - - - -/* construct the string - y1,y2,...,yn -*/ -static PyObject * -_makeargs(int num, char *ltr) -{ - PyObject *str; - int i; - switch (num) { - case 0: - return PyString_FromString(""); - case 1: - return PyString_FromString(ltr); - } - str = PyString_FromFormat("%s1,%s2", ltr, ltr); - for(i = 3; i <= num; ++i) { - PyString_ConcatAndDel(&str, PyString_FromFormat(",%s%d", ltr, i)); - } - return str; -} - -static char -_typecharfromnum(int num) { - PyArray_Descr *descr; - char ret; - - descr = PyArray_DescrFromType(num); - ret = descr->type; - Py_DECREF(descr); - return ret; -} - -static PyObject * -ufunc_get_doc(PyUFuncObject *self) -{ - /* Put docstring first or FindMethod finds it...*/ - /* could so some introspection on name and nin + nout */ - /* to automate the first part of it */ - /* the doc string shouldn't need the calling convention */ - /* construct - y1,y2,,... = name(x1,x2,...) __doc__ - */ - PyObject *outargs, *inargs, *doc; - outargs = _makeargs(self->nout, "y"); - inargs = _makeargs(self->nin, "x"); - doc = PyString_FromFormat("%s = %s(%s) %s", - PyString_AS_STRING(outargs), - self->name, - PyString_AS_STRING(inargs), - self->doc); - Py_DECREF(outargs); - Py_DECREF(inargs); - return doc; -} - -static PyObject * -ufunc_get_nin(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nin); -} - -static PyObject * -ufunc_get_nout(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nout); -} - -static PyObject * -ufunc_get_nargs(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nargs); -} - -static PyObject * -ufunc_get_ntypes(PyUFuncObject *self) -{ - return PyInt_FromLong(self->ntypes); -} - -static PyObject * -ufunc_get_types(PyUFuncObject *self) -{ - /* return a list with types grouped - input->output */ - PyObject *list; - PyObject *str; - int k, j, n, nt=self->ntypes; - int ni = self->nin; - int no = self->nout; - char *t; - list = PyList_New(nt); - if (list == NULL) return NULL; - t = _pya_malloc(no+ni+2); - n = 0; - for (k=0; ktypes[n]); - n++; - } - t[ni] = '-'; - t[ni+1] = '>'; - for (j=0; jtypes[n]); - n++; - } - str = PyString_FromStringAndSize(t, no+ni+2); - PyList_SET_ITEM(list, k, str); - } - _pya_free(t); - return list; -} - -static PyObject * -ufunc_get_name(PyUFuncObject *self) -{ - return PyString_FromString(self->name); -} - -static PyObject * -ufunc_get_identity(PyUFuncObject *self) -{ - switch(self->identity) { - case PyUFunc_One: - return PyInt_FromLong(1); - case PyUFunc_Zero: - return PyInt_FromLong(0); - } - return Py_None; -} - - -#undef _typecharfromnum - -static char Ufunctype__doc__[] = - "Optimized functions make it possible to implement arithmetic "\ - "with arrays efficiently"; - -static PyGetSetDef ufunc_getset[] = { - {"__doc__", (getter)ufunc_get_doc, NULL, "documentation string"}, - {"nin", (getter)ufunc_get_nin, NULL, "number of inputs"}, - {"nout", (getter)ufunc_get_nout, NULL, "number of outputs"}, - {"nargs", (getter)ufunc_get_nargs, NULL, "number of arguments"}, - {"ntypes", (getter)ufunc_get_ntypes, NULL, "number of types"}, - {"types", (getter)ufunc_get_types, NULL, "return a list with types grouped input->output"}, - {"__name__", (getter)ufunc_get_name, NULL, "function name"}, - {"identity", (getter)ufunc_get_identity, NULL, "identity value"}, - {NULL, NULL, NULL, NULL}, /* Sentinel */ -}; - -static PyTypeObject PyUFunc_Type = { - PyObject_HEAD_INIT(0) - 0, /*ob_size*/ - "numpy.ufunc", /*tp_name*/ - sizeof(PyUFuncObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - /* methods */ - 
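The getters above expose the ufunc's arity, registered type signatures, identity element and name, and ufunc_repr renders the familiar "<ufunc 'name'>" form. Observable from Python:

import numpy as np

print(repr(np.add))                           # <ufunc 'add'>
print(np.add.nin, np.add.nout, np.add.nargs)  # 2 1 3
print(np.add.identity, np.multiply.identity)  # 0 1
print(np.add.ntypes, np.add.types[:3])        # e.g. 22 ['??->?', 'bb->b', 'BB->B']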
(destructor)ufunc_dealloc, /*tp_dealloc*/ - (printfunc)0, /*tp_print*/ - (getattrfunc)0, /*tp_getattr*/ - (setattrfunc)0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)ufunc_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)ufunc_generic_call, /*tp_call*/ - (reprfunc)ufunc_repr, /*tp_str*/ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - Ufunctype__doc__, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - ufunc_methods, /* tp_methods */ - 0, /* tp_members */ - ufunc_getset, /* tp_getset */ -}; - -/* End of code for ufunc objects */ -/* -------------------------------------------------------- */ diff --git a/numpy/core/src/umathmodule.c.src b/numpy/core/src/umathmodule.c.src deleted file mode 100644 index ec531e0e5..000000000 --- a/numpy/core/src/umathmodule.c.src +++ /dev/null @@ -1,2334 +0,0 @@ -/* -*- c -*- */ - -#include "Python.h" -#include "numpy/noprefix.h" -#define _UMATHMODULE -#include "numpy/ufuncobject.h" -#include "abstract.h" -#include - -/* A whole slew of basic math functions are provided originally - by Konrad Hinsen. */ - -#if !defined(__STDC__) && !defined(_MSC_VER) -extern double fmod (double, double); -extern double frexp (double, int *); -extern double ldexp (double, int); -extern double modf (double, double *); -#endif -#ifndef M_PI -#define M_PI 3.14159265358979323846264338328 -#endif - - -#if defined(DISTUTILS_USE_SDK) -/* win32 on AMD64 build architecture */ -/* See also http://projects.scipy.org/scipy/numpy/ticket/164 */ -#ifndef HAVE_FABSF -#ifdef fabsf -#undef fabsf -#endif -static float fabsf(float x) -{ - return (float)fabs((double)(x)); -} -#endif -#ifndef HAVE_HYPOTF -static float hypotf(float x, float y) -{ - return (float)hypot((double)(x), (double)(y)); -} -#endif -#ifndef HAVE_RINTF -#ifndef HAVE_RINT -static double rint (double x); -#endif -static float rintf(float x) -{ - return (float)rint((double)(x)); -} -#endif -#ifndef HAVE_FREXPF -static float frexpf(float x, int * i) -{ - return (float)frexp((double)(x), i); -} -#endif -#ifndef HAVE_LDEXPF -static float ldexpf(float x, int i) -{ - return (float)ldexp((double)(x), i); -} -#endif -#define tanhf nc_tanhf -#endif - -#ifndef HAVE_INVERSE_HYPERBOLIC -static double acosh(double x) -{ - return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); -} - -double log1p(double); -static double asinh(double xx) -{ - double x, d; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if (x > 1e8) { - d = x; - } else { - d = sqrt(x*x + 1); - } - return sign*log1p(x*(1.0 + x/(d+1))); -} - -static double atanh(double x) -{ - return 0.5*log1p(2.0*x/(1.0-x)); -} -#endif - -#if !defined(HAVE_INVERSE_HYPERBOLIC_FLOAT) -#ifdef HAVE_FLOAT_FUNCS -#ifdef log1pf -#undef log1pf -#endif -#ifdef logf -#undef logf -#endif -#ifdef sqrtf -#undef sqrtf -#endif -float log1pf(float); -#ifdef DISTUTILS_USE_SDK -DL_IMPORT(float) logf(float); -DL_IMPORT(float) sqrtf(float); -#else -/* should these be extern?: */ -float logf(float); -float sqrtf(float); -#endif -#ifdef acoshf -#undef acoshf -#endif -static float acoshf(float x) -{ - return 2*logf(sqrtf((x+1)/2)+sqrtf((x-1)/2)); -} - -#ifdef asinhf -#undef asinhf -#endif -static float asinhf(float xx) -{ - float x, d; - int sign; - if (xx < 0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if 
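When the platform lacks the inverse hyperbolic functions, the fallbacks above use log1p-based forms that stay accurate for small arguments, e.g. asinh(x) = sign(x)*log1p(|x|*(1 + |x|/(sqrt(x*x+1)+1))). A Python transcription of the double-precision fallback, for illustration only:

import math

def asinh_fallback(x):
    # mirrors the HAVE_INVERSE_HYPERBOLIC fallback above (double version, 1e8 cutoff)
    sign, ax = (1.0, x) if x >= 0 else (-1.0, -x)
    d = ax if ax > 1e8 else math.sqrt(ax * ax + 1.0)
    return sign * math.log1p(ax * (1.0 + ax / (d + 1.0)))

print(asinh_fallback(1e-12), math.asinh(1e-12))  # both ~1e-12; naive log(x + sqrt(x*x+1)) loses digits here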
(x > 1e5) { - d = x; - } else { - d = sqrtf(x*x + 1); - } - return sign*log1pf(x*(1 + x/(d+1))); -} - -#ifdef atanhf -#undef atanhf -#endif -static float atanhf(float x) -{ - return log1pf(2*x/(1-x))/2; -} -#else -#ifdef acoshf -#undef acoshf -#endif -static float acoshf(float x) -{ - return (float)acosh((double)(x)); -} - -#ifdef asinhf -#undef asinhf -#endif -static float asinhf(float x) -{ - return (float)asinh((double)(x)); -} - -#ifdef atanhf -#undef atanhf -#endif -static float atanhf(float x) -{ - return (float)atanh((double)(x)); -} -#endif -#endif - - -#if !defined(HAVE_INVERSE_HYPERBOLIC_LONGDOUBLE) -#ifdef HAVE_LONGDOUBLE_FUNCS -#ifdef logl -#undef logl -#endif -#ifdef sqrtl -#undef sqrtl -#endif -#ifdef log1pl -#undef log1pl -#endif -longdouble logl(longdouble); -longdouble sqrtl(longdouble); -longdouble log1pl(longdouble); -#ifdef acoshl -#undef acoshl -#endif -static longdouble acoshl(longdouble x) -{ - return 2*logl(sqrtl((x+1.0)/2)+sqrtl((x-1.0)/2)); -} - -#ifdef asinhl -#undef asinhl -#endif -static longdouble asinhl(longdouble xx) -{ - longdouble x, d; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if (x > 1e17) { - d = x; - } else { - d = sqrtl(x*x + 1); - } - return sign*log1pl(x*(1.0 + x/(d+1))); -} - -#ifdef atanhl -#undef atanhl -#endif -static longdouble atanhl(longdouble x) -{ - return 0.5*log1pl(2.0*x/(1.0-x)); -} - -#else - -#ifdef acoshl -#undef acoshl -#endif -static longdouble acoshl(longdouble x) -{ - return (longdouble)acosh((double)(x)); -} - -#ifdef asinhl -#undef asinhl -#endif -static longdouble asinhl(longdouble x) -{ - return (longdouble)asinh((double)(x)); -} - -#ifdef atanhl -#undef atanhl -#endif -static longdouble atanhl(longdouble x) -{ - return (longdouble)atanh((double)(x)); -} - -#endif -#endif - - -#ifdef HAVE_HYPOT -#if !defined(NeXT) && !defined(_MSC_VER) -extern double hypot(double, double); -#endif -#else -static double hypot(double x, double y) -{ - double yx; - - x = fabs(x); - y = fabs(y); - if (x < y) { - double temp = x; - x = y; - y = temp; - } - if (x == 0.) - return 0.; - else { - yx = y/x; - return x*sqrt(1.+yx*yx); - } -} -#endif - - -#ifndef HAVE_RINT -static double -rint (double x) -{ - double y, r; - - y = floor(x); - r = x - y; - - if (r > 0.5) goto rndup; - - /* Round to nearest even */ - if (r==0.5) { - r = y - 2.0*floor(0.5*y); - if (r==1.0) { - rndup: - y+=1.0; - } - } - return y; -} -#endif - - - - - -/* Define isnan, isinf, isfinite, signbit if needed */ -/* Use fpclassify if possible */ -/* isnan, isinf -- - these will use macros and then fpclassify if available before - defaulting to a dumb convert-to-double version... - - isfinite -- define a macro if not already available - signbit -- if macro available use it, otherwise define a function - and a dumb convert-to-double version for other types. 
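The hypot and rint fallbacks above are likewise written defensively: hypot scales by the larger magnitude so x*x + y*y cannot overflow prematurely, and rint rounds halfway cases to the nearest even integer. Quick checks of the same semantics through the public API:

import math
import numpy as np

print(np.rint([0.5, 1.5, 2.5, 3.5]))   # [0. 2. 2. 4.] -- halves go to the even neighbour
print(math.hypot(3e200, 4e200))        # 5e+200; a naive sqrt(x*x + y*y) would overflow to inf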
-*/ - -#if defined(fpclassify) - -#if !defined(isnan) -#define isnan(x) (fpclassify(x) == FP_NAN) -#endif -#if !defined(isinf) -#define isinf(x) (fpclassify(x) == FP_INFINITE) -#endif - -#else /* check to see if already have a function like this */ - -#if !defined(HAVE_ISNAN) - -#if !defined(isnan) -#include "_isnan.c" -#endif -#endif /* HAVE_ISNAN */ - -#if !defined(HAVE_ISINF) -#if !defined(isinf) -#define isinf(x) (!isnan((x)) && isnan((x)-(x))) -#endif -#endif /* HAVE_ISINF */ - -#endif /* defined(fpclassify) */ - - -/* Define signbit if needed */ -#if !defined(signbit) -#include "_signbit.c" -#endif - -/* Now defined the extended type macros */ - -#if !defined(isnan) - -#if !defined(HAVE_LONGDOUBLE_FUNCS) || !defined(HAVE_ISNAN) -#define isnanl(x) isnan((double)(x)) -#endif - -#if !defined(HAVE_FLOAT_FUNCS) || !defined(HAVE_ISNAN) -#define isnanf(x) isnan((double)(x)) -#endif - -#else /* !defined(isnan) */ - -#define isnanl(x) isnan((x)) -#define isnanf(x) isnan((x)) - -#endif /* !defined(isnan) */ - - -#if !defined(isinf) - -#if !defined(HAVE_LONGDOUBLE_FUNCS) || !defined(HAVE_ISINF) -#define isinfl(x) (!isnanl((x)) && isnanl((x)-(x))) -#endif - -#if !defined(HAVE_FLOAT_FUNCS) || !defined(HAVE_ISINF) -#define isinff(x) (!isnanf((x)) && isnanf((x)-(x))) -#endif - -#else /* !defined(isinf) */ - -#define isinfl(x) isinf((x)) -#define isinff(x) isinf((x)) - -#endif /* !defined(isinf) */ - - -#if !defined(signbit) -#define signbitl(x) ((longdouble) signbit((double)(x))) -#define signbitf(x) ((float) signbit((double) (x))) -#else -#define signbitl(x) signbit((x)) -#define signbitf(x) signbit((x)) -#endif - -#if !defined(isfinite) -#define isfinite(x) (!(isinf((x)) || isnan((x)))) -#endif -#define isfinitef(x) (!(isinff((x)) || isnanf((x)))) -#define isfinitel(x) (!(isinfl((x)) || isnanl((x)))) - -float degreesf(float x) { - return x * (float)(180.0/M_PI); -} -double degrees(double x) { - return x * (180.0/M_PI); -} -longdouble degreesl(longdouble x) { - return x * (180.0L/M_PI); -} - -float radiansf(float x) { - return x * (float)(M_PI/180.0); -} -double radians(double x) { - return x * (M_PI/180.0); -} -longdouble radiansl(longdouble x) { - return x * (M_PI/180.0L); -} - -/* First, the C functions that do the real work */ - -/* if C99 extensions not available then define dummy functions that use the - double versions for - - sin, cos, tan - sinh, cosh, tanh, - fabs, floor, ceil, fmod, sqrt, log10, log, exp, fabs - asin, acos, atan, - asinh, acosh, atanh - - hypot, atan2, pow -*/ - -/**begin repeat - - #kind=(sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,sqrt,log10,log,exp,asin,acos,atan,rint)*2# - #typ=longdouble*17, float*17# - #c=l*17,f*17# - #TYPE=LONGDOUBLE*17, FLOAT*17# -*/ -#ifndef HAVE_@TYPE@_FUNCS -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -@typ@ @kind@@c@(@typ@ x) { - return (@typ@) @kind@((double)x); -} -#endif -/**end repeat**/ - -/**begin repeat - - #kind=(atan2,hypot,pow,fmod)*2# - #typ=longdouble*4, float*4# - #c=l*4,f*4# - #TYPE=LONGDOUBLE*4,FLOAT*4# -*/ -#ifndef HAVE_@TYPE@_FUNCS -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -@typ@ @kind@@c@(@typ@ x, @typ@ y) { - return (@typ@) @kind@((double)x, (double) y); -} -#endif -/**end repeat**/ - -/**begin repeat - #kind=modf*2# - #typ=longdouble, float# - #c=l,f# - #TYPE=LONGDOUBLE, FLOAT# -*/ -#ifndef HAVE_@TYPE@_FUNCS -#ifdef modf@c@ -#undef modf@c@ -#endif -@typ@ modf@c@(@typ@ x, @typ@ *iptr) { - double nx, niptr, y; - nx = (double) x; - y = modf(nx, &niptr); - *iptr = (@typ@) niptr; - return (@typ@) y; -} -#endif -/**end 
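The block above stitches together isnan/isinf/isfinite/signbit for float, double and long double, and defines the degrees/radians kernels. Their observable behaviour from Python:

import numpy as np

print(np.signbit([-0.0, 0.0, -3.0]))         # [ True False  True] -- signbit sees negative zero
print(np.isfinite([np.inf, np.nan, 1.0]))    # [False False  True]
print(np.degrees(np.pi), np.radians(180.0))  # 180.0 3.141592653589793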
repeat**/ - - - -#ifndef HAVE_LOG1P -double log1p(double x) -{ - double u = 1. + x; - if (u == 1.0) { - return x; - } else { - return log(u) * x / (u-1.); - } -} -#endif - -#if !defined(HAVE_LOG1P) || !defined(HAVE_LONGDOUBLE_FUNCS) -#ifdef log1pl -#undef log1pl -#endif -longdouble log1pl(longdouble x) -{ - longdouble u = 1. + x; - if (u == 1.0) { - return x; - } else { - return logl(u) * x / (u-1.); - } -} -#endif - -#if !defined(HAVE_LOG1P) || !defined(HAVE_FLOAT_FUNCS) -#ifdef log1pf -#undef log1pf -#endif -float log1pf(float x) -{ - float u = 1 + x; - if (u == 1) { - return x; - } else { - return logf(u) * x / (u-1); - } -} -#endif - -#ifndef HAVE_EXPM1 -static double expm1(double x) -{ - double u = exp(x); - if (u == 1.0) { - return x; - } else if (u-1.0 == -1.0) { - return -1; - } else { - return (u-1.0) * x/log(u); - } -} -#endif - -#if !defined(HAVE_EXPM1) || !defined(HAVE_LONGDOUBLE_FUNCS) -#ifdef expml1 -#undef expml1 -#endif -static longdouble expm1l(longdouble x) -{ - longdouble u = expl(x); - if (u == 1.0) { - return x; - } else if (u-1.0 == -1.0) { - return -1; - } else { - return (u-1.0) * x/logl(u); - } -} -#endif - -#if !defined(HAVE_EXPM1) || !defined(HAVE_FLOAT_FUNCS) -#ifdef expm1f -#undef expm1f -#endif -static float expm1f(float x) -{ - float u = expf(x); - if (u == 1) { - return x; - } else if (u-1 == -1) { - return -1; - } else { - return (u-1) * x/logf(u); - } -} -#endif - - - -/* Don't pass structures between functions (only pointers) because how - structures are passed is compiler dependent and could cause - segfaults if ufuncobject.c is compiled with a different compiler - than an extension that makes use of the UFUNC API -*/ - -/**begin repeat - - #typ=float, double, longdouble# - #c=f,,l# -*/ - -/* constants */ -static c@typ@ nc_1@c@ = {1., 0.}; -static c@typ@ nc_half@c@ = {0.5, 0.}; -static c@typ@ nc_i@c@ = {0., 1.}; -static c@typ@ nc_i2@c@ = {0., 0.5}; -/* - static c@typ@ nc_mi@c@ = {0., -1.}; - static c@typ@ nc_pi2@c@ = {M_PI/2., 0.}; -*/ - -static void -nc_sum@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - r->real = a->real + b->real; - r->imag = a->imag + b->imag; - return; -} - -static void -nc_diff@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - r->real = a->real - b->real; - r->imag = a->imag - b->imag; - return; -} - -static void -nc_neg@c@(c@typ@ *a, c@typ@ *r) -{ - r->real = -a->real; - r->imag = -a->imag; - return; -} - -static void -nc_prod@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - register @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - r->real = ar*br - ai*bi; - r->imag = ar*bi + ai*br; - return; -} - -static void -nc_quot@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - - register @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - register @typ@ d = br*br + bi*bi; - r->real = (ar*br + ai*bi)/d; - r->imag = (ai*br - ar*bi)/d; - return; -} - -static void -nc_sqrt@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ s,d; - if (x->real == 0. && x->imag == 0.) 
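The log1p/expm1 fallbacks above exist because computing log(1+x) or exp(x)-1 directly cancels catastrophically for tiny x; the u == 1 guard returns x itself in that regime. For instance:

import numpy as np

x = 1e-18
print(np.log1p(x), np.log(1.0 + x))   # ~1e-18 vs 0.0 -- 1.0 + x has already rounded to 1.0
print(np.expm1(x), np.exp(x) - 1.0)   # ~1e-18 vs 0.0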
- *r = *x; - else { - s = sqrt@c@((fabs@c@(x->real) + hypot@c@(x->real,x->imag))/2); - d = x->imag/(2*s); - if (x->real > 0) { - r->real = s; - r->imag = d; - } - else if (x->imag >= 0) { - r->real = d; - r->imag = s; - } - else { - r->real = -d; - r->imag = -s; - } - } - return; -} - -static void -nc_rint@c@(c@typ@ *x, c@typ@ *r) -{ - r->real = rint@c@(x->real); - r->imag = rint@c@(x->imag); -} - -static void -nc_log@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ l = hypot@c@(x->real,x->imag); - r->imag = atan2@c@(x->imag, x->real); - r->real = log@c@(l); - return; -} - -static void -nc_log1p@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ l = hypot@c@(x->real + 1,x->imag); - r->imag = atan2@c@(x->imag, x->real + 1); - r->real = log@c@(l); - return; -} - -static void -nc_exp@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ a = exp@c@(x->real); - r->real = a*cos@c@(x->imag); - r->imag = a*sin@c@(x->imag); - return; -} - -static void -nc_expm1@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ a = exp@c@(x->real); - r->real = a*cos@c@(x->imag) - 1; - r->imag = a*sin@c@(x->imag); - return; -} - -static void -nc_pow@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - intp n; - @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - - if (br == 0. && bi == 0.) { - r->real = 1.; - r->imag = 0.; - return; - } - if (ar == 0. && ai == 0.) { - r->real = 0.; - r->imag = 0.; - return; - } - if (bi == 0 && (n=(intp)br) == br) { - if (n > -100 && n < 100) { - c@typ@ p, aa; - intp mask = 1; - if (n < 0) n = -n; - aa = nc_1@c@; - p.real = ar; p.imag = ai; - while (1) { - if (n & mask) - nc_prod@c@(&aa,&p,&aa); - mask <<= 1; - if (n < mask || mask <= 0) break; - nc_prod@c@(&p,&p,&p); - } - r->real = aa.real; r->imag = aa.imag; - if (br < 0) nc_quot@c@(&nc_1@c@, r, r); - return; - } - } - /* complexobect.c uses an inline version of this formula - investigate whether this had better performance or accuracy */ - nc_log@c@(a, r); - nc_prod@c@(r, b, r); - nc_exp@c@(r, r); - return; -} - - -static void -nc_prodi@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr = x->real; - r->real = -x->imag; - r->imag = xr; - return; -} - - -static void -nc_acos@c@(c@typ@ *x, c@typ@ *r) -{ - nc_prod@c@(x,x,r); - nc_diff@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_prodi@c@(r, r); - nc_sum@c@(x, r, r); - nc_log@c@(r, r); - nc_prodi@c@(r, r); - nc_neg@c@(r, r); - return; - /* return nc_neg(nc_prodi(nc_log(nc_sum(x,nc_prod(nc_i, - nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))))); - */ -} - -static void -nc_acosh@c@(c@typ@ *x, c@typ@ *r) -{ - nc_prod@c@(x, x, r); - nc_diff@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_prodi@c@(r, r); - nc_sum@c@(x, r, r); - nc_log@c@(r, r); - return; - /* - return nc_log(nc_sum(x,nc_prod(nc_i, - nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))); - */ -} - -static void -nc_asin@c@(c@typ@ *x, c@typ@ *r) -{ - c@typ@ a, *pa=&a; - nc_prod@c@(x, x, r); - nc_diff@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_prodi@c@(x, pa); - nc_sum@c@(pa, r, r); - nc_log@c@(r, r); - nc_prodi@c@(r, r); - nc_neg@c@(r, r); - return; - /* - return nc_neg(nc_prodi(nc_log(nc_sum(nc_prod(nc_i,x), - nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))))); - */ -} - - -static void -nc_asinh@c@(c@typ@ *x, c@typ@ *r) -{ - nc_prod@c@(x, x, r); - nc_sum@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_diff@c@(r, x, r); - nc_log@c@(r, r); - nc_neg@c@(r, r); - return; - /* - return nc_neg(nc_log(nc_diff(nc_sqrt(nc_sum(nc_1,nc_prod(x,x))),x))); - */ -} - -static void -nc_atan@c@(c@typ@ *x, c@typ@ *r) -{ - c@typ@ a, *pa=&a; - nc_diff@c@(&nc_i@c@, x, pa); - nc_sum@c@(&nc_i@c@, x, r); - nc_quot@c@(r, pa, r); - nc_log@c@(r,r); - 
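The nc_* helpers above build the complex kernels from a few primitives: nc_sqrt picks the numerically safe branch via s = sqrt((|re| + |z|)/2), nc_log/nc_exp use the polar forms, and nc_pow falls back to exp(b*log(a)) unless the exponent is a small integer, in which case it multiplies by repeated squaring. Spot checks through the ufuncs they feed:

import numpy as np

print(np.sqrt(np.complex128(-1 + 0j)))     # 1j -- the imag >= 0 branch of nc_sqrt
print(np.log(np.complex128(1j)))           # ~1.5707963j, i.e. i*pi/2
print(np.exp(1j * np.pi))                  # ~(-1 + 1.2e-16j)
print(np.power(np.complex128(1 + 1j), 3))  # (-2+2j) -- small-integer exponent path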
nc_prod@c@(&nc_i2@c@, r, r); - return; - /* - return nc_prod(nc_i2,nc_log(nc_quot(nc_sum(nc_i,x),nc_diff(nc_i,x)))); - */ -} - -static void -nc_atanh@c@(c@typ@ *x, c@typ@ *r) -{ - c@typ@ a, *pa=&a; - nc_diff@c@(&nc_1@c@, x, r); - nc_sum@c@(&nc_1@c@, x, pa); - nc_quot@c@(pa, r, r); - nc_log@c@(r, r); - nc_prod@c@(&nc_half@c@, r, r); - return; - /* - return nc_prod(nc_half,nc_log(nc_quot(nc_sum(nc_1,x),nc_diff(nc_1,x)))); - */ -} - -static void -nc_cos@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = cos@c@(xr)*cosh@c@(xi); - r->imag = -sin@c@(xr)*sinh@c@(xi); - return; -} - -static void -nc_cosh@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = cos@c@(xi)*cosh@c@(xr); - r->imag = sin@c@(xi)*sinh@c@(xr); - return; -} - - -#define M_LOG10_E 0.434294481903251827651128918916605082294397 - -static void -nc_log10@c@(c@typ@ *x, c@typ@ *r) -{ - nc_log@c@(x, r); - r->real *= (@typ@) M_LOG10_E; - r->imag *= (@typ@) M_LOG10_E; - return; -} - -static void -nc_sin@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = sin@c@(xr)*cosh@c@(xi); - r->imag = cos@c@(xr)*sinh@c@(xi); - return; -} - -static void -nc_sinh@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = cos@c@(xi)*sinh@c@(xr); - r->imag = sin@c@(xi)*cosh@c@(xr); - return; -} - -static void -nc_tan@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ sr,cr,shi,chi; - @typ@ rs,is,rc,ic; - @typ@ d; - @typ@ xr=x->real, xi=x->imag; - sr = sin@c@(xr); - cr = cos@c@(xr); - shi = sinh@c@(xi); - chi = cosh@c@(xi); - rs = sr*chi; - is = cr*shi; - rc = cr*chi; - ic = -sr*shi; - d = rc*rc + ic*ic; - r->real = (rs*rc+is*ic)/d; - r->imag = (is*rc-rs*ic)/d; - return; -} - -static void -nc_tanh@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ si,ci,shr,chr; - @typ@ rs,is,rc,ic; - @typ@ d; - @typ@ xr=x->real, xi=x->imag; - si = sin@c@(xi); - ci = cos@c@(xi); - shr = sinh@c@(xr); - chr = cosh@c@(xr); - rs = ci*shr; - is = si*chr; - rc = ci*chr; - ic = si*shr; - d = rc*rc + ic*ic; - r->real = (rs*rc+is*ic)/d; - r->imag = (is*rc-rs*ic)/d; - return; -} - -/**end repeat**/ - - -/**begin repeat - - #TYPE=(BOOL, BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*2# - #OP=||, +*13, ^, -*13# - #kind=add*14, subtract*14# - #typ=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*2# -*/ - -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; ireal, \ - ai=((c@typ@ *)i1)->imag, \ - br=((c@typ@ *)i2)->real, \ - bi=((c@typ@ *)i2)->imag; - ((c@typ@ *)op)->real = ar*br - ai*bi; - ((c@typ@ *)op)->imag = ar*bi + ai*br; - } -} - -static void -@TYP@_divide(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0], is2=steps[1], os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for (i=0; ireal, \ - ai=((c@typ@ *)i1)->imag, \ - br=((c@typ@ *)i2)->real, \ - bi=((c@typ@ *)i2)->imag; - register @typ@ d = br*br + bi*bi; - ((c@typ@ *)op)->real = (ar*br + ai*bi)/d; - ((c@typ@ *)op)->imag = (ai*br - ar*bi)/d; - } -} - -static void -@TYP@_floor_divide(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; ireal, \ - ai=((c@typ@ *)i1)->imag, \ - br=((c@typ@ *)i2)->real, \ - 
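nc_sin/nc_cos/nc_tan and their hyperbolic counterparts above use the standard real decompositions, e.g. sin(x+iy) = sin(x)cosh(y) + i*cos(x)sinh(y). A quick numerical confirmation:

import numpy as np

z = 1.0 + 1.0j
lhs = np.sin(z)
rhs = np.sin(1.0) * np.cosh(1.0) + 1j * np.cos(1.0) * np.sinh(1.0)  # the nc_sin formula
print(np.allclose(lhs, rhs))   # True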
bi=((c@typ@ *)i2)->imag; - register @typ@ d = br*br + bi*bi; - ((c@typ@ *)op)->real = floor@c@((ar*br + ai*bi)/d); - ((c@typ@ *)op)->imag = 0; - } -} - -#define @TYP@_true_divide @TYP@_divide -/**end repeat**/ - - -/**begin repeat - #TYP=UBYTE,USHORT,UINT,ULONG,ULONGLONG# - #typ=ubyte, ushort, uint, ulong, ulonglong# -*/ -static void -@TYP@_divide(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; i 0) != (y > 0)) && (x % y != 0)) tmp--; - *((@typ@ *)op)= tmp; - } - } -} -/**end repeat**/ - - -/**begin repeat - #TYP=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# - #typ=char, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# - #otyp=float*4, double*6# -*/ -static void -@TYP@_true_divide(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; ireal; - xi = x->imag; - y->real = xr*xr - xi*xi; - y->imag = 2*xr*xi; - } -} -/**end repeat**/ - -static PyObject * -Py_square(PyObject *o) -{ - return PyNumber_Multiply(o, o); -} - - -/**begin repeat - #TYP=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# - #typ=char, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -*/ -static void -@TYP@_reciprocal(char **args, intp *dimensions, intp *steps, void *data) -{ - intp i, is1 = steps[0], os = steps[1], n = dimensions[0]; - char *i1 = args[0], *op = args[1]; - - for (i = 0; i < n; i++, i1 += is1, op += os) { - @typ@ x = *((@typ@ *)i1); - *((@typ@ *)op) = (@typ@) (1.0 / x); - } -} -/**end repeat**/ - -/**begin repeat - #TYP=CFLOAT,CDOUBLE,CLONGDOUBLE# - #typ=float, double, longdouble# -*/ -static void -@TYP@_reciprocal(char **args, intp *dimensions, intp *steps, void *data) -{ - intp i, is1 = steps[0], os = steps[1], n = dimensions[0]; - char *i1 = args[0], *op = args[1]; - c@typ@ *x, *y; - @typ@ xr, xi, r, denom; - - for (i = 0; i < n; i++, i1 += is1, op += os) { - x = (c@typ@ *)i1; - y = (c@typ@ *)op; - xr = x->real; - xi = x->imag; - if (fabs(xi) <= fabs(xr)) { - r = xi / xr; - denom = xr + xi * r; - y->real = 1 / denom; - y->imag = -r / denom; - } else { - r = xr / xi; - denom = xr * r + xi; - y->real = r / denom; - y->imag = -1 / denom; - } - } -} -/**end repeat**/ - - -static PyObject * -Py_reciprocal(PyObject *o) -{ - PyObject *one, *result; - one = PyInt_FromLong(1); - if (!one) return NULL; - result = PyNumber_Divide(one, o); - Py_DECREF(one); - return result; -} - -static PyObject * -_npy_ObjectMax(PyObject *i1, PyObject *i2) -{ - int cmp; - PyObject *res; - if (PyObject_Cmp(i1, i2, &cmp) < 0) return NULL; - - if (cmp >= 0) { - res = i1; - } - else { - res = i2; - } - Py_INCREF(res); - return res; -} - -static PyObject * -_npy_ObjectMin(PyObject *i1, PyObject *i2) -{ - int cmp; - PyObject *res; - if (PyObject_Cmp(i1, i2, &cmp) < 0) return NULL; - - if (cmp <= 0) { - res = i1; - } - else { - res = i2; - } - Py_INCREF(res); - return res; -} - -/* ones_like is defined here because it's used for x**0 */ - -/**begin repeat - #TYP=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# - #typ=char, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -*/ -static void -@TYP@_ones_like(char **args, intp *dimensions, intp *steps, void *data) -{ - intp 
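For the integer loops above, divide floors the quotient (adjusting the truncated result when the operand signs differ), true_divide promotes to a floating result, and reciprocal computes (typ)(1.0/x), which truncates toward zero for |x| > 1. Observable behaviour:

import numpy as np

print(np.floor_divide(-7, 2))      # -4 -- truncated -3 adjusted down because the signs differ
print(np.true_divide(7, 2))        # 3.5 -- promoted to floating point
print(np.reciprocal(np.int64(3)))  # 0 -- (int)(1.0/3)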
i, os = steps[1], n = dimensions[0]; - char *op = args[1]; - - for (i = 0; i < n; i++, op += os) { - *((@typ@ *)op) = 1; - } -} -/**end repeat**/ - -/**begin repeat - #TYP=CFLOAT,CDOUBLE,CLONGDOUBLE# - #typ=float, double, longdouble# -*/ -static void -@TYP@_ones_like(char **args, intp *dimensions, intp *steps, void *data) -{ - intp i, is1 = steps[0], os = steps[1], n = dimensions[0]; - char *i1 = args[0], *op = args[1]; - c@typ@ *y; - - for (i = 0; i < n; i++, i1 += is1, op += os) { - y = (c@typ@ *)op; - y->real = 1.0; - y->imag = 0.0; - } -} -/**end repeat**/ - -static PyObject * -Py_get_one(PyObject *o) -{ - return PyInt_FromLong(1); -} - - -/**begin repeat - #TYP=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# - #typ=char, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# - #btyp=float*4, double*6# -*/ -static void -@TYP@_power(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i, is1=steps[0],is2=steps[1]; - register intp os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - @btyp@ x, y; - - for(i=0; i, >=, <, <=, ==, !=, &&, ||, &, |, ^# -**/ -static void -BOOL_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - Bool in1, in2; - for(i=0; i*13, >=*13, <*13, <=*13# - #typ=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*4# - #kind= greater*13, greater_equal*13, less*13, less_equal*13# -*/ - -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; i*3, >=*3, <*3, <=*3# - #typ=(cfloat, cdouble, clongdouble)*4# - #kind= greater*3, greater_equal*3, less*3, less_equal*3# -*/ - -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; ireal == ((@typ@ *)i2)->real) - *((Bool *)op)=((@typ@ *)i1)->imag @OP@ \ - ((@typ@ *)i2)->imag; - else - *((Bool *)op)=((@typ@ *)i1)->real @OP@ \ - ((@typ@ *)i2)->real; - } -} -/**end repeat**/ - - -/**begin repeat - #TYPE=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*4# - #typ=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*4# - #OP= ==*13, !=*13, &&*13, ||*13# - #kind=equal*13, not_equal*13, logical_and*13, logical_or*13# -*/ -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; i 0 ? 1 : ((x) < 0 ? -1 : 0)) -#define _SIGN2(x) ((x) == 0 ? 0 : 1) -#define _SIGNC(x) (((x).real > 0) ? 1 : ((x).real < 0 ? -1 : ((x).imag > 0 ? 1 : ((x).imag < 0) ? 
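The comparison loops above order complex values lexicographically, real part first with the imaginary part as tie-breaker, which is also the ordering np.sort and maximum/minimum use; the _SIGN macros give the usual -1/0/+1 sign. For example:

import numpy as np

a = np.array([1 + 2j, 1 + 1j, 2 + 0j])
print(np.sort(a))                  # [1.+1.j 1.+2.j 2.+0.j] -- real first, then imaginary
print(np.maximum(1 + 1j, 1 + 2j))  # (1+2j) -- same tie-break on the imaginary part
print(np.sign([-5, 0, 7]))         # [-1  0  1]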
-1 : 0))) -/**begin repeat - #TYPE=BYTE,SHORT,INT,LONG,LONGLONG,FLOAT,DOUBLE,LONGDOUBLE,UBYTE,USHORT,UINT,ULONG,ULONGLONG# - #typ=byte,short,int,long,longlong,float,double,longdouble,ubyte,ushort,uint,ulong,ulonglong# - #func=_SIGN1*8,_SIGN2*5# -*/ -static void -@TYPE@_sign(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],os=steps[1], n=dimensions[0]; - char *i1=args[0], *op=args[1]; - @typ@ t1; - for(i=0; ireal || \ - ((@typ@ *)i1)->imag); - } -} -/**end repeat**/ - - - - -/**begin repeat - #TYPE=BYTE,SHORT,INT,LONG,LONGLONG# - #typ=byte, short, int, long, longlong# - #c=f*2,,,l*1# -*/ -static void -@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - register @typ@ ix,iy, tmp; - for(i=0; i 0) == (iy > 0)) { - *((@typ@ *)op) = ix % iy; - } - else { /* handle mixed case the way Python does */ - tmp = ix % iy; - if (tmp) tmp += iy; - *((@typ@ *)op)= tmp; - } - } -} -/**end repeat**/ - -/**begin repeat - #TYPE=UBYTE,USHORT,UINT,ULONG,ULONGLONG# - #typ=ubyte, ushort, uint, ulong, ulonglong# -*/ -static void -@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - register @typ@ ix,iy; - for(i=0; i>*10# - #kind=bitwise_and*10, bitwise_or*10, bitwise_xor*10, left_shift*10, right_shift*10# - -*/ -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - register char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; ireal || ((@typ@ *)i1)->imag; - p2 = ((@typ@ *)i2)->real || ((@typ@ *)i2)->imag; - *((Bool *)op)= (p1 || p2) && !(p1 && p2); - } -} -/**end repeat**/ - - - -/**begin repeat - - #TYPE=(BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*2# - #OP= >*14, <*14# - #typ=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*2# - #kind= maximum*14, minimum*14# -*/ -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for(i=0; i*3, <*3# - #typ=(cfloat, cdouble, clongdouble)*2# - #kind= maximum*3, minimum*3# -*/ -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - @typ@ *i1c, *i2c; - for(i=0; ireal @OP@ i2c->real) || \ - ((i1c->real==i2c->real) && (i1c->imag @OP@ i2c->imag))) - memmove(op, i1, sizeof(@typ@)); - else - memmove(op, i2, sizeof(@typ@)); - } -} -/**end repeat**/ - - - -/*** isinf, isinf, isfinite, signbit ***/ -/**begin repeat - #kind=isnan*3, isinf*3, isfinite*3, signbit*3# - #TYPE=(FLOAT, DOUBLE, LONGDOUBLE)*4# - #typ=(float, double, longdouble)*4# - #c=(f,,l)*4# -*/ -static void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - register intp i; - intp is=steps[0], os=steps[1], n=dimensions[0]; - char *ip=args[0], *op=args[1]; - for(i=0; i= mA) - assert all(mB >= mB) - assert not any(mB > mB) - - assert all(mA == mA) - assert not any(mA == mB) - assert all(mB != mA) - - assert not 
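The signed remainder loops above adjust the C '%' result so that, as the comment says, mixed signs are handled the way Python does (the result takes the sign of the divisor); fmod keeps the C behaviour. For comparison:

import numpy as np

print(np.remainder(-7, 3), (-7) % 3)   # 2 2 -- matches Python's %
print(np.fmod(-7, 3))                  # -1 -- C-style truncated remainder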
all(abs(mA) > 0) - assert all(abs(mB > 0)) - - def check_asmatrix(self): - A = arange(100).reshape(10,10) - mA = asmatrix(A) - A[0,0] = -10 - assert A[0,0] == mA[0,0] - - def check_noaxis(self): - A = matrix([[1,0],[0,1]]) - assert A.sum() == matrix(2) - assert A.mean() == matrix(0.5) - -class TestCasting(NumpyTestCase): - def check_basic(self): - A = arange(100).reshape(10,10) - mA = matrix(A) - - mB = mA.copy() - O = ones((10,10), float64) * 0.1 - mB = mB + O - assert mB.dtype.type == float64 - assert all(mA != mB) - assert all(mB == mA+0.1) - - mC = mA.copy() - O = ones((10,10), complex128) - mC = mC * O - assert mC.dtype.type == complex128 - assert all(mA != mB) - -class TestAlgebra(NumpyTestCase): - def check_basic(self): - import numpy.linalg as linalg - - A = array([[1., 2.], - [3., 4.]]) - mA = matrix(A) - - B = identity(2) - for i in xrange(6): - assert allclose((mA ** i).A, B) - B = dot(B, A) - - Ainv = linalg.inv(A) - B = identity(2) - for i in xrange(6): - assert allclose((mA ** -i).A, B) - B = dot(B, Ainv) - - assert allclose((mA * mA).A, dot(A, A)) - assert allclose((mA + mA).A, (A + A)) - assert allclose((3*mA).A, (3*A)) - -class TestMatrixReturn(NumpyTestCase): - def check_instance_methods(self): - a = matrix([1.0], dtype='f8') - methodargs = { - 'astype' : ('intc',), - 'clip' : (0.0, 1.0), - 'compress' : ([1],), - 'repeat' : (1,), - 'reshape' : (1,), - 'swapaxes' : (0,0) - } - excluded_methods = [ - 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', - 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', - 'searchsorted', 'setflags', 'setfield', 'sort', 'take', - 'tofile', 'tolist', 'tostring', 'all', 'any', 'sum', - 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset' - ] - for attrib in dir(a): - if attrib.startswith('_') or attrib in excluded_methods: - continue - f = eval('a.%s' % attrib) - if callable(f): - # reset contents of a - a.astype('f8') - a.fill(1.0) - if attrib in methodargs: - args = methodargs[attrib] - else: - args = () - b = f(*args) - assert type(b) is matrix, "%s" % attrib - assert type(a.real) is matrix - assert type(a.imag) is matrix - c,d = matrix([0.0]).nonzero() - assert type(c) is matrix - assert type(d) is matrix - -class TestIndexing(NumpyTestCase): - def check_basic(self): - x = asmatrix(zeros((3,2),float)) - y = zeros((3,1),float) - y[:,0] = [0.8,0.2,0.3] - x[:,1] = y>0.5 - assert_equal(x, [[0,1],[0,0],[0,0]]) - - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py deleted file mode 100644 index 1e1082559..000000000 --- a/numpy/core/tests/test_errstate.py +++ /dev/null @@ -1,62 +0,0 @@ -# The following exec statement (or something like it) is needed to -# prevent SyntaxError on Python < 2.5. Even though this is a test, -# SyntaxErrors are not acceptable; on Debian systems, they block -# byte-compilation during install and thus cause the package to fail -# to install. - -import sys -if sys.version_info[:2] >= (2, 5): - exec """ -from __future__ import with_statement -from numpy.core import * -from numpy.random import rand, randint -from numpy.testing import * - - - -class TestErrstate(NumpyTestCase): - - - def test_invalid(self): - with errstate(all='raise', under='ignore'): - a = -arange(3) - # This should work - with errstate(invalid='ignore'): - sqrt(a) - # While this should fail! 
- try: - sqrt(a) - except FloatingPointError: - pass - else: - self.fail() - - def test_divide(self): - with errstate(all='raise', under='ignore'): - a = -arange(3) - # This should work - with errstate(divide='ignore'): - a / 0 - # While this should fail! - try: - a / 0 - except FloatingPointError: - pass - else: - self.fail() - - def test_errcall(self): - def foo(*args): - print args - olderrcall = geterrcall() - with errstate(call=foo): - assert(geterrcall() is foo), 'call is not foo' - with errstate(call=None): - assert(geterrcall() is None), 'call is not None' - assert(geterrcall() is olderrcall), 'call is not olderrcall' - -""" - -if __name__ == '__main__': - from numpy.testing import * - NumpyTest().run() diff --git a/numpy/core/tests/test_ma.py b/numpy/core/tests/test_ma.py deleted file mode 100644 index ed7cb2a79..000000000 --- a/numpy/core/tests/test_ma.py +++ /dev/null @@ -1,873 +0,0 @@ -import numpy -import types, time -from numpy.core.ma import * -from numpy.core.numerictypes import float32 -from numpy.testing import NumpyTestCase, NumpyTest -pi = numpy.pi -def eq(v,w, msg=''): - result = allclose(v,w) - if not result: - print """Not eq:%s -%s ----- -%s"""% (msg, str(v), str(w)) - return result - -class TestMa(NumpyTestCase): - def __init__(self, *args, **kwds): - NumpyTestCase.__init__(self, *args, **kwds) - self.setUp() - - def setUp (self): - x=numpy.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y=numpy.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1] - xm = array(x, mask=m1) - ym = array(y, mask=m2) - z = numpy.array([-.5, 0., .5, .8]) - zm = array(z, mask=[0,1,0,0]) - xf = numpy.where(m1, 1.e+20, x) - s = x.shape - xm.set_fill_value(1.e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) - - def check_testBasic1d(self): - "Test of basic array creation and properties in 1 dimension." - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.failIf(isMaskedArray(x)) - self.failUnless(isMaskedArray(xm)) - self.assertEqual(shape(xm), s) - self.assertEqual(xm.shape, s) - self.assertEqual(xm.dtype, x.dtype) - self.assertEqual( xm.size , reduce(lambda x,y:x*y, s)) - self.assertEqual(count(xm) , len(m1) - reduce(lambda x,y:x+y, m1)) - self.failUnless(eq(xm, xf)) - self.failUnless(eq(filled(xm, 1.e20), xf)) - self.failUnless(eq(x, xm)) - - def check_testBasic2d(self): - "Test of basic array creation and properties in 2 dimensions." - for s in [(4,3), (6,2)]: - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - - self.failIf(isMaskedArray(x)) - self.failUnless(isMaskedArray(xm)) - self.assertEqual(shape(xm), s) - self.assertEqual(xm.shape, s) - self.assertEqual( xm.size , reduce(lambda x,y:x*y, s)) - self.assertEqual( count(xm) , len(m1) - reduce(lambda x,y:x+y, m1)) - self.failUnless(eq(xm, xf)) - self.failUnless(eq(filled(xm, 1.e20), xf)) - self.failUnless(eq(x, xm)) - self.setUp() - - def check_testArithmetic (self): - "Test of basic arithmetic." 
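The errstate tests above are wrapped in an exec string only so the file still byte-compiles on Python < 2.5, where the with statement needs a __future__ import; on any later interpreter the same check can be written directly (a sketch using the public numpy namespace):

import numpy as np

with np.errstate(divide='raise'):
    try:
        np.array([1.0]) / 0.0
    except FloatingPointError:
        print('divide-by-zero raised, as requested')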
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - a2d = array([[1,2],[0,4]]) - a2dm = masked_array(a2d, [[0,0],[1,0]]) - self.failUnless(eq (a2d * a2d, a2d * a2dm)) - self.failUnless(eq (a2d + a2d, a2d + a2dm)) - self.failUnless(eq (a2d - a2d, a2d - a2dm)) - for s in [(12,), (4,3), (2,6)]: - x = x.reshape(s) - y = y.reshape(s) - xm = xm.reshape(s) - ym = ym.reshape(s) - xf = xf.reshape(s) - self.failUnless(eq(-x, -xm)) - self.failUnless(eq(x + y, xm + ym)) - self.failUnless(eq(x - y, xm - ym)) - self.failUnless(eq(x * y, xm * ym)) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - self.failUnless(eq(x / y, xm / ym)) - numpy.seterr(**olderr) - self.failUnless(eq(a10 + y, a10 + ym)) - self.failUnless(eq(a10 - y, a10 - ym)) - self.failUnless(eq(a10 * y, a10 * ym)) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - self.failUnless(eq(a10 / y, a10 / ym)) - numpy.seterr(**olderr) - self.failUnless(eq(x + a10, xm + a10)) - self.failUnless(eq(x - a10, xm - a10)) - self.failUnless(eq(x * a10, xm * a10)) - self.failUnless(eq(x / a10, xm / a10)) - self.failUnless(eq(x**2, xm**2)) - self.failUnless(eq(abs(x)**2.5, abs(xm) **2.5)) - self.failUnless(eq(x**y, xm**ym)) - self.failUnless(eq(numpy.add(x,y), add(xm, ym))) - self.failUnless(eq(numpy.subtract(x,y), subtract(xm, ym))) - self.failUnless(eq(numpy.multiply(x,y), multiply(xm, ym))) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - self.failUnless(eq(numpy.divide(x,y), divide(xm, ym))) - numpy.seterr(**olderr) - - - def check_testMixedArithmetic(self): - na = numpy.array([1]) - ma = array([1]) - self.failUnless(isinstance(na + ma, MaskedArray)) - self.failUnless(isinstance(ma + na, MaskedArray)) - - def check_testUfuncs1 (self): - "Test various functions such as sin, cos." - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.failUnless (eq(numpy.cos(x), cos(xm))) - self.failUnless (eq(numpy.cosh(x), cosh(xm))) - self.failUnless (eq(numpy.sin(x), sin(xm))) - self.failUnless (eq(numpy.sinh(x), sinh(xm))) - self.failUnless (eq(numpy.tan(x), tan(xm))) - self.failUnless (eq(numpy.tanh(x), tanh(xm))) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - self.failUnless (eq(numpy.sqrt(abs(x)), sqrt(xm))) - self.failUnless (eq(numpy.log(abs(x)), log(xm))) - self.failUnless (eq(numpy.log10(abs(x)), log10(xm))) - numpy.seterr(**olderr) - self.failUnless (eq(numpy.exp(x), exp(xm))) - self.failUnless (eq(numpy.arcsin(z), arcsin(zm))) - self.failUnless (eq(numpy.arccos(z), arccos(zm))) - self.failUnless (eq(numpy.arctan(z), arctan(zm))) - self.failUnless (eq(numpy.arctan2(x, y), arctan2(xm, ym))) - self.failUnless (eq(numpy.absolute(x), absolute(xm))) - self.failUnless (eq(numpy.equal(x,y), equal(xm, ym))) - self.failUnless (eq(numpy.not_equal(x,y), not_equal(xm, ym))) - self.failUnless (eq(numpy.less(x,y), less(xm, ym))) - self.failUnless (eq(numpy.greater(x,y), greater(xm, ym))) - self.failUnless (eq(numpy.less_equal(x,y), less_equal(xm, ym))) - self.failUnless (eq(numpy.greater_equal(x,y), greater_equal(xm, ym))) - self.failUnless (eq(numpy.conjugate(x), conjugate(xm))) - self.failUnless (eq(numpy.concatenate((x,y)), concatenate((xm,ym)))) - self.failUnless (eq(numpy.concatenate((x,y)), concatenate((x,y)))) - self.failUnless (eq(numpy.concatenate((x,y)), concatenate((xm,y)))) - self.failUnless (eq(numpy.concatenate((x,y,x)), concatenate((x,ym,x)))) - - def check_xtestCount (self): - "Test count" - ott = array([0.,1.,2.,3.], mask=[1,0,0,0]) - self.failUnless( isinstance(count(ott), types.IntType)) - 
self.assertEqual(3, count(ott)) - self.assertEqual(1, count(1)) - self.failUnless (eq(0, array(1,mask=[1]))) - ott=ott.reshape((2,2)) - assert isMaskedArray(count(ott,0)) - assert isinstance(count(ott), types.IntType) - self.failUnless (eq(3, count(ott))) - assert getmask(count(ott,0)) is nomask - self.failUnless (eq([1,2],count(ott,0))) - - def check_testMinMax (self): - "Test minimum and maximum." - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - xr = numpy.ravel(x) #max doesn't work if shaped - xmr = ravel(xm) - self.failUnless (eq(max(xr), maximum(xmr))) #true because of careful selection of data - self.failUnless (eq(min(xr), minimum(xmr))) #true because of careful selection of data - - def check_testAddSumProd (self): - "Test add, sum, product." - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.failUnless (eq(numpy.add.reduce(x), add.reduce(x))) - self.failUnless (eq(numpy.add.accumulate(x), add.accumulate(x))) - self.failUnless (eq(4, sum(array(4),axis=0))) - self.failUnless (eq(4, sum(array(4), axis=0))) - self.failUnless (eq(numpy.sum(x,axis=0), sum(x,axis=0))) - self.failUnless (eq(numpy.sum(filled(xm,0),axis=0), sum(xm,axis=0))) - self.failUnless (eq(numpy.sum(x,0), sum(x,0))) - self.failUnless (eq(numpy.product(x,axis=0), product(x,axis=0))) - self.failUnless (eq(numpy.product(x,0), product(x,0))) - self.failUnless (eq(numpy.product(filled(xm,1),axis=0), product(xm,axis=0))) - if len(s) > 1: - self.failUnless (eq(numpy.concatenate((x,y),1), concatenate((xm,ym),1))) - self.failUnless (eq(numpy.add.reduce(x,1), add.reduce(x,1))) - self.failUnless (eq(numpy.sum(x,1), sum(x,1))) - self.failUnless (eq(numpy.product(x,1), product(x,1))) - - - def check_testCI(self): - "Test of conversions and indexing" - x1 = numpy.array([1,2,4,3]) - x2 = array(x1, mask = [1,0,0,0]) - x3 = array(x1, mask = [0,1,0,1]) - x4 = array(x1) - # test conversion to strings - junk, garbage = str(x2), repr(x2) - assert eq(numpy.sort(x1),sort(x2, fill_value=0)) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] - assert x2[0] is masked - assert eq(x1[2],x2[2]) - assert eq(x1[2:5],x2[2:5]) - assert eq(x1[:],x2[:]) - assert eq(x1[1:], x3[1:]) - x1[2]=9 - x2[2]=9 - assert eq(x1,x2) - x1[1:3] = 99 - x2[1:3] = 99 - assert eq(x1,x2) - x2[1] = masked - assert eq(x1,x2) - x2[1:3]=masked - assert eq(x1,x2) - x2[:] = x1 - x2[1] = masked - assert allequal(getmask(x2),array([0,1,0,0])) - x3[:] = masked_array([1,2,3,4],[0,1,1,0]) - assert allequal(getmask(x3), array([0,1,1,0])) - x4[:] = masked_array([1,2,3,4],[0,1,1,0]) - assert allequal(getmask(x4), array([0,1,1,0])) - assert allequal(x4, array([1,2,3,4])) - x1 = numpy.arange(5)*1.0 - x2 = masked_values(x1, 3.0) - assert eq(x1,x2) - assert allequal(array([0,0,0,1,0],MaskType), x2.mask) - assert eq(3.0, x2.fill_value()) - x1 = array([1,'hello',2,3],object) - x2 = numpy.array([1,'hello',2,3],object) - s1 = x1[1] - s2 = x2[1] - self.assertEqual(type(s2), str) - self.assertEqual(type(s1), str) - self.assertEqual(s1, s2) - assert x1[1:1].shape == (0,) - - def check_testCopySize(self): - "Tests of some subtle points of copying and sizing." 
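These tests exercise the old numpy.core.ma module; the maintained equivalent lives at numpy.ma, and the behaviours being checked here (mask-aware counting, arithmetic and ufuncs that skip masked slots) look like this there:

import numpy as np

x = np.ma.array([1.0, 4.0, 9.0, 16.0], mask=[0, 1, 0, 0])
print(np.ma.count(x))   # 3 unmasked entries
print(np.sqrt(x))       # [1.0 -- 3.0 4.0] -- the masked slot stays masked
print((x + 10).mask)    # [False  True False False] -- masks propagate through arithmetic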
- n = [0,0,1,0,0] - m = make_mask(n) - m2 = make_mask(m) - self.failUnless(m is m2) - m3 = make_mask(m, copy=1) - self.failUnless(m is not m3) - - x1 = numpy.arange(5) - y1 = array(x1, mask=m) - self.failUnless( y1.raw_data() is not x1) - self.failUnless( allequal(x1,y1.raw_data())) - self.failUnless( y1.mask is m) - - y1a = array(y1, copy=0) - self.failUnless( y1a.raw_data() is y1.raw_data()) - self.failUnless( y1a.mask is y1.mask) - - y2 = array(x1, mask=m, copy=0) - self.failUnless( y2.raw_data() is x1) - self.failUnless( y2.mask is m) - self.failUnless( y2[2] is masked) - y2[2]=9 - self.failUnless( y2[2] is not masked) - self.failUnless( y2.mask is not m) - self.failUnless( allequal(y2.mask, 0)) - - y3 = array(x1*1.0, mask=m) - self.failUnless(filled(y3).dtype is (x1*1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - self.failUnless( eq(concatenate([x4,x4]), y4)) - self.failUnless( eq(getmask(y4),[0,0,1,0,0,0,1,0])) - y5 = repeat(x4, (2,2,2,2), axis=0) - self.failUnless( eq(y5, [0,0,1,1,2,2,3,3])) - y6 = repeat(x4, 2, axis=0) - self.failUnless( eq(y5, y6)) - - def check_testPut(self): - "Test of put" - d = arange(5) - n = [0,0,0,1,1] - m = make_mask(n) - x = array(d, mask = m) - self.failUnless( x[3] is masked) - self.failUnless( x[4] is masked) - x[[1,4]] = [10,40] - self.failUnless( x.mask is not m) - self.failUnless( x[3] is masked) - self.failUnless( x[4] is not masked) - self.failUnless( eq(x, [0,10,2,-1,40])) - - x = array(d, mask = m) - x.put([-1,100,200]) - self.failUnless( eq(x, [-1,100,200,0,0])) - self.failUnless( x[3] is masked) - self.failUnless( x[4] is masked) - - x = array(d, mask = m) - x.putmask([30,40]) - self.failUnless( eq(x, [0,1,2,30,40])) - self.failUnless( x.mask is nomask) - - x = array(d, mask = m) - y = x.compressed() - z = array(x, mask = m) - z.put(y) - assert eq (x, z) - - def check_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] - i = numpy.nonzero(m)[0] - putmask(xm, m, z) - assert take(xm, i,axis=0) == z - put(ym, i, zm) - assert take(ym, i,axis=0) == zm - - def check_testOddFeatures(self): - "Test of other odd features" - x = arange(20); x=x.reshape(4,5) - x.flat[5] = 12 - assert x[1,0] == 12 - z = x + 10j * x - assert eq(z.real, x) - assert eq(z.imag, 10*x) - assert eq((z*conjugate(z)).real, 101*x*x) - z.imag[...] 
= 0.0 - - x = arange(10) - x[3] = masked - assert str(x[3]) == str(masked) - c = x >= 8 - assert count(where(c,masked,masked)) == 0 - assert shape(where(c,masked,masked)) == c.shape - z = where(c , x, masked) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is masked - assert z[7] is masked - assert z[8] is not masked - assert z[9] is not masked - assert eq(x,z) - z = where(c , masked, x) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is not masked - assert z[7] is not masked - assert z[8] is masked - assert z[9] is masked - z = masked_where(c, x) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is not masked - assert z[7] is not masked - assert z[8] is masked - assert z[9] is masked - assert eq(x,z) - x = array([1.,2.,3.,4.,5.]) - c = array([1,1,1,0,0]) - x[2] = masked - z = where(c, x, -x) - assert eq(z, [1.,2.,0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert eq(z, [1.,2.,0., -4., -5]) - assert z[0] is masked - assert z[1] is not masked - assert z[2] is masked - assert eq(masked_where(greater(x, 2), x), masked_greater(x,2)) - assert eq(masked_where(greater_equal(x, 2), x), masked_greater_equal(x,2)) - assert eq(masked_where(less(x, 2), x), masked_less(x,2)) - assert eq(masked_where(less_equal(x, 2), x), masked_less_equal(x,2)) - assert eq(masked_where(not_equal(x, 2), x), masked_not_equal(x,2)) - assert eq(masked_where(equal(x, 2), x), masked_equal(x,2)) - assert eq(masked_where(not_equal(x,2), x), masked_not_equal(x,2)) - assert eq(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4]) - assert eq(masked_outside(range(5), 1, 3),[199,1,2,3,199]) - assert eq(masked_inside(array(range(5), mask=[1,0,0,0,0]), 1, 3).mask, [1,1,1,1,0]) - assert eq(masked_outside(array(range(5), mask=[0,1,0,0,0]), 1, 3).mask, [1,1,0,0,1]) - assert eq(masked_equal(array(range(5), mask=[1,0,0,0,0]), 2).mask, [1,0,1,0,0]) - assert eq(masked_not_equal(array([2,2,1,2,1], mask=[1,0,0,0,0]), 2).mask, [1,0,1,0,1]) - assert eq(masked_where([1,1,0,0,0], [1,2,3,4,5]), [99,99,3,4,5]) - atest = ones((10,10,10), dtype=float32) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest,atest) - assert eq(atest,ctest) - z = choose(c, (-x, x)) - assert eq(z, [1.,2.,0., -4., -5]) - assert z[0] is masked - assert z[1] is not masked - assert z[2] is masked - x = arange(6) - x[5] = masked - y = arange(6)*10 - y[2]= masked - c = array([1,1,1,0,0,0], mask=[1,0,0,0,0,0]) - cm = c.filled(1) - z = where(c,x,y) - zm = where(cm,x,y) - assert eq(z, zm) - assert getmask(zm) is nomask - assert eq(zm, [0,1,2,30,40,50]) - z = where(c, masked, 1) - assert eq(z, [99,99,99,1,1,1]) - z = where(c, 1, masked) - assert eq(z, [99, 1, 1, 99, 99, 99]) - - def check_testMinMax(self): - "Test of minimum, maximum." 
- assert eq(minimum([1,2,3],[4,0,9]), [1,0,3]) - assert eq(maximum([1,2,3],[4,0,9]), [4,2,9]) - x = arange(5) - y = arange(5) - 2 - x[3] = masked - y[0] = masked - assert eq(minimum(x,y), where(less(x,y), x, y)) - assert eq(maximum(x,y), where(greater(x,y), x, y)) - assert minimum(x) == 0 - assert maximum(x) == 4 - - def check_testTakeTransposeInnerOuter(self): - "Test of take, transpose, inner, outer products" - x = arange(24) - y = numpy.arange(24) - x[5:6] = masked - x=x.reshape(2,3,4) - y=y.reshape(2,3,4) - assert eq(numpy.transpose(y,(2,0,1)), transpose(x,(2,0,1))) - assert eq(numpy.take(y, (2,0,1), 1), take(x, (2,0,1), 1)) - assert eq(numpy.inner(filled(x,0),filled(y,0)), - inner(x, y)) - assert eq(numpy.outer(filled(x,0),filled(y,0)), - outer(x, y)) - y = array(['abc', 1, 'def', 2, 3], object) - y[2] = masked - t = take(y,[0,3,4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - - def check_testInplace(self): - """Test of inplace operations and rich comparisons""" - y = arange(10) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x += 1 - assert eq(x, y+1) - xm += 1 - assert eq(x, y+1) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x -= 1 - assert eq(x, y-1) - xm -= 1 - assert eq(xm, y-1) - - x = arange(10)*1.0 - xm = arange(10)*1.0 - xm[2] = masked - x *= 2.0 - assert eq(x, y*2) - xm *= 2.0 - assert eq(xm, y*2) - - x = arange(10)*2 - xm = arange(10) - xm[2] = masked - x /= 2 - assert eq(x, y) - xm /= 2 - assert eq(x, y) - - x = arange(10)*1.0 - xm = arange(10)*1.0 - xm[2] = masked - x /= 2.0 - assert eq(x, y/2.0) - xm /= arange(10) - assert eq(xm, ones((10,))) - - x = arange(10).astype(float32) - xm = arange(10) - xm[2] = masked - id1 = id(x.raw_data()) - x += 1. - assert id1 == id(x.raw_data()) - assert eq(x, y+1.) - - def check_testPickle(self): - "Test of pickling" - import pickle - x = arange(12) - x[4:10:2] = masked - x = x.reshape(4,3) - s = pickle.dumps(x) - y = pickle.loads(s) - assert eq(x,y) - - def check_testMasked(self): - "Test of masked element" - xx=arange(6) - xx[1] = masked - self.failUnless(str(masked) == '--') - self.failUnless(xx[1] is masked) - self.failUnlessEqual(filled(xx[1], 0), 0) - # don't know why these should raise an exception... - #self.failUnlessRaises(Exception, lambda x,y: x+y, masked, masked) - #self.failUnlessRaises(Exception, lambda x,y: x+y, masked, 2) - #self.failUnlessRaises(Exception, lambda x,y: x+y, masked, xx) - #self.failUnlessRaises(Exception, lambda x,y: x+y, xx, masked) - - def check_testAverage1(self): - "Test of average." - ott = array([0.,1.,2.,3.], mask=[1,0,0,0]) - self.failUnless(eq(2.0, average(ott,axis=0))) - self.failUnless(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) - result, wts = average(ott, weights=[1.,1.,2.,1.], returned=1) - self.failUnless(eq(2.0, result)) - self.failUnless(wts == 4.0) - ott[:] = masked - self.failUnless(average(ott,axis=0) is masked) - ott = array([0.,1.,2.,3.], mask=[1,0,0,0]) - ott=ott.reshape(2,2) - ott[:,1] = masked - self.failUnless(eq(average(ott,axis=0), [2.0, 0.0])) - self.failUnless(average(ott,axis=1)[0] is masked) - self.failUnless(eq([2.,0.], average(ott, axis=0))) - result, wts = average(ott, axis=0, returned=1) - self.failUnless(eq(wts, [1., 0.])) - - def check_testAverage2(self): - "More tests of average." 
- w1 = [0,1,1,1,1,0] - w2 = [[0,1,1,1,1,0],[1,0,0,0,0,1]] - x=arange(6) - self.failUnless(allclose(average(x, axis=0), 2.5)) - self.failUnless(allclose(average(x, axis=0, weights=w1), 2.5)) - y=array([arange(6), 2.0*arange(6)]) - self.failUnless(allclose(average(y, None), numpy.add.reduce(numpy.arange(6))*3./12.)) - self.failUnless(allclose(average(y, axis=0), numpy.arange(6) * 3./2.)) - self.failUnless(allclose(average(y, axis=1), [average(x,axis=0), average(x,axis=0) * 2.0])) - self.failUnless(allclose(average(y, None, weights=w2), 20./6.)) - self.failUnless(allclose(average(y, axis=0, weights=w2), [0.,1.,2.,3.,4.,10.])) - self.failUnless(allclose(average(y, axis=1), [average(x,axis=0), average(x,axis=0) * 2.0])) - m1 = zeros(6) - m2 = [0,0,1,1,0,0] - m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.failUnless(allclose(average(masked_array(x, m1),axis=0), 2.5)) - self.failUnless(allclose(average(masked_array(x, m2),axis=0), 2.5)) - self.failUnless(average(masked_array(x, m4),axis=0) is masked) - self.assertEqual(average(masked_array(x, m5),axis=0), 0.0) - self.assertEqual(count(average(masked_array(x, m4),axis=0)), 0) - z = masked_array(y, m3) - self.failUnless(allclose(average(z, None), 20./6.)) - self.failUnless(allclose(average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5])) - self.failUnless(allclose(average(z, axis=1), [2.5, 5.0])) - self.failUnless(allclose( average(z,axis=0, weights=w2), [0.,1., 99., 99., 4.0, 10.0])) - - a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a,b],[b,a]], axis=1, returned=1) - self.assertEqual(shape(r1) , shape(w1)) - self.assertEqual(r1.shape , w1.shape) - r2, w2 = average(ones((2,2,3)), axis=0, weights=[3,1], returned=1) - self.assertEqual(shape(w2) , shape(r2)) - r2, w2 = average(ones((2,2,3)), returned=1) - self.assertEqual(shape(w2) , shape(r2)) - r2, w2 = average(ones((2,2,3)), weights=ones((2,2,3)), returned=1) - self.failUnless(shape(w2) == shape(r2)) - a2d = array([[1,2],[0,4]], float) - a2dm = masked_array(a2d, [[0,0],[1,0]]) - a2da = average(a2d, axis=0) - self.failUnless(eq (a2da, [0.5, 3.0])) - a2dma = average(a2dm, axis=0) - self.failUnless(eq( a2dma, [1.0, 3.0])) - a2dma = average(a2dm, axis=None) - self.failUnless(eq(a2dma, 7./3.)) - a2dma = average(a2dm, axis=1) - self.failUnless(eq(a2dma, [1.5, 4.0])) - - def check_testToPython(self): - self.assertEqual(1, int(array(1))) - self.assertEqual(1.0, float(array(1))) - self.assertEqual(1, int(array([[[1]]]))) - self.assertEqual(1.0, float(array([[1]]))) - self.failUnlessRaises(ValueError, float, array([1,1])) - self.failUnlessRaises(MAError, float, array([1],mask=[1])) - self.failUnless(bool(array([0,1]))) - self.failUnless(bool(array([0,0],mask=[0,1]))) - self.failIf(bool(array([0,0]))) - self.failIf(bool(array([0,0],mask=[0,0]))) - - def check_testScalarArithmetic(self): - xm = array(0, mask=1) - self.failUnless((1/array(0)).mask) - self.failUnless((1 + xm).mask) - self.failUnless((-xm).mask) - self.failUnless((-xm).mask) - self.failUnless(maximum(xm, xm).mask) - self.failUnless(minimum(xm, xm).mask) - self.failUnless(xm.filled().dtype is xm.data.dtype) - x = array(0, mask=0) - self.failUnless(x.filled() == x.data) - self.failUnlessEqual(str(xm), str(masked_print_option)) - - def check_testArrayMethods(self): - a = array([1,3,2]) - b = array([1,3,2], mask=[1,0,1]) - self.failUnless(eq(a.any(), a.data.any())) - self.failUnless(eq(a.all(), a.data.all())) - self.failUnless(eq(a.argmax(), a.data.argmax())) - self.failUnless(eq(a.argmin(), a.data.argmin())) - 
self.failUnless(eq(a.choose(0,1,2,3,4), a.data.choose(0,1,2,3,4))) - self.failUnless(eq(a.compress([1,0,1]), a.data.compress([1,0,1]))) - self.failUnless(eq(a.conj(), a.data.conj())) - self.failUnless(eq(a.conjugate(), a.data.conjugate())) - m = array([[1,2],[3,4]]) - self.failUnless(eq(m.diagonal(), m.data.diagonal())) - self.failUnless(eq(a.sum(), a.data.sum())) - self.failUnless(eq(a.take([1,2]), a.data.take([1,2]))) - self.failUnless(eq(m.transpose(), m.data.transpose())) - - def check_testArrayAttributes(self): - a = array([1,3,2]) - b = array([1,3,2], mask=[1,0,1]) - self.failUnlessEqual(a.ndim, 1) - - def check_testAPI(self): - self.failIf([m for m in dir(numpy.ndarray) - if m not in dir(array) and not m.startswith('_')]) - - def check_testSingleElementSubscript(self): - a = array([1,3,2]) - b = array([1,3,2], mask=[1,0,1]) - self.failUnlessEqual(a[0].shape, ()) - self.failUnlessEqual(b[0].shape, ()) - self.failUnlessEqual(b[1].shape, ()) - -class TestUfuncs(NumpyTestCase): - def setUp(self): - self.d = (array([1.0, 0, -1, pi/2]*2, mask=[0,1]+[0]*6), - array([1.0, 0, -1, pi/2]*2, mask=[1,0]+[0]*6),) - - - def check_testUfuncRegression(self): - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', - 'sin', 'cos', 'tan', - 'arcsin', 'arccos', 'arctan', - 'sinh', 'cosh', 'tanh', - 'arcsinh', - 'arccosh', - 'arctanh', - 'absolute', 'fabs', 'negative', - # 'nonzero', 'around', - 'floor', 'ceil', - # 'sometrue', 'alltrue', - 'logical_not', - 'add', 'subtract', 'multiply', - 'divide', 'true_divide', 'floor_divide', - 'remainder', 'fmod', 'hypot', 'arctan2', - 'equal', 'not_equal', 'less_equal', 'greater_equal', - 'less', 'greater', - 'logical_and', 'logical_or', 'logical_xor', - ]: - try: - uf = getattr(umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(numpy.ma, f) - args = self.d[:uf.nin] - olderr = numpy.geterr() - if f in ['sqrt', 'arctanh', 'arcsin', 'arccos', 'arccosh', 'arctanh', 'log', - 'log10','divide','true_divide', 'floor_divide', 'remainder', 'fmod']: - numpy.seterr(invalid='ignore') - if f in ['arctanh', 'log', 'log10']: - numpy.seterr(divide='ignore') - ur = uf(*args) - mr = mf(*args) - numpy.seterr(**olderr) - self.failUnless(eq(ur.filled(0), mr.filled(0), f)) - self.failUnless(eqmask(ur.mask, mr.mask)) - - def test_reduce(self): - a = self.d[0] - self.failIf(alltrue(a,axis=0)) - self.failUnless(sometrue(a,axis=0)) - self.failUnlessEqual(sum(a[:3],axis=0), 0) - self.failUnlessEqual(product(a,axis=0), 0) - - def test_minmax(self): - a = arange(1,13).reshape(3,4) - amask = masked_where(a < 5,a) - self.failUnlessEqual(amask.max(), a.max()) - self.failUnlessEqual(amask.min(), 5) - self.failUnless((amask.max(0) == a.max(0)).all()) - self.failUnless((amask.min(0) == [5,6,7,8]).all()) - self.failUnless(amask.max(1)[0].mask) - self.failUnless(amask.min(1)[0].mask) - - def test_nonzero(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = array([1,0,2,0], mask=[0,0,1,1]) - self.failUnless(eq(nonzero(x), [0])) - - -class TestArrayMethods(NumpyTestCase): - - def setUp(self): - x = numpy.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6,6) - XX = x.reshape(3,2,2,3) - - m = numpy.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = 
array(data=x,mask=m) - mX = array(data=X,mask=m.reshape(X.shape)) - mXX = array(data=XX,mask=m.reshape(XX.shape)) - - m2 = numpy.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x,mask=m2) - m2X = array(data=X,mask=m2.reshape(X.shape)) - m2XX = array(data=XX,mask=m2.reshape(XX.shape)) - self.d = (x,X,XX,m,mx,mX,mXX) - - #------------------------------------------------------ - def test_trace(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - mXdiag = mX.diagonal() - self.assertEqual(mX.trace(), mX.diagonal().compressed().sum()) - self.failUnless(eq(mX.trace(), - X.trace() - sum(mXdiag.mask*X.diagonal(),axis=0))) - - def test_clip(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - clipped = mx.clip(2,8) - self.failUnless(eq(clipped.mask,mx.mask)) - self.failUnless(eq(clipped.data,x.clip(2,8))) - self.failUnless(eq(clipped.data,mx.data.clip(2,8))) - - def test_ptp(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - (n,m) = X.shape - self.assertEqual(mx.ptp(),mx.compressed().ptp()) - rows = numpy.zeros(n,numpy.float_) - cols = numpy.zeros(m,numpy.float_) - for k in range(m): - cols[k] = mX[:,k].compressed().ptp() - for k in range(n): - rows[k] = mX[k].compressed().ptp() - self.failUnless(eq(mX.ptp(0),cols)) - self.failUnless(eq(mX.ptp(1),rows)) - - def test_swapaxes(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - mXswapped = mX.swapaxes(0,1) - self.failUnless(eq(mXswapped[-1],mX[:,-1])) - mXXswapped = mXX.swapaxes(0,2) - self.assertEqual(mXXswapped.shape,(2,2,3,3)) - - - def test_cumprod(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - mXcp = mX.cumprod(0) - self.failUnless(eq(mXcp.data,mX.filled(1).cumprod(0))) - mXcp = mX.cumprod(1) - self.failUnless(eq(mXcp.data,mX.filled(1).cumprod(1))) - - def test_cumsum(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - mXcp = mX.cumsum(0) - self.failUnless(eq(mXcp.data,mX.filled(0).cumsum(0))) - mXcp = mX.cumsum(1) - self.failUnless(eq(mXcp.data,mX.filled(0).cumsum(1))) - - def test_varstd(self): - (x,X,XX,m,mx,mX,mXX,) = self.d - self.failUnless(eq(mX.var(axis=None),mX.compressed().var())) - self.failUnless(eq(mX.std(axis=None),mX.compressed().std())) - self.failUnless(eq(mXX.var(axis=3).shape,XX.var(axis=3).shape)) - self.failUnless(eq(mX.var().shape,X.var().shape)) - (mXvar0,mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - for k in range(6): - self.failUnless(eq(mXvar1[k],mX[k].compressed().var())) - self.failUnless(eq(mXvar0[k],mX[:,k].compressed().var())) - self.failUnless(eq(numpy.sqrt(mXvar0[k]), - mX[:,k].compressed().std())) - - -def eqmask(m1, m2): - if m1 is nomask: - return m2 is nomask - if m2 is nomask: - return m1 is nomask - return (m1 == m2).all() - -def timingTest(): - for f in [testf, testinplace]: - for n in [1000,10000,50000]: - t = testta(n, f) - t1 = testtb(n, f) - t2 = testtc(n, f) - print f.test_name - print """\ -n = %7d -numpy time (ms) %6.1f -MA maskless ratio %6.1f -MA masked ratio %6.1f -""" % (n, t*1000.0, t1/t, t2/t) - -def testta(n, f): - x=numpy.arange(n) + 1.0 - tn0 = time.time() - z = f(x) - return time.time() - tn0 - -def testtb(n, f): - x=arange(n) + 1.0 - tn0 = time.time() - z = f(x) - return time.time() - tn0 - -def testtc(n, f): - x=arange(n) + 1.0 - x[0] = masked - tn0 = time.time() - z = f(x) - return time.time() - tn0 - -def testf(x): - for i in range(25): - y = x **2 + 2.0 * x - 1.0 - w = x **2 + 1.0 - z = (y / w) ** 2 - return z -testf.test_name = 'Simple arithmetic' - -def testinplace(x): - for i in range(25): - y = x**2 - y += 2.0*x - y -= 1.0 - y /= x - return y 
-testinplace.test_name = 'Inplace operations' - -if __name__ == "__main__": - NumpyTest('numpy.core.ma').run() - #timingTest() diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py deleted file mode 100644 index 1baed8978..000000000 --- a/numpy/core/tests/test_multiarray.py +++ /dev/null @@ -1,551 +0,0 @@ -from numpy.testing import * -from numpy.core import * -from numpy import random -import numpy as N - -import tempfile - -class TestFlags(NumpyTestCase): - def setUp(self): - self.a = arange(10) - - def check_writeable(self): - mydict = locals() - self.a.flags.writeable = False - self.assertRaises(RuntimeError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 - - def check_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.updateifcopy, False) - - -class TestAttributes(NumpyTestCase): - def setUp(self): - self.one = arange(10) - self.two = arange(20).reshape(4,5) - self.three = arange(60,dtype=float64).reshape(2,5,6) - - def check_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4,5)) - assert_equal(self.three.shape, (2,5,6)) - self.three.shape = (10,3,2) - assert_equal(self.three.shape, (10,3,2)) - self.three.shape = (2,5,6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, arange(20)) - - def check_dtypeattr(self): - assert_equal(self.one.dtype, dtype(int_)) - assert_equal(self.three.dtype, dtype(float_)) - assert_equal(self.one.dtype.char, 'l') - assert_equal(self.three.dtype.char, 'd') - self.failUnless(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') - - def check_stridesattr(self): - x = self.one - def make_array(size, offset, strides): - return ndarray([size], buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) - assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - self.failUnlessRaises(ValueError, make_array, 4, 4, -2) - self.failUnlessRaises(ValueError, make_array, 4, 2, -1) - self.failUnlessRaises(ValueError, make_array, 8, 3, 1) - #self.failUnlessRaises(ValueError, make_array, 8, 3, 0) - #self.failUnlessRaises(ValueError, lambda: ndarray([1], strides=4)) - - - def check_set_stridesattr(self): - x = self.one - def make_array(size, offset, strides): - try: - r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) - except: - pass - r.strides = strides=strides*x.itemsize - return r - assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - self.failUnlessRaises(ValueError, make_array, 4, 4, -2) - self.failUnlessRaises(ValueError, make_array, 4, 2, -1) - self.failUnlessRaises(ValueError, make_array, 8, 3, 1) - #self.failUnlessRaises(ValueError, make_array, 8, 3, 0) - - def 
check_fill(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = empty((3,2,1), t) - y = empty((3,2,1), t) - x.fill(1) - y[...] = 1 - assert_equal(x,y) - - x = array([(0,0.0), (1,1.0)], dtype='i4,f8') - x.fill(x[0]) - assert_equal(x['f1'][1], x['f1'][0]) - -class TestDtypedescr(NumpyTestCase): - def check_construction(self): - d1 = dtype('i4') - assert_equal(d1, dtype(int32)) - d2 = dtype('f8') - assert_equal(d2, dtype(float64)) - -class TestFromstring(NumpyTestCase): - def check_binary(self): - a = fromstring('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',dtype='= g2, [g1[i] >= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) - - def check_mixed(self): - g1 = array(["spam","spa","spammer","and eggs"]) - g2 = "spam" - assert_array_equal(g1 == g2, [x == g2 for x in g1]) - assert_array_equal(g1 != g2, [x != g2 for x in g1]) - assert_array_equal(g1 < g2, [x < g2 for x in g1]) - assert_array_equal(g1 > g2, [x > g2 for x in g1]) - assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) - assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) - - - def check_unicode(self): - g1 = array([u"This",u"is",u"example"]) - g2 = array([u"This",u"was",u"example"]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) - - -class TestArgmax(NumpyTestCase): - def check_all(self): - a = random.normal(0,1,(4,5,6,7,8)) - for i in xrange(a.ndim): - amax = a.max(i) - aargmax = a.argmax(i) - axes = range(a.ndim) - axes.remove(i) - assert all(amax == aargmax.choose(*a.transpose(i,*axes))) - -class TestNewaxis(NumpyTestCase): - def check_basic(self): - sk = array([0,-0.1,0.1]) - res = 250*sk[:,newaxis] - assert_almost_equal(res.ravel(),250*sk) - -class TestClip(NumpyTestCase): - def _check_range(self,x,cmin,cmax): - assert N.all(x >= cmin) - assert N.all(x <= cmax) - - def _clip_type(self,type_group,array_max, - clip_min,clip_max,inplace=False, - expected_min=None,expected_max=None): - if expected_min is None: - expected_min = clip_min - if expected_max is None: - expected_max = clip_max - - for T in N.sctypes[type_group]: - if sys.byteorder == 'little': - byte_orders = ['=','>'] - else: - byte_orders = ['<','='] - - for byteorder in byte_orders: - dtype = N.dtype(T).newbyteorder(byteorder) - - x = (N.random.random(1000) * array_max).astype(dtype) - if inplace: - x.clip(clip_min,clip_max,x) - else: - x = x.clip(clip_min,clip_max) - byteorder = '=' - - if x.dtype.byteorder == '|': byteorder = '|' - assert_equal(x.dtype.byteorder,byteorder) - self._check_range(x,expected_min,expected_max) - return x - - def check_basic(self): - for inplace in [False, True]: - self._clip_type('float',1024,-12.8,100.2, inplace=inplace) - self._clip_type('float',1024,0,0, inplace=inplace) - - self._clip_type('int',1024,-120,100.5, inplace=inplace) - self._clip_type('int',1024,0,0, inplace=inplace) - - x = self._clip_type('uint',1024,-120,100,expected_min=0, inplace=inplace) - x = self._clip_type('uint',1024,0,0, inplace=inplace) - - def check_record_array(self): - rec = N.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], - dtype=[('x', '= 3) - x = val.clip(min=3) - assert N.all(x >= 3) - x = val.clip(max=4) - assert 
N.all(x <= 4) - -class TestPutmask(ParametricTestCase): - def tst_basic(self,x,T,mask,val): - N.putmask(x,mask,val) - assert N.all(x[mask] == T(val)) - assert x.dtype == T - - def testip_types(self): - unchecked_types = [str,unicode,N.void,object] - - x = N.random.random(1000)*100 - mask = x < 40 - - tests = [] - for val in [-100,0,15]: - for types in N.sctypes.itervalues(): - tests.extend([(self.tst_basic,x.copy().astype(T),T,mask,val) - for T in types if T not in unchecked_types]) - return tests - - def test_mask_size(self): - self.failUnlessRaises(ValueError,N.putmask, - N.array([1,2,3]),[True],5) - - def tst_byteorder(self,dtype): - x = N.array([1,2,3],dtype) - N.putmask(x,[True,False,True],-1) - assert_array_equal(x,[-1,2,-1]) - - def testip_byteorder(self): - return [(self.tst_byteorder,dtype) for dtype in ('>i4','f8'), ('z', '']: - for dtype in [float,int,N.complex]: - dt = N.dtype(dtype).newbyteorder(byteorder) - x = (N.random.random((4,7))*5).astype(dt) - buf = x.tostring() - tests.append((self.tst_basic,buf,x.flat,{'dtype':dt})) - return tests - -class TestResize(NumpyTestCase): - def test_basic(self): - x = N.eye(3) - x.resize((5,5)) - assert_array_equal(x.flat[:9],N.eye(3).flat) - assert_array_equal(x[9:].flat,0) - - def test_check_reference(self): - x = N.eye(3) - y = x - self.failUnlessRaises(ValueError,x.resize,(5,1)) - -# Import tests without matching module names -set_local_path() -from test_unicode import * -from test_regression import * -from test_ufunc import * -restore_path() - -if __name__ == "__main__": - NumpyTest('numpy.core.multiarray').run() diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py deleted file mode 100644 index 2f85c664e..000000000 --- a/numpy/core/tests/test_numeric.py +++ /dev/null @@ -1,734 +0,0 @@ -from numpy.core import * -from numpy.random import rand, randint, randn -from numpy.testing import * -from numpy.core.multiarray import dot as dot_ -import sys - -class Vec: - def __init__(self,sequence=None): - if sequence is None: - sequence=[] - self.array=array(sequence) - def __add__(self,other): - out=Vec() - out.array=self.array+other.array - return out - def __sub__(self,other): - out=Vec() - out.array=self.array-other.array - return out - def __mul__(self,other): # with scalar - out=Vec(self.array.copy()) - out.array*=other - return out - def __rmul__(self,other): - return self*other - def __abs__(self): - out=Vec() - out.array=abs(self.array) - return out - def __repr__(self): - return "Vec("+repr(self.array.tolist())+")" - __str__=__repr__ - -class TestDot(NumpyTestCase): - def setUp(self): - self.A = rand(10,8) - self.b1 = rand(8,1) - self.b2 = rand(8) - self.b3 = rand(1,8) - self.b4 = rand(10) - self.N = 14 - - def check_matmat(self): - A = self.A - c1 = dot(A.transpose(), A) - c2 = dot_(A.transpose(), A) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_matvec(self): - A, b1 = self.A, self.b1 - c1 = dot(A, b1) - c2 = dot_(A, b1) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_matvec2(self): - A, b2 = self.A, self.b2 - c1 = dot(A, b2) - c2 = dot_(A, b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecmat(self): - A, b4 = self.A, self.b4 - c1 = dot(b4, A) - c2 = dot_(b4, A) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecmat2(self): - b3, A = self.b3, self.A - c1 = dot(b3, A.transpose()) - c2 = dot_(b3, A.transpose()) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecmat3(self): - A, b4 = self.A, self.b4 - c1 = dot(A.transpose(),b4) - c2 = 
dot_(A.transpose(),b4) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecvecouter(self): - b1, b3 = self.b1, self.b3 - c1 = dot(b1, b3) - c2 = dot_(b1, b3) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecvecinner(self): - b1, b3 = self.b1, self.b3 - c1 = dot(b3, b1) - c2 = dot_(b3, b1) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_matscalar(self): - b1 = matrix(ones((3,3),dtype=complex)) - assert_equal(b1*1.0, b1) - - def check_columnvect(self): - b1 = ones((3,1)) - b2 = [5.3] - c1 = dot(b1,b2) - c2 = dot_(b1,b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_columnvect(self): - b1 = ones((3,1)).transpose() - b2 = [6.2] - c1 = dot(b2,b1) - c2 = dot_(b2,b1) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecscalar(self): - b1 = rand(1,1) - b2 = rand(1,8) - c1 = dot(b1,b2) - c2 = dot_(b1,b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecscalar2(self): - b1 = rand(8,1) - b2 = rand(1,1) - c1 = dot(b1,b2) - c2 = dot_(b1,b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_all(self): - dims = [(),(1,),(1,1)] - for dim1 in dims: - for dim2 in dims: - arg1 = rand(*dim1) - arg2 = rand(*dim2) - c1 = dot(arg1, arg2) - c2 = dot_(arg1, arg2) - assert (c1.shape == c2.shape) - assert_almost_equal(c1, c2, decimal=self.N) - - def check_vecobject(self): - U_non_cont = transpose([[1.,1.],[1.,2.]]) - U_cont = ascontiguousarray(U_non_cont) - x = array([Vec([1.,0.]),Vec([0.,1.])]) - zeros = array([Vec([0.,0.]),Vec([0.,0.])]) - zeros_test = dot(U_cont,x) - dot(U_non_cont,x) - assert_equal(zeros[0].array, zeros_test[0].array) - assert_equal(zeros[1].array, zeros_test[1].array) - - -class TestBoolScalar(NumpyTestCase): - def test_logical(self): - f = False_ - t = True_ - s = "xyz" - self.failUnless((t and s) is s) - self.failUnless((f and s) is f) - - def test_bitwise_or(self): - f = False_ - t = True_ - self.failUnless((t | t) is t) - self.failUnless((f | t) is t) - self.failUnless((t | f) is t) - self.failUnless((f | f) is f) - - def test_bitwise_and(self): - f = False_ - t = True_ - self.failUnless((t & t) is t) - self.failUnless((f & t) is f) - self.failUnless((t & f) is f) - self.failUnless((f & f) is f) - - def test_bitwise_xor(self): - f = False_ - t = True_ - self.failUnless((t ^ t) is f) - self.failUnless((f ^ t) is t) - self.failUnless((t ^ f) is t) - self.failUnless((f ^ f) is f) - - -class TestSeterr(NumpyTestCase): - def test_set(self): - err = seterr() - old = seterr(divide='warn') - self.failUnless(err == old) - new = seterr() - self.failUnless(new['divide'] == 'warn') - seterr(over='raise') - self.failUnless(geterr()['over'] == 'raise') - self.failUnless(new['divide'] == 'warn') - seterr(**old) - self.failUnless(geterr() == old) - def test_divideerr(self): - seterr(divide='raise') - try: - array([1.]) / array([0.]) - except FloatingPointError: - pass - else: - self.fail() - seterr(divide='ignore') - array([1.]) / array([0.]) - - -class TestFromiter(NumpyTestCase): - - def makegen(self): - for x in xrange(24): - yield x**2 - - def test_types(self): - ai32 = fromiter(self.makegen(), int32) - ai64 = fromiter(self.makegen(), int64) - af = fromiter(self.makegen(), float) - self.failUnless(ai32.dtype == dtype(int32)) - self.failUnless(ai64.dtype == dtype(int64)) - self.failUnless(af.dtype == dtype(float)) - - def test_lengths(self): - expected = array(list(self.makegen())) - a = fromiter(self.makegen(), int) - a20 = fromiter(self.makegen(), int, 20) - self.failUnless(len(a) == len(expected)) - 
self.failUnless(len(a20) == 20) - try: - fromiter(self.makegen(), int, len(expected) + 10) - except ValueError: - pass - else: - self.fail() - - def test_values(self): - expected = array(list(self.makegen())) - a = fromiter(self.makegen(), int) - a20 = fromiter(self.makegen(), int, 20) - self.failUnless(alltrue(a == expected,axis=0)) - self.failUnless(alltrue(a20 == expected[:20],axis=0)) - -class TestIndex(NumpyTestCase): - def test_boolean(self): - a = rand(3,5,8) - V = rand(5,8) - g1 = randint(0,5,size=15) - g2 = randint(0,8,size=15) - V[g1,g2] = -V[g1,g2] - assert (array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all() - -class TestBinaryRepr(NumpyTestCase): - def test_zero(self): - assert_equal(binary_repr(0),'0') - - def test_large(self): - assert_equal(binary_repr(10736848),'101000111101010011010000') - - def test_negative(self): - assert_equal(binary_repr(-1), '-1') - assert_equal(binary_repr(-1, width=8), '11111111') - -def assert_array_strict_equal(x, y): - assert_array_equal(x, y) - # Check flags - assert x.flags == y.flags - # check endianness - assert x.dtype.isnative == y.dtype.isnative - - -class TestClip(NumpyTestCase): - def setUp(self): - self.nr = 5 - self.nc = 3 - - def fastclip(self, a, m, M, out=None): - if out is None: - return a.clip(m,M) - else: - return a.clip(m,M,out) - - def clip(self, a, m, M, out=None): - # use slow-clip - selector = less(a, m)+2*greater(a, M) - return selector.choose((a, m, M), out=out) - - # Handy functions - def _generate_data(self, n, m): - return randn(n, m) - - def _generate_data_complex(self, n, m): - return randn(n, m) + 1.j *rand(n, m) - - def _generate_flt_data(self, n, m): - return (randn(n, m)).astype(float32) - - def _neg_byteorder(self, a): - import sys - a = asarray(a) - if sys.byteorder == 'little': - a = a.astype(a.dtype.newbyteorder('>')) - else: - a = a.astype(a.dtype.newbyteorder('<')) - return a - - def _generate_non_native_data(self, n, m): - data = randn(n, m) - data = self._neg_byteorder(data) - assert not data.dtype.isnative - return data - - def _generate_int_data(self, n, m): - return (10 * rand(n, m)).astype(int64) - - def _generate_int32_data(self, n, m): - return (10 * rand(n, m)).astype(int32) - - # Now the real test cases - def test_simple_double(self): - """Test native double input with scalar min/max.""" - a = self._generate_data(self.nr, self.nc) - m = 0.1 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_int(self): - """Test native int input with scalar min/max.""" - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_array_double(self): - """Test native double input with array min/max.""" - a = self._generate_data(self.nr, self.nc) - m = zeros(a.shape) - M = m + 0.5 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_nonnative(self): - """Test non native double input with scalar min/max. - Test native double input with non native double scalar min/max.""" - a = self._generate_non_native_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - "Test native double input with non native double scalar min/max." 
- a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = self._neg_byteorder(0.6) - assert not M.dtype.isnative - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - def test_simple_complex(self): - """Test native complex input with native double scalar min/max. - Test native input with complex double scalar min/max. - """ - a = 3 * self._generate_data_complex(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - "Test native input with complex double scalar min/max." - a = 3 * self._generate_data(self.nr, self.nc) - m = -0.5 + 1.j - M = 1. + 2.j - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_clip_non_contig(self): - """Test clip for non contiguous native input and native scalar min/max.""" - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert not a.flags['F_CONTIGUOUS'] - assert not a.flags['C_CONTIGUOUS'] - ac = self.fastclip(a, -1.6, 1.7) - act = self.clip(a, -1.6, 1.7) - assert_array_strict_equal(ac, act) - - def test_simple_out(self): - """Test native double input with scalar min/max.""" - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = zeros(a.shape) - act = zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_inout(self): - """Test native int32 input with double min/max and int32 out.""" - a = self._generate_int32_data(self.nr, self.nc) - m = float64(0) - M = float64(2) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_out(self): - """Test native int32 input with int32 scalar min/max and int64 out.""" - a = self._generate_int32_data(self.nr, self.nc) - m = int32(-1) - M = int32(1) - ac = zeros(a.shape, dtype = int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_inout(self): - """Test native in32 input with double array min/max and int32 out.""" - a = self._generate_int32_data(self.nr, self.nc) - m = zeros(a.shape, float64) - M = float64(1) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_out(self): - """Test native double input with scalar min/max and int out.""" - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_inplace_01(self): - """Test native double input with array min/max in-place.""" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_simple_inplace_02(self): - """Test native double input with scalar min/max in-place.""" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_noncontig_inplace(self): - """Test non contiguous double input with double scalar min/max in-place.""" - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert not a.flags['F_CONTIGUOUS'] - assert not 
a.flags['C_CONTIGUOUS'] - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_equal(a, ac) - - def test_type_cast_01(self): - "Test native double input with scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_02(self): - "Test native int32 input with int32 scalar min/max." - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int32) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_03(self): - "Test native int32 input with float64 scalar min/max." - a = self._generate_int32_data(self.nr, self.nc) - m = -2 - M = 4 - ac = self.fastclip(a, float64(m), float64(M)) - act = self.clip(a, float64(m), float64(M)) - assert_array_strict_equal(ac, act) - - def test_type_cast_04(self): - "Test native int32 input with float32 scalar min/max." - a = self._generate_int32_data(self.nr, self.nc) - m = float32(-2) - M = float32(4) - act = self.fastclip(a,m,M) - ac = self.clip(a,m,M) - assert_array_strict_equal(ac, act) - - def test_type_cast_04(self): - "Test native int32 with double arrays min/max." - a = self._generate_int_data(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m * zeros(a.shape), M) - act = self.clip(a, m * zeros(a.shape), M) - assert_array_strict_equal(ac, act) - - def test_type_cast_05(self): - "Test native with NON native scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = 0.5 - m_s = self._neg_byteorder(m) - M = 1. - act = self.clip(a, m_s, M) - ac = self.fastclip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_06(self): - "Test NON native with native array min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 * ones(a.shape) - M = 1. - a_s = self._neg_byteorder(a) - assert not a_s.dtype.isnative - act = a_s.clip(m, M) - ac = self.fastclip(a_s, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_07(self): - "Test NON native with native scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 1. - a_s = self._neg_byteorder(a) - assert not a_s.dtype.isnative - ac = self.fastclip(a_s, m , M) - act = a_s.clip(m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_08(self): - "Test native with NON native array min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 * ones(a.shape) - M = 1. - m_s = self._neg_byteorder(m) - assert not m_s.dtype.isnative - ac = self.fastclip(a, m_s , M) - act = self.clip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_09(self): - """Test native int32 with float min/max and float out for output argument.""" - a = self._generate_int_data(self.nr, self.nc) - b = zeros(a.shape, dtype = float32) - m = float32(-0.5) - M = float32(1) - act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m , M, out = b) - assert_array_strict_equal(ac, act) - - def test_type_cast_10(self): - "Test non native with native scalar, min/max, out non native" - a = self._generate_non_native_data(self.nr, self.nc) - b = a.copy() - b = b.astype(b.dtype.newbyteorder('>')) - bt = b.copy() - m = -0.5 - M = 1. 
- self.fastclip(a, m , M, out = b) - self.clip(a, m, M, out = bt) - assert_array_strict_equal(b, bt) - - def test_type_cast_11(self): - "Test native int32 input and min/max and float out" - a = self._generate_int_data(self.nr, self.nc) - b = zeros(a.shape, dtype = float32) - m = int32(0) - M = int32(1) - act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m , M, out = b) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple(self): - "Test native double input with scalar min/max" - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = zeros(a.shape) - act = zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple2(self): - "Test native int32 input with double min/max and int32 out" - a = self._generate_int32_data(self.nr, self.nc) - m = float64(0) - M = float64(2) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple_int32(self): - "Test native int32 input with int32 scalar min/max and int64 out" - a = self._generate_int32_data(self.nr, self.nc) - m = int32(-1) - M = int32(1) - ac = zeros(a.shape, dtype = int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_int32(self): - "Test native int32 input with double array min/max and int32 out" - a = self._generate_int32_data(self.nr, self.nc) - m = zeros(a.shape, float64) - M = float64(1) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_outint32(self): - "Test native double input with scalar min/max and int out" - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_inplace_array(self): - "Test native double input with array min/max" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_inplace_simple(self): - "Test native double input with scalar min/max" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - -class test_allclose_inf(ParametricTestCase): - rtol = 1e-5 - atol = 1e-8 - - def tst_allclose(self,x,y): - assert allclose(x,y), "%s and %s not close" % (x,y) - - def tst_not_allclose(self,x,y): - assert not allclose(x,y), "%s and %s shouldn't be close" % (x,y) - - def testip_allclose(self): - """Parametric test factory.""" - arr = array([100,1000]) - aran = arange(125).reshape((5,5,5)) - - atol = self.atol - rtol = self.rtol - - data = [([1,0], [1,0]), - ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol),] - - for (x,y) in data: - yield (self.tst_allclose,x,y) - - def testip_not_allclose(self): - """Parametric test factory.""" - aran = arange(125).reshape((5,5,5)) - - atol = self.atol - rtol = self.rtol - - data = [([inf,0], [1,inf]), - ([inf,0], [1,0]), - ([inf,inf], [1,inf]), - ([inf,inf], [1,0]), - ([-inf, 0], [inf, 0]), - ([nan,0], [nan,0]), - ([atol*2], [0]), - ([1], 
[1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), - (array([inf,1]), array([0,inf]))] - - for (x,y) in data: - yield (self.tst_not_allclose,x,y) - - def test_no_parameter_modification(self): - x = array([inf,1]) - y = array([0,inf]) - allclose(x,y) - assert_array_equal(x,array([inf,1])) - assert_array_equal(y,array([0,inf])) - -import sys -if sys.version_info[:2] >= (2, 5): - set_local_path() - from test_errstate import * - restore_path() - -if __name__ == '__main__': - NumpyTest().run() diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py deleted file mode 100644 index 527b89b53..000000000 --- a/numpy/core/tests/test_numerictypes.py +++ /dev/null @@ -1,342 +0,0 @@ -import sys -from numpy.testing import * -import numpy -from numpy import zeros, ones, array - - -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3,2], [[6.,4.],[6.,4.]], 8), - ([4,3], [[7.,5.],[7.,5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), - ] - - -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] - -def normalize_descr(descr): - "Normalize a description adding the platform byteorder." 
- - out = [] - for item in descr: - dtype = item[1] - if isinstance(dtype, str): - if dtype[0] not in ['|','<','>']: - onebyte = dtype[1:] == "1" - if onebyte or dtype[0] in ['S', 'V', 'b']: - dtype = "|" + dtype - else: - dtype = byteorder + dtype - if len(item) > 2 and item[2] > 1: - nitem = (item[0], dtype, item[2]) - else: - nitem = (item[0], dtype) - out.append(nitem) - elif isinstance(item[1], list): - l = [] - for j in normalize_descr(item[1]): - l.append(j) - out.append((item[0], l)) - else: - raise ValueError("Expected a str or list and got %s" % \ - (type(item))) - return out - - -############################################################ -# Creation tests -############################################################ - -class create_zeros: - """Check the creation of heterogeneous arrays zero-valued""" - - def check_zeros0D(self): - """Check creation of 0-dimensional objects""" - h = zeros((), dtype=self._descr) - self.assert_(normalize_descr(self._descr) == h.dtype.descr) - self.assert_(h.dtype.fields['x'][0].name[:4] == 'void') - self.assert_(h.dtype.fields['x'][0].char == 'V') - self.assert_(h.dtype.fields['x'][0].type == numpy.void) - # A small check that data is ok - assert_equal(h['z'], zeros((), dtype='u1')) - - def check_zerosSD(self): - """Check creation of single-dimensional objects""" - h = zeros((2,), dtype=self._descr) - self.assert_(normalize_descr(self._descr) == h.dtype.descr) - self.assert_(h.dtype['y'].name[:4] == 'void') - self.assert_(h.dtype['y'].char == 'V') - self.assert_(h.dtype['y'].type == numpy.void) - # A small check that data is ok - assert_equal(h['z'], zeros((2,), dtype='u1')) - - def check_zerosMD(self): - """Check creation of multi-dimensional objects""" - h = zeros((2,3), dtype=self._descr) - self.assert_(normalize_descr(self._descr) == h.dtype.descr) - self.assert_(h.dtype['z'].name == 'uint8') - self.assert_(h.dtype['z'].char == 'B') - self.assert_(h.dtype['z'].type == numpy.uint8) - # A small check that data is ok - assert_equal(h['z'], zeros((2,3), dtype='u1')) - - -class test_create_zeros_plain(create_zeros, NumpyTestCase): - """Check the creation of heterogeneous arrays zero-valued (plain)""" - _descr = Pdescr - -class test_create_zeros_nested(create_zeros, NumpyTestCase): - """Check the creation of heterogeneous arrays zero-valued (nested)""" - _descr = Ndescr - - -class create_values: - """Check the creation of heterogeneous arrays with values""" - - def check_tuple(self): - """Check creation from tuples""" - h = array(self._buffer, dtype=self._descr) - self.assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assert_(h.shape == (2,)) - else: - self.assert_(h.shape == ()) - - def check_list_of_tuple(self): - """Check creation from list of tuples""" - h = array([self._buffer], dtype=self._descr) - self.assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assert_(h.shape == (1,2)) - else: - self.assert_(h.shape == (1,)) - - def check_list_of_list_of_tuple(self): - """Check creation from list of list of tuples""" - h = array([[self._buffer]], dtype=self._descr) - self.assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assert_(h.shape == (1,1,2)) - else: - self.assert_(h.shape == (1,1)) - - -class test_create_values_plain_single(create_values, NumpyTestCase): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class 
test_create_values_plain_multiple(create_values, NumpyTestCase): - """Check the creation of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class test_create_values_nested_single(create_values, NumpyTestCase): - """Check the creation of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = 0 - _buffer = NbufferT[0] - -class test_create_values_nested_multiple(create_values, NumpyTestCase): - """Check the creation of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = 1 - _buffer = NbufferT - - -############################################################ -# Reading tests -############################################################ - -class read_values_plain: - """Check the reading of values in heterogeneous arrays (plain)""" - - def check_access_fields(self): - h = array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - self.assert_(h.shape == ()) - assert_equal(h['x'], array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], array(self._buffer[1], dtype='f8')) - assert_equal(h['z'], array(self._buffer[2], dtype='u1')) - else: - self.assert_(len(h) == 2) - assert_equal(h['x'], array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], array([self._buffer[0][1], - self._buffer[1][1]], dtype='f8')) - assert_equal(h['z'], array([self._buffer[0][2], - self._buffer[1][2]], dtype='u1')) - - -class test_read_values_plain_single(read_values_plain, NumpyTestCase): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class test_read_values_plain_multiple(read_values_plain, NumpyTestCase): - """Check the values of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class read_values_nested: - """Check the reading of values in heterogeneous arrays (nested)""" - - - def check_access_top_fields(self): - """Check reading the top fields of a nested array""" - h = array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - self.assert_(h.shape == ()) - assert_equal(h['x'], array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], array(self._buffer[4], dtype='f8')) - assert_equal(h['z'], array(self._buffer[5], dtype='u1')) - else: - self.assert_(len(h) == 2) - assert_equal(h['x'], array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], array([self._buffer[0][4], - self._buffer[1][4]], dtype='f8')) - assert_equal(h['z'], array([self._buffer[0][5], - self._buffer[1][5]], dtype='u1')) - - - def check_nested1_acessors(self): - """Check reading the nested fields of a nested array (1st level)""" - h = array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['value'], - array(self._buffer[1][0], dtype='c16')) - assert_equal(h['Info']['y2'], - array(self._buffer[1][1], dtype='f8')) - assert_equal(h['info']['Name'], - array(self._buffer[3][0], dtype='U2')) - assert_equal(h['info']['Value'], - array(self._buffer[3][1], dtype='c16')) - else: - assert_equal(h['Info']['value'], - array([self._buffer[0][1][0], - self._buffer[1][1][0]], - dtype='c16')) - assert_equal(h['Info']['y2'], - array([self._buffer[0][1][1], - self._buffer[1][1][1]], - dtype='f8')) - assert_equal(h['info']['Name'], - array([self._buffer[0][3][0], - self._buffer[1][3][0]], - dtype='U2')) - assert_equal(h['info']['Value'], - array([self._buffer[0][3][1], - 
self._buffer[1][3][1]], - dtype='c16')) - - def check_nested2_acessors(self): - """Check reading the nested fields of a nested array (2nd level)""" - h = array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['Info2']['value'], - array(self._buffer[1][2][1], dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - array(self._buffer[1][2][3], dtype='u4')) - else: - assert_equal(h['Info']['Info2']['value'], - array([self._buffer[0][1][2][1], - self._buffer[1][1][2][1]], - dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - array([self._buffer[0][1][2][3], - self._buffer[1][1][2][3]], - dtype='u4')) - - def check_nested1_descriptor(self): - """Check access nested descriptors of a nested array (1st level)""" - h = array(self._buffer, dtype=self._descr) - self.assert_(h.dtype['Info']['value'].name == 'complex128') - self.assert_(h.dtype['Info']['y2'].name == 'float64') - self.assert_(h.dtype['info']['Name'].name == 'unicode256') - self.assert_(h.dtype['info']['Value'].name == 'complex128') - - def check_nested2_descriptor(self): - """Check access nested descriptors of a nested array (2nd level)""" - h = array(self._buffer, dtype=self._descr) - self.assert_(h.dtype['Info']['Info2']['value'].name == 'void256') - self.assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') - - -class test_read_values_nested_single(read_values_nested, NumpyTestCase): - """Check the values of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = False - _buffer = NbufferT[0] - -class test_read_values_nested_multiple(read_values_nested, NumpyTestCase): - """Check the values of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = True - _buffer = NbufferT - -class TestEmptyField(NumpyTestCase): - def check_assign(self): - a = numpy.arange(10, dtype=numpy.float32) - a.dtype = [("int", "<0i4"),("float", "<2f4")] - assert(a['int'].shape == (5,0)) - assert(a['float'].shape == (5,2)) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py deleted file mode 100644 index 431852ce4..000000000 --- a/numpy/core/tests/test_records.py +++ /dev/null @@ -1,114 +0,0 @@ - -from numpy.testing import * -set_package_path() -import os as _os -import numpy.core;reload(numpy.core) -from numpy.core import * -restore_path() - -class TestFromrecords(NumpyTestCase): - def check_fromrecords(self): - r = rec.fromrecords([[456,'dbe',1.2],[2,'de',1.3]],names='col1,col2,col3') - assert_equal(r[0].item(),(456, 'dbe', 1.2)) - - def check_method_array(self): - r = rec.array('abcdefg'*100,formats='i2,a3,i4',shape=3,byteorder='big') - assert_equal(r[1].item(),(25444, 'efg', 1633837924)) - - def check_method_array2(self): - r=rec.array([(1,11,'a'),(2,22,'b'),(3,33,'c'),(4,44,'d'),(5,55,'ex'),(6,66,'f'),(7,77,'g')],formats='u1,f4,a1') - assert_equal(r[1].item(),(2, 22.0, 'b')) - - def check_recarray_slices(self): - r=rec.array([(1,11,'a'),(2,22,'b'),(3,33,'c'),(4,44,'d'),(5,55,'ex'),(6,66,'f'),(7,77,'g')],formats='u1,f4,a1') - assert_equal(r[1::2][1].item(),(4, 44.0, 'd')) - - def check_recarray_fromarrays(self): - x1 = array([1,2,3,4]) - x2 = array(['a','dd','xyz','12']) - x3 = array([1.1,2,3,4]) - r = rec.fromarrays([x1,x2,x3],names='a,b,c') - assert_equal(r[1].item(),(2,'dd',2.0)) - x1[1] = 34 - assert_equal(r.a,array([1,2,3,4])) - - def check_recarray_fromfile(self): - __path__ = _os.path.split(__file__) - filename = _os.path.join(__path__[0], "testdata.fits") - fd = 
open(filename) - fd.seek(2880*2) - r = rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') - - def check_recarray_from_obj(self): - count = 10 - a = zeros(count, dtype='O') - b = zeros(count, dtype='f8') - c = zeros(count, dtype='f8') - for i in range(len(a)): - a[i] = range(1,10) - - mine = numpy.rec.fromarrays([a,b,c], - names='date,data1,data2') - for i in range(len(a)): - assert(mine.date[i]==range(1,10)) - assert(mine.data1[i]==0.0) - assert(mine.data2[i]==0.0) - - def check_recarray_from_names(self): - ra = rec.array([ - (1, 'abc', 3.7000002861022949, 0), - (2, 'xy', 6.6999998092651367, 1), - (0, ' ', 0.40000000596046448, 0)], - names='c1, c2, c3, c4') - pa = rec.fromrecords([ - (1, 'abc', 3.7000002861022949, 0), - (2, 'xy', 6.6999998092651367, 1), - (0, ' ', 0.40000000596046448, 0)], - names='c1, c2, c3, c4') - assert ra.dtype == pa.dtype - assert ra.shape == pa.shape - for k in xrange(len(ra)): - assert ra[k].item() == pa[k].item() - - def check_recarray_conflict_fields(self): - ra = rec.array([(1,'abc',2.3),(2,'xyz',4.2), - (3,'wrs',1.3)], - names='field, shape, mean') - ra.mean = [1.1,2.2,3.3] - assert_array_almost_equal(ra['mean'], [1.1,2.2,3.3]) - assert type(ra.mean) is type(ra.var) - ra.shape = (1,3) - assert ra.shape == (1,3) - ra.shape = ['A','B','C'] - assert_array_equal(ra['shape'], [['A','B','C']]) - ra.field = 5 - assert_array_equal(ra['field'], [[5,5,5]]) - assert callable(ra.field) - -class TestRecord(NumpyTestCase): - def setUp(self): - self.data = rec.fromrecords([(1,2,3),(4,5,6)], - dtype=[("col1", "= rc) - -class TestRegression(NumpyTestCase): - def check_invalid_round(self,level=rlevel): - """Ticket #3""" - v = 4.7599999999999998 - assert_array_equal(N.array([v]),N.array(v)) - - def check_mem_empty(self,level=rlevel): - """Ticket #7""" - N.empty((1,),dtype=[('x',N.int64)]) - - def check_pickle_transposed(self,level=rlevel): - """Ticket #16""" - a = N.transpose(N.array([[2,9],[7,0],[3,8]])) - f = StringIO() - pickle.dump(a,f) - f.seek(0) - b = pickle.load(f) - f.close() - assert_array_equal(a,b) - - def check_masked_array_create(self,level=rlevel): - """Ticket #17""" - x = N.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0]) - assert_array_equal(N.ma.nonzero(x),[[1,2,6,7]]) - - def check_poly1d(self,level=rlevel): - """Ticket #28""" - assert_equal(N.poly1d([1]) - N.poly1d([1,0]), - N.poly1d([-1,1])) - - def check_typeNA(self,level=rlevel): - """Ticket #31""" - assert_equal(N.typeNA[N.int64],'Int64') - assert_equal(N.typeNA[N.uint64],'UInt64') - - def check_dtype_names(self,level=rlevel): - """Ticket #35""" - dt = N.dtype([(('name','label'),N.int32,3)]) - - def check_reduce(self,level=rlevel): - """Ticket #40""" - assert_almost_equal(N.add.reduce([1.,.5],dtype=None), 1.5) - - def check_zeros_order(self,level=rlevel): - """Ticket #43""" - N.zeros([3], int, 'C') - N.zeros([3], order='C') - N.zeros([3], int, order='C') - - def check_sort_bigendian(self,level=rlevel): - """Ticket #47""" - a = N.linspace(0, 10, 11) - c = a.astype(N.dtype('f8') - b = N.arange(10.,dtype='2) & (a<6)) - xb = N.where((b>2) & (b<6)) - ya = ((a>2) & (a<6)) - yb = ((b>2) & (b<6)) - assert_array_almost_equal(xa,ya.nonzero()) - assert_array_almost_equal(xb,yb.nonzero()) - assert(N.all(a[ya] > 0.5)) - assert(N.all(b[yb] > 0.5)) - - def check_mem_dot(self,level=rlevel): - """Ticket #106""" - x = N.random.randn(0,1) - y = N.random.randn(10,1) - z = N.dot(x, N.transpose(y)) - - def check_arange_endian(self,level=rlevel): - """Ticket #111""" - ref = N.arange(10) - x = 
N.arange(10,dtype='f8') - assert_array_equal(ref,x) - -# Longfloat support is not consistent enough across -# platforms for this test to be meaningful. -# def check_longfloat_repr(self,level=rlevel): -# """Ticket #112""" -# if N.longfloat(0).itemsize > 8: -# a = N.exp(N.array([1000],dtype=N.longfloat)) -# assert(str(a)[1:9] == str(a[0])[:8]) - - def check_argmax(self,level=rlevel): - """Ticket #119""" - a = N.random.normal(0,1,(4,5,6,7,8)) - for i in xrange(a.ndim): - aargmax = a.argmax(i) - - def check_matrix_properties(self,level=rlevel): - """Ticket #125""" - a = N.matrix([1.0],dtype=float) - assert(type(a.real) is N.matrix) - assert(type(a.imag) is N.matrix) - c,d = N.matrix([0.0]).nonzero() - assert(type(c) is N.matrix) - assert(type(d) is N.matrix) - - def check_mem_divmod(self,level=rlevel): - """Ticket #126""" - for i in range(10): - divmod(N.array([i])[0],10) - - - def check_hstack_invalid_dims(self,level=rlevel): - """Ticket #128""" - x = N.arange(9).reshape((3,3)) - y = N.array([0,0,0]) - self.failUnlessRaises(ValueError,N.hstack,(x,y)) - - def check_squeeze_type(self,level=rlevel): - """Ticket #133""" - a = N.array([3]) - b = N.array(3) - assert(type(a.squeeze()) is N.ndarray) - assert(type(b.squeeze()) is N.ndarray) - - def check_add_identity(self,level=rlevel): - """Ticket #143""" - assert_equal(0,N.add.identity) - - def check_binary_repr_0(self,level=rlevel): - """Ticket #151""" - assert_equal('0',N.binary_repr(0)) - - def check_rec_iterate(self,level=rlevel): - """Ticket #160""" - descr = N.dtype([('i',int),('f',float),('s','|S3')]) - x = N.rec.array([(1,1.1,'1.0'), - (2,2.2,'2.0')],dtype=descr) - x[0].tolist() - [i for i in x[0]] - - def check_unicode_string_comparison(self,level=rlevel): - """Ticket #190""" - a = N.array('hello',N.unicode_) - b = N.array('world') - a == b - - def check_tostring_FORTRANORDER_discontiguous(self,level=rlevel): - """Fix in r2836""" - # Create discontiguous Fortran-ordered array - x = N.array(N.random.rand(3,3),order='F')[:,:2] - assert_array_almost_equal(x.ravel(),N.fromstring(x.tostring())) - - def check_flat_assignment(self,level=rlevel): - """Correct behaviour of ticket #194""" - x = N.empty((3,1)) - x.flat = N.arange(3) - assert_array_almost_equal(x,[[0],[1],[2]]) - x.flat = N.arange(3,dtype=float) - assert_array_almost_equal(x,[[0],[1],[2]]) - - def check_broadcast_flat_assignment(self,level=rlevel): - """Ticket #194""" - x = N.empty((3,1)) - def bfa(): x[:] = N.arange(3) - def bfb(): x[:] = N.arange(3,dtype=float) - self.failUnlessRaises(ValueError, bfa) - self.failUnlessRaises(ValueError, bfb) - - def check_unpickle_dtype_with_object(self,level=rlevel): - """Implemented in r2840""" - dt = N.dtype([('x',int),('y',N.object_),('z','O')]) - f = StringIO() - pickle.dump(dt,f) - f.seek(0) - dt_ = pickle.load(f) - f.close() - assert_equal(dt,dt_) - - def check_mem_array_creation_invalid_specification(self,level=rlevel): - """Ticket #196""" - dt = N.dtype([('x',int),('y',N.object_)]) - # Wrong way - self.failUnlessRaises(ValueError, N.array, [1,'object'], dt) - # Correct way - N.array([(1,'object')],dt) - - def check_recarray_single_element(self,level=rlevel): - """Ticket #202""" - a = N.array([1,2,3],dtype=N.int32) - b = a.copy() - r = N.rec.array(a,shape=1,formats=['3i4'],names=['d']) - assert_array_equal(a,b) - assert_equal(a,r[0][0]) - - def check_zero_sized_array_indexing(self,level=rlevel): - """Ticket #205""" - tmp = N.array([]) - def index_tmp(): tmp[N.array(10)] - self.failUnlessRaises(IndexError, index_tmp) - - def 
check_unique_zero_sized(self,level=rlevel): - """Ticket #205""" - assert_array_equal([], N.unique(N.array([]))) - - def check_chararray_rstrip(self,level=rlevel): - """Ticket #222""" - x = N.chararray((1,),5) - x[0] = 'a ' - x = x.rstrip() - assert_equal(x[0], 'a') - - def check_object_array_shape(self,level=rlevel): - """Ticket #239""" - assert_equal(N.array([[1,2],3,4],dtype=object).shape, (3,)) - assert_equal(N.array([[1,2],[3,4]],dtype=object).shape, (2,2)) - assert_equal(N.array([(1,2),(3,4)],dtype=object).shape, (2,2)) - assert_equal(N.array([],dtype=object).shape, (0,)) - assert_equal(N.array([[],[],[]],dtype=object).shape, (3,0)) - assert_equal(N.array([[3,4],[5,6],None],dtype=object).shape, (3,)) - - def check_mem_around(self,level=rlevel): - """Ticket #243""" - x = N.zeros((1,)) - y = [0] - decimal = 6 - N.around(abs(x-y),decimal) <= 10.0**(-decimal) - - def check_character_array_strip(self,level=rlevel): - """Ticket #246""" - x = N.char.array(("x","x ","x ")) - for c in x: assert_equal(c,"x") - - def check_lexsort(self,level=rlevel): - """Lexsort memory error""" - v = N.array([1,2,3,4,5,6,7,8,9,10]) - assert_equal(N.lexsort(v),0) - - def check_pickle_dtype(self,level=rlevel): - """Ticket #251""" - import pickle - pickle.dumps(N.float) - - def check_masked_array_multiply(self,level=rlevel): - """Ticket #254""" - a = N.ma.zeros((4,1)) - a[2,0] = N.ma.masked - b = N.zeros((4,2)) - a*b - b*a - - def check_swap_real(self, level=rlevel): - """Ticket #265""" - assert_equal(N.arange(4,dtype='>c8').imag.max(),0.0) - assert_equal(N.arange(4,dtype=' 1 and x['two'] > 2) - - def check_method_args(self, level=rlevel): - # Make sure methods and functions have same default axis - # keyword and arguments - funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'), - ('sometrue', 'any'), - ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), - 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', - 'round', 'min', 'max', 'argsort', 'sort'] - funcs2 = ['compress', 'take', 'repeat'] - - for func in funcs1: - arr = N.random.rand(8,7) - arr2 = arr.copy() - if isinstance(func, tuple): - func_meth = func[1] - func = func[0] - else: - func_meth = func - res1 = getattr(arr, func_meth)() - res2 = getattr(N, func)(arr2) - if res1 is None: - assert abs(arr-res2).max() < 1e-8, func - else: - assert abs(res1-res2).max() < 1e-8, func - - for func in funcs2: - arr1 = N.random.rand(8,7) - arr2 = N.random.rand(8,7) - res1 = None - if func == 'compress': - arr1 = arr1.ravel() - res1 = getattr(arr2, func)(arr1) - else: - arr2 = (15*arr2).astype(int).ravel() - if res1 is None: - res1 = getattr(arr1, func)(arr2) - res2 = getattr(N, func)(arr1, arr2) - assert abs(res1-res2).max() < 1e-8, func - - def check_mem_lexsort_strings(self, level=rlevel): - """Ticket #298""" - lst = ['abc','cde','fgh'] - N.lexsort((lst,)) - - def check_fancy_index(self, level=rlevel): - """Ticket #302""" - x = N.array([1,2])[N.array([0])] - assert_equal(x.shape,(1,)) - - def check_recarray_copy(self, level=rlevel): - """Ticket #312""" - dt = [('x',N.int16),('y',N.float64)] - ra = N.array([(1,2.3)], dtype=dt) - rb = N.rec.array(ra, dtype=dt) - rb['x'] = 2. 
- assert ra['x'] != rb['x'] - - def check_rec_fromarray(self, level=rlevel): - """Ticket #322""" - x1 = N.array([[1,2],[3,4],[5,6]]) - x2 = N.array(['a','dd','xyz']) - x3 = N.array([1.1,2,3]) - N.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8") - - def check_object_array_assign(self, level=rlevel): - x = N.empty((2,2),object) - x.flat[2] = (1,2,3) - assert_equal(x.flat[2],(1,2,3)) - - def check_ndmin_float64(self, level=rlevel): - """Ticket #324""" - x = N.array([1,2,3],dtype=N.float64) - assert_equal(N.array(x,dtype=N.float32,ndmin=2).ndim,2) - assert_equal(N.array(x,dtype=N.float64,ndmin=2).ndim,2) - - def check_mem_vectorise(self, level=rlevel): - """Ticket #325""" - vt = N.vectorize(lambda *args: args) - vt(N.zeros((1,2,1)), N.zeros((2,1,1)), N.zeros((1,1,2))) - vt(N.zeros((1,2,1)), N.zeros((2,1,1)), N.zeros((1,1,2)), N.zeros((2,2))) - - def check_mem_axis_minimization(self, level=rlevel): - """Ticket #327""" - data = N.arange(5) - data = N.add.outer(data,data) - - def check_mem_float_imag(self, level=rlevel): - """Ticket #330""" - N.float64(1.0).imag - - def check_dtype_tuple(self, level=rlevel): - """Ticket #334""" - assert N.dtype('i4') == N.dtype(('i4',())) - - def check_dtype_posttuple(self, level=rlevel): - """Ticket #335""" - N.dtype([('col1', '()i4')]) - - def check_mgrid_single_element(self, level=rlevel): - """Ticket #339""" - assert_array_equal(N.mgrid[0:0:1j],[0]) - assert_array_equal(N.mgrid[0:0],[]) - - def check_numeric_carray_compare(self, level=rlevel): - """Ticket #341""" - assert_equal(N.array([ 'X' ], 'c'),'X') - - def check_string_array_size(self, level=rlevel): - """Ticket #342""" - self.failUnlessRaises(ValueError, - N.array,[['X'],['X','X','X']],'|S1') - - def check_dtype_repr(self, level=rlevel): - """Ticket #344""" - dt1=N.dtype(('uint32', 2)) - dt2=N.dtype(('uint32', (2,))) - assert_equal(dt1.__repr__(), dt2.__repr__()) - - def check_reshape_order(self, level=rlevel): - """Make sure reshape order works.""" - a = N.arange(6).reshape(2,3,order='F') - assert_equal(a,[[0,2,4],[1,3,5]]) - a = N.array([[1,2],[3,4],[5,6],[7,8]]) - b = a[:,1] - assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]]) - - def check_repeat_discont(self, level=rlevel): - """Ticket #352""" - a = N.arange(12).reshape(4,3)[:,2] - assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11]) - - def check_array_index(self, level=rlevel): - """Make sure optimization is not called in this case.""" - a = N.array([1,2,3]) - a2 = N.array([[1,2,3]]) - assert_equal(a[N.where(a==3)], a2[N.where(a2==3)]) - - def check_object_argmax(self, level=rlevel): - a = N.array([1,2,3],dtype=object) - assert a.argmax() == 2 - - def check_recarray_fields(self, level=rlevel): - """Ticket #372""" - dt0 = N.dtype([('f0','i4'),('f1','i4')]) - dt1 = N.dtype([('f0','i8'),('f1','i8')]) - for a in [N.array([(1,2),(3,4)],"i4,i4"), - N.rec.array([(1,2),(3,4)],"i4,i4"), - N.rec.array([(1,2),(3,4)]), - N.rec.fromarrays([(1,2),(3,4)],"i4,i4"), - N.rec.fromarrays([(1,2),(3,4)])]: - assert(a.dtype in [dt0,dt1]) - - def check_random_shuffle(self, level=rlevel): - """Ticket #374""" - a = N.arange(5).reshape((5,1)) - b = a.copy() - N.random.shuffle(b) - assert_equal(N.sort(b, axis=0),a) - - def check_refcount_vectorize(self, level=rlevel): - """Ticket #378""" - def p(x,y): return 123 - v = N.vectorize(p) - assert_valid_refcount(v) - - def check_poly1d_nan_roots(self, level=rlevel): - """Ticket #396""" - p = N.poly1d([N.nan,N.nan,1], r=0) - self.failUnlessRaises(N.linalg.LinAlgError,getattr,p,"r") - - def check_refcount_vdot(self, 
level=rlevel): - """Changeset #3443""" - assert_valid_refcount(N.vdot) - - def check_startswith(self, level=rlevel): - ca = N.char.array(['Hi','There']) - assert_equal(ca.startswith('H'),[True,False]) - - def check_noncommutative_reduce_accumulate(self, level=rlevel): - """Ticket #413""" - tosubtract = N.arange(5) - todivide = N.array([2.0, 0.5, 0.25]) - assert_equal(N.subtract.reduce(tosubtract), -10) - assert_equal(N.divide.reduce(todivide), 16.0) - assert_array_equal(N.subtract.accumulate(tosubtract), - N.array([0, -1, -3, -6, -10])) - assert_array_equal(N.divide.accumulate(todivide), - N.array([2., 4., 16.])) - - def check_mem_polymul(self, level=rlevel): - """Ticket #448""" - N.polymul([],[1.]) - - def check_convolve_empty(self, level=rlevel): - """Convolve should raise an error for empty input array.""" - self.failUnlessRaises(AssertionError,N.convolve,[],[1]) - self.failUnlessRaises(AssertionError,N.convolve,[1],[]) - - def check_multidim_byteswap(self, level=rlevel): - """Ticket #449""" - r=N.array([(1,(0,1,2))], dtype="i2,3i2") - assert_array_equal(r.byteswap(), - N.array([(256,(0,256,512))],r.dtype)) - - def check_string_NULL(self, level=rlevel): - """Changeset 3557""" - assert_equal(N.array("a\x00\x0b\x0c\x00").item(), - 'a\x00\x0b\x0c') - - def check_mem_string_concat(self, level=rlevel): - """Ticket #469""" - x = N.array([]) - N.append(x,'asdasd\tasdasd') - - def check_matrix_multiply_by_1d_vector(self, level=rlevel) : - """Ticket #473""" - def mul() : - N.mat(N.eye(2))*N.ones(2) - - self.failUnlessRaises(ValueError,mul) - - def check_junk_in_string_fields_of_recarray(self, level=rlevel): - """Ticket #483""" - r = N.array([['abc']], dtype=[('var1', '|S20')]) - assert str(r['var1'][0][0]) == 'abc' - - def check_take_output(self, level=rlevel): - """Ensure that 'take' honours output parameter.""" - x = N.arange(12).reshape((3,4)) - a = N.take(x,[0,2],axis=1) - b = N.zeros_like(a) - N.take(x,[0,2],axis=1,out=b) - assert_array_equal(a,b) - - def check_array_str_64bit(self, level=rlevel): - """Ticket #501""" - s = N.array([1, N.nan],dtype=N.float64) - errstate = N.seterr(all='raise') - try: - sstr = N.array_str(s) - finally: - N.seterr(**errstate) - - def check_frompyfunc_endian(self, level=rlevel): - """Ticket #503""" - from math import radians - uradians = N.frompyfunc(radians, 1, 1) - big_endian = N.array([83.4, 83.5], dtype='>f8') - little_endian = N.array([83.4, 83.5], dtype='f4','0)]=1.0 - self.failUnlessRaises(ValueError,ia,x,s) - - def check_mem_scalar_indexing(self, level=rlevel): - """Ticket #603""" - x = N.array([0],dtype=float) - index = N.array(0,dtype=N.int32) - x[index] - - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py deleted file mode 100644 index 84cca37a9..000000000 --- a/numpy/core/tests/test_scalarmath.py +++ /dev/null @@ -1,66 +0,0 @@ -from numpy.testing import * -set_package_path() -import numpy.core.umath as ncu -from numpy import array -import numpy as N -restore_path() - -types = [N.bool_, N.byte, N.ubyte, N.short, N.ushort, N.intc, N.uintc, - N.int_, N.uint, N.longlong, N.ulonglong, - N.single, N.double, N.longdouble, N.csingle, - N.cdouble, N.clongdouble] - -# This compares scalarmath against ufuncs. 
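
Aside (not part of the patch): the module-level comment just above is the whole rationale for the checks in this deleted file, so a minimal sketch of the dtype-equivalence being compared may help. The variable names and the choice of np.float32/np.int16 are illustrative assumptions, not taken from the deleted test.

    import numpy as np

    # Scalar + scalar is handled by NumPy's scalarmath code path; array + array
    # goes through the add ufunc.  The deleted check_type_add test asserts that
    # both paths agree on the result dtype for every pair of supported types.
    scalar_result = np.float32(3) + np.int16(1)
    array_result = np.array([3], dtype=np.float32) + np.array([1], dtype=np.int16)
    assert scalar_result.dtype == array_result.dtype
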
- -class TestTypes(NumpyTestCase): - def check_types(self, level=1): - for atype in types: - a = atype(1) - assert a == 1, "error with %r: got %r" % (atype,a) - - def check_type_add(self, level=1): - # list of types - for k, atype in enumerate(types): - vala = atype(3) - val1 = array([3],dtype=atype) - for l, btype in enumerate(types): - valb = btype(1) - val2 = array([1],dtype=btype) - val = vala+valb - valo = val1 + val2 - assert val.dtype.num == valo.dtype.num and \ - val.dtype.char == valo.dtype.char, \ - "error with (%d,%d)" % (k,l) - - def check_type_create(self, level=1): - for k, atype in enumerate(types): - a = array([1,2,3],atype) - b = atype([1,2,3]) - assert_equal(a,b) - -class TestPower(NumpyTestCase): - def check_small_types(self): - for t in [N.int8, N.int16]: - a = t(3) - b = a ** 4 - assert b == 81, "error with %r: got %r" % (t,b) - - def check_large_types(self): - for t in [N.int32, N.int64, N.float32, N.float64, N.longdouble]: - a = t(51) - b = a ** 4 - assert b == 6765201, "error with %r: got %r" % (t,b) - -class TestConversion(NumpyTestCase): - def test_int_from_long(self): - l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] - li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] - for T in [None,N.float64,N.int64]: - a = N.array(l,dtype=T) - assert_equal(map(int,a), li) - - a = N.array(l[:3],dtype=N.uint64) - assert_equal(map(int,a), li[:3]) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py deleted file mode 100644 index d07713b68..000000000 --- a/numpy/core/tests/test_ufunc.py +++ /dev/null @@ -1,16 +0,0 @@ -from numpy.testing import * - -set_package_path() -import numpy as N -restore_path() - -class TestUfunc(NumpyTestCase): - def test_reduceat_shifting_sum(self): - L = 6 - x = N.arange(L) - idx = N.array(zip(N.arange(L-2),N.arange(L-2)+2)).ravel() - assert_array_equal(N.add.reduceat(x,idx)[::2], - [1,3,5,7]) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py deleted file mode 100644 index ef5f3047e..000000000 --- a/numpy/core/tests/test_umath.py +++ /dev/null @@ -1,208 +0,0 @@ -from numpy.testing import * -set_package_path() -from numpy.core.umath import minimum, maximum, exp -import numpy.core.umath as ncu -from numpy import zeros, ndarray, array, choose, pi -restore_path() - -class TestDivision(NumpyTestCase): - def check_division_int(self): - # int division should return the floor of the result, a la Python - x = array([5, 10, 90, 100, -5, -10, -90, -100, -120]) - assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) - -class TestPower(NumpyTestCase): - def check_power_float(self): - x = array([1., 2., 3.]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_equal(x**2, [1., 4., 9.]) - y = x.copy() - y **= 2 - assert_equal(y, [1., 4., 9.]) - assert_almost_equal(x**(-1), [1., 0.5, 1./3]) - assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) - - def check_power_complex(self): - x = array([1+2j, 2+3j, 3+4j]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_equal(x**2, [-3+4j, -5+12j, -7+24j]) - assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) - assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, - (-117-44j)/15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), - ncu.sqrt(3+4j)]) - assert_almost_equal(x**14, 
[-76443+16124j, 23161315+58317492j, - 5583548873 + 2465133864j]) - -class TestLog1p(NumpyTestCase): - def check_log1p(self): - assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) - assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) - -class TestExpm1(NumpyTestCase): - def check_expm1(self): - assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) - assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) - -class TestMaximum(NumpyTestCase): - def check_reduce_complex(self): - assert_equal(maximum.reduce([1,2j]),1) - assert_equal(maximum.reduce([1+3j,2j]),1+3j) - -class TestMinimum(NumpyTestCase): - def check_reduce_complex(self): - assert_equal(minimum.reduce([1,2j]),2j) - -class TestFloatingPoint(NumpyTestCase): - def check_floating_point(self): - assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) - -def TestDegrees(NumpyTestCase): - def check_degrees(self): - assert_almost_equal(ncu.degrees(pi), 180.0) - assert_almost_equal(ncu.degrees(-0.5*pi), -90.0) - -def TestRadians(NumpyTestCase): - def check_radians(self): - assert_almost_equal(ncu.radians(180.0), pi) - assert_almost_equal(ncu.degrees(-90.0), -0.5*pi) - -class TestSpecialMethods(NumpyTestCase): - def test_wrap(self): - class with_wrap(object): - def __array__(self): - return zeros(1) - def __array_wrap__(self, arr, context): - r = with_wrap() - r.arr = arr - r.context = context - return r - a = with_wrap() - x = minimum(a, a) - assert_equal(x.arr, zeros(1)) - func, args, i = x.context - self.failUnless(func is minimum) - self.failUnlessEqual(len(args), 2) - assert_equal(args[0], a) - assert_equal(args[1], a) - self.failUnlessEqual(i, 0) - - def test_old_wrap(self): - class with_wrap(object): - def __array__(self): - return zeros(1) - def __array_wrap__(self, arr): - r = with_wrap() - r.arr = arr - return r - a = with_wrap() - x = minimum(a, a) - assert_equal(x.arr, zeros(1)) - - def test_priority(self): - class A(object): - def __array__(self): - return zeros(1) - def __array_wrap__(self, arr, context): - r = type(self)() - r.arr = arr - r.context = context - return r - class B(A): - __array_priority__ = 20. - class C(A): - __array_priority__ = 40. 
- x = zeros(1) - a = A() - b = B() - c = C() - f = minimum - self.failUnless(type(f(x,x)) is ndarray) - self.failUnless(type(f(x,a)) is A) - self.failUnless(type(f(x,b)) is B) - self.failUnless(type(f(x,c)) is C) - self.failUnless(type(f(a,x)) is A) - self.failUnless(type(f(b,x)) is B) - self.failUnless(type(f(c,x)) is C) - - self.failUnless(type(f(a,a)) is A) - self.failUnless(type(f(a,b)) is B) - self.failUnless(type(f(b,a)) is B) - self.failUnless(type(f(b,b)) is B) - self.failUnless(type(f(b,c)) is C) - self.failUnless(type(f(c,b)) is C) - self.failUnless(type(f(c,c)) is C) - - self.failUnless(type(exp(a) is A)) - self.failUnless(type(exp(b) is B)) - self.failUnless(type(exp(c) is C)) - - def test_failing_wrap(self): - class A(object): - def __array__(self): - return zeros(1) - def __array_wrap__(self, arr, context): - raise RuntimeError - a = A() - self.failUnlessRaises(RuntimeError, maximum, a, a) - - def test_array_with_context(self): - class A(object): - def __array__(self, dtype=None, context=None): - func, args, i = context - self.func = func - self.args = args - self.i = i - return zeros(1) - class B(object): - def __array__(self, dtype=None): - return zeros(1, dtype) - class C(object): - def __array__(self): - return zeros(1) - a = A() - maximum(zeros(1), a) - self.failUnless(a.func is maximum) - assert_equal(a.args[0], 0) - self.failUnless(a.args[1] is a) - self.failUnless(a.i == 1) - assert_equal(maximum(a, B()), 0) - assert_equal(maximum(a, C()), 0) - -class TestChoose(NumpyTestCase): - def test_mixed(self): - c = array([True,True]) - a = array([True,True]) - assert_equal(choose(c, (a, 1)), array([1,1])) - - -class _test_complex_real(NumpyTestCase): - def setUp(self): - self.x = 0.52 - self.z = self.x+0j - self.funcs = ['arcsin', 'arccos', 'arctan', 'arcsinh', 'arccosh', - 'arctanh', 'sin', 'cos', 'tan', 'exp', 'log', 'sqrt', - 'log10'] - def test_it(self): - for fun in self.funcs: - cr = fun(self.z) - assert_almost_equal(fun(self.x),cr.real) - assert_almost_equal(0, cr.imag) - -class TestChoose(NumpyTestCase): - def test_attributes(self): - add = ncu.add - assert_equal(add.__name__, 'add') - assert_equal(add.__doc__, 'y = add(x1,x2) adds the arguments elementwise.') - self.failUnless(add.ntypes >= 18) # don't fail if types added - self.failUnless('ii->i' in add.types) - assert_equal(add.nin, 2) - assert_equal(add.nout, 1) - assert_equal(add.identity, 0) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py deleted file mode 100644 index 7d7c06f30..000000000 --- a/numpy/core/tests/test_unicode.py +++ /dev/null @@ -1,304 +0,0 @@ -import sys -from numpy.testing import * -from numpy.core import * - -# Guess the UCS length for this python interpreter -if len(buffer(u'u')) == 4: - ucs4 = True -else: - ucs4 = False - -# Value that can be represented in UCS2 interpreters -ucs2_value = u'\uFFFF' -# Value that cannot be represented in UCS2 interpreters (but can in UCS4) -ucs4_value = u'\U0010FFFF' - - -############################################################ -# Creation tests -############################################################ - -class create_zeros(NumpyTestCase): - """Check the creation of zero-valued arrays""" - - def content_test(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assert_(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assert_(len(ua.data) == nbytes) - # Small check that data in array element is ok - 
self.assert_(ua_scalar == u'') - # Encode to ascii and double check - self.assert_(ua_scalar.encode('ascii') == '') - # Check buffer lengths for scalars - if ucs4: - self.assert_(len(buffer(ua_scalar)) == 0) - else: - self.assert_(len(buffer(ua_scalar)) == 0) - - def check_zeros0D(self): - """Check creation of 0-dimensional objects""" - ua = zeros((), dtype='U%s' % self.ulen) - self.content_test(ua, ua[()], 4*self.ulen) - - def check_zerosSD(self): - """Check creation of single-dimensional objects""" - ua = zeros((2,), dtype='U%s' % self.ulen) - self.content_test(ua, ua[0], 4*self.ulen*2) - self.content_test(ua, ua[1], 4*self.ulen*2) - - def check_zerosMD(self): - """Check creation of multi-dimensional objects""" - ua = zeros((2,3,4), dtype='U%s' % self.ulen) - self.content_test(ua, ua[0,0,0], 4*self.ulen*2*3*4) - self.content_test(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) - - -class test_create_zeros_1(create_zeros): - """Check the creation of zero-valued arrays (size 1)""" - ulen = 1 - -class test_create_zeros_2(create_zeros): - """Check the creation of zero-valued arrays (size 2)""" - ulen = 2 - -class test_create_zeros_1009(create_zeros): - """Check the creation of zero-valued arrays (size 1009)""" - ulen = 1009 - - -class create_values(NumpyTestCase): - """Check the creation of unicode arrays with values""" - - def content_test(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assert_(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assert_(len(ua.data) == nbytes) - # Small check that data in array element is ok - self.assert_(ua_scalar == self.ucs_value*self.ulen) - # Encode to UTF-8 and double check - self.assert_(ua_scalar.encode('utf-8') == \ - (self.ucs_value*self.ulen).encode('utf-8')) - # Check buffer lengths for scalars - if ucs4: - self.assert_(len(buffer(ua_scalar)) == 4*self.ulen) - else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - self.assert_(len(buffer(ua_scalar)) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) - - def check_values0D(self): - """Check creation of 0-dimensional objects with values""" - ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) - self.content_test(ua, ua[()], 4*self.ulen) - - def check_valuesSD(self): - """Check creation of single-dimensional objects with values""" - ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) - self.content_test(ua, ua[0], 4*self.ulen*2) - self.content_test(ua, ua[1], 4*self.ulen*2) - - def check_valuesMD(self): - """Check creation of multi-dimensional objects with values""" - ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) - self.content_test(ua, ua[0,0,0], 4*self.ulen*2*3*4) - self.content_test(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) - - -class test_create_values_1_ucs2(create_values): - """Check the creation of valued arrays (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - -class test_create_values_1_ucs4(create_values): - """Check the creation of valued arrays (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - -class test_create_values_2_ucs2(create_values): - """Check the creation of valued arrays (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - -class test_create_values_2_ucs4(create_values): - """Check the creation of valued arrays (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value 
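
Aside (not part of the patch): the buffer-length branches in content_test above encode how wide the interpreter's unicode storage is (4 bytes per code point on UCS4 builds, 2-byte code units with surrogate pairs on UCS2 builds). A small stand-alone sketch of the same arithmetic, using utf-16-le encoding as a stand-in because buffer() and narrow UCS2 builds are Python 2 features:

    # A BMP character occupies one 16-bit code unit; a character above
    # U+FFFF needs a surrogate *pair* under UCS2, i.e. 2 * 2 bytes,
    # which is where the 2*2*ulen expectation in the tests comes from.
    ucs2_value = u'\uFFFF'
    ucs4_value = u'\U0010FFFF'

    assert len(ucs2_value.encode('utf-16-le')) == 2      # one 2-byte word
    assert len(ucs4_value.encode('utf-16-le')) == 2 * 2  # surrogate pair
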
- -class test_create_values_1009_ucs2(create_values): - """Check the creation of valued arrays (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - -class test_create_values_1009_ucs4(create_values): - """Check the creation of valued arrays (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -############################################################ -# Assignment tests -############################################################ - -class assign_values(NumpyTestCase): - """Check the assignment of unicode arrays with values""" - - def content_test(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assert_(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assert_(len(ua.data) == nbytes) - # Small check that data in array element is ok - self.assert_(ua_scalar == self.ucs_value*self.ulen) - # Encode to UTF-8 and double check - self.assert_(ua_scalar.encode('utf-8') == \ - (self.ucs_value*self.ulen).encode('utf-8')) - # Check buffer lengths for scalars - if ucs4: - self.assert_(len(buffer(ua_scalar)) == 4*self.ulen) - else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - self.assert_(len(buffer(ua_scalar)) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) - - def check_values0D(self): - """Check assignment of 0-dimensional objects with values""" - ua = zeros((), dtype='U%s' % self.ulen) - ua[()] = self.ucs_value*self.ulen - self.content_test(ua, ua[()], 4*self.ulen) - - def check_valuesSD(self): - """Check assignment of single-dimensional objects with values""" - ua = zeros((2,), dtype='U%s' % self.ulen) - ua[0] = self.ucs_value*self.ulen - self.content_test(ua, ua[0], 4*self.ulen*2) - ua[1] = self.ucs_value*self.ulen - self.content_test(ua, ua[1], 4*self.ulen*2) - - def check_valuesMD(self): - """Check assignment of multi-dimensional objects with values""" - ua = zeros((2,3,4), dtype='U%s' % self.ulen) - ua[0,0,0] = self.ucs_value*self.ulen - self.content_test(ua, ua[0,0,0], 4*self.ulen*2*3*4) - ua[-1,-1,-1] = self.ucs_value*self.ulen - self.content_test(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) - - -class test_assign_values_1_ucs2(assign_values): - """Check the assignment of valued arrays (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - -class test_assign_values_1_ucs4(assign_values): - """Check the assignment of valued arrays (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - -class test_assign_values_2_ucs2(assign_values): - """Check the assignment of valued arrays (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - -class test_assign_values_2_ucs4(assign_values): - """Check the assignment of valued arrays (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - -class test_assign_values_1009_ucs2(assign_values): - """Check the assignment of valued arrays (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - -class test_assign_values_1009_ucs4(assign_values): - """Check the assignment of valued arrays (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -############################################################ -# Byteorder tests -############################################################ - -class byteorder_values(NumpyTestCase): - """Check the byteorder of unicode arrays in round-trip conversions""" - - def check_values0D(self): - """Check 
byteorder of 0-dimensional objects""" - ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - # This changes the interpretation of the data region (but not the - # actual data), therefore the returned scalars are not - # the same (they are byte-swapped versions of each other). - self.assert_(ua[()] != ua2[()]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - def check_valuesSD(self): - """Check byteorder of single-dimensional objects""" - ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - self.assert_(ua[0] != ua2[0]) - self.assert_(ua[-1] != ua2[-1]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - def check_valuesMD(self): - """Check byteorder of multi-dimensional objects""" - ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, - dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - self.assert_(ua[0,0,0] != ua2[0,0,0]) - self.assert_(ua[-1,-1,-1] != ua2[-1,-1,-1]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - -class test_byteorder_1_ucs2(byteorder_values): - """Check the byteorder in unicode (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - -class test_byteorder_1_ucs4(byteorder_values): - """Check the byteorder in unicode (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - -class test_byteorder_2_ucs2(byteorder_values): - """Check the byteorder in unicode (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - -class test_byteorder_2_ucs4(byteorder_values): - """Check the byteorder in unicode (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - -class test_byteorder_1009_ucs2(byteorder_values): - """Check the byteorder in unicode (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - -class test_byteorder_1009_ucs4(byteorder_values): - """Check the byteorder in unicode (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/core/tests/testdata.fits b/numpy/core/tests/testdata.fits deleted file mode 100644 index ca48ee851..000000000 Binary files a/numpy/core/tests/testdata.fits and /dev/null differ diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py deleted file mode 100644 index 82f6a91df..000000000 --- a/numpy/ctypeslib.py +++ /dev/null @@ -1,165 +0,0 @@ -__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library', - 'c_intp'] - -import sys, os -from numpy import integer, ndarray, dtype as _dtype, deprecate -from numpy.core.multiarray import _flagdict, flagsobj - -try: - import ctypes -except ImportError: - ctypes = None - -if ctypes is None: - def _dummy(*args, **kwds): - raise ImportError, "ctypes is not available." - ctypes_load_library = _dummy - load_library = _dummy - from numpy import intp as c_intp -else: - import numpy.core._internal as nic - c_intp = nic._getintp_ctype() - del nic - - # Adapted from Albert Strasheim - def load_library(libname, loader_path): - if ctypes.__version__ < '1.0.1': - import warnings - warnings.warn("All features of ctypes interface may not work " \ - "with ctypes < 1.0.1") - if '.' 
not in libname: - if sys.platform == 'win32': - libname = '%s.dll' % libname - elif sys.platform == 'darwin': - libname = '%s.dylib' % libname - else: - libname = '%s.so' % libname - loader_path = os.path.abspath(loader_path) - if not os.path.isdir(loader_path): - libdir = os.path.dirname(loader_path) - else: - libdir = loader_path - libpath = os.path.join(libdir, libname) - return ctypes.cdll[libpath] - - ctypes_load_library = deprecate(load_library, 'ctypes_load_library', - 'load_library') - -def _num_fromflags(flaglist): - num = 0 - for val in flaglist: - num += _flagdict[val] - return num - -_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY'] -def _flags_fromnum(num): - res = [] - for key in _flagnames: - value = _flagdict[key] - if (num & value): - res.append(key) - return res - - -class _ndptr(object): - def from_param(cls, obj): - if not isinstance(obj, ndarray): - raise TypeError, "argument must be an ndarray" - if cls._dtype_ is not None \ - and obj.dtype != cls._dtype_: - raise TypeError, "array must have data type %s" % cls._dtype_ - if cls._ndim_ is not None \ - and obj.ndim != cls._ndim_: - raise TypeError, "array must have %d dimension(s)" % cls._ndim_ - if cls._shape_ is not None \ - and obj.shape != cls._shape_: - raise TypeError, "array must have shape %s" % str(cls._shape_) - if cls._flags_ is not None \ - and ((obj.flags.num & cls._flags_) != cls._flags_): - raise TypeError, "array must have flags %s" % \ - _flags_fromnum(cls._flags_) - return obj.ctypes - from_param = classmethod(from_param) - - -# Factory for an array-checking class with from_param defined for -# use with ctypes argtypes mechanism -_pointer_type_cache = {} -def ndpointer(dtype=None, ndim=None, shape=None, flags=None): - """Array-checking restype/argtypes. - - An ndpointer instance is used to describe an ndarray in restypes - and argtypes specifications. This approach is more flexible than - using, for example, - - POINTER(c_double) - - since several restrictions can be specified, which are verified - upon calling the ctypes function. These include data type - (dtype), number of dimensions (ndim), shape and flags (e.g. - 'C_CONTIGUOUS' or 'F_CONTIGUOUS'). If a given array does not satisfy the - specified restrictions, a TypeError is raised. 
- - Example: - - clib.somefunc.argtypes = [ndpointer(dtype=float64, - ndim=1, - flags='C_CONTIGUOUS')] - clib.somefunc(array([1,2,3],dtype=float64)) - - """ - - if dtype is not None: - dtype = _dtype(dtype) - num = None - if flags is not None: - if isinstance(flags, str): - flags = flags.split(',') - elif isinstance(flags, (int, integer)): - num = flags - flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): - num = flags.num - flags = _flags_fromnum(num) - if num is None: - try: - flags = [x.strip().upper() for x in flags] - except: - raise TypeError, "invalid flags specification" - num = _num_fromflags(flags) - try: - return _pointer_type_cache[(dtype, ndim, shape, num)] - except KeyError: - pass - if dtype is None: - name = 'any' - elif dtype.names: - name = str(id(dtype)) - else: - name = dtype.str - if ndim is not None: - name += "_%dd" % ndim - if shape is not None: - try: - strshape = [str(x) for x in shape] - except TypeError: - strshape = [str(shape)] - shape = (shape,) - shape = tuple(shape) - name += "_"+"x".join(strshape) - if flags is not None: - name += "_"+"_".join(flags) - else: - flags = [] - klass = type("ndpointer_%s"%name, (_ndptr,), - {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) - _pointer_type_cache[dtype] = klass - return klass - -def test(level=1, verbosity=1): - from numpy.testing import NumpyTest - return NumpyTest().test(level, verbosity) diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py deleted file mode 100644 index 72f10ba25..000000000 --- a/numpy/distutils/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ - -from __version__ import version as __version__ - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -import ccompiler -import unixccompiler - -from info import __doc__ - -try: - import __config__ - _INSTALLED = True -except ImportError: - _INSTALLED = False - -if _INSTALLED: - def test(level=1, verbosity=1): - from numpy.testing import NumpyTest - return NumpyTest().test(level, verbosity) diff --git a/numpy/distutils/__version__.py b/numpy/distutils/__version__.py deleted file mode 100644 index 06077f79c..000000000 --- a/numpy/distutils/__version__.py +++ /dev/null @@ -1,4 +0,0 @@ -major = 0 -minor = 4 -micro = 0 -version = '%(major)d.%(minor)d.%(micro)d' % (locals()) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py deleted file mode 100644 index 0c352b964..000000000 --- a/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,403 +0,0 @@ -import re -import os -import sys -import new - -from distutils.ccompiler import * -from distutils import ccompiler -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, quote_args, msvc_on_amd64 - -# hack to set compiler optimizing options. Needs to integrated with something. -import distutils.sysconfig -_old_init_posix = distutils.sysconfig._init_posix -def _new_init_posix(): - _old_init_posix() - distutils.sysconfig._config_vars['OPT'] = '-Wall -g -O0' -#distutils.sysconfig._init_posix = _new_init_posix - -def replace_method(klass, method_name, func): - m = new.instancemethod(func, None, klass) - setattr(klass, method_name, m) - -# Using customized CCompiler.spawn. 
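
Aside (not part of the patch): replace_method above is a plain monkey-patching helper, and the comment that follows marks its first use (the customized spawn defined next). A minimal stand-alone sketch of the idiom, with hypothetical class and function names; plain attribute assignment suffices on Python 3, whereas the Python 2 code here wraps the function in new.instancemethod first.

    class Greeter(object):
        def hello(self):
            return "hello"

    def shouting_hello(self):
        # The replacement is bound as a method, so it receives self like the original.
        return "HELLO"

    def replace_method(klass, method_name, func):
        # Rebind the named method on the class so all instances pick up the override.
        setattr(klass, method_name, func)

    replace_method(Greeter, 'hello', shouting_hello)
    assert Greeter().hello() == "HELLO"
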
-def CCompiler_spawn(self, cmd, display=None): - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - s,o = exec_command(cmd) - if s: - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - print o - if re.search('Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError,\ - 'Command "%s" failed with exit status %d%s' % (cmd, s, msg) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. - i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError, \ - "unknown file type '%s' (from '%s')" % (ext, src_name) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir,base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - # This method is effective only with Python >=2.3 distutils. - # Any changes here should be applied also to fcompiler.compile - # method to support pre Python 2.3 distutils. - if not sources: - return [] - from fcompiler import FCompiler - if isinstance(self, FCompiler): - display = [] - for fc in ['f77','f90','fix']: - fcomp = getattr(self,'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - # build any sources in same order as they were originally specified - # especially important for fortran .f90 files using modules - if isinstance(self, FCompiler): - objects_to_build = build.keys() - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - else: - for obj, (src, ext) in build.items(): - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ Customize compiler using distutils command. 
- """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name,value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = compiler.executables.keys() - for key in ['version','libraries','library_dirs', - 'object_switch','compile_switch', - 'include_dirs','define','undef','rpath','link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler,key): - v = getattr(compiler, key) - mx = max(mx,len(key)) - props.append((key,repr(v))) - lines = [] - format = '%-' + repr(mx+1) + 's = %s' - for prop in props: - lines.append(format % prop) - return '\n'.join(lines) - -def CCompiler_show_customization(self): - if 0: - for attrname in ['include_dirs','define','undef', - 'libraries','library_dirs', - 'rpath','link_objects']: - attr = getattr(self,attrname,None) - if not attr: - continue - log.info("compiler '%s' is set to %s" % (attrname,attr)) - try: - self.get_version() - except: - pass - if log._global_log.threshold<2: - print '*'*80 - print self.__class__ - print _compiler_to_string(self) - print '*'*80 - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. All the default - # options used by distutils can be extracted with: - - # from distutils import sysconfig - # sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - # 'CCSHARED', 'LDSHARED', 'SO') - try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self,'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a,b)]\ - + self.compiler[1:] - else: - if hasattr(self,'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - log.warn('Missing compiler_cxx fix for '+self.__class__.__name__) - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler - classes. - - :Parameters: - pat : regex matching version numbers. - ignore : false or regex matching expressions to skip over. - start : false or regex matching the start of where to start looking - for version numbers. - - :Returns: - A function that is appropiate to use as the .version_match - attribute of a CCompiler class. 
- """ - def matcher(self, version_string): - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while 1: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """Compiler version. Returns None if compiler is not available.""" - if not force and hasattr(self,'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - status, output = exec_command(version_cmd,use_tee=0) - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - if self.compiler_type=='msvc': return self - cxx = copy(self) - cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] - if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0]] + cxx.compiler_cxx[0] \ - + cxx.linker_so[2:] - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler','IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler','IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -ccompiler._default_compilers += (('linux.*','intel'),('linux.*','intele')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError, msg - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError, msg: - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError, msg: - raise DistutilsModuleError, \ - "can't compile C/C++ code: unable to load module '%s'" % \ - module_name - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError, \ - ("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name) - compiler = klass(None, dry_run, force) - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - library_dirs = quote_args(library_dirs) - runtime_library_dirs = quote_args(runtime_library_dirs) - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.'+_cc+'compiler') - if _m is not None: - setattr(getattr(_m, _cc+'compiler'), 'gen_lib_options', - gen_lib_options) - -_distutils_gen_preprocess_options = gen_preprocess_options -def gen_preprocess_options (macros, include_dirs): - include_dirs = quote_args(include_dirs) - return _distutils_gen_preprocess_options(macros, include_dirs) -ccompiler.gen_preprocess_options = gen_preprocess_options - -# define DISTUTILS_USE_SDK when necessary to workaround distutils/msvccompiler.py bug -msvc_on_amd64() diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py deleted file mode 100644 index dfe81d542..000000000 --- a/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands.""" - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ 'build_py', - 'clean', - 'install_lib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command',globals(),locals(),distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 60e9b5752..000000000 --- a/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. 
- setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py',setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py deleted file mode 100644 index 1f5c08205..000000000 --- a/numpy/distutils/command/build.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler',None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py deleted file mode 100644 index 0d49d7ee8..000000000 --- a/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,259 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. -""" - -import os -from glob import glob -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:] -# - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - return - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources',[])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources',[])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. 
- languages = [] - for (lib_name, build_info) in self.libraries: - if not all_strings(build_info.get('sources',[])): - self.run_command('build_src') - l = build_info.get('language',None) - if l and l not in languages: languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self.compiler is not None: - self.fcompiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self.fcompiler.customize_cmd(self) - self.libraries = libraries - - self.fcompiler.show_customization() - - self.build_libraries(self.libraries) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self.fcompiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError, \ - ("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language','c')=='f90' - - # save source type information so that build_ext can use it. 
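        # (The build_ext command reads this 'source_languages' entry back from
        # the library's build_info when deciding whether a C++, Fortran 77 or
        # Fortran 90 compiler/linker is needed for extensions that link
        # against this library.)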
- source_languages = [] - if c_sources: source_languages.append('c') - if cxx_sources: source_languages.append('c++') - if requiref90: source_languages.append('f90') - elif f_sources: source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends',[]) - if not (self.force or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc',{}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script '\ - 'for fortran compiler: %s' \ - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError, "library %s has Fortran sources"\ - " but no Fortran compiler found" % (lib_name) - - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - extra_postargs = build_info.get('extra_compiler_args') or [] - - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: self.mkpath(module_build_dir) - - if compiler.compiler_type=='msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - objects = [] - if c_sources: - log.info("compiling C sources") - objects = compiler.compile(c_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile(cxx_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options(\ - module_dirs,module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self.fcompiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' \ - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += 
fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - objects.extend(f_objects) - - # assume that default linker is suitable for - # linking Fortran object files - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries',[]) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo[1].get('libraries',[])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py deleted file mode 100644 index 68d0aff7f..000000000 --- a/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,474 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. -""" - -import os -import sys -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import exec_command -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import filter_sources, has_f_sources, \ - has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence -from numpy.distutils.command.config_compiler import show_fortran_compilers - -try: - set -except NameError: - from sets import Set as set - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler',None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - - def finalize_options(self): - incl_dirs = self.include_dirs - old_build_ext.finalize_options(self) - if incl_dirs is not None: - self.include_dirs.extend(self.distribution.include_dirs or []) - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. - self.run_command('build_src') - - if self.distribution.has_c_libraries(): - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. 
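For context, a minimal setup.py sketch of this convention; the package, library and file names are hypothetical, and the Configuration helper from numpy.distutils.misc_util is assumed as the usual way build_clib and build_ext receive their library and extension lists:

    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('mypkg', parent_package, top_path)
        # Compiled into a static library by the build_clib command.
        config.add_library('mylib', sources=['src/helpers.c', 'src/solver.f'])
        # The extension links against 'mylib' only because it is listed
        # explicitly here; build_ext does not add it automatically.
        config.add_extension('_mymod',
                             sources=['src/_mymodmodule.c'],
                             libraries=['mylib'])
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(configuration=configuration)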
- - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - self.compiler.show_customization() - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname,build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,'\ - ' overwriting build_info\n%s... \nwith\n%s...' \ - % (libname, `clibs[libname]`[:300], `build_info`[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname,build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. - all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries',[]) - c_lib_dirs += binfo.get('library_dirs',[]) - for m in binfo.get('macros',[]): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname,{}).get('source_languages',[]): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - # reset language attribute for choosing proper linker - if 'c++' in ext_languages: - ext_language = 'c++' - elif 'f90' in ext_languages: - ext_language = 'f90' - elif 'f77' in ext_languages: - ext_language = 'f77' - else: - ext_language = 'c' # default - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name,l,ext_language)) - ext.language = ext_language - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution,need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler = self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' % - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - def swig_sources(self, sources): - # Do nothing. Swig sources have beed handled in build_src command. - return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - if not (self.force or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - - - if self.compiler.compiler_type=='msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. 
- extra_args.append('/Zm1000') - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. - if ext.language=='f90': - fcompiler = self._f90_compiler - elif ext.language=='f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError, "extension %r has C++ sources" \ - "but no C++ compiler found" % (ext.name) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError, "extension %r has Fortran sources " \ - "but no Fortran compiler found" % (ext.name) - if ext.language in ['f77','f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " \ - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language=='c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " \ - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends':ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - c_objects = [] - if c_sources: - log.info("compiling C sources") - c_objects = self.compiler.compile(c_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile(cxx_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp,os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs,module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. 
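        # (Rather than switching to the Fortran linker, the Fortran runtime
        # libraries and any g77-built static libraries are folded into the
        # MSVC link step by _libs_with_msvc_and_fortran() below.)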
- if self.compiler.compiler_type=='msvc': - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs) - elif ext.language in ['f77','f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language=='c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if sys.version[:3]>='2.3': - kws = {'target_lang':ext.language} - else: - kws = {} - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp,**kws) - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: return - - for libname in c_libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir,'%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir,'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - s,o = exec_command(['cygpath', '-w', dir], use_tee=False) - if not s: - dir = o - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files (self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs (self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py deleted file mode 100644 index 0da23a513..000000000 --- a/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,25 +0,0 @@ - -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. 
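        # (build_src records the modules it generates in py_modules_dict,
        # keyed by package name; merging that dict in here is what lets
        # e.g. SWIG-generated wrapper .py files be picked up by build_py.)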
- build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package,[]) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = filter(is_string, self.py_modules) - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py deleted file mode 100644 index 99134f202..000000000 --- a/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. -""" - -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py deleted file mode 100644 index 57b10ba54..000000000 --- a/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,716 +0,0 @@ -""" Build swig, f2py, pyrex sources. 
-""" - -import os -import re -import sys - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - -try: - import Pyrex.Compiler.Main - have_pyrex = True -except ImportError: - have_pyrex = False - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import fortran_ext_match, \ - appendpath, is_string, is_sequence -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file -from numpy.distutils.exec_command import splitcmdline - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ] - - boolean_options = ['force','inplace'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = splitcmdline(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = splitcmdline(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = 
build_ext.inplace - if self.swig_cpp is None: - self.swig_cpp = build_ext.swig_cpp - for c in ['swig','swig_opt']: - o = '--'+c.replace('_','-') - v = getattr(build_ext,c,None) - if v: - if getattr(self,c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o,v)) - setattr(self, c, v) - - def run(self): - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data,str): - new_data_files.append(data) - elif isinstance(data,tuple): - d,files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src,d) - funcs = filter(callable,files) - files = filter(lambda f:not callable(f), files) - for f in funcs: - if f.func_code.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s,list): - files.extend(s) - elif isinstance(s,str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d,files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if callable(source): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources',[])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = 
self.generate_sources(sources, ext) - - sources = self.template_sources(sources, ext) - - sources = self.swig_sources(sources, ext) - - sources = self.f2py_sources(sources, ext) - - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src]\ - +name.split('.')[:-1])) - self.mkpath(build_dir) - for func in func_sources: - source = func(extension, build_dir) - if not source: - continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources,['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources,['.h','.hpp','.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir,os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - fid = open(target_file,'w') - fid.write(outstr) - fid.close() - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - if self.inplace or not have_pyrex: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - target_file = os.path.join(target_dir, ext_name + '.c') - depends = [source] + extension.depends - if self.force or newer_group(depends, target_file, 'newer'): - if have_pyrex: - log.info("pyrexc:> %s" % (target_file)) - self.mkpath(target_dir) - options = Pyrex.Compiler.Main.CompilationOptions( - defaults=Pyrex.Compiler.Main.default_options, - include_path=extension.include_dirs, - output_file=target_file) - pyrex_result = Pyrex.Compiler.Main.compile(source, - options=options) - if pyrex_result.num_errors != 0: - raise DistutilsError,"%d errors while compiling %r with Pyrex" \ - % (pyrex_result.num_errors, source) - elif os.path.isfile(target_file): - log.warn("Pyrex required for compiling %r but not available,"\ - " using old target %r"\ - % (source, target_file)) - else: - raise DistutilsError("Pyrex required for compiling %r"\ - " but notavailable" % (source,)) - return target_file - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir,name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir,name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir,name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - map(self.mkpath, target_dirs) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name,build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options',[])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' - depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file,'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - import numpy.f2py - numpy.f2py.run_main(f2py_options - + ['--build-dir',target_dir,source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src]\ - +name.split('.')[:-1])) - target_file = os.path.join(target_dir,ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - import numpy.f2py - numpy.f2py.run_main(f2py_options + ['--lower', - '--build-dir',target_dir]+\ - ['-m',ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - target_c = os.path.join(self.build_src,'fortranobject.c') - target_h = os.path.join(self.build_src,'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if self.build_src not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." \ - % (self.build_src)) - extension.include_dirs.append(self.build_src) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d,'src','fortranobject.c') - source_h = os.path.join(d,'src','fortranobject.h') - if newer(source_c,target_c) or newer(source_h,target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c,target_c) - self.copy_file(source_h,target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']: - filename = os.path.join(target_dir,ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - if is_cpp: target_ext = '.cpp' - else: - typ2 = get_swig_target(source) - if typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - target_ext = '.cpp' - else: - log.warn('assuming that %r has c++ swig target' % (source)) - target_file = os.path.join(target_dir,'%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' \ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - map(self.mkpath, target_dirs) - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z',re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search -_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search - -def get_swig_target(source): - f = open(source,'r') - result = 'c' - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - f.close() - return result - -def get_swig_modulename(source): - f = open(source,'r') - f_readlines = getattr(f,'xreadlines',f.readlines) - name = None - for line in f_readlines(): - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - f.close() - return name - -def _find_swig_target(target_dir,name): - for ext in ['.cpp','.c']: - target = os.path.join(target_dir,'%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?'\ - '__user__[\w_]*)',re.I).match - -def get_f2py_modulename(source): - name = None - f = open(source) - f_readlines = getattr(f,'xreadlines',f.readlines) - for line in f_readlines(): - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - f.close() - return name - -########################################## diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py deleted file mode 100644 index 41c8f4f02..000000000 --- a/numpy/distutils/command/config.py +++ /dev/null @@ -1,158 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson - -import os, signal -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from numpy.distutils.exec_command import exec_command - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self,mth,lang,args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77','f90']: - self.compiler = self.fcompiler - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError,CompileError),msg: - self.compiler = save_compiler - raise CompileError - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - return self._wrap_method(old_config._compile,lang, - (body, headers, include_dirs, lang)) - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77','f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - s,o = exec_command(['cygpath', '-w', d], - use_tee=False) - if not s: d = o - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir,'%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir,'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir,'%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - return self._wrap_method(old_config._link,lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. 
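        # For example, a hypothetical call check_func('sin', libraries=['m'],
        # decl=True, call=True) generates roughly
        #     int sin ();
        #     int main (void) {
        #       sin();
        #       return 0;
        #     }
        # and then tries to compile and link it against libm.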
- self._check_compiler() - body = [] - if decl: - body.append("int %s ();" % func) - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c"): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. - """ - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - exitcode, output = 255, '' - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - exe = os.path.join('.', exe) - exitstatus, output = exec_command(exe, execute_in='.') - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - - self._clean() - return exitcode, output diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py deleted file mode 100644 index e7fee94df..000000000 --- a/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,123 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=[]): - # Using cache to prevent infinite recursion - if _cache: return - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. 
- """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=',None,"specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=',None,"specify F77 compiler flags"), - ('f90flags=',None,"specify F90 compiler flags"), - ('opt=',None,"specify optimization flags"), - ('arch=',None,"specify architecture specific optimization flags"), - ('debug','g',"compile with debugging information"), - ('noopt',None,"compile without optimization"), - ('noarch',None,"compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler',None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug','noopt','noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c,a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c,a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=',None,"specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c,a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c,a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. - return diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py deleted file mode 100644 index 167706671..000000000 --- a/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. 
-""" - -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py deleted file mode 100644 index 687faf080..000000000 --- a/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,9 +0,0 @@ -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. - self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py deleted file mode 100644 index 36e6b5a66..000000000 --- a/numpy/distutils/command/install.py +++ /dev/null @@ -1,36 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod -else: - import distutils.command.install as old_install_mod -old_install = old_install_mod.install -from distutils.file_util import write_file - -class install(old_install): - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def run(self): - r = old_install.run(self) - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - f = open(self.record,'r') - lines = [] - need_rewrite = False - for l in f.readlines(): - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - f.close() - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py deleted file mode 100644 index b72737f85..000000000 --- a/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,13 +0,0 @@ -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py deleted file mode 100644 index 58ace1064..000000000 --- a/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header,tuple): - # Kind of a hack, but I don't know where else to change this... 
- if header[0] == 'numpy.core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py deleted file mode 100644 index 62fce9574..000000000 --- a/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h,str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py deleted file mode 100644 index 591ae54ad..000000000 --- a/numpy/distutils/conv_template.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/python - -# takes templated file .xxx.src and produces .xxx file where .xxx is .i or .c or .h -# using the following template rules - -# /**begin repeat on a line by itself marks the beginning of a segment of code to be repeated -# /**end repeat**/ on a line by itself marks it's end - -# after the /**begin repeat and before the */ -# all the named templates are placed -# these should all have the same number of replacements - -# in the main body, the names are used. -# Each replace will use one entry from the list of named replacements - -# Note that all #..# forms in a block must have the same number of -# comma-separated entries. 
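# Illustrative sketch, not from the patch: how such a .src template expands,
# assuming Python 2 and this tree on sys.path; the repeat-block contents
# (the @type@ names and the add_ functions) are made up for the example.
from numpy.distutils.conv_template import process_str

template = '''
/**begin repeat
 * #type=float, double#
 */
static @type@ add_@type@(@type@ a, @type@ b) { return a + b; }
/**end repeat**/
'''
# process_str() emits the autogenerated-file header, then the body once per
# entry of #type=...#, with @type@ substituted and a #line directive before
# each copy.
print process_str(template)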
- -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -def parse_structure(astr): - spanlist = [] - # subroutines - ind = 0 - line = 1 - while 1: - start = astr.find("/**begin repeat", ind) - if start == -1: - break - start2 = astr.find("*/",start) - start2 = astr.find("\n",start2) - fini1 = astr.find("/**end repeat**/",start2) - fini2 = astr.find("\n",fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) - ind = fini2 - spanlist.sort() - return spanlist - -# return n copies of substr with template replacement -_special_names = {} - -template_re = re.compile(r"@([\w]+)@") -named_re = re.compile(r"#([\w]*)=([^#]*?)#") - -parenrep = re.compile(r"[(]([^)]*?)[)]\*(\d+)") -def paren_repl(obj): - torep = obj.group(1) - numrep = obj.group(2) - return ','.join([torep]*int(numrep)) - -plainrep = re.compile(r"([^*]+)\*(\d+)") - -def conv(astr): - # replaces all occurrences of '(a,b,c)*4' in astr - # with 'a,b,c,a,b,c,a,b,c,a,b,c' - astr = parenrep.sub(paren_repl,astr) - # replaces occurences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl,x.strip()) - for x in astr.split(',')]) - return astr - -def unique_key(adict): - # this obtains a unique key given a dictionary - # currently it works by appending together n of the letters of the - # current keys and increasing n until a unique key is found - # -- not particularly quick - allkeys = adict.keys() - done = False - n = 1 - while not done: - newkey = "".join([x[:n] for x in allkeys]) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - -def expand_sub(substr, namestr, line): - # find all named replacements - reps = named_re.findall(namestr) - names = {} - names.update(_special_names) - numsubs = None - for rep in reps: - name = rep[0].strip() - thelist = conv(rep[1]) - names[name] = thelist - - # make lists out of string entries in name dictionary - for name in names.keys(): - entry = names[name] - entrylist = entry.split(',') - names[name] = entrylist - num = len(entrylist) - if numsubs is None: - numsubs = num - elif numsubs != num: - print namestr - print substr - raise ValueError, "Mismatch in number to replace" - - # now replace all keys for each of the lists - mystr = '' - thissub = [None] - def namerepl(match): - name = match.group(1) - return names[name][thissub[0]] - for k in range(numsubs): - thissub[0] = k - mystr += ("#line %d\n%s\n\n" - % (line, template_re.sub(namerepl, substr))) - return mystr - - -_head = \ -"""/* This file was autogenerated from a template DO NOT EDIT!!!! 
- Changes should be made to the original source (.src) file -*/ - -""" - -def get_line_header(str,beg): - extra = [] - ind = beg-1 - char = str[ind] - while (ind > 0) and (char != '\n'): - extra.insert(0,char) - ind = ind - 1 - char = str[ind] - return ''.join(extra) - -def process_str(allstr): - newstr = allstr - writestr = _head - - struct = parse_structure(newstr) - # return a (sorted) list of tuples for each begin repeat section - # each tuple is the start and end of a region to be template repeated - - oldend = 0 - for sub in struct: - writestr += newstr[oldend:sub[0]] - expanded = expand_sub(newstr[sub[1]:sub[2]], - newstr[sub[0]:sub[1]], sub[4]) - writestr += expanded - oldend = sub[3] - - - writestr += newstr[oldend:] - return writestr - -include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" - r"(?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - fid = open(source) - lines = [] - for line in fid.readlines(): - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d,fn) - if os.path.isfile(fn): - print 'Including file',fn - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - fid.close() - return lines - -def process_file(source): - lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\","\\\\") - return ('#line 1 "%s"\n%s' - % (sourcefile, process_str(''.join(lines)))) - -if __name__ == "__main__": - - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file,'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname,'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py deleted file mode 100644 index 19e1b33a3..000000000 --- a/numpy/distutils/core.py +++ /dev/null @@ -1,219 +0,0 @@ - -import sys -from distutils.core import * - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm -from numpy.distutils.misc_util import get_data_files, is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. 
- from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k,v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError, repr(type(dv)) - -def _command_line_ok(_cache=[]): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = distutils.dist.Distribution() - return dist - -def _exit_interactive_session(_cache=[]): - if _cache: - return # been here - _cache.append(1) - print '-'*72 - raw_input('Press ENTER to close the interactive session..') - print '='*72 - -def setup(**attr): - - if len(sys.argv)<=1 and not attr.get('script_args',[]): - from interactive import interactive_sys_argv - import atexit - atexit.register(_exit_interactive_session) - sys.argv[:] = interactive_sys_argv(sys.argv) - if len(sys.argv)>1: - return setup(**attr) - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config,'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules',[]): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, item) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],)) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],)) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],)) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, (lib_name,build_info)): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,)) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,)) - break - libraries.append((lib_name,build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py deleted file mode 100644 index 86939bb5f..000000000 --- a/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,681 +0,0 @@ -#!/usr/bin/env python -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
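# Illustrative sketch, not from the patch: this module exposes a single `cpu`
# object (see __all__ and the bottom of the file). A minimal query, Python 2
# as in this tree; the __main__ block below shows a fuller dump.
from numpy.distutils.cpuinfo import cpu

print cpu.is_Intel(), cpu.is_AMD()   # returns a false value, not an exception,
                                     # when a check is not implemented here
print cpu._getNCPUs()                # number of processors, 1 if undetectable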
-Pearu Peterson -""" - -__all__ = ['cpu'] - -import sys, re, types -import os -import commands -import warnings - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = commands.getstatusoutput(cmd) - except EnvironmentError, e: - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, output - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase(object): - """Holds CPU information and provides methods for requiring - the availability of various CPU features. - """ - - def _try_call(self,func): - try: - return func() - except: - pass - - def __getattr__(self,name): - if not name.startswith('_'): - if hasattr(self,'_'+name): - attr = getattr(self,'_'+name) - if type(attr) is types.MethodType: - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError,name - - def _getNCPUs(self): - return 1 - - def _is_32bit(self): - return not self.is_64bit() - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except EnvironmentError, e: - warnings.warn(str(e), UserWarning) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): 
- return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return self.is_64bit() and self.is_PentiumIV() - - def _is_Core2(self): - return self.is_64bit() and self.is_Intel() and \ - re.match(r'.*?Core\(TM\)2\b', \ - self.info[0]['model name']) is not None - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'],re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bsse3\b',self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None - - def _is_64bit(self): - if self.is_Alpha(): - return True - if self.info[0].get('clflush size','')=='64': - return True - if self.info[0].get('uname_m','')=='x86_64': - return True - if self.info[0].get('arch','')=='IA-64': - return True - return False - - def _is_32bit(self): - return not self.is_64bit() - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0,1)) - self.__class__.info = info - - def _not_impl(self): pass - - def 
_is_singleCPU(self): - return self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self,n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except: pass - def __machine(self,n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self,n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): 
return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_32bit(self): - return self.info['isainfo_b']=='32' - def _is_64bit(self): - return self.info['isainfo_b']=='64' - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW',self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5',self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1',self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250',self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2',self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30',self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4',self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10',self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5',self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60',self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80',self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise',self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000',self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire',self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra',self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def _is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! 
- import _winreg - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)"\ - "\s+stepping\s+(?P\d+)",re.IGNORECASE) - chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while 1: - try: - proc=_winreg.EnumKey(chnd,pnum) - except _winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=_winreg.OpenKey(chnd,proc) - pidx=0 - while True: - try: - name,value,vtpe=_winreg.EnumValue(phnd,pidx) - except _winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except: - print sys.exc_value,'(ignoring)' - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0,1,2,3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6,7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3,5,6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7,8,9,10,11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6,15]) - elif self.is_AMD(): - return self.info[0]['Family'] in [5,6,15] - else: - return False - - def _has_sse(self): - 
if self.is_Intel(): - return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [7,8,9,10,11]) \ - or self.info[0]['Family']==15 - elif self.is_AMD(): - return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [6,7,8,10]) \ - or self.info[0]['Family']==15 - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5,6,15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6,15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. -else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -if __name__ == "__main__": - - cpu.is_blaa() - cpu.is_Intel() - cpu.is_Alpha() - - print 'CPU information:', - for name in dir(cpuinfo): - if name[0]=='_' and name[1]!='_': - r = getattr(cpu,name[1:])() - if r: - if r!=1: - print '%s=%s' %(name[1:],r), - else: - print name[1:], - print diff --git a/numpy/distutils/environment.py b/numpy/distutils/environment.py deleted file mode 100644 index b923c8ed5..000000000 --- a/numpy/distutils/environment.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='DEFAULT', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert = conf_desc - if not convert: - convert = lambda x : x - print '%s.%s:' % (self._distutils_section, name) - v = self._hook_handler(name, hook) - print ' hook : %s' % (convert(v),) - if envvar: - v = os.environ.get(envvar, None) - print ' environ: %s' % (convert(v),) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print ' config : %s' % (convert(v),) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError(name) - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert = conf_desc - var = self._hook_handler(name, hook) - if envvar is not None: - var = os.environ.get(envvar, var) - if confvar is not None and self._conf: - var = self._conf.get(confvar, (None, var))[1] - if convert is not None: - var = convert(var) - return var - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = 
dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py deleted file mode 100644 index 5863f5d6e..000000000 --- a/numpy/distutils/exec_command.py +++ /dev/null @@ -1,641 +0,0 @@ -#!/usr/bin/env python -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - exec_command --- execute command in a specified directory and - in the modified environment. - splitcmdline --- inverse of ' '.join(argv) - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Succesfully tested on: - os.name | sys.platform | comments - --------+--------------+---------- - posix | linux2 | Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 - posix | linux2 | Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 - posix | sunos5 | SunOS 5.9, Python 2.2, 2.3.2 - posix | darwin | Darwin 7.2.0, Python 2.3 - nt | win32 | Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 - nt | win32 | Windows 98, Python 2.1.1. Idle 0.8 - nt | win32 | Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. - posix | cygwin | Cygwin 98-4.10, Python 2.3.3(cygming special) - nt | win32 | Windows XP, Python 2.3.3 - -Known bugs: -- Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. -""" - -__all__ = ['exec_command','find_executable'] - -import os -import sys - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def temp_file_name(): - fo, name = make_temp_file() - fo.close() - return name - -############################################################ - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt','dos']: - fdir,fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW','PYTHON') - pythonexe = os.path.join(fdir,fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -############################################################ - -def splitcmdline(line): - """ Inverse of ' '.join(sys.argv). 
- """ - log.debug('splitcmdline(%r)' % (line)) - lst = [] - flag = 0 - s,pc,cc = '','','' - for nc in line+' ': - if flag==0: - flag = (pc != '\\' and \ - ((cc=='"' and 1) or (cc=="'" and 2) or \ - (cc==' ' and pc!=' ' and -2))) or flag - elif flag==1: - flag = (cc=='"' and pc!='\\' and nc==' ' and -1) or flag - elif flag==2: - flag = (cc=="'" and pc!='\\' and nc==' ' and -1) or flag - if flag!=-2: - s += cc - if flag<0: - flag = 0 - s = s.strip() - if s: - lst.append(s) - s = '' - pc,cc = cc,nc - else: - s = s.strip() - if s: - lst.append(s) - log.debug('splitcmdline -> %r' % (lst)) - return lst - -def test_splitcmdline(): - l = splitcmdline('a b cc') - assert l==['a','b','cc'], repr(l) - l = splitcmdline('a') - assert l==['a'], repr(l) - l = splitcmdline('a " b cc"') - assert l==['a','" b cc"'], repr(l) - l = splitcmdline('"a bcc" -h') - assert l==['"a bcc"','-h'], repr(l) - l = splitcmdline(r'"\"a \" bcc" -h') - assert l==[r'"\"a \" bcc"','-h'], repr(l) - l = splitcmdline(" 'a bcc' -h") - assert l==["'a bcc'",'-h'], repr(l) - l = splitcmdline(r"'\'a \' bcc' -h") - assert l==[r"'\'a \' bcc'",'-h'], repr(l) - -############################################################ - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. - """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH',os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt','dos','os2']: - fn,ext = os.path.splitext(exe) - extra_suffixes = ['.exe','.com','.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.good('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {} - for name in names: - env[name] = os.environ.get(name) - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name,value in env.items(): - os.environ[name] = value or '' - -def exec_command( command, - execute_in='', use_shell=None, use_tee = None, - _with_python = 1, - **env ): - """ Return (status,output) of executed command. - - command is a concatenated string of executable and arguments. - The output contains both stdout and stderr messages. - The following special keyword arguments can be used: - use_shell - execute `sh -c command` - use_tee - pipe the output of command through tee - execute_in - before run command `cd execute_in` and after `cd -`. - - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. 
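# Illustrative sketch, not from the patch: minimal use of the two public
# helpers, Python 2 as in this tree; the gfortran command is an arbitrary
# example, not something this module requires.
from numpy.distutils.exec_command import exec_command, find_executable

status, output = exec_command('gfortran --version', use_tee=0)
if status == 0:
    print output.splitlines()[0]      # combined stdout/stderr, status 0 on success
print find_executable('gfortran')     # absolute path, or None if not on PATH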
- """ - log.debug('exec_command(%r,%s)' % (command,\ - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( env.keys() ) - _update_environment( **env ) - - try: - # _exec_command is robust but slow, it relies on - # usable sys.std*.fileno() descriptors. If they - # are bad (like in win32 Idle, PyCrust environments) - # then _exec_command_python (even slower) - # will be used as a last resort. - # - # _exec_command_posix uses os.system and is faster - # but not on all platforms os.system will return - # a correct status. - if _with_python and (0 or sys.__stdout__.fileno()==-1): - st = _exec_command_python(command, - exec_command_dir = exec_dir, - **env) - elif os.name=='posix': - st = _exec_command_posix(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - else: - st = _exec_command(command, use_shell=use_shell, - use_tee=use_tee,**env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - -def _exec_command_posix( command, - use_shell = None, - use_tee = None, - **env ): - log.debug('_exec_command_posix(...)') - - if is_sequence(command): - command_str = ' '.join(list(command)) - else: - command_str = command - - tmpfile = temp_file_name() - stsfile = None - if use_tee: - stsfile = temp_file_name() - filter = '' - if use_tee == 2: - filter = r'| tr -cd "\n" | tr "\n" "."; echo' - command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\ - % (command_str,stsfile,tmpfile,filter) - else: - stsfile = temp_file_name() - command_posix = '( %s ; echo $? 
> %s ) > %s 2>&1'\ - % (command_str,stsfile,tmpfile) - #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile) - - log.debug('Running os.system(%r)' % (command_posix)) - status = os.system(command_posix) - - if use_tee: - if status: - # if command_tee fails then fall back to robust exec_command - log.warn('_exec_command_posix failed (status=%s)' % status) - return _exec_command(command, use_shell=use_shell, **env) - - if stsfile is not None: - f = open(stsfile,'r') - status_text = f.read() - status = int(status_text) - f.close() - os.remove(stsfile) - - f = open(tmpfile,'r') - text = f.read() - f.close() - os.remove(tmpfile) - - if text[-1:]=='\n': - text = text[:-1] - - return status, text - - -def _exec_command_python(command, - exec_command_dir='', **env): - log.debug('_exec_command_python(...)') - - python_exe = get_pythonexe() - cmdfile = temp_file_name() - stsfile = temp_file_name() - outfile = temp_file_name() - - f = open(cmdfile,'w') - f.write('import os\n') - f.write('import sys\n') - f.write('sys.path.insert(0,%r)\n' % (exec_command_dir)) - f.write('from exec_command import exec_command\n') - f.write('del sys.path[0]\n') - f.write('cmd = %r\n' % command) - f.write('os.environ = %r\n' % (os.environ)) - f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env)) - f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile)) - f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile)) - f.close() - - cmd = '%s %s' % (python_exe, cmdfile) - status = os.system(cmd) - if status: - raise RuntimeError("%r failed" % (cmd,)) - os.remove(cmdfile) - - f = open(stsfile,'r') - status = int(f.read()) - f.close() - os.remove(stsfile) - - f = open(outfile,'r') - text = f.read() - f.close() - os.remove(outfile) - - return status, text - -def quote_arg(arg): - if arg[0]!='"' and ' ' in arg: - return '"%s"' % arg - return arg - -def _exec_command( command, use_shell=None, use_tee = None, **env ): - log.debug('_exec_command(...)') - - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - using_command = 0 - if use_shell: - # We use shell (unless use_shell==0) so that wildcards can be - # used. - sh = os.environ.get('SHELL','/bin/sh') - if is_sequence(command): - argv = [sh,'-c',' '.join(list(command))] - else: - argv = [sh,'-c',command] - else: - # On NT, DOS we avoid using command.com as it's exit status is - # not related to the exit status of a command. - if is_sequence(command): - argv = command[:] - else: - argv = splitcmdline(command) - - if hasattr(os,'spawnvpe'): - spawn_command = os.spawnvpe - else: - spawn_command = os.spawnve - argv[0] = find_executable(argv[0]) or argv[0] - if not os.path.isfile(argv[0]): - log.warn('Executable %s does not exist' % (argv[0])) - if os.name in ['nt','dos']: - # argv[0] might be internal command - argv = [os.environ['COMSPEC'],'/C'] + argv - using_command = 1 - - # sys.__std*__ is used instead of sys.std* because environments - # like IDLE, PyCrust, etc overwrite sys.std* commands. 
- so_fileno = sys.__stdout__.fileno() - se_fileno = sys.__stderr__.fileno() - so_flush = sys.__stdout__.flush - se_flush = sys.__stderr__.flush - so_dup = os.dup(so_fileno) - se_dup = os.dup(se_fileno) - - outfile = temp_file_name() - fout = open(outfile,'w') - if using_command: - errfile = temp_file_name() - ferr = open(errfile,'w') - - log.debug('Running %s(%s,%r,%r,os.environ)' \ - % (spawn_command.__name__,os.P_WAIT,argv[0],argv)) - - argv0 = argv[0] - if not using_command: - argv[0] = quote_arg(argv0) - - so_flush() - se_flush() - os.dup2(fout.fileno(),so_fileno) - if using_command: - #XXX: disabled for now as it does not work from cmd under win32. - # Tests fail on msys - os.dup2(ferr.fileno(),se_fileno) - else: - os.dup2(fout.fileno(),se_fileno) - try: - status = spawn_command(os.P_WAIT,argv0,argv,os.environ) - except OSError,errmess: - status = 999 - sys.stderr.write('%s: %s'%(errmess,argv[0])) - - so_flush() - se_flush() - os.dup2(so_dup,so_fileno) - os.dup2(se_dup,se_fileno) - - fout.close() - fout = open(outfile,'r') - text = fout.read() - fout.close() - os.remove(outfile) - - if using_command: - ferr.close() - ferr = open(errfile,'r') - errmess = ferr.read() - ferr.close() - os.remove(errfile) - if errmess and not status: - # Not sure how to handle the case where errmess - # contains only warning messages and that should - # not be treated as errors. - #status = 998 - if text: - text = text + '\n' - #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess) - text = text + errmess - print errmess - if text[-1:]=='\n': - text = text[:-1] - if status is None: - status = 0 - - if use_tee: - print text - - return status, text - - -def test_nt(**kws): - pythonexe = get_pythonexe() - echo = find_executable('echo') - using_cygwin_echo = echo != 'echo' - if using_cygwin_echo: - log.warn('Using cygwin echo in win32 environment is not supported') - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'AAA\',\'\')"') - assert s==0 and o=='',(s,o) - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'AAA\')"', - AAA='Tere') - assert s==0 and o=='Tere',(s,o) - - os.environ['BBB'] = 'Hi' - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi',(s,o) - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"', - BBB='Hey') - assert s==0 and o=='Hey',(s,o) - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi',(s,o) - elif 0: - s,o=exec_command('echo Hello') - assert s==0 and o=='Hello',(s,o) - - s,o=exec_command('echo a%AAA%') - assert s==0 and o=='a',(s,o) - - s,o=exec_command('echo a%AAA%',AAA='Tere') - assert s==0 and o=='aTere',(s,o) - - os.environ['BBB'] = 'Hi' - s,o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi',(s,o) - - s,o=exec_command('echo a%BBB%',BBB='Hey') - assert s==0 and o=='aHey', (s,o) - s,o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi',(s,o) - - s,o=exec_command('this_is_not_a_command') - assert s and o!='',(s,o) - - s,o=exec_command('type not_existing_file') - assert s and o!='',(s,o) - - s,o=exec_command('echo path=%path%') - assert s==0 and o!='',(s,o) - - s,o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \ - % pythonexe) - assert s==0 and o=='win32',(s,o) - - s,o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe) - assert s==1 and o,(s,o) - - s,o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\ 
- % pythonexe) - assert s==0 and o=='012',(s,o) - - s,o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe) - assert s==15 and o=='',(s,o) - - s,o=exec_command('%s -c "print \'Heipa\'"' % pythonexe) - assert s==0 and o=='Heipa',(s,o) - - print 'ok' - -def test_posix(**kws): - s,o=exec_command("echo Hello",**kws) - assert s==0 and o=='Hello',(s,o) - - s,o=exec_command('echo $AAA',**kws) - assert s==0 and o=='',(s,o) - - s,o=exec_command('echo "$AAA"',AAA='Tere',**kws) - assert s==0 and o=='Tere',(s,o) - - - s,o=exec_command('echo "$AAA"',**kws) - assert s==0 and o=='',(s,o) - - os.environ['BBB'] = 'Hi' - s,o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi',(s,o) - - s,o=exec_command('echo "$BBB"',BBB='Hey',**kws) - assert s==0 and o=='Hey',(s,o) - - s,o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi',(s,o) - - - s,o=exec_command('this_is_not_a_command',**kws) - assert s!=0 and o!='',(s,o) - - s,o=exec_command('echo path=$PATH',**kws) - assert s==0 and o!='',(s,o) - - s,o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws) - assert s==0 and o=='posix',(s,o) - - s,o=exec_command('python -c "raise \'Ignore me.\'"',**kws) - assert s==1 and o,(s,o) - - s,o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws) - assert s==0 and o=='012',(s,o) - - s,o=exec_command('python -c "import sys;sys.exit(15)"',**kws) - assert s==15 and o=='',(s,o) - - s,o=exec_command('python -c "print \'Heipa\'"',**kws) - assert s==0 and o=='Heipa',(s,o) - - print 'ok' - -def test_execute_in(**kws): - pythonexe = get_pythonexe() - tmpfile = temp_file_name() - fn = os.path.basename(tmpfile) - tmpdir = os.path.dirname(tmpfile) - f = open(tmpfile,'w') - f.write('Hello') - f.close() - - s,o = exec_command('%s -c "print \'Ignore the following IOError:\','\ - 'open(%r,\'r\')"' % (pythonexe,fn),**kws) - assert s and o!='',(s,o) - s,o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe,fn), - execute_in = tmpdir,**kws) - assert s==0 and o=='Hello',(s,o) - os.remove(tmpfile) - print 'ok' - -def test_svn(**kws): - s,o = exec_command(['svn','status'],**kws) - assert s,(s,o) - print 'svn ok' - -def test_cl(**kws): - if os.name=='nt': - s,o = exec_command(['cl','/V'],**kws) - assert s,(s,o) - print 'cl ok' - -if os.name=='posix': - test = test_posix -elif os.name in ['nt','dos']: - test = test_nt -else: - raise NotImplementedError,'exec_command tests for '+os.name - -############################################################ - -if __name__ == "__main__": - - test_splitcmdline() - test(use_tee=0) - test(use_tee=1) - test_execute_in(use_tee=0) - test_execute_in(use_tee=1) - test_svn(use_tee=1) - test_cl(use_tee=1) diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py deleted file mode 100644 index 2db62969e..000000000 --- a/numpy/distutils/extension.py +++ /dev/null @@ -1,74 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. 
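# Illustrative sketch, not from the patch: the overridden Extension accepts
# Fortran and .pyf signature files as sources; the package and file names
# here are hypothetical, Python 2 as in this tree.
from numpy.distutils.extension import Extension

ext = Extension('mypkg._flib',
                sources=['mypkg/flib.pyf', 'mypkg/flib.f90'])
print ext.has_f2py_sources()   # True: .pyf/.f90 match fortran_pyf_ext_re below
print ext.has_cxx_sources()    # False: no .cpp/.cxx/.cc sources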
-""" - -__revision__ = "$Id: extension.py,v 1.1 2005/04/09 19:29:34 pearu Exp $" - -from distutils.extension import Extension as old_Extension - -import re -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match - -class Extension(old_Extension): - def __init__ (self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - ): - old_Extension.__init__(self,name, [], - include_dirs, - define_macros, - undef_macros, - library_dirs, - libraries, - runtime_library_dirs, - extra_objects, - extra_compile_args, - extra_link_args, - export_symbols) - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - - return - - def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False - - def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False - -# class Extension diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 039d6fba8..000000000 --- a/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,952 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. -""" - -__all__ = ['FCompiler','new_fcompiler','show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -import new -try: - set -except NameError: - from sets import Set as set - -from distutils.sysconfig import get_config_var, get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, make_temp_file -from numpy.distutils.environment import EnvironmentConfig -from numpy.distutils.exec_command import find_executable - -__metaclass__ = type - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. 
- - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. - - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration descripition is - # (, , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropiate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool), - noarch = (None, None, 'noarch', str2bool), - debug = (None, None, 'debug', str2bool), - verbose = (None, None, 'verbose', str2bool), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None), - version_cmd = ('exe.version_cmd', None, None, None), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None), - archiver = (None, 'AR', 'ar', None), - ranlib = (None, 'RANLIB', 'ranlib', None), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist), - fix = ('flags.fix', None, None, flaglist), - opt = ('flags.opt', 'FOPT', 'opt', flaglist), - opt_f77 = ('flags.opt_f77', None, None, flaglist), - opt_f90 = ('flags.opt_f90', None, None, flaglist), - arch = ('flags.arch', 'FARCH', 'arch', flaglist), - arch_f77 = ('flags.arch_f77', None, None, flaglist), - arch_f90 = ('flags.arch_f90', None, None, flaglist), - debug = ('flags.debug', 'FDEBUG', 'fdebug', None, flaglist), - debug_f77 = ('flags.debug_f77', None, None, flaglist), - debug_f90 = ('flags.debug_f90', None, None, flaglist), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist), - ) - - language_map = {'.f':'f77', - '.for':'f77', - '.F':'f77', # XXX: needs preprocessor - '.ftn':'f77', - '.f77':'f77', - '.f90':'f90', - '.F90':'f90', # XXX: needs preprocessor - '.f95':'f90', - } - language_order = ['f90','f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - 
executables = { - 'version_cmd' : ["f77", "-v"], - 'compiler_f77' : ["f77"], - 'compiler_f90' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'linker_so' : ["f90", "-shared"], - 'linker_exe' : ["f90"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for','.ftn','.f77','.f','.f90','.f95','.F','.F90'] - obj_extension = ".o" - shared_lib_extension = get_config_var('SO') # or .dll - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. - self._is_customised = False - - def __copy__(self): - obj = new.instance(self.__class__, self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. 
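    # Recap of the module docstring: each value in self.executables is a
    # *command* -- a list such as ['g77', '-g', '-O3'] -- whose first element
    # is the *executable*.  The properties above expose those lists to
    # CCompiler, and assert that customize() has already run.  set_command()
    # below accepts either a list or a single string, which is split with
    # split_quoted().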
- def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. - - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropiate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overriden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(elf): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. 
- """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. """ - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - return CCompiler.get_version(self, force=force, ok_status=ok_status) - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). - """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77flags = self.flag_vars.f77 - if f90: - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. 
- fix = self.command_vars.compiler_fix - if fix: - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=[f77]+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=[fix]+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - for key in self.executables.keys() + \ - ['version','libraries','library_dirs', - 'object_switch','compile_switch']: - if hasattr(self,key): - v = getattr(self,key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print l - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if is_f_file(src) and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError, 'f90 not supported by %s needed for %s'\ - % (self.__class__.__name__,src) - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError, 'f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__,src) - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(),obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - extra_flags = src_flags.get(self.compiler_type,[]) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs - - display = '%s: %s' % 
(os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command,display=display) - except DistutilsExecError, msg: - raise CompileError, msg - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(),module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print 'XXX: module_build_dir=%r option ignored' % (module_build_dir) - print 'XXX: Fix module_dir_switch for ',self.__class__.__name__ - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print 'XXX: module_dirs=%r option ignored' % (module_dirs) - print 'XXX: Fix module_include_switch for ',self.__class__.__name__ - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError, "'output_dir' must be a string or None" - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(),output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError, msg: - raise LinkError, msg - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')), - ('cygwin.*', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')), - ('linux.*', ('gnu','intel','lahey','pg','absoft','nag','vast','compaq', - 'intele','intelem','gnu95','g95')), - ('darwin.*', ('nag', 'absoft', 'ibm', 'intel', 'gnu', 'gnu95', 'g95')), - ('sunos.*', ('sun','gnu','gnu95','g95')), - 
('irix.*', ('mips','gnu','gnu95',)), - ('aix.*', ('ibm','gnu','gnu95',)), - # os.name mappings - ('posix', ('gnu','gnu95',)), - ('nt', ('gnu','gnu95',)), - ('mac', ('gnu','gnu95',)), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' + module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. - """ - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound), e: - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print "For compiler details, run 'config_fc --verbose' setup command." - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]',re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - f = open(file,'r') - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - -def has_f90_header(src): - f = open(src,'r') - line = f.readline() - f.close() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)',re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. 
- """ - flags = {} - f = open(src,'r') - i = 0 - for line in f.readlines(): - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - f.close() - return flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index 96fc49e80..000000000 --- a/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,157 +0,0 @@ - -# http://www.absoft.com/literature/osxuserguide.pdf -# http://www.absoft.com/documentation.html - -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) - -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K","shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link','/PATH:"%s"' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math','fio','f77math','U77']) - else: - opt.extend(['fio','f90math','fmath','U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22','-N90','-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f','-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX", - "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX", - "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"]) - opt.extend(["-f","fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='absoft') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 7b60bfdae..000000000 --- a/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,115 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ - -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix': - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl","-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : ["ar", "-cr"], - 'ranlib' : 
["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore','-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g','-check bounds'] - def get_flags_opt(self): - return ['-O4','-align dcommons','-assume bigarrays', - '-assume nozsize','-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared','-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\ - ' Version (?P[^\s]*).*' - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError, msg: - print 'Ignoring "%s" (one should fix me in fcompiler/compaq.py)' % (msg) - except AttributeError, msg: - if '_MSVCCompiler__root' in str(msg): - print 'Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg) - else: - raise - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl","/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo','/MD','/WX','/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase','/assume:underscore'] - def get_flags_opt(self): - return ['/Ox','/fast','/optimize:5','/unroll:0','/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='compaq') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index 6a3545582..000000000 --- a/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,44 +0,0 @@ -# http://g95.sourceforge.net/ - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["","-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = G95FCompiler() - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index b281cb5c1..000000000 --- a/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,387 +0,0 @@ -import re -import os -import sys -import warnings - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import msvc_runtime_library - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77',) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - m = re.match(r'GNU Fortran', version_string) - if not m: - return None - m = re.match(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.match(r'GNU Fortran.*?([0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith('0') or v.startswith('2') or v.startswith('3'): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - # 'g77 --version' results - # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release) - # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian) - # GNU Fortran (GCC) 3.3.3 (Debian 20040401) - # GNU Fortran 0.5.25 20010319 (prerelease) - # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5) - # GNU Fortran (GCC) 3.4.2 (mingw-special) - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "--version"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - - suggested_f90_compiler = 'gnu95' - - #def get_linker_so(self): - # # win32 linking should be handled by standard linker - # # Darwin g77 cannot be used as a linker. 
- # #if re.match(r'(darwin)', sys.platform): - # # return - # return FCompiler.get_linker_so(self) - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform=='darwin': - # MACOSX_DEPLOYMENT_TARGET must be at least 10.3. This is - # a reasonable default value even when building on 10.4 when using - # the official Python distribution and those derived from it (when - # not broken). - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - if target is None or target == '': - target = '10.3' - major, minor = target.split('.') - if int(minor) < 3: - minor = '3' - warnings.warn('Environment variable ' - 'MACOSX_DEPLOYMENT_TARGET reset to %s.%s' % (major, minor)) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = '%s.%s' % (major, - minor) - - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." - opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - status, output = exec_command(self.compiler_f77 + - ['-print-libgcc-file-name'], - use_tee=0) - if not status: - return os.path.dirname(output) - return None - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - if not os.path.exists(os.path.join(d, 'libg2c.a')): - d2 = os.path.abspath(os.path.join(d, - '../../../../lib')) - if os.path.exists(os.path.join(d2, 'libg2c.a')): - opt.append(d2) - opt.append(d) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d,f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type=='msvc': - # the following code is not needed (read: breaks) when using MinGW - # in case want to link F77 compiled code with MSVC - opt.append('gcc') - runtime_lib = msvc_runtime_library() - if runtime_lib: - opt.append(runtime_lib) - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - if self.get_version()<='3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. - opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def get_flags_arch(self): - opt = [] - if sys.platform == 'darwin': - # Since Apple doesn't distribute a GNU Fortran compiler, we - # can't add -arch ppc or -arch i386, as only their version - # of the GNU compilers accepts those. 
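            # Instead, probe numpy.distutils.cpuinfo for the PowerPC model
            # and emit matching -mcpu/-mtune options: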
- for a in '601 602 603 603e 604 604e 620 630 740 7400 7450 750'\ - '403 505 801 821 823 860'.split(): - if getattr(cpu,'is_ppc%s'%a)(): - opt.append('-mcpu='+a) - opt.append('-mtune='+a) - break - return opt - - # default march options in case we find nothing better - if cpu.is_i686(): - march_opt = '-march=i686' - elif cpu.is_i586(): - march_opt = '-march=i586' - elif cpu.is_i486(): - march_opt = '-march=i486' - elif cpu.is_i386(): - march_opt = '-march=i386' - else: - march_opt = '' - - gnu_ver = self.get_version() - - if gnu_ver >= '0.5.26': # gcc 3.0 - if cpu.is_AthlonK6(): - march_opt = '-march=k6' - elif cpu.is_AthlonK7(): - march_opt = '-march=athlon' - - if gnu_ver >= '3.1.1': - if cpu.is_AthlonK6_2(): - march_opt = '-march=k6-2' - elif cpu.is_AthlonK6_3(): - march_opt = '-march=k6-3' - elif cpu.is_AthlonMP(): - march_opt = '-march=athlon-mp' - # there's also: athlon-tbird, athlon-4, athlon-xp - elif cpu.is_Nocona(): - march_opt = '-march=nocona' - elif cpu.is_Core2(): - march_opt = '-march=nocona' - elif cpu.is_Xeon() and cpu.is_64bit(): - march_opt = '-march=nocona' - elif cpu.is_Prescott(): - march_opt = '-march=prescott' - elif cpu.is_PentiumIV(): - march_opt = '-march=pentium4' - elif cpu.is_PentiumIII(): - march_opt = '-march=pentium3' - elif cpu.is_PentiumM(): - march_opt = '-march=pentium3' - elif cpu.is_PentiumII(): - march_opt = '-march=pentium2' - - if gnu_ver >= '3.4': - # Actually, I think these all do the same things - if cpu.is_Opteron(): - march_opt = '-march=opteron' - elif cpu.is_Athlon64(): - march_opt = '-march=athlon64' - elif cpu.is_AMD64(): - march_opt = '-march=k8' - - if gnu_ver >= '3.4.4': - if cpu.is_PentiumM(): - march_opt = '-march=pentium-m' - # Future: - # if gnu_ver >= '4.3': - # if cpu.is_Core2(): - # march_opt = '-march=core2' - - # Note: gcc 3.2 on win32 has breakage with -march specified - if '3.1.1' <= gnu_ver <= '3.4' and sys.platform=='win32': - march_opt = '' - - if march_opt: - opt.append(march_opt) - - # other CPU flags - if gnu_ver >= '3.1.1': - if cpu.has_mmx(): opt.append('-mmmx') - if cpu.has_3dnow(): opt.append('-m3dnow') - - if gnu_ver > '3.2.2': - if cpu.has_sse2(): opt.append('-msse2') - if cpu.has_sse(): opt.append('-msse') - if gnu_ver >= '3.4': - if cpu.has_sse3(): opt.append('-msse3') - if cpu.is_Intel(): - opt.append('-fomit-frame-pointer') - if cpu.is_32bit(): - opt.append('-malign-double') - return opt - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran',) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - return v[1] - - # 'gfortran --version' results: - # XXX is the below right? 
- # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3)) - # GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21) - # OS X: GNU Fortran 95 (GCC) 4.1.0 - # GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental) - # GNU Fortran (GCC) 4.3.0 20070316 (experimental) - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : [None, "-Wall", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"], - 'compiler_fix' : [None, "-Wall", "-ffixed-form", - "-fno-second-underscore"], - 'linker_so' : ["", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - # use -mno-cygwin flag for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - module_dir_switch = '-J' - module_include_switch = '-I' - - g2c = 'gfortran' - - # Note that this is here instead of GnuFCompiler as gcc < 4 uses a - # different output format (which isn't as useful) than gcc >= 4, - # and we don't have to worry about g77 being universal (as it can't be). - def target_architecture(self, extra_opts=()): - """Return the architecture that the compiler will build for. - This is most useful for detecting universal compilers in OS X.""" - extra_opts = list(extra_opts) - status, output = exec_command(self.compiler_f90 + ['-v'] + extra_opts, - use_tee=False) - if status == 0: - m = re.match(r'(?m)^Target: (.*)$', output) - if m: - return m.group(1) - return None - - def is_universal_compiler(self): - """Return True if this compiler can compile universal binaries - (for OS X). - - Currently only checks for i686 and powerpc architectures (no 64-bit - support yet). 
- """ - if sys.platform != 'darwin': - return False - i686_arch = self.target_architecture(extra_opts=['-arch', 'i686']) - if not i686_arch or not i686_arch.startswith('i686-'): - return False - ppc_arch = self.target_architecture(extra_opts=['-arch', 'ppc']) - if not ppc_arch or not ppc_arch.startswith('powerpc-'): - return False - return True - - def _add_arches_for_universal_build(self, flags): - if self.is_universal_compiler(): - flags[:0] = ['-arch', 'i686', '-arch', 'ppc'] - return flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - return self._add_arches_for_universal_build(flags) - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - return self._add_arches_for_universal_build(flags) - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = GnuFCompiler() - compiler.customize() - print compiler.get_version() - raw_input('Press ENTER to continue...') - try: - compiler = Gnu95FCompiler() - compiler.customize() - print compiler.get_version() - except Exception, msg: - print msg - raw_input('Press ENTER to continue...') diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 7fe145f03..000000000 --- a/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,40 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : None, - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+pic=long'] - def get_flags(self): - return self.pic_flags + ['+ppu'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_version(self, force=0, ok_status=[256,0]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. 
- return FCompiler.get_version(self,force,ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='hpux') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index cf1ee1a8d..000000000 --- a/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import re -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import exec_command, find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V)(?P[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - s,o = exec_command(lslpp + ' -Lc xlfcmp') - m = re.search('xlfcmp:(?P\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = os.listdir(xlf_dir) - l.sort() - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir,d,'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0,40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - fi = open(xlf_cfg,'r') - crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match - for line in fi.readlines(): - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fi.close() - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O5'] - -if __name__ == '__main__': - log.set_verbosity(2) - compiler = IBMFCompiler() - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index ba93538ac..000000000 --- a/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,248 +0,0 @@ -# -*- encoding: iso-8859-1 -*- -# above encoding b/c there's a non-ASCII character in the sample output -# of 
intele -# http://developer.intel.com/software/products/compilers/flin/ - -import sys - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64TFCompiler'] - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! - module_include_switch = '-I' - - def get_flags(self): - v = self.get_version() - if v >= '10.0': - # Use -fPIC instead of -KPIC. - pic_flags = ['-fPIC'] - else: - pic_flags = ['-KPIC'] - opt = pic_flags + ["-cm"] - return opt - - def get_flags_free(self): - return ["-FR"] - - def get_flags_opt(self): - return ['-O3','-unroll'] - - def get_flags_arch(self): - v = self.get_version() - opt = [] - if cpu.has_fdiv_bug(): - opt.append('-fdiv_check') - if cpu.has_f00f_bug(): - opt.append('-0f_check') - if cpu.is_PentiumPro() or cpu.is_PentiumII() or cpu.is_PentiumIII(): - opt.extend(['-tpp6']) - elif cpu.is_PentiumM(): - opt.extend(['-tpp7','-xB']) - elif cpu.is_Pentium(): - opt.append('-tpp5') - elif cpu.is_PentiumIV() or cpu.is_Xeon(): - opt.extend(['-tpp7','-xW']) - if v and v <= '7.1': - if cpu.has_mmx() and (cpu.is_PentiumII() or cpu.is_PentiumIII()): - opt.append('-xM') - elif v and v >= '8.0': - if cpu.is_PentiumIII(): - opt.append('-xK') - if cpu.has_sse3(): - opt.extend(['-xP']) - elif cpu.is_PentiumIV(): - opt.append('-xW') - if cpu.has_sse2(): - opt.append('-xN') - elif cpu.is_PentiumM(): - opt.extend(['-xB']) - if (cpu.is_Xeon() or cpu.is_Core2() or cpu.is_Core2Extreme()) and cpu.getNCPUs()==2: - opt.extend(['-xT']) - if cpu.has_sse3() and (cpu.is_PentiumIV() or cpu.is_CoreDuo() or cpu.is_CoreSolo()): - opt.extend(['-xP']) - - if cpu.has_sse2(): - opt.append('-arch SSE2') - elif cpu.has_sse(): - opt.append('-arch SSE') - return opt - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - -#Intel(R) Fortran Itanium(R) Compiler for Itanium(R)-based applications -#Version 9.1    Build 20060928 Package 
ID: l_fc_c_9.1.039 -#Copyright (C) 1985-2006 Intel Corporation.  All rights reserved. -#30 DAY EVALUATION LICENSE - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for EM64T-based apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_arch(self): - opt = [] - if cpu.is_PentiumIV() or cpu.is_Xeon(): - opt.extend(['-tpp7', '-xW']) - return opt - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - ar_exe = 'lib.exe' - possible_executables = ['ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None,"-FI","-w90","-w95"], - 'compiler_fix' : [None,"-FI","-4L72","-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' #No space after /Fo! - library_switch = '/OUT:' #No space after /OUT:! 
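    # (No trailing space on object_switch/library_switch: FCompiler._compile()
    #  and link() then concatenate switch and file name into one argument,
    #  e.g. '/Fofoo.obj' and '/OUT:foo.lib', instead of the two-argument
    #  '-o', 'foo.o' form produced by the Unix-style defaults.)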
- module_dir_switch = '/module:' #No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo','/MD','/nbs','/Qlowercase','/us'] - return opt - - def get_flags_free(self): - return ["-FR"] - - def get_flags_debug(self): - return ['/4Yb','/d2'] - - def get_flags_opt(self): - return ['/O3','/Qip','/Qipo','/Qipo_obj'] - - def get_flags_arch(self): - opt = [] - if cpu.is_PentiumPro() or cpu.is_PentiumII(): - opt.extend(['/G6','/Qaxi']) - elif cpu.is_PentiumIII(): - opt.extend(['/G6','/QaxK']) - elif cpu.is_Pentium(): - opt.append('/G5') - elif cpu.is_PentiumIV(): - opt.extend(['/G7','/QaxW']) - if cpu.has_mmx(): - opt.append('/QaxM') - return opt - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None,"-FI","-w90","-w95"], - 'compiler_fix' : [None,"-FI","-4L72","-w"], - 'compiler_f90' : [None], - 'linker_so' : ['',"-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='intel') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index 68e56ddbb..000000000 --- a/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95","-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g','--chk','--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d,'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='lahey') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index ce5f7f439..000000000 --- a/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,56 +0,0 @@ -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' 
: ["f90"], - 'linker_so' : ["f90","-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu,'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='mips') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 478fb7563..000000000 --- a/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,43 +0,0 @@ -import sys -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler'] - -class NAGFCompiler(FCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - version_pattern = r'NAGWare Fortran 95 compiler Release (?P[^\s]*)' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform=='darwin': - return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - version = self.get_version() - if version < '5.1': - return ['-target=native'] - else: - return [''] - def get_flags_debug(self): - return ['-g','-gline','-g90','-nan','-C'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='nag') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py deleted file mode 100644 index bf3d3b167..000000000 --- a/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,30 +0,0 @@ - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77' : None, - 'compiler_f90' : None, - 'compiler_fix' : None, - 'linker_so' : None, - 'linker_exe' : None, - 'archiver' : None, - 'ranlib' : None, - 'version_cmd' : None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = NoneFCompiler() - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 8ce77abc0..000000000 --- a/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,41 +0,0 @@ - -# http://www.pgroup.com - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PGroupFCompiler'] - -class 
PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf) (?P[\d.-]+).*' - - executables = { - 'version_cmd' : ["", "-V 2>/dev/null"], - 'compiler_f77' : ["pgf77"], - 'compiler_fix' : ["pgf90", "-Mfixed"], - 'compiler_f90' : ["pgf90"], - 'linker_so' : ["pgf90","-shared","-fpic"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform','-Mnosecond_underscore'] - return self.pic_flags + opt - def get_flags_opt(self): - return ['-fast'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pg') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index 20fda0e99..000000000 --- a/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,50 +0,0 @@ -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = ['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["","-Bdynamic","-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast','-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu','sunmath','mvec','f77compat']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='sun') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index 00ce2f146..000000000 --- a/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = r'\s*Pacific-Sierra Research vf90 '\ - '(Personal|Professional)\s+(?P[^\s]*)' - - # VAST f90 does not support -o with -c. 
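Every Fortran compiler class removed in this patch follows the same template: subclass FCompiler, declare a compiler_type, a version regex, an executables table, and override the get_flags_* hooks. The sketch below is a hedged, hypothetical 'acme' compiler written only to make that shape explicit; it is not one of the classes being deleted here, and the executable names and flags are invented.

    from numpy.distutils.fcompiler import FCompiler

    compilers = ['AcmeFCompiler']

    class AcmeFCompiler(FCompiler):
        # selected on the command line via --fcompiler=acme
        compiler_type = 'acme'
        description = 'Acme Fortran 95 Compiler (hypothetical)'
        version_pattern = r'Acme Fortran (?P<version>[\d.]+)'

        executables = {
            'version_cmd'  : ['acmef95', '-V'],
            'compiler_f77' : ['acmef95', '-fixed'],
            'compiler_fix' : ['acmef95', '-fixed'],
            'compiler_f90' : ['acmef95'],
            'linker_so'    : ['acmef95', '-shared'],
            'archiver'     : ['ar', '-cr'],
            'ranlib'       : ['ranlib'],
        }
        module_dir_switch = '-module '
        module_include_switch = '-I'

        def get_flags_opt(self):
            return ['-O2']

        def get_flags_debug(self):
            return ['-g']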
So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='vast') - compiler.customize() - print compiler.get_version() diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py deleted file mode 100644 index 2f900b566..000000000 --- a/numpy/distutils/from_template.py +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/python -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separeted words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '
<p>
' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - , a short form of the named, useful when no
<p>
appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" - -__all__ = ['process_str','process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b',re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while 1: - m = routine_start_re.search(astr,ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr,start,m.end()): - while 1: - i = astr.rfind('\n',ind,start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr,m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start,end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace('\,','@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = adict.keys() - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr,names): - substr = substr.replace('\>','@rightarrow@') - substr = substr.replace('\<','@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>",substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace('\,','@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r,names.get(r,None)) - if thelist is None: - raise ValueError,'No replicates found for <%s>' % (r) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@',',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif 
num == numsubs: - rules[r] = rule - else: - print "Mismatch in number of replacements (base <%s=%s>)"\ - " for <%s=%s>. Ignoring." % (base_rule, - ','.join(rules[base_rule]), - r,thelist) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name,(k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@','>') - newstr = newstr.replace('@leftarrow@','<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' #_head # using _head will break free-format files - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - writestr += newstr[oldend:sub[0]] - names.update(find_repl_patterns(newstr[oldend:sub[0]])) - writestr += expand_sub(newstr[sub[0]:sub[1]],names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+[.]src)['\"]",re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - fid = open(source) - lines = [] - for line in fid.readlines(): - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d,fn) - if os.path.isfile(fn): - print 'Including file',fn - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - fid.close() - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -if __name__ == "__main__": - - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file,'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname,'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) diff --git a/numpy/distutils/info.py b/numpy/distutils/info.py deleted file mode 100644 index 3d27a8092..000000000 --- a/numpy/distutils/info.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -Enhanced distutils with Fortran compilers support and more. -""" - -postpone_import = True diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py deleted file mode 100644 index e03c5beba..000000000 --- a/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,29 +0,0 @@ - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable - -class IntelCCompiler(UnixCCompiler): - - """ A modified Intel compiler compatible with an gcc built Python. - """ - - compiler_type = 'intel' - cc_exe = 'icc' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose,dry_run, force) - compiler = self.cc_exe - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - linker_exe=compiler, - linker_so=compiler + ' -shared') - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). 
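As a hedged illustration of the '<..>' template rules documented in the from_template docstring above (my own example, not part of the module), the snippet below expands a single subroutine block using the predefined <_c>/<_t> rules; exact whitespace in the output may differ.

    from numpy.distutils.from_template import process_str

    src = """
          subroutine <_c>copy(n, x, y)
          <_t> x(n), y(n)
          end subroutine <_c>copy
    """
    # <_c> and <_t> are predefined with four entries each, so the block is
    # replicated four times: scopy/dcopy/ccopy/zcopy with matching types.
    print(process_str(src))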
- for cc_exe in map(find_executable,['icc','ecc']): - if cc_exe: - break diff --git a/numpy/distutils/interactive.py b/numpy/distutils/interactive.py deleted file mode 100644 index bc741254d..000000000 --- a/numpy/distutils/interactive.py +++ /dev/null @@ -1,187 +0,0 @@ -import os -import sys -from pprint import pformat - -__all__ = ['interactive_sys_argv'] - -def show_information(*args): - print 'Python',sys.version - for a in ['platform','prefix','byteorder','path']: - print 'sys.%s = %s' % (a,pformat(getattr(sys,a))) - for a in ['name']: - print 'os.%s = %s' % (a,pformat(getattr(os,a))) - if hasattr(os,'uname'): - print 'system,node,release,version,machine = ',os.uname() - -def show_environ(*args): - for k,i in os.environ.items(): - print ' %s = %s' % (k, i) - -def show_fortran_compilers(*args): - from fcompiler import show_fcompilers - show_fcompilers() - -def show_compilers(*args): - from distutils.ccompiler import show_compilers - show_compilers() - -def show_tasks(argv,ccompiler,fcompiler): - print """\ - -Tasks: - i - Show python/platform/machine information - ie - Show environment information - c - Show C compilers information - c - Set C compiler (current:%s) - f - Show Fortran compilers information - f - Set Fortran compiler (current:%s) - e - Edit proposed sys.argv[1:]. - -Task aliases: - 0 - Configure - 1 - Build - 2 - Install - 2 - Install with prefix. - 3 - Inplace build - 4 - Source distribution - 5 - Binary distribution - -Proposed sys.argv = %s - """ % (ccompiler, fcompiler, argv) - - -from exec_command import splitcmdline - -def edit_argv(*args): - argv = args[0] - readline = args[1] - if readline is not None: - readline.add_history(' '.join(argv[1:])) - try: - s = raw_input('Edit argv [UpArrow to retrive %r]: ' % (' '.join(argv[1:]))) - except EOFError: - return - if s: - argv[1:] = splitcmdline(s) - return - -def interactive_sys_argv(argv): - print '='*72 - print 'Starting interactive session' - print '-'*72 - - readline = None - try: - try: - import readline - except ImportError: - pass - else: - import tempfile - tdir = tempfile.gettempdir() - username = os.environ.get('USER',os.environ.get('USERNAME','UNKNOWN')) - histfile = os.path.join(tdir,".pyhist_interactive_setup-" + username) - try: - try: readline.read_history_file(histfile) - except IOError: pass - import atexit - atexit.register(readline.write_history_file, histfile) - except AttributeError: pass - except Exception, msg: - print msg - - task_dict = {'i':show_information, - 'ie':show_environ, - 'f':show_fortran_compilers, - 'c':show_compilers, - 'e':edit_argv, - } - c_compiler_name = None - f_compiler_name = None - - while 1: - show_tasks(argv,c_compiler_name, f_compiler_name) - try: - task = raw_input('Choose a task (^D to quit, Enter to continue with setup): ').lower() - except EOFError: - print - task = 'quit' - if task=='': break - if task=='quit': sys.exit() - task_func = task_dict.get(task,None) - if task_func is None: - if task[0]=='c': - c_compiler_name = task[1:] - if c_compiler_name=='none': - c_compiler_name = None - continue - if task[0]=='f': - f_compiler_name = task[1:] - if f_compiler_name=='none': - f_compiler_name = None - continue - if task[0]=='2' and len(task)>1: - prefix = task[1:] - task = task[0] - else: - prefix = None - if task == '4': - argv[1:] = ['sdist','-f'] - continue - elif task in '01235': - cmd_opts = {'config':[],'config_fc':[], - 'build_ext':[],'build_src':[], - 'build_clib':[]} - if c_compiler_name is not None: - c = '--compiler=%s' % (c_compiler_name) - 
cmd_opts['config'].append(c) - if task != '0': - cmd_opts['build_ext'].append(c) - cmd_opts['build_clib'].append(c) - if f_compiler_name is not None: - c = '--fcompiler=%s' % (f_compiler_name) - cmd_opts['config_fc'].append(c) - if task != '0': - cmd_opts['build_ext'].append(c) - cmd_opts['build_clib'].append(c) - if task=='3': - cmd_opts['build_ext'].append('--inplace') - cmd_opts['build_src'].append('--inplace') - conf = [] - sorted_keys = ['config','config_fc','build_src', - 'build_clib','build_ext'] - for k in sorted_keys: - opts = cmd_opts[k] - if opts: conf.extend([k]+opts) - if task=='0': - if 'config' not in conf: - conf.append('config') - argv[1:] = conf - elif task=='1': - argv[1:] = conf+['build'] - elif task=='2': - if prefix is not None: - argv[1:] = conf+['install','--prefix=%s' % (prefix)] - else: - argv[1:] = conf+['install'] - elif task=='3': - argv[1:] = conf+['build'] - elif task=='5': - if sys.platform=='win32': - argv[1:] = conf+['bdist_wininst'] - else: - argv[1:] = conf+['bdist'] - else: - print 'Skipping unknown task:',`task` - else: - print '-'*68 - try: - task_func(argv,readline) - except Exception,msg: - print 'Failed running task %s: %s' % (task,msg) - break - print '-'*68 - print - - print '-'*72 - return argv diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py deleted file mode 100644 index 583f244c0..000000000 --- a/numpy/distutils/lib2def.py +++ /dev/null @@ -1,113 +0,0 @@ -import re -import sys -import os - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). - -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = 'nm -Cs' - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print "I'm assuming that your first argument is the library" - print "and the second is the DEF file." - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd = 'nm -Cs python%s.lib' % py_ver): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" - f = os.popen(nm_cmd) - nm_output = f.read() - f.close() - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. 
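A hedged usage sketch of the nm-to-DEF pipeline the lib2def docstring describes; the .lib/.def file names below are placeholders and nm(1) must be available on the PATH.

    from numpy.distutils import lib2def

    # Dump the symbols, split them into data vs. function lists, then write
    # the DEF file. DEFAULT_NM is 'nm -Cs'.
    nm_output = lib2def.getnm('%s python25.lib' % lib2def.DEFAULT_NM)
    dlist, flist = lib2def.parse_nm(nm_output)
    lib2def.output_def(dlist, flist, lib2def.DEF_HEADER,
                       open('python25.def', 'w'))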
- -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = '%s %s' % (DEFAULT_NM, libfile) - nm_output = getnm(nm_cmd) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py deleted file mode 100644 index 4e6c1f38e..000000000 --- a/numpy/distutils/line_endings.py +++ /dev/null @@ -1,74 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings -""" - -import sys, re, os - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print file, "Directory!" - return - - data = open(file, "rb").read() - if '\0' in data: - print file, "Binary!" - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print 'dos2unix:', file - f = open(file, "wb") - f.write(newdata) - f.close() - return file - else: - print file, 'ok' - -def dos2unix_one_dir(modified_files,dir_name,file_names): - for file in file_names: - full_path = os.path.join(dir_name,file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name,dos2unix_one_dir,modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." - if os.path.isdir(file): - print file, "Directory!" - return - - data = open(file, "rb").read() - if '\0' in data: - print file, "Binary!" - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print 'unix2dos:', file - f = open(file, "wb") - f.write(newdata) - f.close() - return file - else: - print file, 'ok' - -def unix2dos_one_dir(modified_files,dir_name,file_names): - for file in file_names: - full_path = os.path.join(dir_name,file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name,unix2dos_one_dir,modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py deleted file mode 100644 index 403df0ac1..000000000 --- a/numpy/distutils/log.py +++ /dev/null @@ -1,73 +0,0 @@ -# Colored log, requires Python 2.3 or up. 
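A hedged usage sketch of the numpy.distutils.log wrapper defined here; the messages are invented, and the colors are emitted only when the terminal supports them.

    from numpy.distutils import log

    log.set_verbosity(1)                        # show INFO and above
    log.info('compiling %s', 'source.c')        # yellow when colors are available
    log.good('found a usable %s compiler', 'Fortran')   # green "anti-warning"
    log.warn('falling back to %s', 'defaults')  # red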
- -import sys -from distutils.log import * -from distutils.log import Log as old_Log -from distutils.log import _global_log -from misc_util import red_text, yellow_text, cyan_text, green_text, is_sequence, is_string - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%','%%') - if flag and is_sequence(args): - return tuple([_fix_args(a,flag=0) for a in args]) - return args - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - print _global_color_map[level](msg % _fix_args(args)) - else: - print _global_color_map[level](msg) - sys.stdout.flush() - - def good(self, msg, *args): - """If we'd log WARN messages, log this message as a 'nice' anti-warn - message. - """ - if WARN >= self.threshold: - if args: - print green_text(msg % _fix_args(args)) - else: - print green_text(msg) - sys.stdout.flush() -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting thershold to DEBUG level, it can be changed only with force argument') - else: - info('set_threshold: not changing thershold from DEBUG level %s to %s' % (prev_level,level)) - return prev_level - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1) - -_global_color_map = { - DEBUG:cyan_text, - INFO:yellow_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. -set_verbosity(0, force=True) diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index 91a80a626..000000000 --- a/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" - -import os -import sys -import log - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler - -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.version import StrictVersion -from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options -from distutils.errors import DistutilsExecError, CompileError, UnknownFileError - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.misc_util import msvc_runtime_library - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. 
- - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, - verbose,dry_run, force) - - # we need to support 3.2 which doesn't match the standard - # get_versions methods regex - if self.gcc_version is None: - import re - out = os.popen('gcc -dumpversion','r') - out_string = out.read() - out.close() - result = re.search('(\d+\.\d+)',out_string) - if result: - self.gcc_version = StrictVersion(result.group(1)) - - # A real mingw32 doesn't need to specify a different entry point, - # but cygwin 2.91.57 in no-cygwin-mode needs it. - if self.gcc_version <= "2.91.57": - entry_point = '--entry _DllMain@12' - else: - entry_point = '' - - if self.linker_dll == 'dllwrap': - # Commented out '--driver-name g++' part that fixes weird - # g++.exe: g++: No such file or directory - # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). - # If the --driver-name part is required for some environment - # then make the inclusion of this part specific to that environment. - self.linker = 'dllwrap' # --driver-name g++' - elif self.linker_dll == 'gcc': - self.linker = 'g++' - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # **changes: eric jones 4/11/01 - # 2. increased optimization and turned off all warnings - # 3. also added --driver-name g++ - #self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - # compiler_so='gcc -mno-cygwin -mdll -O2 -w', - # linker_exe='gcc -mno-cygwin', - # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' - # % (self.linker, entry_point)) - if self.gcc_version <= "3.0.0": - self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' - % (self.linker, entry_point)) - else: - self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished - # dlls need another dll (mingwm10.dll see Mingw32 docs) - # (-mthreads: Support thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropiate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - if self.gcc_version < "3.0.0": - func = distutils.cygwinccompiler.CygwinCCompiler.link - else: - func = UnixCCompiler.link - func(*args[:func.im_func.func_code.co_argcount]) - return - - def object_filenames (self, - source_filenames, 
- strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv,base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc','.res']): - raise UnknownFileError, \ - "unknown file type '%s' (from '%s')" % \ - (ext, src_name) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def build_import_library(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - if os.name != 'nt': - return - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix,'libs',lib_name) - out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) - out_file = os.path.join(sys.prefix,'libs',out_name) - if not os.path.isfile(lib_file): - log.warn('Cannot build import library: "%s" not found' % (lib_file)) - return - if os.path.isfile(out_file): - log.debug('Skip building import library: "%s" exists' % (out_file)) - return - log.info('Building import library: "%s"' % (out_file)) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix,'libs',def_name) - nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) - nm_output = lib2def.getnm(nm_cmd) - dlist, flist = lib2def.parse_nm(nm_output) - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) - - dll_name = "python%d%d.dll" % tuple(sys.version_info[:2]) - args = (dll_name,def_file,out_file) - cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args - status = os.system(cmd) - # for now, fail silently - if status: - log.warn('Failed to build import library for gcc. Linking will fail.') - #if not success: - # msg = "Couldn't find import library, and failed to build it." - # raise DistutilsPlatformError, msg - return diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py deleted file mode 100644 index 79fdd46ec..000000000 --- a/numpy/distutils/misc_util.py +++ /dev/null @@ -1,1530 +0,0 @@ -import os -import re -import sys -import imp -import copy -import glob -import atexit -import tempfile - -try: - set -except NameError: - from sets import Set as set - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32','mingw32','all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath','njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture'] - -def quote_args(args): - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. 
- args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." - splitted = name.split('/') - return os.path.join(*splitted) - -def rel_path(path, parent_path): - """Return path relative to parent_path. - """ - pd = os.path.abspath(parent_path) - apath = os.path.abspath(path) - if len(apath)= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception,msg: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(fg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path): - if sys.platform=='cygwin' and path.startswith('/cygdrive'): - path = path[10] + ':' + os.path.normcase(path[11:]) - return path - -def mingw32(): - """Return true when using mingw32 environment. - """ - if sys.platform=='win32': - if os.environ.get('OSTYPE','')=='msys': - return True - if os.environ.get('MSYSTEM','')=='MINGW32': - return True - return False - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = sys.version[msc_pos+6:msc_pos+10] - lib = {'1300' : 'msvcr70', # MSVC 7.0 - '1310' : 'msvcr71', # MSVC 7.1 - '1400' : 'msvcr80', # MSVC 8 - }.get(msc_ver, None) - else: - lib = None - return lib - -def msvc_on_amd64(): - if not (sys.platform=='win32' or os.name=='nt'): - return - if get_build_architecture() != 'AMD64': - return - if 'DISTUTILS_USE_SDK' in os.environ: - return - # try to avoid _MSVCCompiler__root attribute error - print 'Forcing DISTUTILS_USE_SDK=1' - os.environ['DISTUTILS_USE_SDK']='1' - return - -######################### - -#XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)',re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - f = open(source,'r') - f_readlines = getattr(f,'xreadlines',f.readlines) - for line in f_readlines(): - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? 
- f.close() - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' is s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. - """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = glob.glob(os.path.join(d,"*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. - """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(),abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. 
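A hedged illustration of the source-classification helpers defined above (has_f_sources, has_cxx_sources, filter_sources); only non-.f90 names are used because filter_sources() opens .f90/.f95 files to look for module statements.

    from numpy.distutils.misc_util import (has_f_sources, has_cxx_sources,
                                           filter_sources)

    sources = ['wrap.c', 'speed.cc', 'legacy.f']
    print(has_f_sources(sources))    # True  -- 'legacy.f' is Fortran
    print(has_cxx_sources(sources))  # True  -- 'speed.cc' is C++
    print(filter_sources(sources))   # (['wrap.c'], ['speed.cc'], ['legacy.f'], [])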
- """ - pruned_directories = ['CVS','.svn','build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath,f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath,f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. - filenames = [] - sources = filter(is_string, ext.sources) - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = filter(is_string, scripts) - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources',[]) - sources = filter(is_string, sources) - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends',[]) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if callable(s): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print 'Not existing data file:',s - else: - raise TypeError,repr(s) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - -###################### - -class Configuration(object): - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules'] - _dict_keys = ['package_dir'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. 
- """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path,package_path)): - package_path = njoin(self.local_path,package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self,n,a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path,'__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1,3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self',f.f_globals,f.f_locals) - break - except NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - def todict(self): - """Return configuration distionary suitable for passing - to distutils.core.setup() function. - """ - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self,n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print message - - def warn(self, message): - print>>sys.stderr, blue_text('Warning: %s' % (message,)) - - def set_options(self, **options): - """Configure Configuration instance. 
- - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError,'Unknown option: '+key - - def get_distribution(self): - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = filter(os.path.isdir,glob.glob(subpackage_path)) - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d,'__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0,os.path.dirname(setup_py)) - try: - fo_setup_py = open(setup_py, 'U') - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name,subpackage_name,setup_name) - setup_module = imp.load_module('_'.join(n.split('.')), - fo_setup_py, - setup_py, - ('.py', 'U', 1)) - fo_setup_py.close() - if not hasattr(setup_module,'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - if setup_module.configuration.func_code.co_argcount > 1: - args = args + (self.top_path,) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name,subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name,subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - '*' in subpackage_name is handled as a wildcard. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name,`subpackage_name, subpackage_path,parent_name` - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, 'setup.py') - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add subpackage to configuration. - """ - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name,subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d,dict),`type(d)` - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self,data_path): - """Recursively add files under data_path to data_files list. - Argument can be either - - 2-sequence (,) - - path to data directory where python datadir suffix defaults - to package dir. 
- - Rules for installation paths: - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d,p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = range(len(pattern_list)-1); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print 'Not a directory, skipping',path - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError,'cannot fill pattern %r with %r' \ - % (d, path) - target_list.append(path_list[i]) - else: - assert s==path_list[i],`s,path_list[i],data_path,d,path,rpath` - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list,path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list),path)) - else: - for path in paths: - self.add_data_dir((d,path)) - return - assert not is_glob_pattern(d),`d` - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1,f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package,d,d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p,files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - map(data_dict[p].add,files) - self.data_files[:] = [(p,list(files)) for p,files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - Argument(s) can be either - - 2-sequence (,) - - paths to data files where python datadir prefix defaults - to package dir. 
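A hedged illustration of the add_data_dir() rules above, assuming a Configuration instance named config for a hypothetical package 'mypkg':

    config.add_data_dir('tests')                  # mypkg/tests/** -> installed under mypkg/tests
    config.add_data_dir(('docs', '../doc/html'))  # ../doc/html/** -> installed under mypkg/docs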
- - Rules for installation paths: - file.txt -> (., file.txt)-> parent/file.txt - foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - *.txt -> parent/a.txt, parent/b.txt - foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt - */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt - (sun, file.txt) -> parent/sun/file.txt - (sun, bar/file.txt) -> parent/sun/file.txt - (sun, /foo/bar/file.txt) -> parent/sun/file.txt - (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt - (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt - (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt - """ - - if len(files)>1: - map(self.add_data_files, files) - return - assert len(files)==1 - if is_sequence(files[0]): - d,files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d,f)) - return - else: - raise TypeError,`type(files)` - - if d is None: - if callable(filepat): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d,files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d,paths)) - return - assert not is_glob_pattern(d),`d,filepat` - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package,d),paths)) - - ### XXX Implement add_py_modules - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_numarray_include_dirs(self): - import numpy.numarray.util as nnu - self.add_include_dirs(*nnu.get_numarray_include_dirs()) - - def add_headers(self,*files): - """Add installable headers to configuration. - Argument(s) can be either - - 2-sequence (,) - - path(s) to header file(s) where python includedir suffix will default - to package name. - """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name,p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0],p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. 
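A hedged illustration of the add_data_files() rules above, again assuming a hypothetical config for package 'mypkg':

    config.add_data_files('mypkg.pxd')             # -> installed as mypkg/mypkg.pxd
    config.add_data_files(('include', 'src/*.h'))  # headers -> installed under mypkg/include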
- """ - include_non_existing = kws.get('include_non_existing',True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self,kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources','depends','include_dirs','library_dirs', - 'module_dirs','extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Keywords: - include_dirs, define_macros, undef_macros, - library_dirs, libraries, runtime_library_dirs, - extra_objects, extra_compile_args, extra_link_args, - export_symbols, swig_opts, depends, language, - f2py_options, module_dirs - extra_info - dict or list of dict of keywords to be - appended to keywords. - """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name,name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries',[]) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname,tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname,lpath = libname.split('@',1) - lpath = os.path.abspath(njoin(self.local_path,lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None,lpath, - caller_level = 2) - if isinstance(c,Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries',[])]: - llname = l.split('__OF__',1)[0] - if llname == lname: - c.pop('name',None) - dict_append(ext_args,**c) - break - continue - libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """Add library to configuration. - - Valid keywords for build_info: - depends - macros - include_dirs - extra_compiler_args - f2py_options - language - """ - build_info = copy.copy(build_info) - name = name #+ '__OF__' + self.name - build_info['sources'] = sources - - self._fix_paths_dict(build_info) - - self.libraries.append((name,build_info)) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def add_scripts(self,*files): - """Add scripts to configuration. 
- """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self,key) - a.extend(dict.get(key,[])) - for key in self.dict_keys: - a = getattr(self,key) - a.update(dict.get(key,{})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key,dict[key],dict.get('name','?'))) - setattr(self,key,dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self,key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError, "Don't know about key=%r" % (key) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self,k,None) - if a: - s += '%s = %s\n' % (k,pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.',old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib,Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self,path): - """Return path's SVN revision number. 
- """ - revision = None - m = None - try: - sin, sout = os.popen4('svnversion') - m = re.match(r'(?P\d+)', sout.read()) - except: - pass - if m: - revision = int(m.group('revision')) - return revision - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None): - entries = njoin(path,'_svn','entries') - else: - entries = njoin(path,'.svn','entries') - if os.path.isfile(entries): - f = open(entries) - fstr = f.read() - f.close() - if fstr[:5] == '\d+)"',fstr) - if m: - revision = int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - revision = int(m.group('revision')) - return revision - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. - """ - version = getattr(self,'version',None) - if version is not None: - return version - - # Get version from version file. - if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path,f) - if os.path.isfile(fn): - info = (open(fn),fn,('.py','U',1)) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name,name) - try: - version_module = imp.load_module('_'.join(n.split('.')),*info) - except ImportError,msg: - self.warn(str(msg)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module,a,None) - if version is not None: - break - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN revision number - revision = self._get_svn_revision(self.local_path) - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - If __svn_version__.py existed before, nothing is done. - """ - target = njoin(self.local_path,'__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target,version)) - f = open(target,'w') - f.write('version = %r\n' % (version)) - f.close() - - import atexit - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - """ - self.py_modules.append((self.name,name,generate_config_py)) - - def get_info(self,*names): - """Get resources information. 
- """ - from system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/core/setup.py - return include_dirs - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. - """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - )) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov,str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. - - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - f = open(target, 'w') - f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(r''' -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print name + ":" - if not info_dict: - print " NOT AVAILABLE" - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... 
" + v[-60:] - print " %s = %s" % (k,v) - print - ''') - - f.close() - return target - -if sys.version[:3] >= '2.5': - def get_build_architecture(): - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() -else: - #copied from python 2.5.1 distutils/msvccompiler.py - def get_build_architecture(): - """Return the processor architecture. - - Possible results are "Intel", "Itanium", or "AMD64". - """ - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return "Intel" - j = sys.version.find(")", i) - return sys.version[i+len(prefix):j] diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py deleted file mode 100644 index b16225f41..000000000 --- a/numpy/distutils/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('distutils',parent_package,top_path) - config.add_subpackage('command') - config.add_subpackage('fcompiler') - config.add_data_dir('tests') - config.add_data_files('site.cfg') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py deleted file mode 100644 index ea9ba1ca3..000000000 --- a/numpy/distutils/system_info.py +++ /dev/null @@ -1,1947 +0,0 @@ -#!/bin/env python -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. Currently, the following -classes are available: - - atlas_info - atlas_threads_info - atlas_blas_info - atlas_blas_threads_info - lapack_atlas_info - blas_info - lapack_info - blas_opt_info # usage recommended - lapack_opt_info # usage recommended - fftw_info,dfftw_info,sfftw_info - fftw_threads_info,dfftw_threads_info,sfftw_threads_info - djbfft_info - x11_info - lapack_src_info - blas_src_info - numpy_info - numarray_info - numpy_info - boost_python_info - agg2_info - wx_info - gdk_pixbuf_xlib_2_info - gdk_pixbuf_2_info - gdk_x11_2_info - gtkp_x11_2_info - gtkp_2_info - xft_info - freetype2_info - umfpack_info - -Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) - -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section DEFAULT has options that are the default for each section. 
The -available sections are fftw, atlas, and x11. Appropiate defaults are -used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. DEFAULT section in site.cfg -Only the first complete match is returned. - -Example: ----------- -[DEFAULT] -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -fftw_libs = rfftw, fftw -fftw_opt_libs = rfftw_threaded, fftw_threaded -# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -atlas_libs = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -""" - -import sys -import os -import re -import copy -import warnings -from glob import glob -import ConfigParser - -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import distutils.sysconfig -from distutils import log - -from numpy.distutils.exec_command import \ - find_executable, exec_command, get_pythonexe -from numpy.distutils.misc_util import is_sequence, is_string -from numpy.distutils.command.config import config as cmd_config - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(distutils.sysconfig.EXEC_PREFIX, - 'libs')] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] -else: - default_lib_dirs = ['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'] - default_include_dirs = ['/usr/local/include', - '/opt/include', '/usr/include', - '/opt/local/include', '/sw/include'] - default_src_dirs = ['.','/usr/local/src', '/opt/src','/sw/src'] - - try: - platform = os.uname() - bit64 = platform[-1].endswith('64') - except: - bit64 = False - - if bit64: - default_x11_lib_dirs = ['/usr/lib64'] - else: - default_x11_lib_dirs = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - default_x11_include_dirs = ['/usr/X11R6/include','/usr/X11/include', - '/usr/include'] - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0,os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = filter(os.path.isdir, default_lib_dirs) -default_include_dirs = filter(os.path.isdir, default_include_dirs) -default_src_dirs = filter(os.path.isdir, default_src_dirs) - -so_ext = distutils.sysconfig.get_config_vars('SO')[0] or '' - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - else: - sysfile = 
os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.environ['HOME'] - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - -def get_info(name,notfound_action=0): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'atlas':atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads':atlas_threads_info, # ditto - 'atlas_blas':atlas_blas_info, - 'atlas_blas_threads':atlas_blas_threads_info, - 'lapack_atlas':lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads':lapack_atlas_threads_info, # ditto - 'mkl':mkl_info, - 'lapack_mkl':lapack_mkl_info, # use lapack_opt instead - 'blas_mkl':blas_mkl_info, # use blas_opt instead - 'x11':x11_info, - 'fft_opt':fft_opt_info, - 'fftw':fftw_info, - 'fftw2':fftw2_info, - 'fftw3':fftw3_info, - 'dfftw':dfftw_info, - 'sfftw':sfftw_info, - 'fftw_threads':fftw_threads_info, - 'dfftw_threads':dfftw_threads_info, - 'sfftw_threads':sfftw_threads_info, - 'djbfft':djbfft_info, - 'blas':blas_info, # use blas_opt instead - 'lapack':lapack_info, # use lapack_opt instead - 'lapack_src':lapack_src_info, - 'blas_src':blas_src_info, - 'numpy':numpy_info, - 'f2py':f2py_info, - 'Numeric':Numeric_info, - 'numeric':Numeric_info, - 'numarray':numarray_info, - 'numerix':numerix_info, - 'lapack_opt':lapack_opt_info, - 'blas_opt':blas_opt_info, - 'boost_python':boost_python_info, - 'agg2':agg2_info, - 'wx':wx_info, - 'gdk_pixbuf_xlib_2':gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0':gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2':gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0':gdk_pixbuf_2_info, - 'gdk':gdk_info, - 'gdk_2':gdk_2_info, - 'gdk-2.0':gdk_2_info, - 'gdk_x11_2':gdk_x11_2_info, - 'gdk-x11-2.0':gdk_x11_2_info, - 'gtkp_x11_2':gtkp_x11_2_info, - 'gtk+-x11-2.0':gtkp_x11_2_info, - 'gtkp_2':gtkp_2_info, - 'gtk+-2.0':gtkp_2_info, - 'xft':xft_info, - 'freetype2':freetype2_info, - 'umfpack':umfpack_info, - 'amd':amd_info, - }.get(name.lower(),system_info) - return cl().get_info(notfound_action) - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://math-atlas.sourceforge.net/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. 
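# Illustrative sketch (not part of the deleted file): querying the detection
# machinery through the get_info() entry point defined above; 'lapack_opt' is
# one of the names whose usage is recommended in the class list.
def _example_query_lapack():
    from numpy.distutils.system_info import get_info
    info = get_info('lapack_opt')      # an empty dict means the resource was not found
    return info.get('libraries', [])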
- Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (http://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - -class NumericNotFoundError(NotFoundError): - """ - Numeric (http://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - -class system_info: - - """ get_info() is the only public method. Don't use others. - """ - section = 'DEFAULT' - dir_env_var = None - search_static_first = 0 # XXX: disabled by default, may disappear in - # future unless it is proved to be useful. - verbosity = 1 - saved_results = {} - - notfounderror = NotFoundError - - def __init__ (self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity = 1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {} - defaults['libraries'] = '' - defaults['library_dirs'] = os.pathsep.join(default_lib_dirs) - defaults['include_dirs'] = os.pathsep.join(default_include_dirs) - defaults['src_dirs'] = os.pathsep.join(default_src_dirs) - defaults['search_static_first'] = str(self.search_static_first) - self.cp = ConfigParser.ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - self.search_static_first = self.cp.getboolean(self.section, - 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - info = {} - for lib in libs: - i = None - for d in dirs: - i = self.check_libs(d,[lib]) - if i is not None: - break - if i is not None: - dict_append(info,**i) - else: - log.info('Library %s was not found. 
Ignoring' % (lib)) - return info - - def set_info(self,**info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info,**lib_info) - self.saved_results[self.__class__.__name__] = info - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def get_info(self,notfound_action=0): - """ Return a dictonary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action==1: - warnings.warn(self.notfounderror.__doc__) - elif notfound_action==2: - raise self.notfounderror,self.notfounderror.__doc__ - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if self.verbosity>0 and flag: - for k,v in res.items(): - v = str(v) - if k in ['sources','libraries'] and len(v)>270: - v = v[:120]+'...\n...\n...'+v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0]==e0: - log.info('Setting %s=%s' % (env_var[0],e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d=='None': - log.info('Disabled %s: %s',self.__class__.__name__,'(%s is None)' \ - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self,'_lib_names',[]) - if len(l)==1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3]=='lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include','lib']: - d1 = os.path.join(d,dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get('DEFAULT', key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if os.path.isdir(d) and d not in ret: - ret.append(d) - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except ConfigParser.NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - return self.get_libs(key,'') - - def library_extensions(self): - static_exts = ['.a'] - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self,lib_dir,libs,opt_libs =[]): - """If static or shared 
libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). - """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dir,libs,opt_libs,[ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), lib_dir) - return info - - def check_libs2(self, lib_dir, libs, opt_libs =[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. - """ - exts = self.library_extensions() - info = self._check_libs(lib_dir,libs,opt_libs,exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), lib_dir) - return info - - def _lib_list(self, lib_dir, libs, exts): - assert is_string(lib_dir) - liblist = [] - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for l in libs: - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix+l+ext) - if p: - break - if p: - assert len(p)==1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - l += '.dll' - liblist.append(l) - break - return liblist - - def _check_libs(self, lib_dir, libs, opt_libs, exts): - found_libs = self._lib_list(lib_dir, libs, exts) - if len(found_libs) == len(libs): - info = {'libraries' : found_libs, 'library_dirs' : [lib_dir]} - opt_found_libs = self._lib_list(lib_dir, opt_libs, exts) - if len(opt_found_libs) == len(opt_libs): - info['libraries'].extend(opt_found_libs) - return info - else: - return None - - def combine_paths(self,*args): - """Return a list of existing paths composed by all combinations - of items from the arguments. 
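# Illustrative sketch (not part of the deleted file): the subclassing pattern
# used by the *_info classes below, for a hypothetical library 'foo'.
# calc_info() locates the library with the base-class helpers and publishes
# the result through set_info(); outside this module the base class would be
# imported as: from numpy.distutils.system_info import system_info
class _example_foo_info(system_info):
    section = 'foo'
    dir_env_var = 'FOO'
    _lib_names = ['foo']

    def calc_info(self):
        for d in self.get_lib_dirs():
            info = self.check_libs(d, self._lib_names)
            if info is not None:
                info['language'] = 'c'
                self.set_info(**info)
                return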
- """ - return combine_paths(*args,**{'verbosity':self.verbosity}) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info,**fftw_info) - if djbfft_info: - dict_append(info,**djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [ { 'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H',None)]}, - { 'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h','rfftw.h'], - 'macros':[('SCIPY_FFTW_H',None)]}] - - def __init__(self): - system_info.__init__(self) - - def calc_ver_info(self,ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - incl_dir = None - libs = self.get_libs(self.section+'_libs', ver_param['libs']) - info = None - for d in lib_dirs: - r = self.check_libs(d,libs) - if r is not None: - info = r - break - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d,ver_param['includes']))==len(ver_param['includes']): - dict_append(info,include_dirs=[d]) - flag = 1 - incl_dirs = [d] - incl_dir = d - break - if flag: - dict_append(info,define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [ { 'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h','rfftw.h'], - 'macros':[('SCIPY_FFTW_H',None)]} - ] - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [ { 'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H',None)]}, - ] - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'dfftw', - 'libs':['drfftw','dfftw'], - 'includes':['dfftw.h','drfftw.h'], - 'macros':[('SCIPY_DFFTW_H',None)]} ] - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'sfftw', - 'libs':['srfftw','sfftw'], - 'includes':['sfftw.h','srfftw.h'], - 'macros':[('SCIPY_SFFTW_H',None)]} ] - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'fftw threads', - 'libs':['rfftw_threads','fftw_threads'], - 'includes':['fftw_threads.h','rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H',None)]} ] - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'dfftw threads', - 'libs':['drfftw_threads','dfftw_threads'], - 'includes':['dfftw_threads.h','drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H',None)]} ] - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'sfftw threads', - 'libs':['srfftw_threads','sfftw_threads'], - 'includes':['sfftw_threads.h','srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H',None)]} ] - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - 
notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d,['djbfft'])+[d]) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths (d,['djbfft.a']) - if p: - info = {'extra_objects':p} - break - p = self.combine_paths (d,['libdjbfft.a','libdjbfft'+so_ext]) - if p: - info = {'libraries':['djbfft'],'library_dirs':[d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d,['fftc8.h','fftfreq.h']))==2: - dict_append(info,include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H',None)]) - self.set_info(**info) - return - return - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKL' - _lib_mkl = ['mkl','vml','guide'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT',None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH','').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - for d in open(ld_so_conf,'r').readlines(): - d = d.strip() - if d: paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m)+2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d,'mkl','*')) + glob(os.path.join(d,'mkl*')) - for d in dirs: - if os.path.isdir(os.path.join(d,'lib')): - return d - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from cpuinfo import cpu - l = 'mkl' # use shared library - if cpu.is_Itanium(): - plt = '64' - #l = 'mkl_ipf' - elif cpu.is_Xeon(): - plt = 'em64t' - #l = 'mkl_em64t' - else: - plt = '32' - #l = 'mkl_ia32' - if l not in self._lib_mkl: - self._lib_mkl.insert(0,l) - system_info.__init__(self, - default_lib_dirs=[os.path.join(mklroot,'lib',plt)], - default_include_dirs=[os.path.join(mklroot,'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - mkl_libs = self.get_libs('mkl_libs',self._lib_mkl) - mkl = None - for d in lib_dirs: - mkl = self.check_libs2(d,mkl_libs) - if mkl is not None: - break - if mkl is None: - return - info = {} - dict_append(info,**mkl) - dict_append(info, - define_macros=[('SCIPY_MKL_H',None)], - include_dirs = incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - -class lapack_mkl_info(mkl_info): - - def calc_info(self): - mkl = get_info('mkl') - if not mkl: - return - if sys.platform == 'win32': - lapack_libs = self.get_libs('lapack_libs',['mkl_lapack']) - else: - lapack_libs = self.get_libs('lapack_libs',['mkl_lapack32','mkl_lapack64']) - - info = {'libraries': lapack_libs} - dict_append(info,**mkl) - self.set_info(**info) - -class blas_mkl_info(mkl_info): - pass - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas','cblas'] - if sys.platform[:7]=='freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, 
key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d,['atlas*','ATLAS*', - 'sse','3dnow','sse2'])+[d]) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - atlas_libs = self.get_libs('atlas_libs', - self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs',self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d,atlas_libs,[]) - lapack_atlas = self.check_libs2(d,['lapack_atlas'],[]) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d,['atlas*','ATLAS*']) - for d2 in lib_dirs2: - lapack = self.check_libs2(d2,lapack_libs,[]) - if lapack is not None: - break - else: - lapack = None - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs+include_dirs,'cblas.h') or [None])[0] - if h: - h = os.path.dirname(h) - dict_append(info,include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info,**lapack) - dict_append(info,**atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info,**atlas) - dict_append(info,define_macros=[('ATLAS_WITH_LAPACK_ATLAS',None)]) - self.set_info(**info) - return - else: - dict_append(info,**atlas) - dict_append(info,define_macros=[('ATLAS_WITHOUT_LAPACK',None)]) - message = """ -********************************************************************* - Could not find lapack library within the ATLAS installation. -********************************************************************* -""" - warnings.warn(message) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir,prefix+lapack_name+e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000*1024: - message = """ -********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. 
-********************************************************************* -""" % (lapack_lib,sz/1024) - warnings.warn(message) - else: - info['language'] = 'f77' - - self.set_info(**info) - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas','cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - atlas_libs = self.get_libs('atlas_libs', - self._lib_names + self._lib_atlas) - atlas = None - for d in lib_dirs: - atlas = self.check_libs2(d,atlas_libs,[]) - if atlas is not None: - break - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs+include_dirs,'cblas.h') or [None])[0] - if h: - h = os.path.dirname(h) - dict_append(info,include_dirs=[h]) - info['language'] = 'c' - - dict_append(info,**atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS','ATLAS'] - _lib_names = ['ptf77blas','ptcblas'] - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS','ATLAS'] - _lib_names = ['ptf77blas','ptcblas'] - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - lapack_libs = self.get_libs('lapack_libs', self._lib_names) - for d in lib_dirs: - lapack = self.check_libs(d,lapack_libs,[]) - if lapack is not None: - info = lapack - break - else: - return - info['language'] = 'f77' - self.set_info(**info) - -class lapack_src_info(system_info): - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['LAPACK*/SRC','SRC'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. 
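# Illustrative sketch (not part of the deleted file): pointing the source-based
# LAPACK fallback at an unpacked LAPACK tree via the LAPACK_SRC variable
# declared on lapack_src_info above; the path is hypothetical.
def _example_lapack_from_source():
    import os
    from numpy.distutils.system_info import get_info   # module-level helper defined earlier in this file
    os.environ['LAPACK_SRC'] = '/opt/src/lapack-3.1.1'
    return get_info('lapack_src')   # -> {'sources': [...], 'language': 'f77'} when the tree is found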
- allaux=''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f'%f for f in (sclaux+slasrc).split()] \ - + ['d%s.f'%f for f in (dzlaux+dlasrc).split()] \ - + ['c%s.f'%f for f in (clasrc).split()] \ - + ['z%s.f'%f for f in (zlasrc).split()] \ - + ['%s.f'%f for f in 
(allaux+oclasrc+ozlasrc).split()] - sources = [os.path.join(src_dir,f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir,'..','INSTALL') - sources += [os.path.join(src_dir2,p+'lamch.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources':sources,'language':'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs) - except: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_',o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION',None) - return atlas_version or '?.?.?' - - if not s: - m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)',o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo',o,re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - _cached_atlas_version[key] = atlas_version - return atlas_version - -from distutils.util import get_platform - -class lapack_opt_info(system_info): - - notfounderror = LapackNotFoundError - - def calc_info(self): - - if sys.platform=='darwin' and not os.environ.get('ATLAS',None): - args = [] - link_args = [] - if get_platform()[-4:] == 'i386': - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework','-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework','-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO',3)]) - return - - lapack_mkl_info = get_info('lapack_mkl') - if lapack_mkl_info: - self.set_info(**lapack_mkl_info) - return - - atlas_info = get_info('atlas_threads') - if not atlas_info: - atlas_info = get_info('atlas') - #atlas_info = {} ## uncomment for testing - atlas_version = None - need_lapack = 0 - need_blas = 0 - info = {} - if atlas_info: - version_info = atlas_info.copy() - atlas_version = get_atlas_version(**version_info) - if 'define_macros' not in atlas_info: - atlas_info['define_macros'] = [] - if atlas_version is None: - atlas_info['define_macros'].append(('NO_ATLAS_INFO',2)) - else: - atlas_info['define_macros'].append(('ATLAS_INFO', - '"\\"%s\\""' % atlas_version)) - if atlas_version=='3.2.1_pre3.3.6': - atlas_info['define_macros'].append(('NO_ATLAS_INFO',4)) - l = atlas_info.get('define_macros',[]) - 
if ('ATLAS_WITH_LAPACK_ATLAS',None) in l \ - or ('ATLAS_WITHOUT_LAPACK',None) in l: - need_lapack = 1 - info = atlas_info - else: - warnings.warn(AtlasNotFoundError.__doc__) - need_blas = 1 - need_lapack = 1 - dict_append(info,define_macros=[('NO_ATLAS_INFO',1)]) - - if need_lapack: - lapack_info = get_info('lapack') - #lapack_info = {} ## uncomment for testing - if lapack_info: - dict_append(info,**lapack_info) - else: - warnings.warn(LapackNotFoundError.__doc__) - lapack_src_info = get_info('lapack_src') - if not lapack_src_info: - warnings.warn(LapackSrcNotFoundError.__doc__) - return - dict_append(info,libraries=[('flapack_src',lapack_src_info)]) - - if need_blas: - blas_info = get_info('blas') - #blas_info = {} ## uncomment for testing - if blas_info: - dict_append(info,**blas_info) - else: - warnings.warn(BlasNotFoundError.__doc__) - blas_src_info = get_info('blas_src') - if not blas_src_info: - warnings.warn(BlasSrcNotFoundError.__doc__) - return - dict_append(info,libraries=[('fblas_src',blas_src_info)]) - - self.set_info(**info) - return - - -class blas_opt_info(system_info): - - notfounderror = BlasNotFoundError - - def calc_info(self): - - if sys.platform=='darwin' and not os.environ.get('ATLAS',None): - args = [] - link_args = [] - if get_platform()[-4:] == 'i386': - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework','-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework','-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO',3)]) - return - - blas_mkl_info = get_info('blas_mkl') - if blas_mkl_info: - self.set_info(**blas_mkl_info) - return - - atlas_info = get_info('atlas_blas_threads') - if not atlas_info: - atlas_info = get_info('atlas_blas') - atlas_version = None - need_blas = 0 - info = {} - if atlas_info: - version_info = atlas_info.copy() - atlas_version = get_atlas_version(**version_info) - if 'define_macros' not in atlas_info: - atlas_info['define_macros'] = [] - if atlas_version is None: - atlas_info['define_macros'].append(('NO_ATLAS_INFO',2)) - else: - atlas_info['define_macros'].append(('ATLAS_INFO', - '"\\"%s\\""' % atlas_version)) - info = atlas_info - else: - warnings.warn(AtlasNotFoundError.__doc__) - need_blas = 1 - dict_append(info,define_macros=[('NO_ATLAS_INFO',1)]) - - if need_blas: - blas_info = get_info('blas') - if blas_info: - dict_append(info,**blas_info) - else: - warnings.warn(BlasNotFoundError.__doc__) - blas_src_info = get_info('blas_src') - if not blas_src_info: - warnings.warn(BlasSrcNotFoundError.__doc__) - return - dict_append(info,libraries=[('fblas_src',blas_src_info)]) - - self.set_info(**info) - return - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - blas_libs = self.get_libs('blas_libs', self._lib_names) - for d in lib_dirs: - blas = self.check_libs(d,blas_libs,[]) - if blas is not None: - info = blas - break - else: - return - info['language'] = 'f77' # XXX: is 
it generally true? - self.set_info(**info) - - -class blas_src_info(system_info): - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['blas'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir,f+'.f') \ - for f in (blas1+blas2+blas3).split()] - #XXX: should we check here actual existence of source files? - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources':sources,'language':'f77'} - self.set_info(**info) - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - x11_libs = self.get_libs('x11_libs', ['X11']) - for lib_dir in lib_dirs: - info = self.check_libs(lib_dir, x11_libs, []) - if info is not None: - break - else: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name=='lib': - break - prefix.append(name) - include_dirs.append(distutils.sysconfig.get_python_inc( - prefix=os.sep.join(prefix))) - except ImportError: - pass - py_incl_dir = distutils.sysconfig.get_python_inc() - include_dirs.append(py_incl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__','version']: - 
vrs = getattr(module,v,None) - if vrs is None: - continue - macros = [(self.modulename.upper()+'_VERSION', - '"\\"%s\\""' % (vrs)), - (self.modulename.upper(),None)] - break -## try: -## macros.append( -## (self.modulename.upper()+'_VERSION_HEX', -## hex(vstr2hex(module.__version__))), -## ) -## except Exception,msg: -## print msg - dict_append(info, define_macros = macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - -class numerix_info(system_info): - section = 'numerix' - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy - which = "numpy", "defaulted" - except ImportError,msg1: - try: - import Numeric - which = "numeric", "defaulted" - except ImportError,msg2: - try: - import numarray - which = "numarray", "defaulted" - except ImportError,msg3: - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." % (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__),'src') - self.set_info(sources = [os.path.join(f2py_dir,'fortranobject.c')], - include_dirs = [f2py_dir]) - return - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['boost*'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'libs','python','src','module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dir = distutils.sysconfig.get_python_inc() - srcs_dir = os.path.join(src_dir,'libs','python','src') - bpl_srcs = glob(os.path.join(srcs_dir,'*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir,'*','*.cpp')) - info = {'libraries':[('boost_python_src',{'include_dirs':[src_dir,py_incl_dir], - 'sources':bpl_srcs})], - 'include_dirs':[src_dir], - } - if info: - self.set_info(**info) - return - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['agg2*'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'src','agg_affine_matrix.cpp')): - src_dir = d - 
break - if not src_dir: - return - if sys.platform=='win32': - agg2_srcs = glob(os.path.join(src_dir,'src','platform','win32','agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir,'src','*.cpp')) - agg2_srcs += [os.path.join(src_dir,'src','platform','X11','agg_platform_support.cpp')] - - info = {'libraries':[('agg2_src',{'sources':agg2_srcs, - 'include_dirs':[os.path.join(src_dir,'include')], - })], - 'include_dirs':[os.path.join(src_dir,'include')], - } - if info: - self.set_info(**info) - return - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - def get_config_output(self, config_exe, option): - s,o = exec_command(config_exe+' '+self.append_config_exe+' '+option,use_tee=0) - if not s: - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' \ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe,self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - '"\\"%s\\""' % (version))) - if self.version_macro_name: - macros.append((self.version_macro_name+'_%s' % (version.replace('.','_')),None)) - if self.release_macro_name: - release = self.get_config_output(config_exe,'--release') - if release: - macros.append((self.release_macro_name+'_%s' % (release.replace('.','_')),None)) - opts = self.get_config_output(config_exe,'--libs') - if opts: - for opt in opts.split(): - if opt[:2]=='-l': - libraries.append(opt[2:]) - elif opt[:2]=='-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe,self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2]=='-I': - include_dirs.append(opt[2:]) - elif opt[:2]=='-D': - if '=' in opt: - n,v = opt[2:].split('=') - macros.append((n,v)) - else: - macros.append((opt[2:],None)) - else: - extra_compile_args.append(opt) - if macros: dict_append(info, define_macros = macros) - if libraries: dict_append(info, libraries = libraries) - if library_dirs: dict_append(info, library_dirs = library_dirs) - if include_dirs: dict_append(info, include_dirs = include_dirs) - if extra_link_args: dict_append(info, extra_link_args = extra_link_args) - if extra_compile_args: dict_append(info, extra_compile_args = extra_compile_args) - if info: - self.set_info(**info) - return - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - -class gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - -class gdk_x11_2_info(_pkg_config_info): - section = 
'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - amd_libs = self.get_libs('amd_libs', self._lib_names) - for d in lib_dirs: - amd = self.check_libs(d,amd_libs,[]) - if amd is not None: - info = amd - break - else: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d,'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H',None)], - swig_opts = ['-I' + inc_dir]) - - self.set_info(**info) - return - -class umfpack_info(system_info): - section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - umfpack_libs = self.get_libs('umfpack_libs', self._lib_names) - for d in lib_dirs: - umf = self.check_libs(d,umfpack_libs,[]) - if umf is not None: - info = umf - break - else: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d,['','umfpack'],'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H',None)], - swig_opts = ['-I' + inc_dir]) - - amd = get_info('amd') - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - -## def vstr2hex(version): -## bits = [] -## n = [24,16,8,4,0] -## r = 0 -## for s in version.split('.'): -## r |= int(s) << n[0] -## del n[0] -## return r - -#-------------------------------------------------------------------- - -def combine_paths(*args,**kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. 
- """ - r = [] - for a in args: - if not a: continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: return [] - if len(args)==1: - result = reduce(lambda a,b:a+b,map(glob,args[0]),[]) - elif len (args)==2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0,a1))) - else: - result = combine_paths(*(combine_paths(args[0],args[1])+args[2:])) - verbosity = kws.get('verbosity',1) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c':0,'c++':1,'f77':2,'f90':3} -inv_language_map = {0:'c',1:'c++',2:'f77',3:'f90'} -def dict_append(d,**kws): - languages = [] - for k,v in kws.items(): - if k=='language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs','include_dirs','define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l,0) for l in languages])] - d['language'] = l - return - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.iteritems(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - r = conf.get_info() - if show_only: - log.info('Info classes not defined: %s',','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy/distutils/tests/f2py_ext/__init__.py b/numpy/distutils/tests/f2py_ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/numpy/distutils/tests/f2py_ext/setup.py b/numpy/distutils/tests/f2py_ext/setup.py deleted file mode 100644 index e3dfddb74..000000000 --- a/numpy/distutils/tests/f2py_ext/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_ext',parent_package,top_path) - config.add_extension('fib2', ['src/fib2.pyf','src/fib1.f']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy/distutils/tests/f2py_ext/src/fib1.f deleted file mode 100644 index cfbb1eea0..000000000 --- a/numpy/distutils/tests/f2py_ext/src/fib1.f +++ /dev/null @@ -1,18 +0,0 @@ -C FILE: FIB1.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB1.F diff --git a/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy/distutils/tests/f2py_ext/src/fib2.pyf deleted file mode 100644 index 90a8cf00c..000000000 --- 
a/numpy/distutils/tests/f2py_ext/src/fib2.pyf +++ /dev/null @@ -1,9 +0,0 @@ -! -*- f90 -*- -python module fib2 - interface - subroutine fib(a,n) - real*8 dimension(n),intent(out),depend(n) :: a - integer intent(in) :: n - end subroutine fib - end interface -end python module fib2 diff --git a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py deleted file mode 100644 index 9a52ab17a..000000000 --- a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys -from numpy.testing import * -set_package_path() -from f2py_ext import fib2 -del sys.path[0] - -class TestFib2(NumpyTestCase): - - def check_fib(self): - assert_array_equal(fib2.fib(6),[0,1,1,2,3,5]) - -if __name__ == "__main__": - NumpyTest(fib2).run() diff --git a/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy/distutils/tests/f2py_f90_ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy/distutils/tests/f2py_f90_ext/include/body.f90 deleted file mode 100644 index 90b44e29d..000000000 --- a/numpy/distutils/tests/f2py_f90_ext/include/body.f90 +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 diff --git a/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy/distutils/tests/f2py_f90_ext/setup.py deleted file mode 100644 index ee56cc3a6..000000000 --- a/numpy/distutils/tests/f2py_f90_ext/setup.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_f90_ext',parent_package,top_path) - config.add_extension('foo', - ['src/foo_free.f90'], - include_dirs=['include'], - f2py_options=['--include_paths', - config.paths('include')[0]] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 b/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 deleted file mode 100644 index c7713be59..000000000 --- a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 +++ /dev/null @@ -1,6 +0,0 @@ -module foo_free -contains - -include "body.f90" - -end module foo_free diff --git a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py deleted file mode 100644 index 3d48f6ca9..000000000 --- a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys -from numpy.testing import * -set_package_path() -from f2py_f90_ext import foo -del sys.path[0] - -class TestFoo(NumpyTestCase): - - def check_foo_free(self): - assert_equal(foo.foo_free.bar13(),13) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/distutils/tests/gen_ext/__init__.py b/numpy/distutils/tests/gen_ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/numpy/distutils/tests/gen_ext/setup.py b/numpy/distutils/tests/gen_ext/setup.py deleted file mode 100644 index bf029062c..000000000 --- a/numpy/distutils/tests/gen_ext/setup.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python - -fib3_f = ''' -C FILE: FIB3.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) -Cf2py intent(in) n -Cf2py intent(out) a -Cf2py depend(n) a - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - 
A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB3.F -''' - -def source_func(ext, build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'fib3.f') - if newer(__file__, target): - f = open(target,'w') - f.write(fib3_f) - f.close() - return [target] - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('gen_ext',parent_package,top_path) - config.add_extension('fib3', - [source_func] - ) - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy/distutils/tests/gen_ext/tests/test_fib3.py deleted file mode 100644 index b962a12aa..000000000 --- a/numpy/distutils/tests/gen_ext/tests/test_fib3.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys -from numpy.testing import * -set_package_path() -from gen_ext import fib3 -del sys.path[0] - -class TestFib3(NumpyTestCase): - - def check_fib(self): - assert_array_equal(fib3.fib(6),[0,1,1,2,3,5]) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/distutils/tests/pyrex_ext/__init__.py b/numpy/distutils/tests/pyrex_ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/numpy/distutils/tests/pyrex_ext/primes.pyx b/numpy/distutils/tests/pyrex_ext/primes.pyx deleted file mode 100644 index 2ada0c5a0..000000000 --- a/numpy/distutils/tests/pyrex_ext/primes.pyx +++ /dev/null @@ -1,22 +0,0 @@ -# -# Calculate prime numbers -# - -def primes(int kmax): - cdef int n, k, i - cdef int p[1000] - result = [] - if kmax > 1000: - kmax = 1000 - k = 0 - n = 2 - while k < kmax: - i = 0 - while i < k and n % p[i] <> 0: - i = i + 1 - if i == k: - p[k] = n - k = k + 1 - result.append(n) - n = n + 1 - return result diff --git a/numpy/distutils/tests/pyrex_ext/setup.py b/numpy/distutils/tests/pyrex_ext/setup.py deleted file mode 100644 index 5b348b916..000000000 --- a/numpy/distutils/tests/pyrex_ext/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('pyrex_ext',parent_package,top_path) - config.add_extension('primes', - ['primes.pyx']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy/distutils/tests/pyrex_ext/tests/test_primes.py deleted file mode 100644 index 1ca5ed8e7..000000000 --- a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys -from numpy.testing import * - -set_package_path() -from pyrex_ext.primes import primes -restore_path() - -class TestPrimes(NumpyTestCase): - def check_simple(self, level=1): - l = primes(10) - assert_equal(l, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]) -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/distutils/tests/setup.py b/numpy/distutils/tests/setup.py deleted file mode 100644 index 89d73800e..000000000 --- a/numpy/distutils/tests/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testnumpydistutils',parent_package,top_path) - config.add_subpackage('pyrex_ext') - config.add_subpackage('f2py_ext') - 
#config.add_subpackage('f2py_f90_ext') - config.add_subpackage('swig_ext') - config.add_subpackage('gen_ext') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/swig_ext/__init__.py b/numpy/distutils/tests/swig_ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/numpy/distutils/tests/swig_ext/setup.py b/numpy/distutils/tests/swig_ext/setup.py deleted file mode 100644 index 7f0dbe627..000000000 --- a/numpy/distutils/tests/swig_ext/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('swig_ext',parent_package,top_path) - config.add_extension('_example', - ['src/example.i','src/example.c'] - ) - config.add_extension('_example2', - ['src/zoo.i','src/zoo.cc'], - depends=['src/zoo.h'], - include_dirs=['src'] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/swig_ext/src/example.c b/numpy/distutils/tests/swig_ext/src/example.c deleted file mode 100644 index be151725c..000000000 --- a/numpy/distutils/tests/swig_ext/src/example.c +++ /dev/null @@ -1,14 +0,0 @@ -/* File : example.c */ - -double My_variable = 3.0; - -/* Compute factorial of n */ -int fact(int n) { - if (n <= 1) return 1; - else return n*fact(n-1); -} - -/* Compute n mod m */ -int my_mod(int n, int m) { - return(n % m); -} diff --git a/numpy/distutils/tests/swig_ext/src/example.i b/numpy/distutils/tests/swig_ext/src/example.i deleted file mode 100644 index f4fc11e66..000000000 --- a/numpy/distutils/tests/swig_ext/src/example.i +++ /dev/null @@ -1,14 +0,0 @@ -/* -*- c -*- */ - -/* File : example.i */ -%module example -%{ -/* Put headers and other declarations here */ -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); -%} - -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); diff --git a/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy/distutils/tests/swig_ext/src/zoo.cc deleted file mode 100644 index 0a643d1e5..000000000 --- a/numpy/distutils/tests/swig_ext/src/zoo.cc +++ /dev/null @@ -1,23 +0,0 @@ -#include "zoo.h" -#include -#include - -Zoo::Zoo() -{ - n = 0; -} - -void Zoo::shut_up(char *animal) -{ - if (n < 10) { - strcpy(animals[n], animal); - n++; - } -} - -void Zoo::display() -{ - int i; - for(i = 0; i < n; i++) - printf("%s\n", animals[i]); -} diff --git a/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy/distutils/tests/swig_ext/src/zoo.h deleted file mode 100644 index cb26e6cef..000000000 --- a/numpy/distutils/tests/swig_ext/src/zoo.h +++ /dev/null @@ -1,9 +0,0 @@ - -class Zoo{ - int n; - char animals[10][50]; -public: - Zoo(); - void shut_up(char *animal); - void display(); -}; diff --git a/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy/distutils/tests/swig_ext/src/zoo.i deleted file mode 100644 index a029c03e8..000000000 --- a/numpy/distutils/tests/swig_ext/src/zoo.i +++ /dev/null @@ -1,10 +0,0 @@ -// -*- c++ -*- -// Example copied from http://linuxgazette.net/issue49/pramode.html - -%module example2 - -%{ -#include "zoo.h" -%} - -%include "zoo.h" diff --git a/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy/distutils/tests/swig_ext/tests/test_example.py deleted file mode 100644 index f24162077..000000000 --- 
a/numpy/distutils/tests/swig_ext/tests/test_example.py +++ /dev/null @@ -1,18 +0,0 @@ -import sys -from numpy.testing import * -set_package_path() -from swig_ext import example -restore_path() - -class TestExample(NumpyTestCase): - - def check_fact(self): - assert_equal(example.fact(10),3628800) - - def check_cvar(self): - assert_equal(example.cvar.My_variable,3.0) - example.cvar.My_variable = 5 - assert_equal(example.cvar.My_variable,5.0) - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy/distutils/tests/swig_ext/tests/test_example2.py deleted file mode 100644 index 3150e1a16..000000000 --- a/numpy/distutils/tests/swig_ext/tests/test_example2.py +++ /dev/null @@ -1,17 +0,0 @@ -import sys -from numpy.testing import * -set_package_path() -from swig_ext import example2 -restore_path() - -class TestExample2(NumpyTestCase): - - def check_zoo(self): - z = example2.Zoo() - z.shut_up('Tiger') - z.shut_up('Lion') - z.display() - - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 002d360b9..000000000 --- a/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,52 +0,0 @@ -from numpy.testing import * - -set_package_path() -import numpy.distutils.fcompiler -restore_path() - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), -] - -class TestG77Versions(NumpyTestCase): - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert v == version, (vs, v) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert v is None, (vs, v) - -class TestGortranVersions(NumpyTestCase): - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert v == version, (vs, v) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert v is None, (vs, v) - - -if __name__ == '__main__': - NumpyTest.run() diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 4d2404092..000000000 --- a/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -from numpy.testing import * -from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path -from os.path import join, sep - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath(NumpyTestCase): - - def check_1(self): - 
assert_equal(appendpath('prefix','name'),join('prefix','name')) - assert_equal(appendpath('/prefix','name'),ajoin('prefix','name')) - assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name')) - assert_equal(appendpath('prefix','/name'),join('prefix','name')) - - def check_2(self): - assert_equal(appendpath('prefix/sub','name'), - join('prefix','sub','name')) - assert_equal(appendpath('prefix/sub','sup/name'), - join('prefix','sub','sup','name')) - assert_equal(appendpath('/prefix/sub','/prefix/name'), - ajoin('prefix','sub','name')) - - def check_3(self): - assert_equal(appendpath('/prefix/sub','/prefix/sup/name'), - ajoin('prefix','sub','sup','name')) - assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'), - ajoin('prefix','sub','sub2','sup','sup2','name')) - assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'), - ajoin('prefix','sub','sub2','sup','name')) - -class TestMinrelpath(NumpyTestCase): - - def check_1(self): - import os - n = lambda path: path.replace('/',os.path.sep) - assert_equal(minrelpath(n('aa/bb')),n('aa/bb')) - assert_equal(minrelpath('..'),'..') - assert_equal(minrelpath(n('aa/..')),'') - assert_equal(minrelpath(n('aa/../bb')),'bb') - assert_equal(minrelpath(n('aa/bb/..')),'aa') - assert_equal(minrelpath(n('aa/bb/../..')),'') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')),n('aa/dd')) - assert_equal(minrelpath(n('.././..')),n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')),n('dd')) - -class TestGpaths(NumpyTestCase): - - def check_gpaths(self): - local_path = minrelpath(os.path.join(os.path.dirname(__file__),'..')) - ls = gpaths('command/*.py', local_path) - assert os.path.join(local_path,'command','build_src.py') in ls,`ls` - f = gpaths('system_info.py', local_path) - assert os.path.join(local_path,'system_info.py')==f[0],`f` - -if __name__ == "__main__": - NumpyTest().run() diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py deleted file mode 100644 index 416bffc82..000000000 --- a/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. -""" - -import os - -from distutils.errors import DistutilsExecError, CompileError -from distutils.unixccompiler import * -from numpy.distutils.ccompiler import replace_method - -import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - display = '%s: %s' % (os.path.basename(self.compiler_so[0]),src) - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + - extra_postargs, display = display) - except DistutilsExecError, msg: - raise CompileError, msg - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. 
- # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except (IOError, OSError): - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError, msg: - raise LibError, msg - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy/doc/CAPI.txt b/numpy/doc/CAPI.txt deleted file mode 100644 index 28738635e..000000000 --- a/numpy/doc/CAPI.txt +++ /dev/null @@ -1,313 +0,0 @@ -=============== -C-API for NumPy -=============== - -:Author: Travis Oliphant -:Discussions to: `numpy-discussion@scipy.org`__ -:Created: October 2005 - -__ http://www.scipy.org/Mailing_Lists - -The C API of NumPy is (mostly) backward compatible with Numeric. - -There are a few non-standard Numeric usages (that were not really part -of the API) that will need to be changed: - -* If you used any of the function pointers in the ``PyArray_Descr`` - structure you will have to modify your usage of those. First, - the pointers are all under the member named ``f``. So ``descr->cast`` - is now ``descr->f->cast``. In addition, the - casting functions have eliminated the strides argument (use - ``PyArray_CastTo`` if you need strided casting). All functions have - one or two ``PyArrayObject *`` arguments at the end. This allows the - flexible arrays and mis-behaved arrays to be handled. - -* The ``descr->zero`` and ``descr->one`` constants have been replaced with - function calls, ``PyArray_Zero``, and ``PyArray_One`` (be sure to read the - code and free the resulting memory if you use these calls). - -* If you passed ``array->dimensions`` and ``array->strides`` around - to functions, you will need to fix some code. These are now - ``npy_intp*`` pointers. On 32-bit systems there won't be a problem. - However, on 64-bit systems, you will need to make changes to avoid - errors and segfaults. - - -The header files ``arrayobject.h`` and ``ufuncobject.h`` contain many defines -that you may find useful. The files ``__ufunc_api.h`` and -``__multiarray_api.h`` contain the available C-API function calls with -their function signatures. - -All of these headers are installed to -``/site-packages/numpy/core/include`` - - -Getting arrays in C-code -========================= - -All new arrays can be created using ``PyArray_NewFromDescr``. A simple interface -equivalent to ``PyArray_FromDims`` is ``PyArray_SimpleNew(nd, dims, typenum)`` -and to ``PyArray_FromDimsAndData`` is -``PyArray_SimpleNewFromData(nd, dims, typenum, data)``. - -This is a very flexible function. 
- -:: - - PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, - int nd, npy_intp *dims, - npy_intp *strides, char *data, - int flags, PyObject *obj); - -``subtype`` : ``PyTypeObject *`` - The subtype that should be created (either pass in - ``&PyArray_Type``, ``&PyBigArray_Type``, or ``obj->ob_type``, - where ``obj`` is a an instance of a subtype (or subclass) of - ``PyArray_Type`` or ``PyBigArray_Type``). - -``descr`` : ``PyArray_Descr *`` - The type descriptor for the array. This is a Python object (this - function steals a reference to it). The easiest way to get one is - using ``PyArray_DescrFromType()``. If you want to use a - flexible size array, then you need to use - ``PyArray_DescrNewFromType()`` and set its ``elsize`` - paramter to the desired size. The typenum in both of these cases - is one of the ``PyArray_XXXX`` enumerated types. - -``nd`` : ``int`` - The number of dimensions (<``MAX_DIMS``) - -``*dims`` : ``npy_intp *`` - A pointer to the size in each dimension. Information will be - copied from here. - -``*strides`` : ``npy_intp *`` - The strides this array should have. For new arrays created by this - routine, this should be ``NULL``. If you pass in memory for this array - to use, then you can pass in the strides information as well - (otherwise it will be created for you and default to C-contiguous - or Fortran contiguous). Any strides will be copied into the array - structure. Do not pass in bad strides information!!!! - - ``PyArray_CheckStrides(...)`` can help but you must call it if you are - unsure. You cannot pass in strides information when data is ``NULL`` - and this routine is creating its own memory. - -``*data`` : ``char *`` - ``NULL`` for creating brand-new memory. If you want this array to wrap - another memory area, then pass the pointer here. You are - responsible for deleting the memory in that case, but do not do so - until the new array object has been deleted. The best way to - handle that is to get the memory from another Python object, - ``INCREF`` that Python object after passing it's data pointer to this - routine, and set the ``->base`` member of the returned array to the - Python object. *You are responsible for* setting ``PyArray_BASE(ret)`` - to the base object. Failure to do so will create a memory leak. - - If you pass in a data buffer, the ``flags`` argument will be the flags - of the new array. If you create a new array, a non-zero flags - argument indicates that you want the array to be in Fortran order. - -``flags`` : ``int`` - Either the flags showing how to interpret the data buffer passed - in, or if a new array is created, nonzero to indicate a Fortran - order array. See below for an explanation of the flags. - -``obj`` : ``PyObject *`` - If subtypes is ``&PyArray_Type`` or ``&PyBigArray_Type``, this argument is - ignored. Otherwise, the ``__array_finalize__`` method of the subtype - is called (if present) and passed this object. This is usually an - array of the type to be created (so the ``__array_finalize__`` method - must handle an array argument. But, it can be anything...) - -Note: The returned array object will be unitialized unless the type is -``PyArray_OBJECT`` in which case the memory will be set to ``NULL``. - -``PyArray_SimpleNew(nd, dims, typenum)`` is a drop-in replacement for -``PyArray_FromDims`` (except it takes ``npy_intp*`` dims instead of ``int*`` dims -which matters on 64-bit systems) and it does not initialize the memory -to zero. 
- -``PyArray_SimpleNew`` is just a macro for ``PyArray_New`` with default arguments. -Use ``PyArray_FILLWBYTE(arr, 0)`` to fill with zeros. - -The ``PyArray_FromDims`` and family of functions are still available and -are loose wrappers around this function. These functions still take -``int *`` arguments. This should be fine on 32-bit systems, but on 64-bit -systems you may run into trouble if you frequently passed -``PyArray_FromDims`` the dimensions member of the old ``PyArrayObject`` structure -because ``sizeof(npy_intp) != sizeof(int)``. - - -Getting an arrayobject from an arbitrary Python object -====================================================== - -``PyArray_FromAny(...)`` - -This function replaces ``PyArray_ContiguousFromObject`` and friends (those -function calls still remain but they are loose wrappers around the -``PyArray_FromAny`` call). - -:: - - static PyObject * - PyArray_FromAny(PyObject *op, PyArray_Descr *dtype, int min_depth, - int max_depth, int requires, PyObject *context) - - -``op`` : ``PyObject *`` - The Python object to "convert" to an array object - -``dtype`` : ``PyArray_Descr *`` - The desired data-type descriptor. This can be ``NULL``, if the - descriptor should be determined by the object. Unless ``FORCECAST`` is - present in ``flags``, this call will generate an error if the data - type cannot be safely obtained from the object. - -``min_depth`` : ``int`` - The minimum depth of array needed or 0 if doesn't matter - -``max_depth`` : ``int`` - The maximum depth of array allowed or 0 if doesn't matter - -``requires`` : ``int`` - A flag indicating the "requirements" of the returned array. These - are the usual ndarray flags (see `NDArray flags`_ below). In - addition, there are three flags used only for the ``FromAny`` - family of functions: - - - ``ENSURECOPY``: always copy the array. Returned arrays always - have ``CONTIGUOUS``, ``ALIGNED``, and ``WRITEABLE`` set. - - ``ENSUREARRAY``: ensure the returned array is an ndarray (or a - bigndarray if ``op`` is one). - - ``FORCECAST``: cause a cast to occur regardless of whether or - not it is safe. - -``context`` : ``PyObject *`` - If the Python object ``op`` is not an numpy array, but has an - ``__array__`` method, context is passed as the second argument to - that method (the first is the typecode). Almost always this - parameter is ``NULL``. - - -``PyArray_ContiguousFromAny(op, typenum, min_depth, max_depth)`` is -equivalent to ``PyArray_ContiguousFromObject(...)`` (which is still -available), except it will return the subclass if op is already a -subclass of the ndarray. The ``ContiguousFromObject`` version will -always return an ndarray (or a bigndarray). - -Passing Data Type information to C-code -======================================= - -All datatypes are handled using the ``PyArray_Descr *`` structure. -This structure can be obtained from a Python object using -``PyArray_DescrConverter`` and ``PyArray_DescrConverter2``. The former -returns the default ``PyArray_LONG`` descriptor when the input object -is None, while the latter returns ``NULL`` when the input object is ``None``. - -See the ``arraymethods.c`` and ``multiarraymodule.c`` files for many -examples of usage. - -Getting at the structure of the array. 
--------------------------------------- - -You should use the ``#defines`` provided to access array structure portions: - -- ``PyArray_DATA(obj)`` : returns a ``void *`` to the array data -- ``PyArray_BYTES(obj)`` : return a ``char *`` to the array data -- ``PyArray_ITEMSIZE(obj)`` -- ``PyArray_NDIM(obj)`` -- ``PyArray_DIMS(obj)`` -- ``PyArray_DIM(obj, n)`` -- ``PyArray_STRIDES(obj)`` -- ``PyArray_STRIDE(obj,n)`` -- ``PyArray_DESCR(obj)`` -- ``PyArray_BASE(obj)`` - -see more in ``arrayobject.h`` - - -NDArray Flags -============= - -The ``flags`` attribute of the ``PyArrayObject`` structure contains important -information about the memory used by the array (pointed to by the data member) -This flags information must be kept accurate or strange results and even -segfaults may result. - -There are 6 (binary) flags that describe the memory area used by the -data buffer. These constants are defined in ``arrayobject.h`` and -determine the bit-position of the flag. Python exposes a nice attribute- -based interface as well as a dictionary-like interface for getting -(and, if appropriate, setting) these flags. - -Memory areas of all kinds can be pointed to by an ndarray, necessitating -these flags. If you get an arbitrary ``PyArrayObject`` in C-code, -you need to be aware of the flags that are set. -If you need to guarantee a certain kind of array -(like ``NPY_CONTIGUOUS`` and ``NPY_BEHAVED``), then pass these requirements into the -PyArray_FromAny function. - - -``NPY_CONTIGUOUS`` - True if the array is (C-style) contiguous in memory. -``NPY_FORTRAN`` - True if the array is (Fortran-style) contiguous in memory. - -Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN`` contiguous -and C contiguous. Both of these flags can be checked and are convenience -flags only as whether or not an array is ``NPY_CONTIGUOUS`` or ``NPY_FORTRAN`` -can be determined by the ``strides``, ``dimensions``, and ``itemsize`` -attributes. - -``NPY_OWNDATA`` - True if the array owns the memory (it will try and free it using - ``PyDataMem_FREE()`` on deallocation --- so it better really own it). - -These three flags facilitate using a data pointer that is a memory-mapped -array, or part of some larger record array. But, they may have other uses... - -``NPY_ALIGNED`` - True if the data buffer is aligned for the type and the strides - are multiples of the alignment factor as well. This can be - checked. - -``NPY_WRITEABLE`` - True only if the data buffer can be "written" to. - -``NPY_UPDATEIFCOPY`` - This is a special flag that is set if this array represents a copy - made because a user required certain flags in ``PyArray_FromAny`` and - a copy had to be made of some other array (and the user asked for - this flag to be set in such a situation). The base attribute then - points to the "misbehaved" array (which is set read_only). When - the array with this flag set is deallocated, it will copy its - contents back to the "misbehaved" array (casting if necessary) and - will reset the "misbehaved" array to ``WRITEABLE``. If the - "misbehaved" array was not ``WRITEABLE`` to begin with then - ``PyArray_FromAny`` would have returned an error because ``UPDATEIFCOPY`` - would not have been possible. - - -``PyArray_UpdateFlags(obj, flags)`` will update the ``obj->flags`` for -``flags`` which can be any of ``NPY_CONTIGUOUS``, ``NPY_FORTRAN``, ``NPY_ALIGNED``, or -``NPY_WRITEABLE``. 
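As a small illustration of the attribute- and dictionary-style interface to these same flags that Python exposes (mentioned above), here is a minimal sketch; the array names are made up and the exact set of flag aliases may vary between NumPy versions::

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> a.flags['C_CONTIGUOUS']        # dictionary-style access
    True
    >>> a.flags.writeable              # attribute-style access
    True
    >>> v = a[:, ::2]                  # a strided view is not C contiguous
    >>> v.flags['C_CONTIGUOUS']
    False
    >>> v.flags.writeable = False      # WRITEABLE can be cleared from Python
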
- -Some useful combinations of these flags: - -- ``NPY_BEHAVED = NPY_ALIGNED | NPY_WRITEABLE`` -- ``NPY_CARRAY = NPY_DEFAULT = NPY_CONTIGUOUS | NPY_BEHAVED`` -- ``NPY_CARRAY_RO = NPY_CONTIGUOUS | NPY_ALIGNED`` -- ``NPY_FARRAY = NPY_FORTRAN | NPY_BEHAVED`` -- ``NPY_FARRAY_RO = NPY_FORTRAN | NPY_ALIGNED`` - -The macro ``PyArray_CHECKFLAGS(obj, flags)`` can test any combination of flags. -There are several default combinations defined as macros already -(see ``arrayobject.h``) - -In particular, there are ``ISBEHAVED``, ``ISBEHAVED_RO``, ``ISCARRAY`` -and ``ISFARRAY`` macros that also check to make sure the array is in -native byte order (as determined) by the data-type descriptor. - -There are more C-API enhancements which you can discover in the code, -or buy the book (http://www.trelgol.com) diff --git a/numpy/doc/DISTUTILS.txt b/numpy/doc/DISTUTILS.txt deleted file mode 100644 index e0e8e662a..000000000 --- a/numpy/doc/DISTUTILS.txt +++ /dev/null @@ -1,569 +0,0 @@ -.. -*- rest -*- - -NumPy Distutils - Users Guide -============================= - -:Author: Pearu Peterson -:Discussions to: scipy-dev@scipy.org -:Created: October 2005 -:Revision: $LastChangedRevision$ -:SVN source: $HeadURL$ - -.. contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy (previously called SciPy core) --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy.core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. - -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. -Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - - + ``setup.py`` --- building script - + ``info.py`` --- contains documentation and import flags - + ``__init__.py`` --- package initializer - + ``tests/`` --- directory of unittests - -Their contents are described below. - -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. 
- -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure Scipy package:: - - #!/usr/bin/env python - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specifiy the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. - -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. - -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. - -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. - -``Configuration`` instance methods ----------------------------------- - -+ ``config.todict()`` --- returns configuration dictionary suitable for - passing to ``numpy.distutils.core.setup(..)`` function. - -+ ``config.paths(*paths) --- applies ``glob.glob(..)`` to items of - ``paths`` if necessary. Fixes ``paths`` item that is relative to - ``config.local_path``. - -+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` --- - returns a list of subpackage configurations. Subpackage is looked in the - current directory under the name ``subpackage_name`` but the path - can be specified also via optional ``subpackage_path`` argument. - If ``subpackage_name`` is specified as ``None`` then the subpackage - name will be taken the basename of ``subpackage_path``. - Any ``*`` used for subpackage names are expanded as wildcards. - -+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` --- - add SciPy subpackage configuration to the current one. The meaning - and usage of arguments is explained above, see - ``config.get_subpackage()`` method. - -+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files`` - list. 
If ``files`` item is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data - files. By default data files are copied under package installation - directory. For example, - - :: - - config.add_data_files('foo.dat', - ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']), - 'bar/car.dat'. - '/full/path/to/can.dat', - ) - - will install data files to the following locations - - :: - - / - foo.dat - fun/ - gun.dat - pun.dat - sun.dat - bar/ - car.dat - can.dat - - Path to data files can be a function taking no arguments and - returning path(s) to data files -- this is a useful when data files - are generated while building the package. (XXX: explain the step - when this function are called exactly) - -+ ``config.add_data_dir(data_path)`` --- add directory ``data_path`` - recursively to ``data_files``. The whole directory tree starting at - ``data_path`` will be copied under package installation directory. - If ``data_path`` is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data directory. - By default, data directory are copied under package installation - directory under the basename of ``data_path``. For example, - - :: - - config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat - config.add_data_dir(('sun','fun')) - config.add_data_dir(('gun','/full/path/to/fun')) - - will install data files to the following locations - - :: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - bar/ - car.dat - -+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to - ``include_dirs`` list. This list will be visible to all extension - modules of the current package. - -+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under - ``/include/pythonX.X//`` - directory. If ``files`` item is a tuple then it's first argument - specifies the installation suffix relative to - ``/include/pythonX.X/`` path. This is a Python distutils - method; its use is discouraged for NumPy and SciPy in favour of - ``config.add_data_files(*files)``. - -+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts`` - list. Scripts will be installed under ``/bin/`` directory. - -+ ``config.add_extension(name,sources,*kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument - ``name`` defines the name of the extension module that will be - installed under ``config.name`` package. The second argument is - a list of sources. ``add_extension`` method takes also keyword - arguments that are passed on to the ``Extension`` constructor. - The list of allowed keywords is the following: ``include_dirs``, - ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``, - ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``, - ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``, - ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``. - - Note that ``config.paths`` method is applied to all lists that - may contain paths. ``extra_info`` is a dictionary or a list - of dictionaries that content will be appended to keyword arguments. - The list ``depends`` contains paths to files or directories - that the sources of the extension module depend on. 
If any path - in the ``depends`` list is newer than the extension module, then - the module will be rebuilt. - - The list of sources may contain functions ('source generators') - with a pattern ``def (ext, build_dir): return - ``. If ``funcname`` returns ``None``, no sources - are generated. And if the ``Extension`` instance has no sources - after processing all source generators, no extension module will - be built. This is the recommended way to conditionally define - extension modules. Source generator functions are called by the - ``build_src`` command of ``numpy.distutils``. - - For example, here is a typical source generator function:: - - def generate_source(ext,build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'somesource.c') - if newer(target,__file__): - # create target file - return target - - The first argument contains the Extension instance that can be - useful to access its attributes like ``depends``, ``sources``, - etc. lists and modify them during the building process. - The second argument gives a path to a build directory that must - be used when creating files to a disk. - -+ ``config.add_library(name, sources, **build_info)`` --- add - a library to ``libraries`` list. Allowed keywords arguments - are ``depends``, ``macros``, ``include_dirs``, - ``extra_compiler_args``, ``f2py_options``. See ``.add_extension()`` - method for more information on arguments. - -+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled succesfully). - -+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled succesfully). - -+ ``config.get_version()`` --- return version string of the current package, - ``None`` if version information could not be detected. This methods - scans files ``__version__.py``, ``_version.py``, - ``version.py``, ``__svn_version__.py`` for string variables - ``version``, ``__version__``, ``_version``. - -+ ``config.make_svn_version_py()`` --- appends a data function to - ``data_files`` list that will generate ``__svn_version__.py`` file - to the current package directory. The file will be removed from - the source directory when Python exits. - -+ ``config.get_build_temp_dir()`` --- return a path to a temporary - directory. This is the place where one should build temporary - files. - -+ ``config.get_distribution()`` --- return distutils ``Distribution`` - instance. - -+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config - command instance. - -+ ``config.get_info(*names)`` --- - -Template files --------------- - -XXX: Describe how files with extensions ``.f.src``, ``.pyf.src``, -``.c.src``, etc. are pre-processed by the ``build_src`` command. - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. - -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. 
- -+ ``njoin(*path)`` --- join pathname components + convert a ``/``-separated path - to an ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- return the ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return the frame object from the call stack at the given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using the mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return the path of a module - relative to ``parent_path`` when given. Also handles the ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. - -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``splitcmdline(line)`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``info.py`` file -'''''''''''''''''''' - -Scipy package import hooks assume that each package contains an -``info.py`` file. This file contains overall documentation about the package -and variables defining the order of package imports, dependency -relations between packages, etc. - -On import, the following information will be looked for in ``info.py``: - -__doc__ - The documentation string of the package. - -__doc_title__ - The title of the package. If not defined, then the first non-empty - line of ``__doc__`` will be used. - -__all__ - List of symbols that the package exports. Optional. - -global_symbols - List of names that should be imported into the ``numpy`` namespace. To import - all symbols into the ``numpy`` namespace, define ``global_symbols=['*']``. - -depends - List of names that the package depends on. The prefix ``numpy.`` - will be automatically added to package names. For example, - use ``testing`` to indicate a dependence on the ``numpy.testing`` - package. Default value is ``[]``. - -postpone_import - Boolean variable indicating that importing the package should be - postponed until the first attempt to use it. Default value is ``False``. - Deprecated.
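- -As a rough illustration of the variables described above, a minimal -``info.py`` for a hypothetical subpackage ``numpy.xxx`` (the name and its -contents are invented here purely for illustration, not taken from the -NumPy sources) might look like:: - - """ - Tools for xxx - ============= - - A hypothetical subpackage used only to illustrate ``info.py``. - """ - - depends = ['testing'] # i.e. this package uses numpy.testing - global_symbols = [] # nothing is pushed into the numpy namespace - postpone_import = True # deprecated, shown here only for completeness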
- -The ``__init__.py`` file -'''''''''''''''''''''''' - -To speed up the import time and minimize memory usage, numpy -uses ``ppimport`` hooks to transparently postpone importing large modules, -which might not be used during the Scipy session. In order to -have access to the documentation of all Scipy packages, including -postponed packages, the docstring from ``info.py`` is imported -into ``__init__.py``. - -The header of a typical ``__init__.py`` is:: - - # - # Package ... - ... - # - - from info import __doc__ - ... - - from numpy.testing import NumpyTest - test = NumpyTest().test - -The ``tests/`` directory -'''''''''''''''''''''''' - -Ideally, every Python module, extension module, or subpackage in a Scipy -package directory should have a corresponding ``test_<module>.py`` -file in the ``tests/`` directory. This file should define classes -derived from the ``NumpyTestCase`` (or ``unittest.TestCase``) class -and have names starting with ``test``. The methods of these classes -whose names start with ``bench``, ``check``, or ``test`` are passed -on to the unittest machinery. In addition, the value of the first optional -argument of these methods determines the level of the corresponding -test. The default level is 1. - -A minimal example of a ``test_yyy.py`` file that implements tests for -a Scipy package module ``numpy.xxx.yyy`` containing a function -``zzz()`` is shown below:: - - import sys - from numpy.testing import * - - set_package_path() - # import xxx symbols - from xxx.yyy import zzz - restore_path() - - #Optional: - set_local_path() - # import modules that are located in the same directory as this file. - restore_path() - - class test_zzz(NumpyTestCase): - def check_simple(self, level=1): - assert zzz()=='Hello from zzz' - #... - - if __name__ == "__main__": - NumpyTest().run() - -``NumpyTestCase`` is derived from ``unittest.TestCase``; it -basically only implements an additional method ``measure(self, -code_str, times=1)``. - -Note that all classes that inherit from the ``TestCase`` class are -picked up by the test runner when using ``testoob``. - -The ``numpy.testing`` module also provides the following convenience -functions:: - - assert_equal(actual,desired,err_msg='',verbose=1) - assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=1) - assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=1) - assert_array_equal(x,y,err_msg='') - assert_array_almost_equal(x,y,decimal=6,err_msg='') - rand(*shape) # returns a random array with the given shape - -``NumpyTest`` can be used for running ``tests/test_*.py`` scripts. -For instance, to run all test scripts of the module ``xxx``, execute -in Python: - - >>> NumpyTest('xxx').test(level=1,verbosity=1) - -or equivalently, - - >>> import xxx - >>> NumpyTest(xxx).test(level=1,verbosity=1) - -To run only tests for the ``xxx.yyy`` module, execute: - - >>> NumpyTest('xxx.yyy').test(level=1,verbosity=1) - -To take the level and verbosity parameters for tests from -``sys.argv``, use the ``NumpyTest.run()`` method (this is supported only -when ``optparse`` is installed). - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifying config_fc options for libraries in setup.py script -------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags.
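- -To tie these pieces together, here is a rough sketch of a complete -``setup.py`` that combines several of the ``Configuration`` methods -described earlier with a per-library config_fc option. The package name -``xxx`` and all file names below are hypothetical and only illustrate the -call pattern; they are not taken from the NumPy sources:: - - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('xxx', parent_package, top_path) - # data files and a data directory (hypothetical names) - config.add_data_files('xxx_table.dat') - config.add_data_dir('tests') - # a C extension module installed as xxx.spam - config.add_extension('spam', sources=['spammodule.c']) - # a Fortran helper library compiled without optimization flags - config.add_library('flib', sources=['flib.f'], - config_fc={'noopt': (__file__, 1)}) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration)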
- -It is recommended to specify in this way only those config_fc options -that are compiler independent. - -Getting extra Fortran 77 compiler options from source ----------------------------------------------------- - -Some old Fortran codes need special compiler options in order to -work correctly. To specify compiler options per source -file, the ``numpy.distutils`` Fortran compiler looks for the following -pattern:: - - CF77FLAGS(<fcompiler type>) = <f77 flags> - -in the first 20 lines of the source and uses the given ``<f77 flags>`` for -the specified type of fcompiler (the first character ``C`` is optional). - -TODO: This feature can be easily extended for Fortran 90 codes as -well. Let us know if you would need such a feature. diff --git a/numpy/doc/HOWTO_DOCUMENT.txt b/numpy/doc/HOWTO_DOCUMENT.txt deleted file mode 100644 index 33b81ae11..000000000 --- a/numpy/doc/HOWTO_DOCUMENT.txt +++ /dev/null @@ -1,216 +0,0 @@ -==================================== -A Guide to NumPy/SciPy Documentation -==================================== - -.. Contents:: - -Overview --------- -In general, we follow the standard Python style conventions as described here: - * `Style Guide for C Code `__ - * `Style Guide for Python Code `__ - * `Docstring Conventions `__ - -Additional PEPs of interest regarding documentation of code: - * `Docstring Processing Framework `__ - * `Docutils Design Specification `__ - -Use a code checker: - * `pylint `__ - * `pep8.py `__ - -Docstring Standard ------------------- - -A documentation string (docstring) is a string that describes a module, -function, class, or method definition. The docstring is a special attribute -of the object (``object.__doc__``) and, for consistency, is surrounded by -triple double quotes. - -It is highly desirable that both NumPy and SciPy_ follow a common -convention for docstrings that provides for consistency while also -allowing epydoc_ to produce nicely-formatted reference guides. This -document describes the current community consensus for this standard. -If you have suggestions for improvements, post them on the -`numpy-discussion list`_, together with the epydoc output. - -Our docstring standard uses `reST -`__ syntax and is rendered -using epydoc_. The markup in this proposal is as basic as possible -and, in particular, avoids the use of epydoc consolidated fields. This -is both because there are a limited number of such fields, inadequate -to our current needs, and because epydoc moves the fields to the end -of the documentation, messing up the ordering. Standard definition -lists are used instead. Likewise, epydoc moves headings, and they have an -unwelcome size in the default style sheet, so they are also -avoided. - - -Status ------- - -We are currently trying to: - -1. Agree on docstring standards. - -2. Work with Ed Loper to ensure that epydoc_ provides the functionality - we need. - -3. Convert existing docstrings to the new format and write them for - those that currently lack docstrings. - - -Sections --------- - -The proposed sections of the docstring are: - -1. **Short summary:** - A one-line summary not using variable names or the function name - (unless it is a C-function). - -2. **Extended summary:** - A few sentences giving an extended description. - -3. **Parameters:** - Description of the function arguments, keywords, and their - respective types. - -4. **Returns:** - Explanation of the returned values and their types. - - 5.
**Other parameters:** - An optional section used to describe little-used parameters so that - functions with a large number of keyword arguments can still be well - documented without cluttering the main parameters' list. - -6. **See also:** - An optional section used to refer to related code. This section - can be very useful, but should be used judiciously. The goal is to - direct users to other functions they may not be aware of, or have - easy means of discovering (by looking at the module docstring, for - example). Routines whose docstrings further explain parameters - used by this function are good candidates. - -7. **Notes:** - An optional section that provides additional information about the - code, possibly including a discussion of the algorithm. This - section may include mathematical equations, possibly written in - `LaTeX `__. - -8. **Examples:** - An optional section for examples, using the `doctest - `__ format. It - can provide an inline mini-tutorial as well as additional - regression testing. While optional, this section is strongly - encouraged. You can run the tests by doing:: - - >>> import doctest - >>> doctest.testfile('example.py') - - Blank lines are used to separate doctests. When they occur in the - expected output, they should be replaced by ``<BLANKLINE>`` (see - `doctest options - `_), e.g. - - :: - - >>> print "a\n\nb" - a - <BLANKLINE> - b - -Common reST concepts -------------------- - -A reST-documented module should define:: - - __docformat__ = 'restructuredtext en' - -at the top level in accordance with `PEP 258 -`__. Note that the -``__docformat__`` variable in a package's ``__init__.py`` file does -not apply to objects defined in subpackages and submodules. - -For paragraphs, indentation is significant and indicates indentation in the -output. New paragraphs are marked with a blank line. - -Use *italics*, **bold**, and ``courier`` if needed in any explanations (but -not for variable names and doctest code or multi-line code). - -Use ``:lm:`eqn``` for in-line math in latex format (remember to use the -raw-format for your text string or escape any '\' symbols). Use ``:m:`eqn``` -for non-latex math. - -A more extensive example of reST markup can be found here: -http://docutils.sourceforge.net/docs/user/rst/demo.txt -Line spacing and indentation are significant and should -be carefully followed. - - - -Using Epydoc_ ------------- - -Currently, we recommend that you build epydoc from the trunk:: - - svn co https://epydoc.svn.sourceforge.net/svnroot/epydoc/trunk/epydoc epydoc - cd epydoc/src - sudo python setup.py install - -The appearance of some elements can be changed in the epydoc.css -style sheet. The list headings, i.e. *Parameters*:, are emphasized text, so -their appearance is controlled by the definition of the ``<em>`` -tag. For instance, to make them bold, insert:: - - em {font-weight: bold;} - -The variables' types are in a span of class rst-classifier, hence can be -changed by inserting something like:: - - span.rst-classifier {font-weight: normal;} - -The first line of the docstring should **not** copy the signature unless -the function is written in C, in which case it is mandatory. If the function -signature is generic (uses ``*args`` or ``**kwds``), then a function signature -may be included. - -Use ``optional`` in the "type" field for parameters that are non-keyword -optional for C-functions. - -Epydoc depends on Docutils for reStructuredText parsing. You can download -Docutils from the -`Docutils sourceforge page.
`__ -You may also be able to use a package manager like yum to install a -current version:: - - $ sudo yum install python-docutils - - -Example ------- - -Here is a short example module, -`plain text `__ -or -`rendered `__ in HTML. - -To try this yourself, simply download ``example.py``:: - - svn co http://svn.scipy.org/svn/numpy/trunk/numpy/doc/example.py . - -Then, run epydoc:: - - $ epydoc example.py - -The output is placed in ``./html``, and may be viewed by loading the -``index.html`` file into your browser. - -This document itself was written in reStructuredText, and may be converted to -HTML using:: - - $ rst2html HOWTO_DOCUMENT.txt HOWTO_DOCUMENT.html - -.. _SciPy: http://www.scipy.org -.. _numpy-discussion list: http://www.scipy.org/Mailing_Lists -.. _epydoc: http://epydoc.sourceforge.net/ diff --git a/numpy/doc/README.txt b/numpy/doc/README.txt deleted file mode 100644 index eacc3659e..000000000 --- a/numpy/doc/README.txt +++ /dev/null @@ -1,15 +0,0 @@ -Very complete documentation is available from the primary developer of -NumPy for a small fee. After a brief period, that documentation -will become freely available. See http://www.trelgol.com for -details. The fee and restriction period are intended to allow people, -and to encourage companies, to easily contribute to the development of -NumPy. - -This directory will contain all public documentation that becomes available. - -Very good documentation is also available using Python's (and -especially IPython's) own help system. Most of the functions have -docstrings that provide usage assistance. - - - diff --git a/numpy/doc/example.py b/numpy/doc/example.py deleted file mode 100644 index 26d140e5f..000000000 --- a/numpy/doc/example.py +++ /dev/null @@ -1,98 +0,0 @@ -"""This is the docstring for the example.py module. Module names should -have short, all-lowercase names. The module name may have underscores if -this improves readability. - -Every module should have a docstring at the very top of the file. The -module's docstring may extend over multiple lines. If your docstring does -extend over multiple lines, the closing three quotation marks must be on -a line by itself, preferably preceded by a blank line. - -""" - -import os # standard library imports first - -import numpy as np # related third party imports next -import scipy as sp # imports should be at the top of the module -import matplotlib as mpl # imports should usually be on separate lines - - -__docformat__ = "restructuredtext en" - - -def foo(var1, var2, long_var_name='hi') : - """One-line summary or signature. - - Several sentences providing an extended description. You can put - text in mono-spaced type like so: ``var``. - - *Parameters*: - - var1 : {array_like} - Array_like means all those objects -- lists, nested lists, etc. -- - that can be converted to an array. - var2 : {integer} - Write out the full type - long_var_name : {'hi', 'ho'}, optional - Choices in brackets, default first when optional. - - *Returns*: - - named : {type} - Explanation - list - Explanation - of - Explanation - outputs - even more explaining - - *Other Parameters*: - - only_seldom_used_keywords : type - Explanation - common_parameters_listed_above : type - Explanation - - *See Also*: - - `otherfunc` : relationship (optional) - - `newfunc` : relationship (optional) - - *Notes* - - Notes about the implementation algorithm (if needed). - - This can have multiple paragraphs as can all sections.
- - *Examples* - - examples in doctest format - - >>> a=[1,2,3] - >>> [x + 3 for x in a] - [4, 5, 6] - - """ - - pass - - -def newfunc() : - """Do nothing. - - I never saw a purple cow. - - """ - - pass - - -def otherfunc() : - """Do nothing. - - I never hope to see one. - - """ - - pass diff --git a/numpy/doc/html/api-objects.txt b/numpy/doc/html/api-objects.txt deleted file mode 100644 index 81953990e..000000000 --- a/numpy/doc/html/api-objects.txt +++ /dev/null @@ -1,4 +0,0 @@ -example example-module.html -example.otherfunc example-module.html#otherfunc -example.foo example-module.html#foo -example.newfunc example-module.html#newfunc diff --git a/numpy/doc/html/crarr.png b/numpy/doc/html/crarr.png deleted file mode 100644 index 26b43c524..000000000 Binary files a/numpy/doc/html/crarr.png and /dev/null differ diff --git a/numpy/doc/html/epydoc.css b/numpy/doc/html/epydoc.css deleted file mode 100644 index 4bb3e0b68..000000000 --- a/numpy/doc/html/epydoc.css +++ /dev/null @@ -1,315 +0,0 @@ - - -/* Epydoc CSS Stylesheet - * - * This stylesheet can be used to customize the appearance of epydoc's - * HTML output. - * - */ - -/* Default Colors & Styles - * - Set the default foreground & background color with 'body'; and - * link colors with 'a:link' and 'a:visited'. - * - Use bold for decision list terms. - * - The heading styles defined here are used for headings *within* - * docstring descriptions. All headings used by epydoc itself use - * either class='epydoc' or class='toc' (CSS styles for both - * defined below). - */ -body { background: #ffffff; color: #000000; } -p { margin-top: 0.5em; margin-bottom: 0.5em; } -a:link { color: #0000ff; } -a:visited { color: #204080; } -dt { font-weight: bold; } -h1 { font-size: +140%; font-style: italic; - font-weight: bold; } -h2 { font-size: +125%; font-style: italic; - font-weight: bold; } -h3 { font-size: +110%; font-style: italic; - font-weight: normal; } -code { font-size: 100%; } - -/* Page Header & Footer - * - The standard page header consists of a navigation bar (with - * pointers to standard pages such as 'home' and 'trees'); a - * breadcrumbs list, which can be used to navigate to containing - * classes or modules; options links, to show/hide private - * variables and to show/hide frames; and a page title (using - *

). The page title may be followed by a link to the - * corresponding source code (using 'span.codelink'). - * - The footer consists of a navigation bar, a timestamp, and a - * pointer to epydoc's homepage. - */ -h1.epydoc { margin: 0; font-size: +140%; font-weight: bold; } -h2.epydoc { font-size: +130%; font-weight: bold; } -h3.epydoc { font-size: +115%; font-weight: bold; - margin-top: 0.2em; } -td h3.epydoc { font-size: +115%; font-weight: bold; - margin-bottom: 0; } -table.navbar { background: #a0c0ff; color: #000000; - border: 2px groove #c0d0d0; } -table.navbar table { color: #000000; } -th.navbar-select { background: #70b0ff; - color: #000000; } -table.navbar a { text-decoration: none; } -table.navbar a:link { color: #0000ff; } -table.navbar a:visited { color: #204080; } -span.breadcrumbs { font-size: 85%; font-weight: bold; } -span.options { font-size: 70%; } -span.codelink { font-size: 85%; } -td.footer { font-size: 85%; } - -/* Table Headers - * - Each summary table and details section begins with a 'header' - * row. This row contains a section title (marked by - * 'span.table-header') as well as a show/hide private link - * (marked by 'span.options', defined above). - * - Summary tables that contain user-defined groups mark those - * groups using 'group header' rows. - */ -td.table-header { background: #70b0ff; color: #000000; - border: 1px solid #608090; } -td.table-header table { color: #000000; } -td.table-header table a:link { color: #0000ff; } -td.table-header table a:visited { color: #204080; } -span.table-header { font-size: 120%; font-weight: bold; } -th.group-header { background: #c0e0f8; color: #000000; - text-align: left; font-style: italic; - font-size: 115%; - border: 1px solid #608090; } - -/* Summary Tables (functions, variables, etc) - * - Each object is described by a single row of the table with - * two cells. The left cell gives the object's type, and is - * marked with 'code.summary-type'. The right cell gives the - * object's name and a summary description. - * - CSS styles for the table's header and group headers are - * defined above, under 'Table Headers' - */ -table.summary { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; - margin-bottom: 0.5em; } -td.summary { border: 1px solid #608090; } -code.summary-type { font-size: 85%; } -table.summary a:link { color: #0000ff; } -table.summary a:visited { color: #204080; } - - -/* Details Tables (functions, variables, etc) - * - Each object is described in its own div. - * - A single-row summary table w/ table-header is used as - * a header for each details section (CSS style for table-header - * is defined above, under 'Table Headers'). - */ -table.details { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; - margin: .2em 0 0 0; } -table.details table { color: #000000; } -table.details a:link { color: #0000ff; } -table.details a:visited { color: #204080; } - -/* Fields */ -dl.fields { margin-left: 2em; margin-top: 1em; - margin-bottom: 1em; } -dl.fields dd ul { margin-left: 0em; padding-left: 0em; } -dl.fields dd ul li ul { margin-left: 2em; padding-left: 0em; } -div.fields { margin-left: 2em; } -div.fields p { margin-bottom: 0.5em; } - -/* Index tables (identifier index, term index, etc) - * - link-index is used for indices containing lists of links - * (namely, the identifier index & term index). - * - index-where is used in link indices for the text indicating - * the container/source for each link. 
- * - metadata-index is used for indices containing metadata - * extracted from fields (namely, the bug index & todo index). - */ -table.link-index { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; } -td.link-index { border-width: 0px; } -table.link-index a:link { color: #0000ff; } -table.link-index a:visited { color: #204080; } -span.index-where { font-size: 70%; } -table.metadata-index { border-collapse: collapse; - background: #e8f0f8; color: #000000; - border: 1px solid #608090; - margin: .2em 0 0 0; } -td.metadata-index { border-width: 1px; border-style: solid; } -table.metadata-index a:link { color: #0000ff; } -table.metadata-index a:visited { color: #204080; } - -/* Function signatures - * - sig* is used for the signature in the details section. - * - .summary-sig* is used for the signature in the summary - * table, and when listing property accessor functions. - * */ -.sig-name { color: #006080; } -.sig-arg { color: #008060; } -.sig-default { color: #602000; } -.summary-sig { font-family: monospace; } -.summary-sig-name { color: #006080; font-weight: bold; } -table.summary a.summary-sig-name:link - { color: #006080; font-weight: bold; } -table.summary a.summary-sig-name:visited - { color: #006080; font-weight: bold; } -.summary-sig-arg { color: #006040; } -.summary-sig-default { color: #501800; } - -/* To render variables, classes etc. like functions */ -table.summary .summary-name { color: #006080; font-weight: bold; - font-family: monospace; } -table.summary - a.summary-name:link { color: #006080; font-weight: bold; - font-family: monospace; } -table.summary - a.summary-name:visited { color: #006080; font-weight: bold; - font-family: monospace; } - -/* Variable values - * - In the 'variable details' sections, each varaible's value is - * listed in a 'pre.variable' box. The width of this box is - * restricted to 80 chars; if the value's repr is longer than - * this it will be wrapped, using a backslash marked with - * class 'variable-linewrap'. If the value's repr is longer - * than 3 lines, the rest will be ellided; and an ellipsis - * marker ('...' marked with 'variable-ellipsis') will be used. - * - If the value is a string, its quote marks will be marked - * with 'variable-quote'. - * - If the variable is a regexp, it is syntax-highlighted using - * the re* CSS classes. - */ -pre.variable { padding: .5em; margin: 0; - background: #dce4ec; color: #000000; - border: 1px solid #708890; } -.variable-linewrap { color: #604000; font-weight: bold; } -.variable-ellipsis { color: #604000; font-weight: bold; } -.variable-quote { color: #604000; font-weight: bold; } -.variable-group { color: #008000; font-weight: bold; } -.variable-op { color: #604000; font-weight: bold; } -.variable-string { color: #006030; } -.variable-unknown { color: #a00000; font-weight: bold; } -.re { color: #000000; } -.re-char { color: #006030; } -.re-op { color: #600000; } -.re-group { color: #003060; } -.re-ref { color: #404040; } - -/* Base tree - * - Used by class pages to display the base class hierarchy. - */ -pre.base-tree { font-size: 80%; margin: 0; } - -/* Frames-based table of contents headers - * - Consists of two frames: one for selecting modules; and - * the other listing the contents of the selected module. - * - h1.toc is used for each frame's heading - * - h2.toc is used for subheadings within each frame. 
- */ -h1.toc { text-align: center; font-size: 105%; - margin: 0; font-weight: bold; - padding: 0; } -h2.toc { font-size: 100%; font-weight: bold; - margin: 0.5em 0 0 -0.3em; } - -/* Syntax Highlighting for Source Code - * - doctest examples are displayed in a 'pre.py-doctest' block. - * If the example is in a details table entry, then it will use - * the colors specified by the 'table pre.py-doctest' line. - * - Source code listings are displayed in a 'pre.py-src' block. - * Each line is marked with 'span.py-line' (used to draw a line - * down the left margin, separating the code from the line - * numbers). Line numbers are displayed with 'span.py-lineno'. - * The expand/collapse block toggle button is displayed with - * 'a.py-toggle' (Note: the CSS style for 'a.py-toggle' should not - * modify the font size of the text.) - * - If a source code page is opened with an anchor, then the - * corresponding code block will be highlighted. The code - * block's header is highlighted with 'py-highlight-hdr'; and - * the code block's body is highlighted with 'py-highlight'. - * - The remaining py-* classes are used to perform syntax - * highlighting (py-string for string literals, py-name for names, - * etc.) - */ -pre.py-doctest { padding: .5em; margin: 1em; - background: #e8f0f8; color: #000000; - border: 1px solid #708890; } -table pre.py-doctest { background: #dce4ec; - color: #000000; } -pre.py-src { border: 2px solid #000000; - background: #f0f0f0; color: #000000; } -.py-line { border-left: 2px solid #000000; - margin-left: .2em; padding-left: .4em; } -.py-lineno { font-style: italic; font-size: 90%; - padding-left: .5em; } -a.py-toggle { text-decoration: none; } -div.py-highlight-hdr { border-top: 2px solid #000000; - border-bottom: 2px solid #000000; - background: #d8e8e8; } -div.py-highlight { border-bottom: 2px solid #000000; - background: #d0e0e0; } -.py-prompt { color: #005050; font-weight: bold;} -.py-more { color: #005050; font-weight: bold;} -.py-string { color: #006030; } -.py-comment { color: #003060; } -.py-keyword { color: #600000; } -.py-output { color: #404040; } -.py-name { color: #000050; } -.py-name:link { color: #000050 !important; } -.py-name:visited { color: #000050 !important; } -.py-number { color: #005000; } -.py-defname { color: #000060; font-weight: bold; } -.py-def-name { color: #000060; font-weight: bold; } -.py-base-class { color: #000060; } -.py-param { color: #000060; } -.py-docstring { color: #006030; } -.py-decorator { color: #804020; } -/* Use this if you don't want links to names underlined: */ -/*a.py-name { text-decoration: none; }*/ - -/* Graphs & Diagrams - * - These CSS styles are used for graphs & diagrams generated using - * Graphviz dot. 'img.graph-without-title' is used for bare - * diagrams (to remove the border created by making the image - * clickable). - */ -img.graph-without-title { border: none; } -img.graph-with-title { border: 1px solid #000000; } -span.graph-title { font-weight: bold; } -span.graph-caption { } - -/* General-purpose classes - * - 'p.indent-wrapped-lines' defines a paragraph whose first line - * is not indented, but whose subsequent lines are. - * - The 'nomargin-top' class is used to remove the top margin (e.g. - * from lists). The 'nomargin' class is used to remove both the - * top and bottom margin (but not the left or right margin -- - * for lists, that would cause the bullets to disappear.) 
- */ -p.indent-wrapped-lines { padding: 0 0 0 7em; text-indent: -7em; - margin: 0; } -.nomargin-top { margin-top: 0; } -.nomargin { margin-top: 0; margin-bottom: 0; } - -/* HTML Log */ -div.log-block { padding: 0; margin: .5em 0 .5em 0; - background: #e8f0f8; color: #000000; - border: 1px solid #000000; } -div.log-error { padding: .1em .3em .1em .3em; margin: 4px; - background: #ffb0b0; color: #000000; - border: 1px solid #000000; } -div.log-warning { padding: .1em .3em .1em .3em; margin: 4px; - background: #ffffb0; color: #000000; - border: 1px solid #000000; } -div.log-info { padding: .1em .3em .1em .3em; margin: 4px; - background: #b0ffb0; color: #000000; - border: 1px solid #000000; } -h2.log-hdr { background: #70b0ff; color: #000000; - margin: 0; padding: 0em 0.5em 0em 0.5em; - border-bottom: 1px solid #000000; font-size: 110%; } -p.log { font-weight: bold; margin: .5em 0 .5em 0; } -tr.opt-changed { color: #000000; font-weight: bold; } -tr.opt-default { color: #606060; } -pre.log { margin: 0; padding: 0; padding-left: 1em; } diff --git a/numpy/doc/html/epydoc.js b/numpy/doc/html/epydoc.js deleted file mode 100644 index b5b2ddc40..000000000 --- a/numpy/doc/html/epydoc.js +++ /dev/null @@ -1,280 +0,0 @@ -function toggle_private() { - // Search for any private/public links on this page. Store - // their old text in "cmd," so we will know what action to - // take; and change their text to the opposite action. - var cmd = "?"; - var elts = document.getElementsByTagName("a"); - for(var i=0; i...
"; - elt.innerHTML = s; - } -} - -function toggle(id) { - elt = document.getElementById(id+"-toggle"); - if (elt.innerHTML == "-") - collapse(id); - else - expand(id); - return false; -} - -function highlight(id) { - var elt = document.getElementById(id+"-def"); - if (elt) elt.className = "py-highlight-hdr"; - var elt = document.getElementById(id+"-expanded"); - if (elt) elt.className = "py-highlight"; - var elt = document.getElementById(id+"-collapsed"); - if (elt) elt.className = "py-highlight"; -} - -function num_lines(s) { - var n = 1; - var pos = s.indexOf("\n"); - while ( pos > 0) { - n += 1; - pos = s.indexOf("\n", pos+1); - } - return n; -} - -// Collapse all blocks that mave more than `min_lines` lines. -function collapse_all(min_lines) { - var elts = document.getElementsByTagName("div"); - for (var i=0; i 0) - if (elt.id.substring(split, elt.id.length) == "-expanded") - if (num_lines(elt.innerHTML) > min_lines) - collapse(elt.id.substring(0, split)); - } -} - -function expandto(href) { - var start = href.indexOf("#")+1; - if (start != 0 && start != href.length) { - if (href.substring(start, href.length) != "-") { - collapse_all(4); - pos = href.indexOf(".", start); - while (pos != -1) { - var id = href.substring(start, pos); - expand(id); - pos = href.indexOf(".", pos+1); - } - var id = href.substring(start, href.length); - expand(id); - highlight(id); - } - } -} - -function kill_doclink(id) { - var parent = document.getElementById(id); - parent.removeChild(parent.childNodes.item(0)); -} -function auto_kill_doclink(ev) { - if (!ev) var ev = window.event; - if (!this.contains(ev.toElement)) { - var parent = document.getElementById(this.parentID); - parent.removeChild(parent.childNodes.item(0)); - } -} - -function doclink(id, name, targets_id) { - var elt = document.getElementById(id); - - // If we already opened the box, then destroy it. - // (This case should never occur, but leave it in just in case.) - if (elt.childNodes.length > 1) { - elt.removeChild(elt.childNodes.item(0)); - } - else { - // The outer box: relative + inline positioning. - var box1 = document.createElement("div"); - box1.style.position = "relative"; - box1.style.display = "inline"; - box1.style.top = 0; - box1.style.left = 0; - - // A shadow for fun - var shadow = document.createElement("div"); - shadow.style.position = "absolute"; - shadow.style.left = "-1.3em"; - shadow.style.top = "-1.3em"; - shadow.style.background = "#404040"; - - // The inner box: absolute positioning. - var box2 = document.createElement("div"); - box2.style.position = "relative"; - box2.style.border = "1px solid #a0a0a0"; - box2.style.left = "-.2em"; - box2.style.top = "-.2em"; - box2.style.background = "white"; - box2.style.padding = ".3em .4em .3em .4em"; - box2.style.fontStyle = "normal"; - box2.onmouseout=auto_kill_doclink; - box2.parentID = id; - - // Get the targets - var targets_elt = document.getElementById(targets_id); - var targets = targets_elt.getAttribute("targets"); - var links = ""; - target_list = targets.split(","); - for (var i=0; i" + - target[0] + ""; - } - - // Put it all together. - elt.insertBefore(box1, elt.childNodes.item(0)); - //box1.appendChild(box2); - box1.appendChild(shadow); - shadow.appendChild(box2); - box2.innerHTML = - "Which "+name+" do you want to see documentation for?" 
+ - ""; - } - return false; -} - -function get_anchor() { - var href = location.href; - var start = href.indexOf("#")+1; - if ((start != 0) && (start != href.length)) - return href.substring(start, href.length); - } -function redirect_url(dottedName) { - // Scan through each element of the "pages" list, and check - // if "name" matches with any of them. - for (var i=0; i-m" or "-c"; - // extract the portion & compare it to dottedName. - var pagename = pages[i].substring(0, pages[i].length-2); - if (pagename == dottedName.substring(0,pagename.length)) { - - // We've found a page that matches `dottedName`; - // construct its URL, using leftover `dottedName` - // content to form an anchor. - var pagetype = pages[i].charAt(pages[i].length-1); - var url = pagename + ((pagetype=="m")?"-module.html": - "-class.html"); - if (dottedName.length > pagename.length) - url += "#" + dottedName.substring(pagename.length+1, - dottedName.length); - return url; - } - } - } diff --git a/numpy/doc/html/example-module.html b/numpy/doc/html/example-module.html deleted file mode 100644 index 847ae8f1d..000000000 --- a/numpy/doc/html/example-module.html +++ /dev/null @@ -1,316 +0,0 @@ - - - - - example - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - Module example - - - - - - -
[hide private]
[frames] | no frames]
-
- -

Module example

source code

-

This is the docstring for the example.py module. Modules names should -have short, all-lowercase names. The module name may have underscores if -this improves readability.

-

Every module should have a docstring at the very top of the file. The -module's docstring may extend over multiple lines. If your docstring does -extend over multiple lines, the closing three quotation marks must be on -a line by itself, preferably preceeded by a blank line.

- - - - - - - - - - - - - - - - -
- - - - - -
Functions[hide private]
-
-   - - - - - - -
foo(var1, - var2, - long_var_name='hi')
- One-line summary or signature.
- source code - -
- -
-   - - - - - - -
newfunc()
- Do nothing.
- source code - -
- -
-   - - - - - - -
otherfunc()
- Do nothing.
- source code - -
- -
- - - - - - -
- - - - - -
Function Details[hide private]
-
- -
- -
- - -
-

foo(var1, - var2, - long_var_name='hi') -

-
source code  -
- -

One-line summary or signature.

-

Several sentences providing an extended description. You can put -text in mono-spaced type like so: var.

-

Parameters:

-
-
-
var1 : {array_like}
-
Array_like means all those objects -- lists, nested lists, etc. -- -that can be converted to an array.
-
var2 : {integer}
-
Write out the full type
-
long_variable_name : {'hi', 'ho'}, optional
-
Choices in brackets, default first when optional.
-
-
-

Returns:

-
-
-
named : {type}
-
Explanation
-
list
-
Explanation
-
of
-
Explanation
-
outputs
-
even more explaining
-
-
-

Other Parameters:

-
-
-
only_seldom_used_keywords : type
-
Explanation
-
common_parametrs_listed_above : type
-
Explanation
-
-
-

See Also:

-
-

otherfunc : relationship (optional)

-

newfunc : relationship (optional)

-
-

Notes

-
-

Notes about the implementation algorithm (if needed).

-

This can have multiple paragraphs as can all sections.

-
-

Examples

-
-

examples in doctest format

-
->>> a=[1,2,3]
->>> [x + 3 for x in a]
-[4, 5, 6]
-
-
-
-
-
- -
- -
- - -
-

newfunc() -

-
source code  -
- -

Do nothing.

-

I never saw a purple cow.

-
-
-
-
- -
- -
- - -
-

otherfunc() -

-
source code  -
- -

Do nothing.

-

I never hope to see one.

-
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - - diff --git a/numpy/doc/html/example-pysrc.html b/numpy/doc/html/example-pysrc.html deleted file mode 100644 index 0790bad92..000000000 --- a/numpy/doc/html/example-pysrc.html +++ /dev/null @@ -1,208 +0,0 @@ - - - - - example - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - Module example - - - - - - -
[hide private]
[frames] | no frames]
-
-

Source Code for Module example

-
-  1  """This is the docstring for the example.py module.  Modules names should 
-  2  have short, all-lowercase names.  The module name may have underscores if 
-  3  this improves readability. 
-  4   
-  5  Every module should have a docstring at the very top of the file.  The 
-  6  module's docstring may extend over multiple lines.  If your docstring does 
-  7  extend over multiple lines, the closing three quotation marks must be on 
-  8  a line by itself, preferably preceeded by a blank line. 
-  9   
- 10  """ 
- 11   
- 12  import os                      # standard library imports first 
- 13   
- 14  import numpy as np             # related third party imports next 
- 15  import scipy as sp             # imports should be at the top of the module 
- 16  import matplotlib as mpl       # imports should usually be on separate lines 
- 17   
- 18   
- 19  __docformat__ = "restructuredtext en" 
- 20   
- 21   
-
22 -def foo(var1, var2, long_var_name='hi') : -
23 """One-line summary or signature. - 24 - 25 Several sentences providing an extended description. You can put - 26 text in mono-spaced type like so: ``var``. - 27 - 28 *Parameters*: - 29 - 30 var1 : {array_like} - 31 Array_like means all those objects -- lists, nested lists, etc. -- - 32 that can be converted to an array. - 33 var2 : {integer} - 34 Write out the full type - 35 long_variable_name : {'hi', 'ho'}, optional - 36 Choices in brackets, default first when optional. - 37 - 38 *Returns*: - 39 - 40 named : {type} - 41 Explanation - 42 list - 43 Explanation - 44 of - 45 Explanation - 46 outputs - 47 even more explaining - 48 - 49 *Other Parameters*: - 50 - 51 only_seldom_used_keywords : type - 52 Explanation - 53 common_parametrs_listed_above : type - 54 Explanation - 55 - 56 *See Also*: - 57 - 58 `otherfunc` : relationship (optional) - 59 - 60 `newfunc` : relationship (optional) - 61 - 62 *Notes* - 63 - 64 Notes about the implementation algorithm (if needed). - 65 - 66 This can have multiple paragraphs as can all sections. - 67 - 68 *Examples* - 69 - 70 examples in doctest format - 71 - 72 >>> a=[1,2,3] - 73 >>> [x + 3 for x in a] - 74 [4, 5, 6] - 75 - 76 """ - 77 - 78 pass -
79 - 80 -
81 -def newfunc() : -
82 """Do nothing. - 83 - 84 I never saw a purple cow. - 85 - 86 """ - 87 - 88 pass -
89 - 90 -
91 -def otherfunc() : -
92 """Do nothing. - 93 - 94 I never hope to see one. - 95 - 96 """ - 97 - 98 pass -
99 -
-
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - - diff --git a/numpy/doc/html/frames.html b/numpy/doc/html/frames.html deleted file mode 100644 index 6ebc67e75..000000000 --- a/numpy/doc/html/frames.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - API Documentation - - - - - - - - - diff --git a/numpy/doc/html/help.html b/numpy/doc/html/help.html deleted file mode 100644 index 8c56960b5..000000000 --- a/numpy/doc/html/help.html +++ /dev/null @@ -1,268 +0,0 @@ - - - - - Help - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  - - - - -
[hide private]
[frames] | no frames]
-
- -

API Documentation

- -

This document contains the API (Application Programming Interface) -documentation for this project. Documentation for the Python -objects defined by the project is divided into separate pages for each -package, module, and class. The API documentation also includes two -pages containing information about the project as a whole: a trees -page, and an index page.

- -

Object Documentation

- -

Each Package Documentation page contains:

-
    -
  • A description of the package.
  • -
  • A list of the modules and sub-packages contained by the - package.
  • -
  • A summary of the classes defined by the package.
  • -
  • A summary of the functions defined by the package.
  • -
  • A summary of the variables defined by the package.
  • -
  • A detailed description of each function defined by the - package.
  • -
  • A detailed description of each variable defined by the - package.
  • -
- -

Each Module Documentation page contains:

-
    -
  • A description of the module.
  • -
  • A summary of the classes defined by the module.
  • -
  • A summary of the functions defined by the module.
  • -
  • A summary of the variables defined by the module.
  • -
  • A detailed description of each function defined by the - module.
  • -
  • A detailed description of each variable defined by the - module.
  • -
- -

Each Class Documentation page contains:

-
    -
  • A class inheritance diagram.
  • -
  • A list of known subclasses.
  • -
  • A description of the class.
  • -
  • A summary of the methods defined by the class.
  • -
  • A summary of the instance variables defined by the class.
  • -
  • A summary of the class (static) variables defined by the - class.
  • -
  • A detailed description of each method defined by the - class.
  • -
  • A detailed description of each instance variable defined by the - class.
  • -
  • A detailed description of each class (static) variable defined - by the class.
  • -
- -

Project Documentation

- -

The Trees page contains the module and class hierarchies:

-
    -
  • The module hierarchy lists every package and module, with - modules grouped into packages. At the top level, and within each - package, modules and sub-packages are listed alphabetically.
  • -
  • The class hierarchy lists every class, grouped by base - class. If a class has more than one base class, then it will be - listed under each base class. At the top level, and under each base - class, classes are listed alphabetically.
  • -
- -

The Index page contains indices of terms and - identifiers:

-
    -
  • The term index lists every term indexed by any object's - documentation. For each term, the index provides links to each - place where the term is indexed.
  • -
  • The identifier index lists the (short) name of every package, - module, class, method, function, variable, and parameter. For each - identifier, the index provides a short description, and a link to - its documentation.
  • -
- -

The Table of Contents

- -

The table of contents occupies the two frames on the left side of -the window. The upper-left frame displays the project -contents, and the lower-left frame displays the module -contents:

- - - - - - - - - -
- Project
Contents
...
- API
Documentation
Frame


-
- Module
Contents
 
...
  -

- -

The project contents frame contains a list of all packages -and modules that are defined by the project. Clicking on an entry -will display its contents in the module contents frame. Clicking on a -special entry, labeled "Everything," will display the contents of -the entire project.

- -

The module contents frame contains a list of every -submodule, class, type, exception, function, and variable defined by a -module or package. Clicking on an entry will display its -documentation in the API documentation frame. Clicking on the name of -the module, at the top of the frame, will display the documentation -for the module itself.

- -

The "frames" and "no frames" buttons below the top -navigation bar can be used to control whether the table of contents is -displayed or not.

- -

The Navigation Bar

- -

A navigation bar is located at the top and bottom of every page. -It indicates what type of page you are currently viewing, and allows -you to go to related pages. The following table describes the labels -on the navigation bar. Note that not some labels (such as -[Parent]) are not displayed on all pages.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
LabelHighlighted when...Links to...
[Parent](never highlighted) the parent of the current package
[Package]viewing a packagethe package containing the current object -
[Module]viewing a modulethe module containing the current object -
[Class]viewing a class the class containing the current object
[Trees]viewing the trees page the trees page
[Index]viewing the index page the index page
[Help]viewing the help page the help page
- -

The "show private" and "hide private" buttons below -the top navigation bar can be used to control whether documentation -for private objects is displayed. Private objects are usually defined -as objects whose (short) names begin with a single underscore, but do -not end with an underscore. For example, "_x", -"__pprint", and "epydoc.epytext._tokenize" -are private objects; but "re.sub", -"__init__", and "type_" are not. However, -if a module defines the "__all__" variable, then its -contents are used to decide which objects are private.

- -

A timestamp below the bottom navigation bar indicates when each -page was last updated.

- - - - - - - - - - - - - - - - - - - - - - - -
- - - - - diff --git a/numpy/doc/html/identifier-index.html b/numpy/doc/html/identifier-index.html deleted file mode 100644 index 317e1677b..000000000 --- a/numpy/doc/html/identifier-index.html +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Identifier Index - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  - - - - -
[hide private]
[frames] | no frames]
-
- -
-

Identifier Index

-
-[ - A - B - C - D - E - F - G - H - I - J - K - L - M - N - O - P - Q - R - S - T - U - V - W - X - Y - Z - _ -] -
- - - - - - - - - -

E

- - - - - - - - -

F

- - - - - - - - -

N

- - - - - - - - -

O

- - - - - - - - -
-

- - - - - - - - - - - - - - - - - - - - - - -
- - - - - diff --git a/numpy/doc/html/index.html b/numpy/doc/html/index.html deleted file mode 100644 index 6ebc67e75..000000000 --- a/numpy/doc/html/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - API Documentation - - - - - - - - - diff --git a/numpy/doc/html/module-tree.html b/numpy/doc/html/module-tree.html deleted file mode 100644 index db41e8d35..000000000 --- a/numpy/doc/html/module-tree.html +++ /dev/null @@ -1,101 +0,0 @@ - - - - - Module Hierarchy - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  - - - - -
[hide private]
[frames] | no frames]
-
-

Module Hierarchy

-
    -
  • example: This is the docstring for the example.py module.
  • -
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - - diff --git a/numpy/doc/html/redirect.html b/numpy/doc/html/redirect.html deleted file mode 100644 index dbd50828c..000000000 --- a/numpy/doc/html/redirect.html +++ /dev/null @@ -1,38 +0,0 @@ -Epydoc Redirect Page - - - - - - - - -

Epydoc Auto-redirect page

- -

When javascript is enabled, this page will redirect URLs of -the form redirect.html#dotted.name to the -documentation for the object with the given fully-qualified -dotted name.

-

 

- - - - - diff --git a/numpy/doc/html/toc-everything.html b/numpy/doc/html/toc-everything.html deleted file mode 100644 index f6ed84f8e..000000000 --- a/numpy/doc/html/toc-everything.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - Everything - - - - - -

Everything

-
-

All Functions

- example.foo
example.newfunc
example.otherfunc

-[hide private] - - - - - diff --git a/numpy/doc/html/toc-example-module.html b/numpy/doc/html/toc-example-module.html deleted file mode 100644 index 137dea5ae..000000000 --- a/numpy/doc/html/toc-example-module.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - example - - - - - -

Module example

-
-

Functions

- foo
newfunc
otherfunc

-[hide private] - - - - - diff --git a/numpy/doc/html/toc.html b/numpy/doc/html/toc.html deleted file mode 100644 index d9863817f..000000000 --- a/numpy/doc/html/toc.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - Table of Contents - - - - - -

Table of Contents

-
- Everything -
-

Modules

- example

- [hide private] - - - - - diff --git a/numpy/doc/pep_buffer.txt b/numpy/doc/pep_buffer.txt deleted file mode 100644 index a154d2792..000000000 --- a/numpy/doc/pep_buffer.txt +++ /dev/null @@ -1,869 +0,0 @@ -:PEP: 3118 -:Title: Revising the buffer protocol -:Version: $Revision$ -:Last-Modified: $Date$ -:Authors: Travis Oliphant , Carl Banks -:Status: Draft -:Type: Standards Track -:Content-Type: text/x-rst -:Created: 28-Aug-2006 -:Python-Version: 3000 - -Abstract -======== - -This PEP proposes re-designing the buffer interface (PyBufferProcs -function pointers) to improve the way Python allows memory sharing -in Python 3.0 - -In particular, it is proposed that the character buffer portion -of the API be elminated and the multiple-segment portion be -re-designed in conjunction with allowing for strided memory -to be shared. In addition, the new buffer interface will -allow the sharing of any multi-dimensional nature of the -memory and what data-format the memory contains. - -This interface will allow any extension module to either -create objects that share memory or create algorithms that -use and manipulate raw memory from arbitrary objects that -export the interface. - - -Rationale -========= - -The Python 2.X buffer protocol allows different Python types to -exchange a pointer to a sequence of internal buffers. This -functionality is *extremely* useful for sharing large segments of -memory between different high-level objects, but it is too limited and -has issues: - -1. There is the little used "sequence-of-segments" option - (bf_getsegcount) that is not well motivated. - -2. There is the apparently redundant character-buffer option - (bf_getcharbuffer) - -3. There is no way for a consumer to tell the buffer-API-exporting - object it is "finished" with its view of the memory and - therefore no way for the exporting object to be sure that it is - safe to reallocate the pointer to the memory that it owns (for - example, the array object reallocating its memory after sharing - it with the buffer object which held the original pointer led - to the infamous buffer-object problem). - -4. Memory is just a pointer with a length. There is no way to - describe what is "in" the memory (float, int, C-structure, etc.) - -5. There is no shape information provided for the memory. But, - several array-like Python types could make use of a standard - way to describe the shape-interpretation of the memory - (wxPython, GTK, pyQT, CVXOPT, PyVox, Audio and Video - Libraries, ctypes, NumPy, data-base interfaces, etc.) - -6. There is no way to share discontiguous memory (except through - the sequence of segments notion). - - There are two widely used libraries that use the concept of - discontiguous memory: PIL and NumPy. Their view of discontiguous - arrays is different, though. The proposed buffer interface allows - sharing of either memory model. Exporters will use only one - approach and consumers may choose to support discontiguous - arrays of each type however they choose. - - NumPy uses the notion of constant striding in each dimension as its - basic concept of an array. With this concept, a simple sub-region - of a larger array can be described without copying the data. - Thus, stride information is the additional information that must be - shared. - - The PIL uses a more opaque memory representation. Sometimes an - image is contained in a contiguous segment of memory, but sometimes - it is contained in an array of pointers to the contiguous segments - (usually lines) of the image. 
The PIL is where the idea of multiple - buffer segments in the original buffer interface came from. - - NumPy's strided memory model is used more often in computational - libraries and because it is so simple it makes sense to support - memory sharing using this model. The PIL memory model is sometimes - used in C-code where a 2-d array can then be accessed using double - pointer indirection: e.g. image[i][j]. - - The buffer interface should allow the object to export either of these - memory models. Consumers are free to either require contiguous memory - or write code to handle one or both of these memory models. - -Proposal Overview -================= - -* Eliminate the char-buffer and multiple-segment sections of the - buffer-protocol. - -* Unify the read/write versions of getting the buffer. - -* Add a new function to the interface that should be called when - the consumer object is "done" with the memory area. - -* Add a new variable to allow the interface to describe what is in - memory (unifying what is currently done in struct and - array) - -* Add a new variable to allow the protocol to share shape information - -* Add a new variable for sharing stride information - -* Add a new mechanism for sharing arrays that must - be accessed using pointer indirection. - -* Fix all objects in the core and the standard library to conform - to the new interface - -* Extend the struct module to handle more format specifiers - -* Extend the buffer object into a new memory object which places - a Python veneer around the buffer interface. - -* Add a few functions to make it easy to copy contiguous data - in and out of objects supporting the buffer interface. - -Specification -============= - -While the new specification allows for complicated memory sharing, - simple contiguous buffers of bytes can still be obtained from an - object. In fact, the new protocol allows a standard mechanism for - doing this even if the original object is not represented as a - contiguous chunk of memory. - -The easiest way to obtain a simple contiguous chunk of memory is - to use the provided C-API. - - -Change the PyBufferProcs structure to - -:: - - typedef struct { - getbufferproc bf_getbuffer; - releasebufferproc bf_releasebuffer; - } - - -:: - - typedef int (*getbufferproc)(PyObject *obj, PyBuffer *view, int flags) - -This function returns 0 on success and -1 on failure (and raises an - error). The first argument is the "exporting" object. The second - argument is the address of a bufferinfo structure. If view is NULL, - then no information is returned but a lock on the memory is still - obtained. In this case, the corresponding releasebuffer should also - be called with NULL. - -The third argument indicates what kind of buffer the exporter is - allowed to return. It essentially tells the exporter what kind of - memory area the consumer can deal with. It also indicates what - members of the PyBuffer structure the consumer is going to care about. - -The exporter can use this information to simplify how much of the PyBuffer - structure is filled in and/or raise an error if the object can't support - a simpler view of its memory. - -Thus, the caller can request a simple "view" and either receive it or - have an error raised if it is not possible. - -All of the following assume that at least buf, len, and readonly - will always be utilized by the caller. - -Py_BUF_SIMPLE - - The returned buffer will be assumed to be readable (the object may - or may not have writeable memory).
Only the buf, len, and readonly - variables may be accessed. The format will be assumed to be - unsigned bytes . This is a "stand-alone" flag constant. It never - needs to be \|'d to the others. The exporter will raise an - error if it cannot provide such a contiguous buffer. - -Py_BUF_WRITEABLE - - The returned buffer must be writeable. If it is not writeable, - then raise an error. - -Py_BUF_READONLY - - The returned buffer must be readonly. If the object is already - read-only or it can make its memory read-only (and there are no - other views on the object) then it should do so and return the - buffer information. If the object does not have read-only memory - (or cannot make it read-only), then an error should be raised. - -Py_BUF_FORMAT - - The returned buffer must have true format information. This would - be used when the consumer is going to be checking for what 'kind' - of data is actually stored. An exporter should always be able - to provide this information if requested. - -Py_BUF_SHAPE - - The returned buffer must have shape information. The memory will - be assumed C-style contiguous (last dimension varies the fastest). - The exporter may raise an error if it cannot provide this kind - of contiguous buffer. - -Py_BUF_STRIDES (implies Py_BUF_SHAPE) - - The returned buffer must have strides information. This would be - used when the consumer can handle strided, discontiguous arrays. - Handling strides automatically assumes you can handle shape. - The exporter may raise an error if cannot provide a strided-only - representation of the data (i.e. without the suboffsets). - -Py_BUF_OFFSETS (implies Py_BUF_STRIDES) - - The returned buffer must have suboffsets information. This would - be used when the consumer can handle indirect array referencing - implied by these suboffsets. - -Py_BUF_FULL (Py_BUF_OFFSETS | Py_BUF_WRITEABLE | Py_BUF_FORMAT) - -Thus, the consumer simply wanting a contiguous chunk of bytes from -the object would use Py_BUF_SIMPLE, while a consumer that understands -how to make use of the most complicated cases could use Py_BUF_INDIRECT. - -If format information is going to be probed, then Py_BUF_FORMAT must -be \|'d to the flags otherwise the consumer assumes it is unsigned -bytes. - -There is a C-API that simple exporting objects can use to fill-in the -buffer info structure correctly according to the provided flags if a -contiguous chunk of "unsigned bytes" is all that can be exported. - - -The bufferinfo structure is:: - - struct bufferinfo { - void *buf; - Py_ssize_t len; - int readonly; - const char *format; - int ndims; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - int itemsize; - void *internal; - } PyBuffer; - -Before calling this function, the bufferinfo structure can be filled -with whatever. Upon return from getbufferproc, the bufferinfo -structure is filled in with relevant information about the buffer. -This same bufferinfo structure must be passed to bf_releasebuffer (if -available) when the consumer is done with the memory. The caller is -responsible for keeping a reference to obj until releasebuffer is -called (i.e. this call does not alter the reference count of obj). - -The members of the bufferinfo structure are: - -buf - a pointer to the start of the memory for the object - -len - the total bytes of memory the object uses. This should be the - same as the product of the shape array multiplied by the number of - bytes per item of memory. - -readonly - an integer variable to hold whether or not the memory is - readonly. 
1 means the memory is readonly, zero means the - memory is writeable. - -format - a NULL-terminated format-string (following the struct-style syntax - including extensions) indicating what is in each element of - memory. The number of elements is len / itemsize, where itemsize - is the number of bytes implied by the format. For standard - unsigned bytes use a format string of "B". - -ndims - a variable storing the number of dimensions the memory represents. - Must be >=0. - -shape - an array of ``Py_ssize_t`` of length ``ndims`` indicating the - shape of the memory as an N-D array. Note that ``((*shape)[0] * - ... * (*shape)[ndims-1])*itemsize = len``. If ndims is 0 (indicating - a scalar), then this must be NULL. - -strides - address of a ``Py_ssize_t*`` variable that will be filled with a - pointer to an array of ``Py_ssize_t`` of length ``ndims`` (or NULL - if ndims is 0). indicating the number of bytes to skip to get to - the next element in each dimension. If this is not requested by - the caller (BUF_STRIDES is not set), then this member of the - structure will not be used and the consumer is assuming the array - is C-style contiguous. If this is not the case, then an error - should be raised. If this member is requested by the caller - (BUF_STRIDES is set), then it must be filled in. - - -suboffsets - address of a ``Py_ssize_t *`` variable that will be filled with a - pointer to an array of ``Py_ssize_t`` of length ``*ndims``. If - these suboffset numbers are >=0, then the value stored along the - indicated dimension is a pointer and the suboffset value dictates - how many bytes to add to the pointer after de-referencing. A - suboffset value that it negative indicates that no de-referencing - should occur (striding in a contiguous memory block). If all - suboffsets are negative (i.e. no de-referencing is needed, then - this must be NULL. - - For clarity, here is a function that returns a pointer to the - element in an N-D array pointed to by an N-dimesional index when - there are both strides and suboffsets.:: - - void* get_item_pointer(int ndim, void* buf, Py_ssize_t* strides, - Py_ssize_t* suboffsets, Py_ssize_t *indices) { - char* pointer = (char*)buf; - int i; - for (i = 0; i < ndim; i++) { - pointer += strides[i]*indices[i]; - if (suboffsets[i] >=0 ) { - pointer = *((char**)pointer) + suboffsets[i]; - } - } - return (void*)pointer; - } - - Notice the suboffset is added "after" the dereferencing occurs. - Thus slicing in the ith dimension would add to the suboffsets in - the (i-1)st dimension. Slicing in the first dimension would change - the location of the starting pointer directly (i.e. buf would - be modified). - -itemsize - This is a storage for the itemsize of each element of the shared - memory. It can be obtained using PyBuffer_SizeFromFormat but an - exporter may know it without making this call and thus storing it - is more convenient and faster. - -internal - This is for use internally by the exporting object. For example, - this might be re-cast as an integer by the exporter and used to - store flags about whether or not the shape, strides, and suboffsets - arrays must be freed when the buffer is released. The consumer - should never touch this value. - - -The exporter is responsible for making sure the memory pointed to by -buf, format, shape, strides, and suboffsets is valid until -releasebuffer is called. 
If the exporter wants to be able to change -shape, strides, and/or suboffsets before releasebuffer is called then -it should allocate those arrays when getbuffer is called (pointing to -them in the buffer-info structure provided) and free them when -releasebuffer is called. - - -The same bufferinfo struct should be used in the release-buffer -interface call. The caller is responsible for the memory of the -bufferinfo structure itself. - -``typedef int (*releasebufferproc)(PyObject *obj, PyBuffer *view)`` - Callers of getbufferproc must make sure that this function is - called when memory previously acquired from the object is no - longer needed. The exporter of the interface must make sure that - any memory pointed to in the bufferinfo structure remains valid - until releasebuffer is called. - - Both of these routines are optional for a type object - - If the releasebuffer function is not provided then it does not ever - need to be called. - -Exporters will need to define a releasebuffer function if they can -re-allocate their memory, strides, shape, suboffsets, or format -variables which they might share through the struct bufferinfo. -Several mechanisms could be used to keep track of how many getbuffer -calls have been made and shared. Either a single variable could be -used to keep track of how many "views" have been exported, or a -linked-list of bufferinfo structures filled in could be maintained in -each object. - -All that is specifically required by the exporter, however, is to -ensure that any memory shared through the bufferinfo structure remains -valid until releasebuffer is called on the bufferinfo structure. - - -New C-API calls are proposed -============================ - -:: - - int PyObject_CheckBuffer(PyObject *obj) - -Return 1 if the getbuffer function is available otherwise 0. - -:: - - int PyObject_GetBuffer(PyObject *obj, PyBuffer *view, - int flags) - -This is a C-API version of the getbuffer function call. It checks to -make sure object has the required function pointer and issues the -call. Returns -1 and raises an error on failure and returns 0 on -success. - -:: - - int PyObject_ReleaseBuffer(PyObject *obj, PyBuffer *view) - -This is a C-API version of the releasebuffer function call. It checks -to make sure the object has the required function pointer and issues -the call. Returns 0 on success and -1 (with an error raised) on -failure. This function always succeeds if there is no releasebuffer -function for the object. - -:: - - PyObject *PyObject_GetMemoryView(PyObject *obj) - -Return a memory-view object from an object that defines the buffer interface. - -A memory-view object is an extended buffer object that could replace -the buffer object (but doesn't have to). It's C-structure is - -:: - - typedef struct { - PyObject_HEAD - PyObject *base; - int ndims; - Py_ssize_t *starts; /* slice starts */ - Py_ssize_t *stops; /* slice stops */ - Py_ssize_t *steps; /* slice steps */ - } PyMemoryViewObject; - -This is functionally similar to the current buffer object except only -a reference to base is kept. The actual memory for base must be -re-grabbed using the buffer-protocol, whenever it is needed. - -The getbuffer and releasebuffer for this object use the underlying -base object (adjusted using the slice information). If the number of -dimensions of the base object (or the strides or the size) has changed -when a new view is requested, then the getbuffer will trigger an error. - -This memory-view object will support mult-dimensional slicing. 
Slices -of the memory-view object are other memory-view objects. When an -"element" from the memory-view is returned it is always a tuple of -bytes object + format string which can then be interpreted using the -struct module if desired. - -:: - - int PyBuffer_SizeFromFormat(const char *) - -Return the implied itemsize of the data-format area from a struct-style -description. - -:: - - int PyObject_GetContiguous(PyObject *obj, void **buf, Py_ssize_t *len, - char **format, char fortran) - -Return a contiguous chunk of memory representing the buffer. If a -copy is made then return 1. If no copy was needed return 0. If an -error occurred in probing the buffer interface, then return -1. The -contiguous chunk of memory is pointed to by ``*buf`` and the length of -that memory is ``*len``. If the object is multi-dimensional, then if -fortran is 'F', the first dimension of the underlying array will vary -the fastest in the buffer. If fortran is 'C', then the last dimension -will vary the fastest (C-style contiguous). If fortran is 'A', then it -does not matter and you will get whatever the object decides is more -efficient. - -:: - - int PyObject_CopyToObject(PyObject *obj, void *buf, Py_ssize_t len, - char fortran) - -Copy ``len`` bytes of data pointed to by the contiguous chunk of -memory pointed to by ``buf`` into the buffer exported by obj. Return -0 on success and return -1 and raise an error on failure. If the -object does not have a writeable buffer, then an error is raised. If -fortran is 'F', then if the object is multi-dimensional, then the data -will be copied into the array in Fortran-style (first dimension varies -the fastest). If fortran is 'C', then the data will be copied into the -array in C-style (last dimension varies the fastest). If fortran is 'A', then -it does not matter and the copy will be made in whatever way is more -efficient. - -:: - - void PyBuffer_FreeMem(void *buf) - -This function frees the memory returned by PyObject_GetContiguous if a -copy was made. Do not call this function unless -PyObject_GetContiguous returns a 1 indicating that new memory was -created. - - -These last three C-API calls allow a standard way of getting data in and -out of Python objects into contiguous memory areas no matter how it is -actually stored. These calls use the extended buffer interface to perform -their work. - -:: - - int PyBuffer_IsContiguous(PyBuffer *view, char fortran); - -Return 1 if the memory defined by the view object is C-style (fortran = 'C') -or Fortran-style (fortran = 'A') contiguous. Return 0 otherwise. - -:: - - void PyBuffer_FillContiguousStrides(int *ndims, Py_ssize_t *shape, - int itemsize, - Py_ssize_t *strides, char fortran) - -Fill the strides array with byte-strides of a contiguous (C-style if -fortran is 0 or Fortran-style if fortran is 1) array of the given -shape with the given number of bytes per element. - -:: - - int PyBuffer_FillInfo(PyBuffer *view, void *buf, - Py_ssize_t len, int readonly, int infoflags) - -Fills in a buffer-info structure correctly for an exporter that can -only share a contiguous chunk of memory of "unsigned bytes" of the -given length. Returns 0 on success and -1 (with raising an error) on -error. - - -Additions to the struct string-syntax -===================================== - -The struct string-syntax is missing some characters to fully -implement data-format descriptions already available elsewhere (in -ctypes and NumPy for example). 
The Python 2.5 specification is -at http://docs.python.org/lib/module-struct.html - -Here are the proposed additions: - - -================ =========== -Character Description -================ =========== -'t' bit (number before states how many bits) -'?' platform _Bool type -'g' long double -'c' ucs-1 (latin-1) encoding -'u' ucs-2 -'w' ucs-4 -'O' pointer to Python Object -'Z' complex (whatever the next specifier is) -'&' specific pointer (prefix before another charater) -'T{}' structure (detailed layout inside {}) -'(k1,k2,...,kn)' multi-dimensional array of whatever follows -':name:' optional name of the preceeding element -'X{}' pointer to a function (optional function - signature inside {}) -' \n\t' ignored (allow better readability) - -- this may already be true -================ =========== - -The struct module will be changed to understand these as well and -return appropriate Python objects on unpacking. Unpacking a -long-double will return a decimal object or a ctypes long-double. -Unpacking 'u' or 'w' will return Python unicode. Unpacking a -multi-dimensional array will return a list (of lists if >1d). -Unpacking a pointer will return a ctypes pointer object. Unpacking a -function pointer will return a ctypes call-object (perhaps). Unpacking -a bit will return a Python Bool. White-space in the struct-string -syntax will be ignored if it isn't already. Unpacking a named-object -will return some kind of named-tuple-like object that acts like a -tuple but whose entries can also be accessed by name. Unpacking a -nested structure will return a nested tuple. - -Endian-specification ('!', '@','=','>','<', '^') is also allowed -inside the string so that it can change if needed. The -previously-specified endian string is in force until changed. The -default endian is '@' which means native data-types and alignment. If -un-aligned, native data-types are requested, then the endian -specification is '^'. - -According to the struct-module, a number can preceed a character -code to specify how many of that type there are. The -(k1,k2,...,kn) extension also allows specifying if the data is -supposed to be viewed as a (C-style contiguous, last-dimension -varies the fastest) multi-dimensional array of a particular format. - -Functions should be added to ctypes to create a ctypes object from -a struct description, and add long-double, and ucs-2 to ctypes. - -Examples of Data-Format Descriptions -==================================== - -Here are some examples of C-structures and how they would be -represented using the struct-style syntax. - - is the constructor for a named-tuple (not-specified yet). - -float - 'f' <--> Python float -complex double - 'Zd' <--> Python complex -RGB Pixel data - 'BBB' <--> (int, int, int) - 'B:r: B:g: B:b:' <--> ((int, int, int), ('r','g','b')) - -Mixed endian (weird but possible) - '>i:big: ((int, int), ('big', 'little')) - -Nested structure - :: - - struct { - int ival; - struct { - unsigned short sval; - unsigned char bval; - unsigned char cval; - } sub; - } - """i:ival: - T{ - H:sval: - B:bval: - B:cval: - }:sub: - """ -Nested array - :: - - struct { - int ival; - double data[16*4]; - } - """i:ival: - (16,4)d:data: - """ - - -Code to be affected -=================== - -All objects and modules in Python that export or consume the old -buffer interface will be modified. Here is a partial list. - -* buffer object -* bytes object -* string object -* array module -* struct module -* mmap module -* ctypes module - -Anything else using the buffer API. 
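
As an illustration of how a consumer from the list above might use the
new interface, here is a minimal sketch that sums a one-dimensional
strided array of C doubles. It relies only on the draft names defined
earlier in this PEP (PyBuffer, ndims, Py_BUF_STRIDES, Py_BUF_FORMAT,
PyObject_GetBuffer, PyObject_ReleaseBuffer), which may not be the final
spellings; the helper name sum_doubles is purely hypothetical, and the
format check is deliberately simplistic (a real consumer would likely
also accept byte-order prefixes such as "@d" or "=d")::

    /* Sketch only: uses the draft structure and calls proposed above. */
    static int
    sum_doubles(PyObject *obj, double *result)
    {
        PyBuffer view;
        Py_ssize_t i, n;
        char *p;
        double total = 0.0;

        /* Ask for strides and format information; an exporter that
           cannot provide them will raise an error here. */
        if (PyObject_GetBuffer(obj, &view, Py_BUF_STRIDES | Py_BUF_FORMAT) < 0)
            return -1;                       /* error already set */

        /* Only handle one dimension of native doubles ("d"). */
        if (view.ndims != 1 || view.format == NULL ||
                strcmp(view.format, "d") != 0) {
            PyObject_ReleaseBuffer(obj, &view);
            PyErr_SetString(PyExc_TypeError,
                            "expected a 1-d buffer of doubles");
            return -1;
        }

        n = view.shape[0];
        p = (char *)view.buf;
        for (i = 0; i < n; i++) {
            total += *(double *)p;
            p += view.strides[0];            /* strides are in bytes */
        }

        PyObject_ReleaseBuffer(obj, &view);  /* tell the exporter we are done */
        *result = total;
        return 0;
    }

Requesting only the features the algorithm can actually handle (strides
and format, but not suboffsets) lets the exporter reject the request up
front rather than have the consumer silently misread memory.
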
- - -Issues and Details -================== - -It is intended that this PEP will be back-ported to Python 2.6 by -adding the C-API and the two functions to the existing buffer -protocol. - -The proposed locking mechanism relies entirely on the exporter object -to not invalidate any of the memory pointed to by the buffer structure -until a corresponding releasebuffer is called. If it wants to be able -to change its own shape and/or strides arrays, then it needs to create -memory for these in the bufferinfo structure and copy information -over. - -The sharing of strided memory and suboffsets is new and can be seen as -a modification of the multiple-segment interface. It is motivated by -NumPy and the PIL. NumPy objects should be able to share their -strided memory with code that understands how to manage strided memory -because strided memory is very common when interfacing with compute -libraries. - -Also, with this approach it should be possible to write generic code -that works with both kinds of memory. - -Memory management of the format string, the shape array, the strides -array, and the suboffsets array in the bufferinfo structure is always -the responsibility of the exporting object. The consumer should not -set these pointers to any other memory or try to free them. - -Several ideas were discussed and rejected: - - Having a "releaser" object whose release-buffer was called. This - was deemed unacceptable because it caused the protocol to be - asymmetric (you called release on something different than you - "got" the buffer from). It also complicated the protocol without - providing a real benefit. - - Passing all the struct variables separately into the function. - This had the advantage that it allowed one to set NULL to - variables that were not of interest, but it also made the function - call more difficult. The flags variable allows the same - ability of consumers to be "simple" in how they call the protocol. - -Code -======== - -The authors of the PEP promise to contribute and maintain the code for -this proposal but will welcome any help. - - - - -Examples -========= - -Ex. 1 ------------ - -This example shows how an image object that uses contiguous lines might expose its buffer. - -:: - - struct rgba { - unsigned char r, g, b, a; - }; - - struct ImageObject { - PyObject_HEAD; - ... - struct rgba** lines; - Py_ssize_t height; - Py_ssize_t width; - Py_ssize_t shape_array[2]; - Py_ssize_t stride_array[2]; - Py_ssize_t view_count; - }; - -"lines" points to malloced 1-D array of (struct rgba*). Each pointer -in THAT block points to a seperately malloced array of (struct rgba). - -In order to access, say, the red value of the pixel at x=30, y=50, you'd use "lines[50][30].r". - -So what does ImageObject's getbuffer do? Leaving error checking out:: - - int Image_getbuffer(PyObject *self, PyBuffer *view, int flags) { - - static Py_ssize_t suboffsets[2] = { -1, 0 }; - - view->buf = self->lines; - view->len = self->height*self->width; - view->readonly = 0; - view->ndims = 2; - self->shape_array[0] = height; - self->shape_array[1] = width; - view->shape = &self->shape_array; - self->stride_array[0] = sizeof(struct rgba*); - self->stride_array[1] = sizeof(struct rgba); - view->strides = &self->stride_array; - view->suboffsets = suboffsets; - - self->view_count ++; - - return 0; - } - - - int Image_releasebuffer(PyObject *self, PyBuffer *view) { - self->view_count--; - return 0; - } - - -Ex. 
2 ------------ - -This example shows how an object that wants to expose a contiguous -chunk of memory (which will never be re-allocated while the object is -alive) would do that. - -:: - - int myobject_getbuffer(PyObject *self, PyBuffer *view, int flags) { - - void *buf; - Py_ssize_t len; - int readonly=0; - - buf = /* Point to buffer */ - len = /* Set to size of buffer */ - readonly = /* Set to 1 if readonly */ - - return PyObject_FillBufferInfo(view, buf, len, readonly, flags); - } - -No releasebuffer is necessary because the memory will never -be re-allocated so the locking mechanism is not needed. - -Ex. 3 ------------ - -A consumer that wants to only get a simple contiguous chunk of bytes -from a Python object, obj would do the following: - -:: - - PyBuffer view; - int ret; - - if (PyObject_GetBuffer(obj, &view, Py_BUF_SIMPLE) < 0) { - /* error return */ - } - - /* Now, view.buf is the pointer to memory - view.len is the length - view.readonly is whether or not the memory is read-only. - */ - - - /* After using the information and you don't need it anymore */ - - if (PyObject_ReleaseBuffer(obj, &view) < 0) { - /* error return */ - } - - -Ex. 4 ------------ - -A consumer that wants to be able to use any object's memory but is -writing an algorithm that only handle contiguous memory could do the following: - -:: - - void *buf; - Py_ssize_t len; - char *format; - - if (PyObject_GetContiguous(obj, &buf, &len, &format, 0) < 0) { - /* error return */ - } - - /* process memory pointed to by buffer if format is correct */ - - /* Optional: - - if, after processing, we want to copy data from buffer back - into the the object - - we could do - */ - - if (PyObject_CopyToObject(obj, buf, len, 0) < 0) { - /* error return */ - } - - -Copyright -========= - -This PEP is placed in the public domain diff --git a/numpy/doc/pyrex/MANIFEST b/numpy/doc/pyrex/MANIFEST deleted file mode 100644 index feb3ec22a..000000000 --- a/numpy/doc/pyrex/MANIFEST +++ /dev/null @@ -1,2 +0,0 @@ -numpyx.pyx -setup.py diff --git a/numpy/doc/pyrex/Makefile b/numpy/doc/pyrex/Makefile deleted file mode 100644 index b5905e7be..000000000 --- a/numpy/doc/pyrex/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -all: - python setup.py build_ext --inplace - -test: all - python run_test.py - -.PHONY: clean -clean: - rm -rf *~ *.so *.c *.o build diff --git a/numpy/doc/pyrex/c_numpy.pxd b/numpy/doc/pyrex/c_numpy.pxd deleted file mode 100644 index 511acc4b1..000000000 --- a/numpy/doc/pyrex/c_numpy.pxd +++ /dev/null @@ -1,125 +0,0 @@ -# :Author: Travis Oliphant - -cdef extern from "numpy/arrayobject.h": - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - cdef enum requirements: - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum defines: - # Note: as of Pyrex 0.9.5, enums are type-checked more strictly, so this - # can't be used as an integer. 
- NPY_MAXDIMS - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef int npy_intp - - ctypedef extern class numpy.dtype [object PyArray_Descr]: - cdef int type_num, elsize, alignment - cdef char type, kind, byteorder, hasobject - cdef object fields, typeobj - - ctypedef extern class numpy.ndarray [object PyArrayObject]: - cdef char *data - cdef int nd - cdef npy_intp *dimensions - cdef npy_intp *strides - cdef object base - cdef dtype descr - cdef int flags - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - cdef int nd_m1 - cdef npy_intp index, size - cdef ndarray ao - cdef char *dataptr - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - # These next two should be arrays of [NPY_MAXITER], but that is - # difficult to cleanly specify in Pyrex. Fortunately, it doesn't matter. - cdef npy_intp *dimensions - cdef void **iters - - object PyArray_ZEROS(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - object PyArray_EMPTY(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - dtype PyArray_DescrFromTypeNum(NPY_TYPES type_num) - object PyArray_SimpleNew(int ndims, npy_intp* dims, NPY_TYPES type_num) - int PyArray_Check(object obj) - object PyArray_ContiguousFromAny(object obj, NPY_TYPES type, - int mindim, int maxdim) - npy_intp PyArray_SIZE(ndarray arr) - npy_intp PyArray_NBYTES(ndarray arr) - void *PyArray_DATA(ndarray arr) - object PyArray_FromAny(object obj, dtype newtype, int mindim, int maxdim, - int requirements, object context) - object PyArray_FROMANY(object obj, NPY_TYPES type_num, int min, - int max, int requirements) - object PyArray_NewFromDescr(object subtype, dtype newtype, int nd, - npy_intp* dims, npy_intp* strides, void* data, - int flags, object parent) - - void PyArray_ITER_NEXT(flatiter it) - - void import_array() diff --git a/numpy/doc/pyrex/c_python.pxd b/numpy/doc/pyrex/c_python.pxd deleted file mode 100644 index 53f6d9b19..000000000 --- a/numpy/doc/pyrex/c_python.pxd +++ /dev/null @@ -1,20 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough - -# Expose as much of the Python C API as we need here - -cdef extern from "stdlib.h": - ctypedef int size_t - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - void* PyMem_Malloc(size_t) - void* PyMem_Realloc(void *p, size_t n) - void PyMem_Free(void *p) - char* PyString_AsString(object string) - object PyString_FromString(char *v) - object PyString_InternFromString(char *v) - int PyErr_CheckSignals() - object PyFloat_FromDouble(double v) - void Py_XINCREF(object o) - void Py_XDECREF(object o) - void Py_CLEAR(object o) # use instead of decref diff --git a/numpy/doc/pyrex/notes b/numpy/doc/pyrex/notes deleted file mode 100644 index 301581cee..000000000 --- a/numpy/doc/pyrex/notes +++ /dev/null @@ -1,3 +0,0 @@ -- cimport with a .pxd file vs 'include foo.pxi'? - -- the need to repeat: pyrex does NOT parse C headers. 
\ No newline at end of file diff --git a/numpy/doc/pyrex/numpyx.c b/numpy/doc/pyrex/numpyx.c deleted file mode 100644 index e250eae19..000000000 --- a/numpy/doc/pyrex/numpyx.c +++ /dev/null @@ -1,1037 +0,0 @@ -/* Generated by Pyrex 0.9.5.1 on Wed Jan 31 11:57:10 2007 */ - -#include "Python.h" -#include "structmember.h" -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifdef __cplusplus -#define __PYX_EXTERN_C extern "C" -#else -#define __PYX_EXTERN_C extern -#endif -__PYX_EXTERN_C double pow(double, double); -#include "stdlib.h" -#include "numpy/arrayobject.h" - - -typedef struct {PyObject **p; char *s;} __Pyx_InternTabEntry; /*proto*/ -typedef struct {PyObject **p; char *s; long n;} __Pyx_StringTabEntry; /*proto*/ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static int __pyx_lineno; -static char *__pyx_filename; -static char **__pyx_f; - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name); /*proto*/ - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/ - -static int __Pyx_PrintItem(PyObject *); /*proto*/ -static int __Pyx_PrintNewline(void); /*proto*/ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static int __Pyx_InternStrings(__Pyx_InternTabEntry *t); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, long size); /*proto*/ - -static void __Pyx_AddTraceback(char *funcname); /*proto*/ - -/* Declarations from c_python */ - - -/* Declarations from c_numpy */ - -static PyTypeObject *__pyx_ptype_7c_numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_7c_numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_7c_numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_7c_numpy_broadcast = 0; - -/* Declarations from numpyx */ - -static PyObject *(__pyx_f_6numpyx_print_elements(char (*),Py_intptr_t (*),Py_intptr_t (*),int ,int ,PyObject *)); /*proto*/ - - -/* Implementation of numpyx */ - - -static PyObject *__pyx_n_c_python; -static PyObject *__pyx_n_c_numpy; -static PyObject *__pyx_n_numpy; -static PyObject *__pyx_n_print_array_info; -static PyObject *__pyx_n_test_methods; -static PyObject *__pyx_n_test; - -static PyObject *__pyx_n_dtype; - -static PyObject *__pyx_k2p; -static PyObject *__pyx_k3p; -static PyObject *__pyx_k4p; -static PyObject *__pyx_k5p; -static PyObject *__pyx_k6p; -static PyObject *__pyx_k7p; -static PyObject *__pyx_k8p; -static PyObject *__pyx_k9p; - -static char (__pyx_k2[]) = "-="; -static char (__pyx_k3[]) = "printing array info for ndarray at 0x%0lx"; -static char (__pyx_k4[]) = "print number of dimensions:"; -static char (__pyx_k5[]) = "address of strides: 0x%0lx"; -static char (__pyx_k6[]) = "strides:"; -static char (__pyx_k7[]) = " stride %d:"; -static char (__pyx_k8[]) = "memory dump:"; -static char (__pyx_k9[]) = "-="; - -static PyObject *__pyx_f_6numpyx_print_array_info(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_f_6numpyx_print_array_info(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_arr = 0; - int __pyx_v_i; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - int __pyx_3; - static char *__pyx_argnames[] = {"arr",0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_arr)) return 0; - Py_INCREF(__pyx_v_arr); - if (!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_arr), 
__pyx_ptype_7c_numpy_ndarray, 1, "arr")) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":13 */ - __pyx_1 = PyInt_FromLong(10); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - __pyx_2 = PyNumber_Multiply(__pyx_k2p, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":14 */ - __pyx_1 = PyInt_FromLong(((int )__pyx_v_arr)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyNumber_Remainder(__pyx_k3p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":15 */ - if (__Pyx_PrintItem(__pyx_k4p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(__pyx_v_arr->nd); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":16 */ - __pyx_1 = PyInt_FromLong(((int )__pyx_v_arr->strides)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyNumber_Remainder(__pyx_k5p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":17 */ - if (__Pyx_PrintItem(__pyx_k6p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;} - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":18 */ - __pyx_3 = __pyx_v_arr->nd; - for (__pyx_v_i = 0; __pyx_v_i < __pyx_3; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":20 */ - __pyx_2 = PyInt_FromLong(__pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - __pyx_1 = PyNumber_Remainder(__pyx_k7p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = PyInt_FromLong(((int )(__pyx_v_arr->strides[__pyx_v_i]))); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - } - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":21 */ - if (__Pyx_PrintItem(__pyx_k8p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;} - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":22 */ - __pyx_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; goto __pyx_L1;} - __pyx_2 = __pyx_f_6numpyx_print_elements(__pyx_v_arr->data,__pyx_v_arr->strides,__pyx_v_arr->dimensions,__pyx_v_arr->nd,(sizeof(double )),__pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":24 */ - __pyx_1 = PyInt_FromLong(10); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - __pyx_2 = PyNumber_Multiply(__pyx_k9p, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":25 */ - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; goto __pyx_L1;} - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - __Pyx_AddTraceback("numpyx.print_array_info"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static PyObject *__pyx_n_object_; -static PyObject *__pyx_n_float64; -static PyObject *__pyx_n_name; - -static PyObject *__pyx_k10p; -static PyObject *__pyx_k11p; -static PyObject *__pyx_k12p; -static PyObject *__pyx_k13p; -static PyObject *__pyx_k14p; - -static char (__pyx_k10[]) = " print_elements() not (yet) implemented for dtype %s"; -static char (__pyx_k11[]) = " "; -static char (__pyx_k12[]) = " "; -static char (__pyx_k13[]) = " "; -static char (__pyx_k14[]) = " "; - -static PyObject *__pyx_f_6numpyx_print_elements(char (*__pyx_v_data),Py_intptr_t (*__pyx_v_strides),Py_intptr_t (*__pyx_v_dimensions),int __pyx_v_nd,int __pyx_v_elsize,PyObject *__pyx_v_dtype) { - Py_intptr_t __pyx_v_i; - void (*__pyx_v_elptr); - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - PyObject *__pyx_3 = 0; - PyObject *__pyx_4 = 0; - int __pyx_5; - Py_intptr_t __pyx_6; - Py_INCREF(__pyx_v_dtype); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":36 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = __Pyx_GetName(__pyx_m, 
__pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_4); - __pyx_4 = 0; - __pyx_4 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyList_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_1, 0, __pyx_3); - PyList_SET_ITEM(__pyx_1, 1, __pyx_4); - __pyx_3 = 0; - __pyx_4 = 0; - __pyx_5 = PySequence_Contains(__pyx_1, __pyx_v_dtype); if (__pyx_5 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_5 = !__pyx_5; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":38 */ - __pyx_2 = PyObject_GetAttr(__pyx_v_dtype, __pyx_n_name); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - __pyx_3 = PyNumber_Remainder(__pyx_k10p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":39 */ - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - goto __pyx_L2; - } - __pyx_L2:; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":41 */ - __pyx_5 = (__pyx_v_nd == 0); - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":42 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; 
- __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":43 */ - __pyx_v_elptr = (((void (*(*)))__pyx_v_data)[0]); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":44 */ - if (__Pyx_PrintItem(__pyx_k11p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - __pyx_3 = (PyObject *)__pyx_v_elptr; - Py_INCREF(__pyx_3); - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - goto __pyx_L4; - } - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":46 */ - if (__Pyx_PrintItem(__pyx_k12p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - __pyx_3 = PyFloat_FromDouble((((double (*))__pyx_v_data)[0])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - goto __pyx_L4; - } - __pyx_L4:; - goto __pyx_L3; - } - __pyx_5 = (__pyx_v_nd == 1); - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":48 */ - __pyx_6 = (__pyx_v_dimensions[0]); - for (__pyx_v_i = 0; __pyx_v_i < __pyx_6; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":49 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if 
(!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":50 */ - __pyx_v_elptr = (((void (*(*)))__pyx_v_data)[0]); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":51 */ - if (__Pyx_PrintItem(__pyx_k13p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - __pyx_3 = (PyObject *)__pyx_v_elptr; - Py_INCREF(__pyx_3); - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - goto __pyx_L7; - } - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":53 */ - if (__Pyx_PrintItem(__pyx_k14p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - __pyx_3 = PyFloat_FromDouble((((double (*))__pyx_v_data)[0])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":54 */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - goto __pyx_L3; - } - /*else*/ { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":56 */ - __pyx_6 = (__pyx_v_dimensions[0]); - for (__pyx_v_i = 0; __pyx_v_i < __pyx_6; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":57 */ - __pyx_4 = 
__pyx_f_6numpyx_print_elements(__pyx_v_data,(__pyx_v_strides + 1),(__pyx_v_dimensions + 1),(__pyx_v_nd - 1),__pyx_v_elsize,__pyx_v_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":58 */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - } - __pyx_L3:; - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - Py_XDECREF(__pyx_3); - Py_XDECREF(__pyx_4); - __Pyx_AddTraceback("numpyx.print_elements"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_dtype); - return __pyx_r; -} - -static PyObject *__pyx_n_any; - -static PyObject *__pyx_k15p; -static PyObject *__pyx_k16p; -static PyObject *__pyx_k17p; - -static char (__pyx_k15[]) = "arr.any() :"; -static char (__pyx_k16[]) = "arr.nd :"; -static char (__pyx_k17[]) = "arr.flags :"; - -static PyObject *__pyx_f_6numpyx_test_methods(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6numpyx_test_methods[] = "Test a few attribute accesses for an array.\n \n This illustrates how the pyrex-visible object is in practice a strange\n hybrid of the C PyArrayObject struct and the python object. Some\n properties (like .nd) are visible here but not in python, while others\n like flags behave very differently: in python flags appears as a separate,\n object while here we see the raw int holding the bit pattern.\n\n This makes sense when we think of how pyrex resolves arr.foo: if foo is\n listed as a field in the c_numpy.ndarray struct description, it will be\n directly accessed as a C variable without going through Python at all.\n This is why for arr.flags, we see the actual int which holds all the flags\n as bit fields. 
However, for any other attribute not listed in the struct,\n it simply forwards the attribute lookup to python at runtime, just like\n python would (which means that AttributeError can be raised for\n non-existent attributes, for example)."; -static PyObject *__pyx_f_6numpyx_test_methods(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_arr = 0; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - static char *__pyx_argnames[] = {"arr",0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_arr)) return 0; - Py_INCREF(__pyx_v_arr); - if (!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_arr), __pyx_ptype_7c_numpy_ndarray, 1, "arr")) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":78 */ - if (__Pyx_PrintItem(__pyx_k15p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_any); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":79 */ - if (__Pyx_PrintItem(__pyx_k16p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - __pyx_1 = PyInt_FromLong(__pyx_v_arr->nd); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":80 */ - if (__Pyx_PrintItem(__pyx_k17p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(__pyx_v_arr->flags); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - __Pyx_AddTraceback("numpyx.test_methods"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static PyObject *__pyx_n_array; -static PyObject *__pyx_n_arange; -static PyObject *__pyx_n_shape; -static PyObject *__pyx_n_one; -static PyObject *__pyx_n_two; - - -static PyObject *__pyx_f_6numpyx_test(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6numpyx_test[] = "this function is pure Python"; -static PyObject *__pyx_f_6numpyx_test(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_arr1; - PyObject *__pyx_v_arr2; - PyObject *__pyx_v_arr3; - PyObject *__pyx_v_four; - PyObject *__pyx_v_arr4; - PyObject *__pyx_v_arr5; - PyObject *__pyx_v_arr; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - PyObject *__pyx_3 = 0; - PyObject 
*__pyx_4 = 0; - PyObject *__pyx_5 = 0; - static char *__pyx_argnames[] = {0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0; - __pyx_v_arr1 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr2 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr3 = Py_None; Py_INCREF(Py_None); - __pyx_v_four = Py_None; Py_INCREF(Py_None); - __pyx_v_arr4 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr5 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr = Py_None; Py_INCREF(Py_None); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":84 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = PyFloat_FromDouble((-1e-30)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_float64); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyDict_SetItem(__pyx_1, __pyx_n_dtype, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_v_arr1); - __pyx_v_arr1 = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":85 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_5, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_3 = PyFloat_FromDouble(1.0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_1 = PyFloat_FromDouble(2.0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_4 = PyFloat_FromDouble(3.0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_5 = PyList_New(3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_5, 0, __pyx_3); - PyList_SET_ITEM(__pyx_5, 1, __pyx_1); - PyList_SET_ITEM(__pyx_5, 2, __pyx_4); - __pyx_3 = 0; - __pyx_1 = 0; - __pyx_4 = 0; - __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_3, 0, __pyx_5); - __pyx_5 = 0; - __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_float64); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - 
Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyDict_SetItem(__pyx_1, __pyx_n_dtype, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_v_arr2); - __pyx_v_arr2 = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":87 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_5, __pyx_n_arange); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_3 = PyInt_FromLong(9); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_4 = PyDict_New(); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - if (PyDict_SetItem(__pyx_4, __pyx_n_dtype, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - __pyx_5 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_1, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_v_arr3); - __pyx_v_arr3 = __pyx_5; - __pyx_5 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":88 */ - __pyx_3 = PyInt_FromLong(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - PyTuple_SET_ITEM(__pyx_1, 1, __pyx_2); - __pyx_3 = 0; - __pyx_2 = 0; - if (PyObject_SetAttr(__pyx_v_arr3, __pyx_n_shape, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":90 */ - __pyx_4 = PyInt_FromLong(4); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;} - Py_DECREF(__pyx_v_four); - __pyx_v_four = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":91 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_array); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_1 = PyList_New(4); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_INCREF(__pyx_n_one); 
- PyList_SET_ITEM(__pyx_1, 0, __pyx_n_one); - Py_INCREF(__pyx_n_two); - PyList_SET_ITEM(__pyx_1, 1, __pyx_n_two); - PyList_SET_ITEM(__pyx_1, 2, __pyx_2); - Py_INCREF(__pyx_v_four); - PyList_SET_ITEM(__pyx_1, 3, __pyx_v_four); - __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1); - __pyx_1 = 0; - __pyx_5 = PyDict_New(); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (PyDict_SetItem(__pyx_5, __pyx_n_dtype, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = PyEval_CallObjectWithKeywords(__pyx_3, __pyx_4, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_5); __pyx_5 = 0; - Py_DECREF(__pyx_v_arr4); - __pyx_v_arr4 = __pyx_2; - __pyx_2 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":93 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_4 = PyInt_FromLong(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_5 = PyInt_FromLong(2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_1 = PyList_New(3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_1, 0, __pyx_4); - PyList_SET_ITEM(__pyx_1, 1, __pyx_5); - PyList_SET_ITEM(__pyx_1, 2, __pyx_2); - __pyx_4 = 0; - __pyx_5 = 0; - __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1); - __pyx_1 = 0; - __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_v_arr5); - __pyx_v_arr5 = __pyx_5; - __pyx_5 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":95 */ - __pyx_2 = PyList_New(5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - Py_INCREF(__pyx_v_arr1); - PyList_SET_ITEM(__pyx_2, 0, __pyx_v_arr1); - Py_INCREF(__pyx_v_arr2); - PyList_SET_ITEM(__pyx_2, 1, __pyx_v_arr2); - Py_INCREF(__pyx_v_arr3); - PyList_SET_ITEM(__pyx_2, 2, __pyx_v_arr3); - Py_INCREF(__pyx_v_arr4); - PyList_SET_ITEM(__pyx_2, 3, __pyx_v_arr4); - Py_INCREF(__pyx_v_arr5); - PyList_SET_ITEM(__pyx_2, 4, __pyx_v_arr5); - __pyx_1 = PyObject_GetIter(__pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - for (;;) { - __pyx_3 = PyIter_Next(__pyx_1); - if (!__pyx_3) { - if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - break; - } - 
Py_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_3; - __pyx_3 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":96 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_print_array_info); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - Py_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_arr); - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_5); __pyx_5 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - } - Py_DECREF(__pyx_1); __pyx_1 = 0; - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - Py_XDECREF(__pyx_3); - Py_XDECREF(__pyx_4); - Py_XDECREF(__pyx_5); - __Pyx_AddTraceback("numpyx.test"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr1); - Py_DECREF(__pyx_v_arr2); - Py_DECREF(__pyx_v_arr3); - Py_DECREF(__pyx_v_four); - Py_DECREF(__pyx_v_arr4); - Py_DECREF(__pyx_v_arr5); - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static __Pyx_InternTabEntry __pyx_intern_tab[] = { - {&__pyx_n_any, "any"}, - {&__pyx_n_arange, "arange"}, - {&__pyx_n_array, "array"}, - {&__pyx_n_c_numpy, "c_numpy"}, - {&__pyx_n_c_python, "c_python"}, - {&__pyx_n_dtype, "dtype"}, - {&__pyx_n_float64, "float64"}, - {&__pyx_n_name, "name"}, - {&__pyx_n_numpy, "numpy"}, - {&__pyx_n_object_, "object_"}, - {&__pyx_n_one, "one"}, - {&__pyx_n_print_array_info, "print_array_info"}, - {&__pyx_n_shape, "shape"}, - {&__pyx_n_test, "test"}, - {&__pyx_n_test_methods, "test_methods"}, - {&__pyx_n_two, "two"}, - {0, 0} -}; - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_k2p, __pyx_k2, sizeof(__pyx_k2)}, - {&__pyx_k3p, __pyx_k3, sizeof(__pyx_k3)}, - {&__pyx_k4p, __pyx_k4, sizeof(__pyx_k4)}, - {&__pyx_k5p, __pyx_k5, sizeof(__pyx_k5)}, - {&__pyx_k6p, __pyx_k6, sizeof(__pyx_k6)}, - {&__pyx_k7p, __pyx_k7, sizeof(__pyx_k7)}, - {&__pyx_k8p, __pyx_k8, sizeof(__pyx_k8)}, - {&__pyx_k9p, __pyx_k9, sizeof(__pyx_k9)}, - {&__pyx_k10p, __pyx_k10, sizeof(__pyx_k10)}, - {&__pyx_k11p, __pyx_k11, sizeof(__pyx_k11)}, - {&__pyx_k12p, __pyx_k12, sizeof(__pyx_k12)}, - {&__pyx_k13p, __pyx_k13, sizeof(__pyx_k13)}, - {&__pyx_k14p, __pyx_k14, sizeof(__pyx_k14)}, - {&__pyx_k15p, __pyx_k15, sizeof(__pyx_k15)}, - {&__pyx_k16p, __pyx_k16, sizeof(__pyx_k16)}, - {&__pyx_k17p, __pyx_k17, sizeof(__pyx_k17)}, - {0, 0, 0} -}; - -static struct PyMethodDef __pyx_methods[] = { - {"print_array_info", (PyCFunction)__pyx_f_6numpyx_print_array_info, METH_VARARGS|METH_KEYWORDS, 0}, - {"test_methods", (PyCFunction)__pyx_f_6numpyx_test_methods, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6numpyx_test_methods}, - {"test", (PyCFunction)__pyx_f_6numpyx_test, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6numpyx_test}, - {0, 0, 0, 0} -}; - -static void __pyx_init_filenames(void); /*proto*/ - -PyMODINIT_FUNC initnumpyx(void); /*proto*/ -PyMODINIT_FUNC initnumpyx(void) { - PyObject *__pyx_1 = 0; - __pyx_init_filenames(); - __pyx_m = Py_InitModule4("numpyx", __pyx_methods, 0, 0, PYTHON_API_VERSION); - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - __pyx_b = PyImport_AddModule("__builtin__"); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - 
if (__Pyx_InternStrings(__pyx_intern_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - __pyx_ptype_7c_numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr)); if (!__pyx_ptype_7c_numpy_dtype) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 76; goto __pyx_L1;} - __pyx_ptype_7c_numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject)); if (!__pyx_ptype_7c_numpy_ndarray) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 81; goto __pyx_L1;} - __pyx_ptype_7c_numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject)); if (!__pyx_ptype_7c_numpy_flatiter) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 90; goto __pyx_L1;} - __pyx_ptype_7c_numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject)); if (!__pyx_ptype_7c_numpy_broadcast) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 96; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":5 */ - __pyx_1 = __Pyx_Import(__pyx_n_numpy, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; goto __pyx_L1;} - if (PyObject_SetAttr(__pyx_m, __pyx_n_numpy, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":8 */ - import_array(); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":82 */ - return; - __pyx_L1:; - Py_XDECREF(__pyx_1); - __Pyx_AddTraceback("numpyx"); -} - -static char *__pyx_filenames[] = { - "numpyx.pyx", - "c_numpy.pxd", -}; - -/* Runtime support code */ - -static void __pyx_init_filenames(void) { - __pyx_f = __pyx_filenames; -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name) { - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if ((none_allowed && obj == Py_None) || PyObject_TypeCheck(obj, type)) - return 1; - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, obj->ob_type->tp_name); - return 0; -} - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) { - PyObject *__import__ = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - __import__ = PyObject_GetAttrString(__pyx_b, "__import__"); - if (!__import__) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - module = PyObject_CallFunction(__import__, "OOOO", - name, global_dict, empty_dict, list); -bad: - Py_XDECREF(empty_list); - Py_XDECREF(__import__); - Py_XDECREF(empty_dict); - return module; -} - -static PyObject *__Pyx_GetStdout(void) { - PyObject *f = PySys_GetObject("stdout"); - if (!f) { - PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout"); - } - return f; -} - -static int __Pyx_PrintItem(PyObject *v) { - PyObject *f; - - if (!(f = __Pyx_GetStdout())) - return -1; - if (PyFile_SoftSpace(f, 1)) { - if (PyFile_WriteString(" ", f) < 0) - return -1; - } - if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0) - return -1; - if (PyString_Check(v)) { - char *s = PyString_AsString(v); - int len = PyString_Size(v); - if (len > 0 && - 
isspace(Py_CHARMASK(s[len-1])) && - s[len-1] != ' ') - PyFile_SoftSpace(f, 0); - } - return 0; -} - -static int __Pyx_PrintNewline(void) { - PyObject *f; - - if (!(f = __Pyx_GetStdout())) - return -1; - if (PyFile_WriteString("\n", f) < 0) - return -1; - PyFile_SoftSpace(f, 0); - return 0; -} - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) - PyErr_SetObject(PyExc_NameError, name); - return result; -} - -static int __Pyx_InternStrings(__Pyx_InternTabEntry *t) { - while (t->p) { - *t->p = PyString_InternFromString(t->s); - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, - long size) -{ - PyObject *py_module_name = 0; - PyObject *py_class_name = 0; - PyObject *py_name_list = 0; - PyObject *py_module = 0; - PyObject *result = 0; - - py_module_name = PyString_FromString(module_name); - if (!py_module_name) - goto bad; - py_class_name = PyString_FromString(class_name); - if (!py_class_name) - goto bad; - py_name_list = PyList_New(1); - if (!py_name_list) - goto bad; - Py_INCREF(py_class_name); - if (PyList_SetItem(py_name_list, 0, py_class_name) < 0) - goto bad; - py_module = __Pyx_Import(py_module_name, py_name_list); - if (!py_module) - goto bad; - result = PyObject_GetAttr(py_module, py_class_name); - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (((PyTypeObject *)result)->tp_basicsize != size) { - PyErr_Format(PyExc_ValueError, - "%s.%s does not appear to be the correct type object", - module_name, class_name); - goto bad; - } - goto done; -bad: - Py_XDECREF(result); - result = 0; -done: - Py_XDECREF(py_module_name); - Py_XDECREF(py_class_name); - Py_XDECREF(py_name_list); - return (PyTypeObject *)result; -} - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(char *funcname) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyObject *empty_tuple = 0; - PyObject *empty_string = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - py_srcfile = PyString_FromString(__pyx_filename); - if (!py_srcfile) goto bad; - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - empty_tuple = PyTuple_New(0); - if (!empty_tuple) goto bad; - empty_string = PyString_FromString(""); - if (!empty_string) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - empty_string, /*PyObject *code,*/ - empty_tuple, /*PyObject *consts,*/ - empty_tuple, /*PyObject *names,*/ - empty_tuple, /*PyObject *varnames,*/ - empty_tuple, /*PyObject *freevars,*/ - empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - empty_string /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_Get(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno 
= __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(empty_tuple); - Py_XDECREF(empty_string); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} diff --git a/numpy/doc/pyrex/numpyx.pyx b/numpy/doc/pyrex/numpyx.pyx deleted file mode 100644 index 8089fbc38..000000000 --- a/numpy/doc/pyrex/numpyx.pyx +++ /dev/null @@ -1,97 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough - -cimport c_python -cimport c_numpy -import numpy - -# Numpy must be initialized -c_numpy.import_array() - -def print_array_info(c_numpy.ndarray arr): - cdef int i - - print '-='*10 - print 'printing array info for ndarray at 0x%0lx'%(arr,) - print 'print number of dimensions:',arr.nd - print 'address of strides: 0x%0lx'%(arr.strides,) - print 'strides:' - for i from 0<=iarr.strides[i] - print 'memory dump:' - print_elements( arr.data, arr.strides, arr.dimensions, - arr.nd, sizeof(double), arr.dtype ) - print '-='*10 - print - -cdef print_elements(char *data, - c_python.Py_intptr_t* strides, - c_python.Py_intptr_t* dimensions, - int nd, - int elsize, - object dtype): - cdef c_python.Py_intptr_t i,j - cdef void* elptr - - if dtype not in [numpy.dtype(numpy.object_), - numpy.dtype(numpy.float64)]: - print ' print_elements() not (yet) implemented for dtype %s'%dtype.name - return - - if nd ==0: - if dtype==numpy.dtype(numpy.object_): - elptr = (data)[0] #[0] dereferences pointer in Pyrex - print ' ',elptr - elif dtype==numpy.dtype(numpy.float64): - print ' ',(data)[0] - elif nd == 1: - for i from 0<=idata)[0] - print ' ',elptr - elif dtype==numpy.dtype(numpy.float64): - print ' ',(data)[0] - data = data + strides[0] - else: - for i from 0<=ielsize and ->fields filled in appropriately. - - The itemsize attribute must return a number > 0. The fields - attribute must return a dictionary with at least "names" and - "formats" entries. The "formats" entry will be converted to a - "proper" descr->fields entry (all generic data-types converted to - ``PyArray_Descr *`` structure). - - -Reference counting for ``PyArray_Descr *`` objects. -``````````````````````````````````````````````````` - -Most functions that take ``PyArary_Descr *`` as arguments and return a -``PyObject *`` steal the reference unless otherwise noted in the code: - -Functions that return ``PyArray_Descr *`` objects return a new -reference. - -.. tip:: - - There is a new function and a new method of array objects both labelled - dtypescr which can be used to try out the ``PyArray_DescrConverter``. 
- diff --git a/numpy/doc/swig/Makefile b/numpy/doc/swig/Makefile deleted file mode 100644 index b64492f45..000000000 --- a/numpy/doc/swig/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# List all of the subdirectories here for recursive make -SUBDIRS = test doc - -# Default target -.PHONY : default -default: - @echo "There is no default make target for this Makefile" - @echo "Valid make targets are:" - @echo " test - Compile and run tests of numpy.i" - @echo " doc - Generate numpy.i documentation" - @echo " all - make test + doc" - @echo " clean - Remove generated files recursively" - -# Target all -.PHONY : all -all: $(SUBDIRS) - -# Target test -.PHONY : test -test: - cd $@ && make $@ - -# Target doc -.PHONY : doc -doc: - cd $@ && make - -# Target clean -.PHONY : clean -clean: - @for dir in $(SUBDIRS); do \ - echo ; \ - echo Running \'make clean\' in $$dir; \ - cd $$dir && make clean && cd ..; \ - done; \ - echo diff --git a/numpy/doc/swig/README b/numpy/doc/swig/README deleted file mode 100644 index d557b305f..000000000 --- a/numpy/doc/swig/README +++ /dev/null @@ -1,130 +0,0 @@ -Notes for the numpy/doc/swig directory -====================================== - -This set of files is for developing and testing file numpy.i, which is -intended to be a set of typemaps for helping SWIG interface between C -and C++ code that uses C arrays and the python module NumPy. It is -ultimately hoped that numpy.i will be included as part of the SWIG -distribution. - -Documentation -------------- -Documentation for how to use numpy.i is in the doc directory. The -primary source file here is numpy_swig.txt, a restructured text file -that documents how to use numpy.i. The Makefile in doc allows for the -conversion of numpy_swig.txt to HTML (if you have docutils installed) -and to PDF (if you have docutils and latex/pdftex installed). This -should not be necessary, however, as numpy_swig.html and -numpy_swig.pdf are stored in the repository. - -The same is true for a file called doc/testing.txt, which describes -the testing system used here. - -If you have the prerequisites installed and wish to build the HTML and -PDF documentation, this can be achieved by calling:: - - $ make doc - -from the shell. - -Testing -------- -The tests are a good example of what we are trying to do with numpy.i. -The files related to testing are are in the test subdirectory:: - - Vector.h - Vector.cxx - Vector.i - testVector.py - - Matrix.h - Matrix.cxx - Matrix.i - testMatrix.py - - Tensor.h - Tensor.cxx - Tensor.i - testTensor.py - -The header files contain prototypes for functions that illustrate the -wrapping issues we wish to address. Right now, this consists of -functions with argument signatures of the following forms. 
Vector.h:: - - (type IN_ARRAY1[ANY]) - (type* IN_ARRAY1, int DIM1) - (int DIM1, type* IN_ARRAY1) - - (type INPLACE_ARRAY1[ANY]) - (type* INPLACE_ARRAY1, int DIM1) - (int DIM1, type* INPLACE_ARRAY1) - - (type ARGOUT_ARRAY1[ANY]) - (type* ARGOUT_ARRAY1, int DIM1) - (int DIM1, type* ARGOUT_ARRAY1) - -Matrix.h:: - - (type IN_ARRAY2[ANY][ANY]) - (type* IN_ARRAY2, int DIM1, int DIM2) - (int DIM1, int DIM2, type* IN_ARRAY2) - - (type INPLACE_ARRAY2[ANY][ANY]) - (type* INPLACE_ARRAY2, int DIM1, int DIM2) - (int DIM1, int DIM2, type* INPLACE_ARRAY2) - - (type ARGOUT_ARRAY2[ANY][ANY]) - -Tensor.h:: - - (type IN_ARRAY3[ANY][ANY][ANY]) - (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) - (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) - - (type INPLACE_ARRAY3[ANY][ANY][ANY]) - (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) - (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) - - (type ARGOUT_ARRAY3[ANY][ANY][ANY]) - -These function signatures take a pointer to an array of type "type", -whose length is specified by the integer(s) DIM1 (and DIM2, and DIM3). - -The objective for the IN_ARRAY signatures is for SWIG to generate -python wrappers that take a container that constitutes a valid -argument to the numpy array constructor, and can be used to build an -array of type "type". Currently, types "signed char", "unsigned -char", "short", "unsigned short", "int", "unsigned int", "long", -"unsigned long", "long long", "unsigned long long", "float", and -"double" are supported and tested. - -The objective for the INPLACE_ARRAY signatures is for SWIG to generate -python wrappers that accept a numpy array of any of the above-listed -types. - -The source files Vector.cxx, Matrix.cxx and Tensor.cxx contain the -actual implementations of the functions described in Vector.h, -Matrix.h and Tensor.h. The python scripts testVector.py, -testMatrix.py and testTensor.py test the resulting python wrappers -using the unittest module. - -The SWIG interface files Vector.i, Matrix.i and Tensor.i are used to -generate the wrapper code. The SWIG_FILE_WITH_INIT macro allows -numpy.i to be used with multiple python modules. If it is specified, -then the %init block found in Vector.i, Matrix.i and Tensor.i are -required. The other things done in Vector.i, Matrix.i and Tensor.i -are the inclusion of the appropriate header file and numpy.i file, and -the "%apply" directives to force the functions to use the typemaps. - -The setup.py script is a standard python distutils script. It defines -_Vector, _Matrix and _Tensor extension modules and Vector, Matrix and -Tensor python modules. The Makefile automates everything, setting up -the dependencies, calling swig to generate the wrappers, and calling -setup.py to compile the wrapper code and generate the shared objects. -Targets "all" (default), "test", "doc" and "clean" are supported. The -"doc" target creates HTML documentation (with make target "html"), and -PDF documentation (with make targets "tex" and "pdf"). 
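As a rough editorial sketch (not copied from the test scripts themselves), a
test in the style of testVector.py could look like the following; the module
name Vector and the wrapped function length() are assumptions used only for
illustration::

    # Hypothetical sketch of a testVector.py-style unit test.
    import unittest
    import numpy
    import Vector          # assumed name of the SWIG-generated python module

    class VectorTestCase(unittest.TestCase):
        def testLengthList(self):
            "A wrapped function should accept a plain python list"
            self.assertAlmostEqual(Vector.length([3.0, 4.0, 0.0]), 5.0)

        def testLengthArray(self):
            "A wrapped function should accept a numpy array"
            vec = numpy.array([3.0, 4.0, 0.0])
            self.assertAlmostEqual(Vector.length(vec), 5.0)

    if __name__ == "__main__":
        unittest.main()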
- -To build and run the test code, simply execute from the shell:: - - $ make test diff --git a/numpy/doc/swig/doc/Makefile b/numpy/doc/swig/doc/Makefile deleted file mode 100644 index 9223f0481..000000000 --- a/numpy/doc/swig/doc/Makefile +++ /dev/null @@ -1,51 +0,0 @@ -# ReStructured Text -RST2HTML = rst2html.py -RST2LATEX = rst2latex.py -RFLAGS = --generator --time -HTML_FLAGS = --no-xml-declaration -LATEX_FLAGS = -LATEX = pdflatex - -# Web pages that need to be made -WEB_PAGES = numpy_swig.html testing.html - -# LaTeX files that need to be made -LATEX_FILES = numpy_swig.tex testing.tex - -# PDF files that need to be made -PDF_FILES = numpy_swig.pdf testing.pdf - -# Default target: documentation -.PHONY : doc -doc: html pdf - -# HTML target -.PHONY : html -html: $(WEB_PAGES) - -# Rule: %.txt -> %.html -%.html: %.txt - $(RST2HTML) $(RFLAGS) $(HTML_FLAGS) $< $@ - -# LaTeX target -.PHONY : tex -tex: $(LATEX_FILES) - -# Rule: %.txt -> %.tex -%.tex: %.txt - $(RST2LATEX) $(RFLAGS) $(LATEX_FLAGS) $< $@ - -# PDF target -.PHONY : pdf -pdf: $(PDF_FILES) - -# Rule: %.tex -> %.pdf -%.pdf: %.tex - $(LATEX) $< - $(LATEX) $< - -# Clean target -.PHONY : clean -clean: - $(RM) $(LATEX_FILES) - $(RM) *.pyc *.aux *.dvi *.log *.out *~ diff --git a/numpy/doc/swig/doc/numpy_swig.html b/numpy/doc/swig/doc/numpy_swig.html deleted file mode 100644 index ed127f330..000000000 --- a/numpy/doc/swig/doc/numpy_swig.html +++ /dev/null @@ -1,1244 +0,0 @@ - - - - - -numpy.i: a SWIG Interface File for NumPy - - - - - -
-
- numpy.i: a SWIG Interface File for NumPy
-
- Author:      Bill Spotz
- Institution: Sandia National Laboratories
- Date:        1 December, 2007
-
-

Introduction

-

The Simplified Wrapper and Interface Generator (or SWIG) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -SWIG can parse header files, and using only the code prototypes, -create an interface to the target language. But SWIG is not -omnipotent. For example, it cannot know from the prototype:

-
-double rms(double* seq, int n);
-
-

what exactly seq is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? SWIG cannot determine these details, -and does not attempt to do so.

-

If we designed rms, we probably made it a routine that takes an -input-only array of length n of double values called seq -and returns the root mean square. The default behavior of SWIG, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended.

-

For python, the preferred way of handling -contiguous (or technically, strided) blocks of homogeneous data is -with the module NumPy, which provides full -object-oriented access to multidimensional arrays of data. Therefore, -the most logical python interface for the rms function would be -(including doc string):

-
-def rms(seq):
-    """
-    rms: return the root mean square of a sequence
-    rms(numpy.ndarray) -> double
-    rms(list) -> double
-    rms(tuple) -> double
-    """
-
-

where seq would be a NumPy array of double values, and its -length n would be extracted from seq internally before being -passed to the C routine. Even better, since NumPy supports -construction of arrays from arbitrary python sequences, seq -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a double) and the wrapper code would -internally convert it to a NumPy array before extracting its data -and length.

-

SWIG allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use numpy.i, a SWIG interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the rms function prototype defined above was in a header file -named rms.h. To obtain the python interface discussed above, -your SWIG interface file would need the following:

-
-%{
-#define SWIG_FILE_WITH_INIT
-#include "rms.h"
-%}
-
-%include "numpy.i"
-
-%init %{
-import_array();
-%}
-
-%apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)};
-%include "rms.h"
-
-

Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. We will refer to such lists as -signatures. One of the many typemaps defined by numpy.i is used -above and has the signature (double* IN_ARRAY1, int DIM1). The -argument names are intended to suggest that the double* argument -is an input array of one dimension and that the int represents -that dimension. This is precisely the pattern in the rms -prototype.

-

Most likely, no actual prototypes to be wrapped will have the argument -names IN_ARRAY1 and DIM1. We use the %apply directive to -apply the typemap for one-dimensional input arrays of type double -to the actual prototype used by rms. Using numpy.i -effectively, therefore, requires knowing what typemaps are available -and what they do.

-

A SWIG interface file that includes the SWIG directives given -above will produce wrapper code that looks something like:

-
- 1 PyObject *_wrap_rms(PyObject *args) {
- 2   PyObject *resultobj = 0;
- 3   double *arg1 = (double *) 0 ;
- 4   int arg2 ;
- 5   double result;
- 6   PyArrayObject *array1 = NULL ;
- 7   int is_new_object1 = 0 ;
- 8   PyObject * obj0 = 0 ;
- 9
-10   if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail;
-11   {
-12     array1 = obj_to_array_contiguous_allow_conversion(
-13                  obj0, NPY_DOUBLE, &is_new_object1);
-14     npy_intp size[1] = {
-15       -1
-16     };
-17     if (!array1 || !require_dimensions(array1, 1) ||
-18         !require_size(array1, size, 1)) SWIG_fail;
-19     arg1 = (double*) array1->data;
-20     arg2 = (int) array1->dimensions[0];
-21   }
-22   result = (double)rms(arg1,arg2);
-23   resultobj = SWIG_From_double((double)(result));
-24   {
-25     if (is_new_object1 && array1) Py_DECREF(array1);
-26   }
-27   return resultobj;
-28 fail:
-29   {
-30     if (is_new_object1 && array1) Py_DECREF(array1);
-31   }
-32   return NULL;
-33 }
-
-

The typemaps from numpy.i are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the rms -function. From the format string "O:rms", we can see that the -argument list is expected to be a single python object (specified -by the O before the colon) and whose pointer is stored in -obj0. A number of functions, supplied by numpy.i, are called -to make and check the (possible) conversion from a generic python -object to a NumPy array. These functions are explained in the -section Helper Functions, but hopefully their names are -self-explanatory. At line 12 we use obj0 to construct a NumPy -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed.

-

This code has a significant amount of error handling. Note that -SWIG_fail is a macro for goto fail, referring to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the NumPy array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30.

-

Note that if the C function signature was in a different order:

-
-double rms(int n, double* seq);
-
-

that SWIG would not match the typemap signature given above with -the argument list for rms. Fortunately, numpy.i has a set of -typemaps with the data pointer given last:

-
-%apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)};
-
-

This simply has the effect of switching the definitions of arg1 -and arg2 in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20.

-
-
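As an editorial illustration (not part of the original interface file), the
wrapped module could then be used from python as sketched below; the module
name rms_module is an assumption::

    # Hypothetical usage of the SWIG-wrapped rms() described above.
    # "rms_module" is an assumed module name, not one defined in this document.
    import numpy
    import rms_module

    data = numpy.array([1.0, 2.0, 3.0, 4.0], dtype=numpy.float64)
    print rms_module.rms(data)              # a numpy array is used directly

    # Because the (double* IN_ARRAY1, int DIM1) typemap converts sequences,
    # a plain list or tuple is also accepted and converted internally.
    print rms_module.rms([1.0, 2.0, 3.0, 4.0])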
-

Using numpy.i

-

The numpy.i file is currently located in the numpy/doc/swig -sub-directory under the numpy installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. If it is ever adopted by SWIG developers, then it -will be installed in a standard place where SWIG can find it.

-

A simple module that only uses a single SWIG interface file should -include the following:

-
-%{
-#define SWIG_FILE_WITH_INIT
-%}
-%include "numpy.i"
-%init %{
-import_array();
-%}
-
-

Within a compiled python module, import_array() should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should #define SWIG_FILE_WITH_INIT or call -import_array(). Or, this initialization call could be in a -wrapper file generated by SWIG from an interface file that has the -%init block as above. If this is the case, and you have more than -one SWIG interface file, then only one interface file should -#define SWIG_FILE_WITH_INIT and call import_array().

-
-
-

Available Typemaps

-

The typemap directives provided by numpy.i for arrays of different -data types, say double and int, and dimensions of different -types, say int or long, are identical to one another except -for the C and NumPy type specifications. The typemaps are -therefore implemented (typically behind the scenes) via a macro:

-
-%numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE)
-
-

that can be invoked for appropriate (DATA_TYPE, DATA_TYPECODE, -DIM_TYPE) triplets. For example:

-
-%numpy_typemaps(double, NPY_DOUBLE, int)
-%numpy_typemaps(int,    NPY_INT   , int)
-
-

The numpy.i interface file uses the %numpy_typemaps macro to -implement typemaps for the following C data types and int -dimension types:

-
-
    -
  • signed char
  • -
  • unsigned char
  • -
  • short
  • -
  • unsigned short
  • -
  • int
  • -
  • unsigned int
  • -
  • long
  • -
  • unsigned long
  • -
  • long long
  • -
  • unsigned long long
  • -
  • float
  • -
  • double
  • -
-
-

In the following descriptions, we reference a generic DATA_TYPE, which -could be any of the C data types listed above, and DIM_TYPE which -should be one of the many types of integers.

-

The typemap signatures are largely differentiated on the name given to -the buffer pointer. Names with FARRAY are for FORTRAN-ordered -arrays, and names with ARRAY are for C-ordered (or 1D arrays).

-
-

Input Arrays

-

Input arrays are defined as arrays of data that are passed into a -routine but are not altered in-place or returned to the user. The -python input array is therefore allowed to be almost any python -sequence (such as a list) that can be converted to the requested type -of array. The input array signatures are

-

1D:

-
-
    -
  • ( DATA_TYPE IN_ARRAY1[ANY] )
  • -
  • ( DATA_TYPE* IN_ARRAY1, int DIM1 )
  • -
  • ( int DIM1, DATA_TYPE* IN_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE IN_ARRAY2[ANY][ANY] )
  • -
  • ( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )
  • -
  • ( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )
  • -
  • ( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )
  • -
  • ( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )
  • -
-
-

The first signature listed, ( DATA_TYPE IN_ARRAY1[ANY] ) is for -one-dimensional arrays with hard-coded dimensions. Likewise, -( DATA_TYPE IN_ARRAY2[ANY][ANY] ) is for two-dimensional arrays -with hard-coded dimensions, and similarly for three-dimensional.

-
-
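As a hypothetical illustration (the module name example and the function
sum1d are assumptions, not names defined in numpy.i), a function wrapped
with a one-dimensional input-array typemap accepts anything that the numpy
array constructor accepts::

    # Assumed wrapped prototype:  double sum1d(double* IN_ARRAY1, int DIM1);
    import numpy
    import example

    print example.sum1d([1.0, 2.0, 3.0])                        # list converted
    print example.sum1d((1.0, 2.0, 3.0))                        # tuple converted
    print example.sum1d(numpy.arange(4, dtype=numpy.float64))   # array used as-is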
-

In-Place Arrays

-

In-place arrays are defined as arrays that are modified in-place. The -input values may or may not be used, but the values at the time the -function returns are significant. The provided python argument -must therefore be a NumPy array of the required type. The in-place -signatures are

-

1D:

-
-
    -
  • ( DATA_TYPE INPLACE_ARRAY1[ANY] )
  • -
  • ( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )
  • -
  • ( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )
  • -
  • ( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )
  • -
  • ( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )
  • -
  • ( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )
  • -
  • ( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )
  • -
-
-

These typemaps now check to make sure that the INPLACE_ARRAY -arguments use native byte ordering. If not, an exception is raised.

-
-
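For example (a sketch with assumed names, not code from numpy.i), a routine
wrapped with the (double* INPLACE_ARRAY1, int DIM1) typemap must be handed a
properly typed, native-byte-order numpy array, which it then modifies in
place::

    # Assumed wrapped prototype:  void scale(double* INPLACE_ARRAY1, int DIM1);
    import numpy
    import example

    a = numpy.array([1.0, 2.0, 3.0])    # float64, native byte order
    example.scale(a)                    # modifies a in place, returns nothing
    print a

    # A plain list (or an array of the wrong dtype or byte order) would be
    # rejected rather than converted:
    # example.scale([1.0, 2.0, 3.0])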
-

Argout Arrays

-

Argout arrays are arrays that appear in the input arguments in C, but -are in fact output arrays. This pattern occurs often when there is -more than one output variable and the single return argument is -therefore not sufficient. In python, the conventional way to return -multiple arguments is to pack them into a sequence (tuple, list, etc.) -and return the sequence. This is what the argout typemaps do. If a -wrapped function that uses these argout typemaps has more than one -return argument, they are packed into a tuple or list, depending on -the version of python. The python user does not pass these -arrays in, they simply get returned. For the case where a dimension -is specified, the python user must provide that dimension as an -argument. The argout signatures are

-

1D:

-
-
    -
  • ( DATA_TYPE ARGOUT_ARRAY1[ANY] )
  • -
  • ( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )
  • -
  • ( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )
  • -
-
-

These are typically used in situations where, in C/C++, you would -allocate one or more arrays on the heap and call the function to fill -in their values. In python, the arrays are allocated for you and -returned as new array objects.

-

Note that we support DATA_TYPE* argout typemaps in 1D, but not 2D -or 3D. This is because of a quirk with the SWIG typemap syntax and -cannot be avoided. Note that for these types of 1D typemaps, the -python function will take a single argument representing DIM1.

-
-
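A sketch with assumed names: a C routine such as
void fill(double* ARGOUT_ARRAY1, int DIM1), wrapped with the 1D argout
typemap, is called from python with only the dimension argument, and the
newly allocated array comes back as the return value::

    # Assumed wrapped prototype:  void fill(double* ARGOUT_ARRAY1, int DIM1);
    import example

    out = example.fill(5)   # pass only DIM1; the array is allocated internally
    print out               # a new 1D numpy array of length 5, filled by fill()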
-

Argoutview Arrays

-

Argoutview arrays are for when your C code provides you with a view of -its internal data and does not require any memory to be allocated by -the user. This can be dangerous. There is almost no way to guarantee -that the internal data from the C code will remain in existence for -the entire lifetime of the NumPy array that encapsulates it. If -the user destroys the object that provides the view of the data before -destroying the NumPy array, then using that array may result in bad -memory references or segmentation faults. Nevertheless, there are -situations, working with large data sets, where you simply have no -other choice.

-

The C code to be wrapped for argoutview arrays are characterized by -pointers: pointers to the dimensions and double pointers to the data, -so that these values can be passed back to the user. The argoutview -typemap signatures are therefore

-

1D:

-
-
    -
  • ( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )
  • -
  • ( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • -
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )
  • -
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • -
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
  • -
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
  • -
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
  • -
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)
  • -
-
-

Note that arrays with hard-coded dimensions are not supported. These -cannot follow the double pointer signatures of these typemaps.

-
-
-

Output Arrays

-

The numpy.i interface file does not support typemaps for output -arrays, for several reasons. First, C/C++ return arguments are -limited to a single value. This prevents obtaining dimension -information in a general way. Second, arrays with hard-coded lengths -are not permitted as return arguments. In other words:

-
-double[3] newVector(double x, double y, double z);
-
-

is not legal C/C++ syntax. Therefore, we cannot provide typemaps of -the form:

-
-%typemap(out) (TYPE[ANY]);
-
-

If you run into a situation where a function or method is returning a -pointer to an array, your best bet is to write your own version of the -function to be wrapped, either with %extend for the case of class -methods or %ignore and %rename for the case of functions.

-
-
-

Other Common Types: bool

-

Note that C++ type bool is not supported in the list in the -Available Typemaps section. NumPy bools are a single byte, while -the C++ bool is four bytes (at least on my system). Therefore:

-
-%numpy_typemaps(bool, NPY_BOOL, int)
-
-

will result in typemaps that will produce code that reference -improper data lengths. You can implement the following macro -expansion:

-
-%numpy_typemaps(bool, NPY_UINT, int)
-
-

to fix the data length problem, and Input Arrays will work fine, -but In-Place Arrays might fail type-checking.

-
-
-

Other Common Types: complex

-

Typemap conversions for complex floating-point types is also not -supported automatically. This is because python and NumPy are -written in C, which does not have native complex types. Both -python and NumPy implement their own (essentially equivalent) -struct definitions for complex variables:

-
-/* Python */
-typedef struct {double real; double imag;} Py_complex;
-
-/* NumPy */
-typedef struct {float  real, imag;} npy_cfloat;
-typedef struct {double real, imag;} npy_cdouble;
-
-

We could have implemented:

-
-%numpy_typemaps(Py_complex , NPY_CDOUBLE, int)
-%numpy_typemaps(npy_cfloat , NPY_CFLOAT , int)
-%numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int)
-
-

which would have provided automatic type conversions for arrays of -type Py_complex, npy_cfloat and npy_cdouble. However, it -seemed unlikely that there would be any independent (non-python, -non-NumPy) application code that people would be using SWIG to -generate a python interface to, that also used these definitions -for complex types. More likely, these application codes will define -their own complex types, or in the case of C++, use std::complex. -Assuming these data structures are compatible with python and -NumPy complex types, %numpy_typemap expansions as above (with -the user's complex type substituted for the first argument) should -work.

-
-
-
-

NumPy Array Scalars and SWIG

-

SWIG has sophisticated type checking for numerical types. For -example, if your C/C++ routine expects an integer as input, the code -generated by SWIG will check for both python integers and -python long integers, and raise an overflow error if the provided -python integer is too big to cast down to a C integer. With the -introduction of NumPy array scalars into your python code, you -might conceivably extract an integer from a NumPy array and attempt -to pass this to a SWIG-wrapped C/C++ function that expects an -int, but the SWIG type checking will not recognize the NumPy -array scalar as an integer. (Often, this does in fact work -- it -depends on whether NumPy recognizes the integer type you are using -as inheriting from the python integer type on the platform you are -using. Sometimes, this means that code that works on a 32-bit machine -will fail on a 64-bit machine.)

-

If you get a python error that looks like the following:

-
-TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int'
-
-

and the argument you are passing is an integer extracted from a -NumPy array, then you have stumbled upon this problem. The -solution is to modify the SWIG type conversion system to accept -Numpy array scalars in addition to the standard integer types. -Fortunately, this capability has been provided for you. Simply copy -the file:

-
-pyfragments.swg
-
-

to the working build directory for your project, and this problem will -be fixed. It is suggested that you do this anyway, as it only -increases the capabilities of your python interface.

-
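The failure mode, and the explicit-cast workaround you can fall back on if
you cannot add pyfragments.swg to your build, can be sketched as follows
(module and function names are assumptions)::

    # Assumed wrapped prototype:  void set_size(int n);
    import numpy
    import example

    sizes = numpy.array([10, 20, 30])
    n = sizes[1]              # a NumPy array scalar, not a plain python int

    # Without the pyfragments.swg fragments this call may raise the
    # TypeError shown above on some platforms:
    # example.set_size(n)

    # Casting explicitly always works, at the cost of some readability:
    example.set_size(int(n))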
-

Why is There a Second File?

-

The SWIG type checking and conversion system is a complicated -combination of C macros, SWIG macros, SWIG typemaps and SWIG -fragments. Fragments are a way to conditionally insert code into your -wrapper file if it is needed, and not insert it if not needed. If -multiple typemaps require the same fragment, the fragment only gets -inserted into your wrapper code once.

-

There is a fragment for converting a python integer to a C -long. There is a different fragment that converts a python -integer to a C int, that calls the routine defined in the -long fragment. We can make the changes we want here by changing -the definition for the long fragment. SWIG determines the -active definition for a fragment using a "first come, first served" -system. That is, we need to define the fragment for long -conversions prior to SWIG doing it internally. SWIG allows us -to do this by putting our fragment definitions in the file -pyfragments.swg. If we were to put the new fragment definitions -in numpy.i, they would be ignored.

-
-
-
-

Helper Functions

-

The numpy.i file contains several macros and routines that it -uses internally to build its typemaps. However, these functions may -be useful elsewhere in your interface file. These macros and routines -are implemented as fragments, which are described briefly in the -previous section. If you try to use one or more of the following -macros or functions, but your compiler complains that it does not -recognize the symbol, then you need to force these fragments to appear -in your code using:

-
-%fragment("NumPy_Fragments");
-
-

in your SWIG interface file.

-
-

Macros

-
-
-
is_array(a)
-
Evaluates as true if a is non-NULL and can be cast to a -PyArrayObject*.
-
array_type(a)
-
Evaluates to the integer data type code of a, assuming a can -be cast to a PyArrayObject*.
-
array_numdims(a)
-
Evaluates to the integer number of dimensions of a, assuming -a can be cast to a PyArrayObject*.
-
array_dimensions(a)
-
Evaluates to an array of type npy_intp and length -array_numdims(a), giving the lengths of all of the dimensions -of a, assuming a can be cast to a PyArrayObject*.
-
array_size(a,i)
-
Evaluates to the i-th dimension size of a, assuming a -can be cast to a PyArrayObject*.
-
array_data(a)
-
Evaluates to a pointer of type void* that points to the data -buffer of a, assuming a can be cast to a PyArrayObject*.
-
array_is_contiguous(a)
-
Evaluates as true if a is a contiguous array. Equivalent to -(PyArray_ISCONTIGUOUS(a)).
-
array_is_native(a)
-
Evaluates as true if the data buffer of a uses native byte -order. Equivalent to (PyArray_ISNOTSWAPPED(a)).
-
array_is_fortran(a)
-
Evaluates as true if a is FORTRAN ordered.
-
-
-
-
-

Routines

-
-

pytype_string()

-
-

Return type: char*

-

Arguments:

-
    -
  • PyObject* py_obj, a general python object.
  • -
-

Return a string describing the type of py_obj.

-
-

typecode_string()

-
-

Return type: char*

-

Arguments:

-
    -
  • int typecode, a NumPy integer typecode.
  • -
-

Return a string describing the type corresponding to the NumPy -typecode.

-
-

type_match()

-
-

Return type: int

-

Arguments:

-
    -
  • int actual_type, the NumPy typecode of a NumPy array.
  • -
  • int desired_type, the desired NumPy typecode.
  • -
-

Make sure that actual_type is compatible with -desired_type. For example, this allows character and -byte types, or int and long types, to match. This is now -equivalent to PyArray_EquivTypenums().

-
-

obj_to_array_no_conversion()

-
-

Return type: PyArrayObject*

-

Arguments:

-
    -
  • PyObject* input, a general python object.
  • -
  • int typecode, the desired NumPy typecode.
  • -
-

Cast input to a PyArrayObject* if legal, and ensure that -it is of type typecode. If input cannot be cast, or the -typecode is wrong, set a python error and return NULL.

-
-

obj_to_array_allow_conversion()

-
-

Return type: PyArrayObject*

-

Arguments:

-
    -
  • PyObject* input, a general python object.
  • -
  • int typecode, the desired NumPy typecode of the resulting -array.
  • -
  • int* is_new_object, returns a value of 0 if no conversion -performed, else 1.
  • -
-

Convert input to a NumPy array with the given typecode. -On success, return a valid PyArrayObject* with the correct -type. On failure, the python error string will be set and the -routine returns NULL.

-
-

make_contiguous()

-
-

Return type: PyArrayObject*

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
  • int* is_new_object, returns a value of 0 if no conversion -performed, else 1.
  • -
  • int min_dims, minimum allowable dimensions.
  • -
  • int max_dims, maximum allowable dimensions.
  • -
-

Check to see if ary is contiguous. If so, return the input -pointer and flag it as not a new object. If it is not contiguous, -create a new PyArrayObject* using the original data, flag it -as a new object and return the pointer.

-
-

obj_to_array_contiguous_allow_conversion()

-
-

Return type: PyArrayObject*

-

Arguments:

-
    -
  • PyObject* input, a general python object.
  • -
  • int typecode, the desired NumPy typecode of the resulting -array.
  • -
  • int* is_new_object, returns a value of 0 if no conversion -performed, else 1.
  • -
-

Convert input to a contiguous PyArrayObject* of the -specified type. If the input object is not a contiguous -PyArrayObject*, a new one will be created and the new object -flag will be set.

-
-

require_contiguous()

-
-

Return type: int

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
-

Test whether ary is contiguous. If so, return 1. Otherwise, -set a python error and return 0.

-
-

require_native()

-
-

Return type: int

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
-

Require that ary is not byte-swapped. If the array is not -byte-swapped, return 1. Otherwise, set a python error and -return 0.

-
-

require_dimensions()

-
-

Return type: int

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
  • int exact_dimensions, the desired number of dimensions.
  • -
-

Require ary to have a specified number of dimensions. If the -array has the specified number of dimensions, return 1. -Otherwise, set a python error and return 0.

-
-

require_dimensions_n()

-
-

Return type: int

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
  • int* exact_dimensions, an array of integers representing -acceptable numbers of dimensions.
  • -
  • int n, the length of exact_dimensions.
  • -
-

Require ary to have one of a list of specified number of -dimensions. If the array has one of the specified number of -dimensions, return 1. Otherwise, set the python error string -and return 0.

-
-

require_size()

-
-

Return type: int

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
  • npy_intp* size, an array representing the desired lengths of -each dimension.
  • -
  • int n, the length of size.
  • -
-

Require ary to have a specified shape. If the array has the -specified shape, return 1. Otherwise, set the python error -string and return 0.

-
-

require_fortran()

-
-

Return type: int

-

Arguments:

-
    -
  • PyArrayObject* ary, a NumPy array.
  • -
-

Require the given PyArrayObject to be FORTRAN ordered. If -the PyArrayObject is already FORTRAN ordered, do nothing. -Else, set the FORTRAN ordering flag and recompute the strides.

-
-
-
-
-
-

Beyond the Provided Typemaps

-

There are many C or C++ array/NumPy array situations not covered by -a simple %include "numpy.i" and subsequent %apply directives.

-
-

A Common Example

-

Consider a reasonable prototype for a dot product function:

-
-double dot(int len, double* vec1, double* vec2);
-
-

The python interface that we want is:

-
-def dot(vec1, vec2):
-    """
-    dot(PyObject,PyObject) -> double
-    """
-
-

The problem here is that there is one dimension argument and two array -arguments, and our typemaps are set up for dimensions that apply to a -single array (in fact, SWIG does not provide a mechanism for -associating len with vec2 that takes two python input -arguments). The recommended solution is the following:

-
-%apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1),
-                                      (int len2, double* vec2)}
-%rename (dot) my_dot;
-%exception my_dot {
-    $action
-    if (PyErr_Occurred()) SWIG_fail;
-}
-%inline %{
-double my_dot(int len1, double* vec1, int len2, double* vec2) {
-    if (len1 != len2) {
-        PyErr_Format(PyExc_ValueError,
-                     "Arrays of lengths (%d,%d) given",
-                     len1, len2);
-        return 0.0;
-    }
-    return dot(len1, vec1, vec2);
-}
-%}
-
-

If the header file that contains the prototype for double dot() -also contains other prototypes that you want to wrap, so that you need -to %include this header file, then you will also need a %ignore -dot; directive, placed after the %rename and before the -%include directives. Or, if the function in question is a class -method, you will want to use %extend rather than %inline in -addition to %ignore.

-

A note on error handling: Note that my_dot returns a -double but that it can also raise a python error. The -resulting wrapper function will return a python float -representation of 0.0 when the vector lengths do not match. Since -this is not NULL, the python interpreter will not know to check -for an error. For this reason, we add the %exception directive -above for my_dot to get the behavior we want (note that -$action is a macro that gets expanded to a valid call to -my_dot). In general, you will probably want to write a SWIG -macro to perform this task.

-
-
-

Other Situations

-

There are other wrapping situations in which numpy.i may prove helpful.

-
-
    -
  • In some situations, you may be able to use the %numpy_typemaps macro to implement typemaps for your own types. See the Other Common Types: bool or Other Common Types: complex sections for examples. Another situation is if your dimensions are of a type other than int (say long, for example):

    -
    -%numpy_typemaps(double, NPY_DOUBLE, long)
    -
    -
  • -
  • You can use the code in numpy.i to write your own typemaps. For example, if you had a four-dimensional array as a function argument, you could cut-and-paste the appropriate three-dimensional typemaps into your interface file. The modifications for the fourth dimension would be trivial.

    -
  • -
  • Sometimes, the best approach is to use the %extend directive to define new methods for your classes (or overload existing ones) that take a PyObject* (that either is or can be converted to a PyArrayObject*) instead of a pointer to a buffer. In this case, the helper routines in numpy.i can be very useful (see the sketch after this list).

    -
  • -
  • Writing typemaps can be a bit nonintuitive. If you have specific questions about writing SWIG typemaps for NumPy, the developers of numpy.i do monitor the Numpy-discussion and Swig-user mail lists.

    -
  • -
-
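To illustrate the %extend approach from the list above, here is a minimal sketch. The Waveform class and its setBuffer() method are hypothetical; only the numpy.i helpers (obj_to_array_contiguous_allow_conversion, require_dimensions, array_data, array_size) come from this document:

    %extend Waveform {
      /* Accept any python sequence convertible to a 1D double array and
       * forward its buffer to the hypothetical C++ method setBuffer(). */
      void setData(PyObject* obj) {
        int is_new_object = 0;
        PyArrayObject* array =
          obj_to_array_contiguous_allow_conversion(obj, NPY_DOUBLE,
                                                   &is_new_object);
        if (array && require_dimensions(array, 1)) {
          $self->setBuffer((double*) array_data(array),
                           (int) array_size(array, 0));
        }
        if (is_new_object && array) { Py_DECREF(array); }
      }
    }

As before, pairing such a method with an %exception directive ensures that any python error set by the helper routines propagates back to the caller.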
-
-
-

A Final Note

-

When you use the %apply directive, as is usually necessary to use numpy.i, it will remain in effect until you tell SWIG that it shouldn't be. If the arguments to the functions or methods that you are wrapping have common names, such as length or vector, these typemaps may get applied in situations you do not expect or want. Therefore, it is always a good idea to add a %clear directive after you are done with a specific typemap:

-
-%apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)}
-%include "my_header.h"
-%clear (double* vector, int length);
-
-

In general, you should target these typemap signatures specifically where you want them, and then clear them after you are done.

-
-
-
-

Summary

-

Out of the box, numpy.i provides typemaps that support conversion between NumPy arrays and C arrays:

-
-
    -
  • That can be one of 12 different scalar types: signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, long long, unsigned long long, float and double.
  • -
  • That support 41 different argument signatures for each data type, including:
      -
    • One-dimensional, two-dimensional and three-dimensional arrays.
    • -
    • Input-only, in-place, argout and argoutview behavior.
    • -
    • Hard-coded dimensions, data-buffer-then-dimensions specification, and dimensions-then-data-buffer specification.
    • -
    • Both C-ordering ("last dimension fastest") or FORTRAN-ordering -("first dimension fastest") support for 2D and 3D arrays.
    • -
    -
  • -
-
-

The numpy.i interface file also provides additional tools for wrapper developers, including:

-
-
    -
  • A SWIG macro (%numpy_typemaps) with three arguments for implementing the 41 argument signatures for the user's choice of (1) C data type, (2) NumPy data type (assuming they match), and (3) dimension type.
  • -
  • Nine C macros and 13 C functions that can be used to write specialized typemaps, extensions, or inlined functions that handle cases not covered by the provided typemaps.
  • -
-
-
-
-

Acknowledgements

-

Many people have worked to glue SWIG and NumPy together (as well as SWIG and the predecessors of NumPy, Numeric and numarray). The effort to standardize this work into numpy.i began at the 2005 SciPy Conference with a conversation between Fernando Perez and myself. Fernando collected helper functions and typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael Sanner. Sebastian Hasse and Georg Holzmann have also provided additional error checking and use cases. The work of these contributors has made this end result possible.

-
-
- - - diff --git a/numpy/doc/swig/doc/numpy_swig.pdf b/numpy/doc/swig/doc/numpy_swig.pdf deleted file mode 100644 index 1d4642cf7..000000000 Binary files a/numpy/doc/swig/doc/numpy_swig.pdf and /dev/null differ diff --git a/numpy/doc/swig/doc/numpy_swig.txt b/numpy/doc/swig/doc/numpy_swig.txt deleted file mode 100644 index bfde018bf..000000000 --- a/numpy/doc/swig/doc/numpy_swig.txt +++ /dev/null @@ -1,950 +0,0 @@ -========================================== - numpy.i: a SWIG Interface File for NumPy -========================================== - -:Author: Bill Spotz -:Institution: Sandia National Laboratories -:Date: 1 December, 2007 - -.. contents:: - -Introduction -============ - -The Simple Wrapper and Interface Generator (or `SWIG -`_) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -`SWIG`_ can parse header files, and using only the code prototypes, -create an interface to the target language. But `SWIG`_ is not -omnipotent. For example, it cannot know from the prototype:: - - double rms(double* seq, int n); - -what exactly ``seq`` is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? `SWIG`_ cannot determine these details, -and does not attempt to do so. - -If we designed ``rms``, we probably made it a routine that takes an -input-only array of length ``n`` of ``double`` values called ``seq`` -and returns the root mean square. The default behavior of `SWIG`_, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended. - -For `python `_, the preferred way of handling -contiguous (or technically, *strided*) blocks of homogeneous data is -with the module `NumPy `_, which provides full -object-oriented access to multidimensial arrays of data. Therefore, -the most logical `python`_ interface for the ``rms`` function would be -(including doc string):: - - def rms(seq): - """ - rms: return the root mean square of a sequence - rms(numpy.ndarray) -> double - rms(list) -> double - rms(tuple) -> double - """ - -where ``seq`` would be a `NumPy`_ array of ``double`` values, and its -length ``n`` would be extracted from ``seq`` internally before being -passed to the C routine. Even better, since `NumPy`_ supports -construction of arrays from arbitrary `python`_ sequences, ``seq`` -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a ``double``) and the wrapper code would -internally convert it to a `NumPy`_ array before extracting its data -and length. - -`SWIG`_ allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use ``numpy.i``, a `SWIG`_ interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the ``rms`` function prototype defined above was in a header file -named ``rms.h``. To obtain the `python`_ interface discussed above, -your `SWIG`_ interface file would need the following:: - - %{ - #define SWIG_FILE_WITH_INIT - #include "rms.h" - %} - - %include "numpy.i" - - %init %{ - import_array(); - %} - - %apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)}; - %include "rms.h" - -Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. 
We will refer to such lists as -*signatures*. One of the many typemaps defined by ``numpy.i`` is used -above and has the signature ``(double* IN_ARRAY1, int DIM1)``. The -argument names are intended to suggest that the ``double*`` argument -is an input array of one dimension and that the ``int`` represents -that dimension. This is precisely the pattern in the ``rms`` -prototype. - -Most likely, no actual prototypes to be wrapped will have the argument -names ``IN_ARRAY1`` and ``DIM1``. We use the ``%apply`` directive to -apply the typemap for one-dimensional input arrays of type ``double`` -to the actual prototype used by ``rms``. Using ``numpy.i`` -effectively, therefore, requires knowing what typemaps are available -and what they do. - -A `SWIG`_ interface file that includes the `SWIG`_ directives given -above will produce wrapper code that looks something like:: - - 1 PyObject *_wrap_rms(PyObject *args) { - 2 PyObject *resultobj = 0; - 3 double *arg1 = (double *) 0 ; - 4 int arg2 ; - 5 double result; - 6 PyArrayObject *array1 = NULL ; - 7 int is_new_object1 = 0 ; - 8 PyObject * obj0 = 0 ; - 9 - 10 if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail; - 11 { - 12 array1 = obj_to_array_contiguous_allow_conversion( - 13 obj0, NPY_DOUBLE, &is_new_object1); - 14 npy_intp size[1] = { - 15 -1 - 16 }; - 17 if (!array1 || !require_dimensions(array1, 1) || - 18 !require_size(array1, size, 1)) SWIG_fail; - 19 arg1 = (double*) array1->data; - 20 arg2 = (int) array1->dimensions[0]; - 21 } - 22 result = (double)rms(arg1,arg2); - 23 resultobj = SWIG_From_double((double)(result)); - 24 { - 25 if (is_new_object1 && array1) Py_DECREF(array1); - 26 } - 27 return resultobj; - 28 fail: - 29 { - 30 if (is_new_object1 && array1) Py_DECREF(array1); - 31 } - 32 return NULL; - 33 } - -The typemaps from ``numpy.i`` are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the ``rms`` -function. From the format string ``"O:rms"``, we can see that the -argument list is expected to be a single `python`_ object (specified -by the ``O`` before the colon) and whose pointer is stored in -``obj0``. A number of functions, supplied by ``numpy.i``, are called -to make and check the (possible) conversion from a generic `python`_ -object to a `NumPy`_ array. These functions are explained in the -section `Helper Functions`_, but hopefully their names are -self-explanatory. At line 12 we use ``obj0`` to construct a `NumPy`_ -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed. - -This code has a significant amount of error handling. Note the -``SWIG_fail`` is a macro for ``goto fail``, refering to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the `NumPy`_ array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30. - -Note that if the C function signature was in a different order:: - - double rms(int n, double* seq); - -that `SWIG`_ would not match the typemap signature given above with -the argument list for ``rms``. 
Fortunately, ``numpy.i`` has a set of -typemaps with the data pointer given last:: - - %apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)}; - -This simply has the effect of switching the definitions of ``arg1`` -and ``arg2`` in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20. - -Using numpy.i -============= - -The ``numpy.i`` file is currently located in the ``numpy/docs/swig`` -sub-directory under the ``numpy`` installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. If it is ever adopted by `SWIG`_ developers, then it -will be installed in a standard place where `SWIG`_ can find it. - -A simple module that only uses a single `SWIG`_ interface file should -include the following:: - - %{ - #define SWIG_FILE_WITH_INIT - %} - %include "numpy.i" - %init %{ - import_array(); - %} - -Within a compiled `python`_ module, ``import_array()`` should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should ``#define SWIG_FILE_WITH_INIT`` or call -``import_array()``. Or, this initialization call could be in a -wrapper file generated by `SWIG`_ from an interface file that has the -``%init`` block as above. If this is the case, and you have more than -one `SWIG`_ interface file, then only one interface file should -``#define SWIG_FILE_WITH_INIT`` and call ``import_array()``. - -Available Typemaps -================== - -The typemap directives provided by ``numpy.i`` for arrays of different -data types, say ``double`` and ``int``, and dimensions of different -types, say ``int`` or ``long``, are identical to one another except -for the C and `NumPy`_ type specifications. The typemaps are -therefore implemented (typically behind the scenes) via a macro:: - - %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -that can be invoked for appropriate ``(DATA_TYPE, DATA_TYPECODE, -DIM_TYPE)`` triplets. For example:: - - %numpy_typemaps(double, NPY_DOUBLE, int) - %numpy_typemaps(int, NPY_INT , int) - -The ``numpy.i`` interface file uses the ``%numpy_typemaps`` macro to -implement typemaps for the following C data types and ``int`` -dimension types: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -In the following descriptions, we reference a generic ``DATA_TYPE``, which -could be any of the C data types listed above, and ``DIM_TYPE`` which -should be one of the many types of integers. - -The typemap signatures are largely differentiated on the name given to -the buffer pointer. Names with ``FARRAY`` are for FORTRAN-ordered -arrays, and names with ``ARRAY`` are for C-ordered (or 1D arrays). - -Input Arrays ------------- - -Input arrays are defined as arrays of data that are passed into a -routine but are not altered in-place or returned to the user. The -`python`_ input array is therefore allowed to be almost any `python`_ -sequence (such as a list) that can be converted to the requested type -of array. 
The input array signatures are - -1D: - - * ``( DATA_TYPE IN_ARRAY1[ANY] )`` - * ``( DATA_TYPE* IN_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* IN_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )`` - * ``( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )`` - * ``( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )`` - -The first signature listed, ``( DATA_TYPE IN_ARRAY[ANY] )`` is for -one-dimensional arrays with hard-coded dimensions. Likewise, -``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` is for two-dimensional arrays -with hard-coded dimensions, and similarly for three-dimensional. - -In-Place Arrays ---------------- - -In-place arrays are defined as arrays that are modified in-place. The -input values may or may not be used, but the values at the time the -function returns are significant. The provided `python`_ argument -must therefore be a `NumPy`_ array of the required type. The in-place -signatures are - -1D: - - * ``( DATA_TYPE INPLACE_ARRAY1[ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )`` - * ``( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )`` - * ``( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )`` - -These typemaps now check to make sure that the ``INPLACE_ARRAY`` -arguments use native byte ordering. If not, an exception is raised. - -Argout Arrays -------------- - -Argout arrays are arrays that appear in the input arguments in C, but -are in fact output arrays. This pattern occurs often when there is -more than one output variable and the single return argument is -therefore not sufficient. In `python`_, the convential way to return -multiple arguments is to pack them into a sequence (tuple, list, etc.) -and return the sequence. This is what the argout typemaps do. If a -wrapped function that uses these argout typemaps has more than one -return argument, they are packed into a tuple or list, depending on -the version of `python`_. The `python`_ user does not pass these -arrays in, they simply get returned. For the case where a dimension -is specified, the python user must provide that dimension as an -argument. The argout signatures are - -1D: - - * ``( DATA_TYPE ARGOUT_ARRAY1[ANY] )`` - * ``( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )`` - -3D: - - * ``( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )`` - -These are typically used in situations where in C/C++, you would -allocate a(n) array(s) on the heap, and call the function to fill the -array(s) values. 
In `python`_, the arrays are allocated for you and -returned as new array objects. - -Note that we support ``DATA_TYPE*`` argout typemaps in 1D, but not 2D -or 3D. This is because of a quirk with the `SWIG`_ typemap syntax and -cannot be avoided. Note that for these types of 1D typemaps, the -`python`_ function will take a single argument representing ``DIM1``. - -Argoutview Arrays ------------------ - -Argoutview arrays are for when your C code provides you with a view of -its internal data and does not require any memory to be allocated by -the user. This can be dangerous. There is almost no way to guarantee -that the internal data from the C code will remain in existence for -the entire lifetime of the `NumPy`_ array that encapsulates it. If -the user destroys the object that provides the view of the data before -destroying the `NumPy`_ array, then using that array my result in bad -memory references or segmentation faults. Nevertheless, there are -situations, working with large data sets, where you simply have no -other choice. - -The C code to be wrapped for argoutview arrays are characterized by -pointers: pointers to the dimensions and double pointers to the data, -so that these values can be passed back to the user. The argoutview -typemap signatures are therefore - -1D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )`` - * ``( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)`` - -Note that arrays with hard-coded dimensions are not supported. These -cannot follow the double pointer signatures of these typemaps. - -Output Arrays -------------- - -The ``numpy.i`` interface file does not support typemaps for output -arrays, for several reasons. First, C/C++ return arguments are -limited to a single value. This prevents obtaining dimension -information in a general way. Second, arrays with hard-coded lengths -are not permitted as return arguments. In other words:: - - double[3] newVector(double x, double y, double z); - -is not legal C/C++ syntax. Therefore, we cannot provide typemaps of -the form:: - - %typemap(out) (TYPE[ANY]); - -If you run into a situation where a function or method is returning a -pointer to an array, your best bet is to write your own version of the -function to be wrapped, either with ``%extend`` for the case of class -methods or ``%ignore`` and ``%rename`` for the case of functions. - -Other Common Types: bool ------------------------- - -Note that C++ type ``bool`` is not supported in the list in the -`Available Typemaps`_ section. NumPy bools are a single byte, while -the C++ ``bool`` is four bytes (at least on my system). Therefore:: - - %numpy_typemaps(bool, NPY_BOOL, int) - -will result in typemaps that will produce code that reference -improper data lengths. 
You can implement the following macro -expansion:: - - %numpy_typemaps(bool, NPY_UINT, int) - -to fix the data length problem, and `Input Arrays`_ will work fine, -but `In-Place Arrays`_ might fail type-checking. - -Other Common Types: complex ---------------------------- - -Typemap conversions for complex floating-point types is also not -supported automatically. This is because `python`_ and `NumPy`_ are -written in C, which does not have native complex types. Both -`python`_ and `NumPy`_ implement their own (essentially equivalent) -``struct`` definitions for complex variables:: - - /* Python */ - typedef struct {double real; double imag;} Py_complex; - - /* NumPy */ - typedef struct {float real, imag;} npy_cfloat; - typedef struct {double real, imag;} npy_cdouble; - -We could have implemented:: - - %numpy_typemaps(Py_complex , NPY_CDOUBLE, int) - %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int) - %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int) - -which would have provided automatic type conversions for arrays of -type ``Py_complex``, ``npy_cfloat`` and ``npy_cdouble``. However, it -seemed unlikely that there would be any independent (non-`python`_, -non-`NumPy`_) application code that people would be using `SWIG`_ to -generate a `python`_ interface to, that also used these definitions -for complex types. More likely, these application codes will define -their own complex types, or in the case of C++, use ``std::complex``. -Assuming these data structures are compatible with `python`_ and -`NumPy`_ complex types, ``%numpy_typemap`` expansions as above (with -the user's complex type substituted for the first argument) should -work. - -NumPy Array Scalars and SWIG -============================ - -`SWIG`_ has sophisticated type checking for numerical types. For -example, if your C/C++ routine expects an integer as input, the code -generated by `SWIG`_ will check for both `python`_ integers and -`python`_ long integers, and raise an overflow error if the provided -`python`_ integer is too big to cast down to a C integer. With the -introduction of `NumPy`_ scalar arrays into your `python`_ code, you -might conceivably extract an integer from a `NumPy`_ array and attempt -to pass this to a `SWIG`_-wrapped C/C++ function that expects an -``int``, but the `SWIG`_ type checking will not recognize the `NumPy`_ -array scalar as an integer. (Often, this does in fact work -- it -depends on whether `NumPy`_ recognizes the integer type you are using -as inheriting from the `python`_ integer type on the platform you are -using. Sometimes, this means that code that works on a 32-bit machine -will fail on a 64-bit machine.) - -If you get a `python`_ error that looks like the following:: - - TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int' - -and the argument you are passing is an integer extracted from a -`NumPy`_ array, then you have stumbled upon this problem. The -solution is to modify the `SWIG`_ type conversion system to accept -`Numpy`_ array scalars in addition to the standard integer types. -Fortunately, this capabilitiy has been provided for you. Simply copy -the file:: - - pyfragments.swg - -to the working build directory for you project, and this problem will -be fixed. It is suggested that you do this anyway, as it only -increases the capabilities of your `python`_ interface. - -Why is There a Second File? 
---------------------------- - -The `SWIG`_ type checking and conversion system is a complicated -combination of C macros, `SWIG`_ macros, `SWIG`_ typemaps and `SWIG`_ -fragments. Fragments are a way to conditionally insert code into your -wrapper file if it is needed, and not insert it if not needed. If -multiple typemaps require the same fragment, the fragment only gets -inserted into your wrapper code once. - -There is a fragment for converting a `python`_ integer to a C -``long``. There is a different fragment that converts a `python`_ -integer to a C ``int``, that calls the rountine defined in the -``long`` fragment. We can make the changes we want here by changing -the definition for the ``long`` fragment. `SWIG`_ determines the -active definition for a fragment using a "first come, first served" -system. That is, we need to define the fragment for ``long`` -conversions prior to `SWIG`_ doing it internally. `SWIG`_ allows us -to do this by putting our fragment definitions in the file -``pyfragments.swg``. If we were to put the new fragment definitions -in ``numpy.i``, they would be ignored. - -Helper Functions -================ - -The ``numpy.i`` file containes several macros and routines that it -uses internally to build its typemaps. However, these functions may -be useful elsewhere in your interface file. These macros and routines -are implemented as fragments, which are described briefly in the -previous section. If you try to use one or more of the following -macros or functions, but your compiler complains that it does not -recognize the symbol, then you need to force these fragments to appear -in your code using:: - - %fragment("NumPy_Fragments"); - -in your `SWIG`_ interface file. - -Macros ------- - - **is_array(a)** - Evaluates as true if ``a`` is non-``NULL`` and can be cast to a - ``PyArrayObject*``. - - **array_type(a)** - Evaluates to the integer data type code of ``a``, assuming ``a`` can - be cast to a ``PyArrayObject*``. - - **array_numdims(a)** - Evaluates to the integer number of dimensions of ``a``, assuming - ``a`` can be cast to a ``PyArrayObject*``. - - **array_dimensions(a)** - Evaluates to an array of type ``npy_intp`` and length - ``array_numdims(a)``, giving the lengths of all of the dimensions - of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_size(a,i)** - Evaluates to the ``i``-th dimension size of ``a``, assuming ``a`` - can be cast to a ``PyArrayObject*``. - - **array_data(a)** - Evaluates to a pointer of type ``void*`` that points to the data - buffer of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_is_contiguous(a)** - Evaluates as true if ``a`` is a contiguous array. Equivalent to - ``(PyArray_ISCONTIGUOUS(a))``. - - **array_is_native(a)** - Evaluates as true if the data buffer of ``a`` uses native byte - order. Equivalent to ``(PyArray_ISNOTSWAPPED(a))``. - - **array_is_fortran(a)** - Evaluates as true if ``a`` is FORTRAN ordered. - -Routines --------- - - **pytype_string()** - - Return type: ``char*`` - - Arguments: - - * ``PyObject* py_obj``, a general `python`_ object. - - Return a string describing the type of ``py_obj``. - - - **typecode_string()** - - Return type: ``char*`` - - Arguments: - - * ``int typecode``, a `NumPy`_ integer typecode. - - Return a string describing the type corresponding to the `NumPy`_ - ``typecode``. - - **type_match()** - - Return type: ``int`` - - Arguments: - - * ``int actual_type``, the `NumPy`_ typecode of a `NumPy`_ array. 
- - * ``int desired_type``, the desired `NumPy`_ typecode. - - Make sure that ``actual_type`` is compatible with - ``desired_type``. For example, this allows character and - byte types, or int and long types, to match. This is now - equivalent to ``PyArray_EquivTypenums()``. - - - **obj_to_array_no_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode. - - Cast ``input`` to a ``PyArrayObject*`` if legal, and ensure that - it is of type ``typecode``. If ``input`` cannot be cast, or the - ``typecode`` is wrong, set a `python`_ error and return ``NULL``. - - - **obj_to_array_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a `NumPy`_ array with the given ``typecode``. - On success, return a valid ``PyArrayObject*`` with the correct - type. On failure, the `python`_ error string will be set and the - routine returns ``NULL``. - - - **make_contiguous()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - * ``int min_dims``, minimum allowable dimensions. - - * ``int max_dims``, maximum allowable dimensions. - - Check to see if ``ary`` is contiguous. If so, return the input - pointer and flag it as not a new object. If it is not contiguous, - create a new ``PyArrayObject*`` using the original data, flag it - as a new object and return the pointer. - - - **obj_to_array_contiguous_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a contiguous ``PyArrayObject*`` of the - specified type. If the input object is not a contiguous - ``PyArrayObject*``, a new one will be created and the new object - flag will be set. - - - **require_contiguous()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Test whether ``ary`` is contiguous. If so, return 1. Otherwise, - set a `python`_ error and return 0. - - - **require_native()** - - Return type: ``int`` - - Arguments: - - * ``PyArray_Object* ary``, a `NumPy`_ array. - - Require that ``ary`` is not byte-swapped. If the array is not - byte-swapped, return 1. Otherwise, set a `python`_ error and - return 0. - - **require_dimensions()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int exact_dimensions``, the desired number of dimensions. - - Require ``ary`` to have a specified number of dimensions. If the - array has the specified number of dimensions, return 1. - Otherwise, set a `python`_ error and return 0. - - - **require_dimensions_n()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int* exact_dimensions``, an array of integers representing - acceptable numbers of dimensions. - - * ``int n``, the length of ``exact_dimensions``. - - Require ``ary`` to have one of a list of specified number of - dimensions. 
If the array has one of the specified number of - dimensions, return 1. Otherwise, set the `python`_ error string - and return 0. - - - **require_size()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``npy_int* size``, an array representing the desired lengths of - each dimension. - - * ``int n``, the length of ``size``. - - Require ``ary`` to have a specified shape. If the array has the - specified shape, return 1. Otherwise, set the `python`_ error - string and return 0. - - - **require_fortran()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Require the given ``PyArrayObject`` to to be FORTRAN ordered. If - the the ``PyArrayObject`` is already FORTRAN ordered, do nothing. - Else, set the FORTRAN ordering flag and recompute the strides. - - -Beyond the Provided Typemaps -============================ - -There are many C or C++ array/`NumPy`_ array situations not covered by -a simple ``%include "numpy.i"`` and subsequent ``%apply`` directives. - -A Common Example ----------------- - -Consider a reasonable prototype for a dot product function:: - - double dot(int len, double* vec1, double* vec2); - -The `python`_ interface that we want is:: - - def dot(vec1, vec2): - """ - dot(PyObject,PyObject) -> double - """ - -The problem here is that there is one dimension argument and two array -arguments, and our typemaps are set up for dimensions that apply to a -single array (in fact, `SWIG`_ does not provide a mechanism for -associating ``len`` with ``vec2`` that takes two `python`_ input -arguments). The recommended solution is the following:: - - %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1), - (int len2, double* vec2)} - %rename (dot) my_dot; - %exception my_dot { - $action - if (PyErr_Occurred()) SWIG_fail; - } - %inline %{ - double my_dot(int len1, double* vec1, int len2, double* vec2) { - if (len1 != len2) { - PyErr_Format(PyExc_ValueError, - "Arrays of lengths (%d,%d) given", - len1, len2); - return 0.0; - } - return dot(len1, vec1, vec2); - } - %} - -If the header file that contains the prototype for ``double dot()`` -also contains other prototypes that you want to wrap, so that you need -to ``%include`` this header file, then you will also need a ``%ignore -dot;`` directive, placed after the ``%rename`` and before the -``%include`` directives. Or, if the function in question is a class -method, you will want to use ``%extend`` rather than ``%inline`` in -addition to ``%ignore``. - -**A note on error handling:** Note that ``my_dot`` returns a -``double`` but that it can also raise a `python`_ error. The -resulting wrapper function will return a `python`_ float -representation of 0.0 when the vector lengths do not match. Since -this is not ``NULL``, the `python`_ interpreter will not know to check -for an error. For this reason, we add the ``%exception`` directive -above for ``my_dot`` to get the behavior we want (note that -``$action`` is a macro that gets expanded to a valid call to -``my_dot``). In general, you will probably want to write a `SWIG`_ -macro to perform this task. - -Other Situations ----------------- - -There are other wrapping situations in which ``numpy.i`` may be -helpful when you encounter them. - - * In some situations, it is possible that you could use the - ``%numpy_templates`` macro to implement typemaps for your own - types. See the `Other Common Types: bool`_ or `Other Common - Types: complex`_ sections for examples. 
Another situation is if - your dimensions are of a type other than ``int`` (say ``long`` for - example):: - - %numpy_typemaps(double, NPY_DOUBLE, long) - - * You can use the code in ``numpy.i`` to write your own typemaps. - For example, if you had a four-dimensional array as a function - argument, you could cut-and-paste the appropriate - three-dimensional typemaps into your interface file. The - modifications for the fourth dimension would be trivial. - - * Sometimes, the best approach is to use the ``%extend`` directive - to define new methods for your classes (or overload existing ones) - that take a ``PyObject*`` (that either is or can be converted to a - ``PyArrayObject*``) instead of a pointer to a buffer. In this - case, the helper routines in ``numpy.i`` can be very useful. - - * Writing typemaps can be a bit nonintuitive. If you have specific - questions about writing `SWIG`_ typemaps for `NumPy`_, the - developers of ``numpy.i`` do monitor the - `Numpy-discussion `_ and - `Swig-user `_ mail lists. - -A Final Note ------------- - -When you use the ``%apply`` directive, as is usually necessary to use -``numpy.i``, it will remain in effect until you tell `SWIG`_ that it -shouldn't be. If the arguments to the functions or methods that you -are wrapping have common names, such as ``length`` or ``vector``, -these typemaps may get applied in situations you do not expect or -want. Therefore, it is always a good idea to add a ``%clear`` -directive after you are done with a specific typemap:: - - %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)} - %include "my_header.h" - %clear (double* vector, int length); - -In general, you should target these typemap signatures specifically -where you want them, and then clear them after you are done. - -Summary -======= - -Out of the box, ``numpy.i`` provides typemaps that support conversion -between `NumPy`_ arrays and C arrays: - - * That can be one of 12 different scalar types: ``signed char``, - ``unsigned char``, ``short``, ``unsigned short``, ``int``, - ``unsigned int``, ``long``, ``unsigned long``, ``long long``, - ``unsigned long long``, ``float`` and ``double``. - - * That support 41 different argument signatures for each data type, - including: - - + One-dimensional, two-dimensional and three-dimensional arrays. - - + Input-only, in-place, argout and argoutview behavior. - - + Hard-coded dimensions, data-buffer-then-dimensions - specification, and dimensions-then-data-buffer specification. - - + Both C-ordering ("last dimension fastest") or FORTRAN-ordering - ("first dimension fastest") support for 2D and 3D arrays. - -The ``numpy.i`` interface file also provides additional tools for -wrapper developers, including: - - * A `SWIG`_ macro (``%numpy_typemaps``) with three arguments for - implementing the 41 argument signatures for the user's choice of - (1) C data type, (2) `NumPy`_ data type (assuming they match), and - (3) dimension type. - - * Nine C macros and 13 C functions that can be used to write - specialized typemaps, extensions, or inlined functions that handle - cases not covered by the provided typemaps. - -Acknowledgements -================ - -Many people have worked to glue `SWIG`_ and `NumPy`_ together (as well -as `SWIG`_ and the predecessors of `NumPy`_, Numeric and numarray). -The effort to standardize this work into ``numpy.i`` began at the 2005 -`SciPy `_ Conference with a conversation between -Fernando Perez and myself. 
Fernando collected helper functions and -typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael -Sanner. Sebastian Hasse and Georg Holzmann have also provided -additional error checking and use cases. The work of these -contributors has made this end result possible. diff --git a/numpy/doc/swig/doc/testing.html b/numpy/doc/swig/doc/testing.html deleted file mode 100644 index 3622550df..000000000 --- a/numpy/doc/swig/doc/testing.html +++ /dev/null @@ -1,482 +0,0 @@ - - - - - -Testing the numpy.i Typemaps - - - - - -
-

Testing the numpy.i Typemaps

Author: Bill Spotz
Institution: Sandia National Laboratories
Date: 6 April, 2007
-

Introduction

-

Writing tests for the numpy.i SWIG interface file is a combinatorial headache. At present, 12 different data types are supported, each with 23 different argument signatures, for a total of 276 typemaps supported "out of the box". Each of these typemaps, in turn, might require several unit tests in order to verify expected behavior for both proper and improper inputs. Currently, this results in 1,020 individual unit tests that are performed when make test is run in the numpy/docs/swig subdirectory.

-

To facilitate this many similar unit tests, some high-level programming techniques are employed, including C and SWIG macros, as well as python inheritance. The purpose of this document is to describe the testing infrastructure employed to verify that the numpy.i typemaps are working as expected.

-
-
-

Testing Organization

-

There are three independent testing frameworks supported, for one-, two-, and three-dimensional arrays respectively. For one-dimensional arrays, there are two C++ files, a header and a source, named:

-
-Vector.h
-Vector.cxx
-
-

that contain prototypes and code for a variety of functions that have one-dimensional arrays as function arguments. The file:

-
-Vector.i
-
-

is a SWIG interface file that defines a python module Vector that wraps the functions in Vector.h while utilizing the typemaps in numpy.i to correctly handle the C arrays.

-

The Makefile calls swig to generate Vector.py and Vector_wrap.cxx, and also executes the setup.py script that compiles Vector_wrap.cxx and links together the extension module _Vector.so or _Vector.dylib, depending on the platform. This extension module and the proxy file Vector.py are both placed in a subdirectory under the build directory.

-

The actual testing takes place with a python script named:

-
-testVector.py
-
-

that uses the standard python library module unittest, which performs several tests of each function defined in Vector.h for each data type supported.

-

Two-dimensional arrays are tested in exactly the same manner. The above description applies, but with Matrix substituted for Vector. For three-dimensional tests, substitute Tensor for Vector. For the descriptions that follow, we will reference the Vector tests, but the same information applies to Matrix and Tensor tests.

-

The command make test will ensure that all of the test software is built and then run all three test scripts.

-
-
-

Testing Header Files

-

Vector.h is a C++ header file that defines a C macro called TEST_FUNC_PROTOS that takes two arguments: TYPE, which is a data type name such as unsigned int; and SNAME, which is a short name for the same data type with no spaces, e.g. uint. This macro defines several function prototypes that have the prefix SNAME and have at least one argument that is an array of type TYPE. Those functions that have return arguments return a TYPE value.

-

TEST_FUNC_PROTOS is then implemented for all of the data types supported by numpy.i (an illustrative sketch of such a macro follows the list):

-
-
    -
  • signed char
  • -
  • unsigned char
  • -
  • short
  • -
  • unsigned short
  • -
  • int
  • -
  • unsigned int
  • -
  • long
  • -
  • unsigned long
  • -
  • long long
  • -
  • unsigned long long
  • -
  • float
  • -
  • double
  • -
-
-
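The actual contents of Vector.h are not reproduced in this document, but a macro of roughly the following shape would generate the kind of prototypes described above. The Length name is taken from the doubleLength example later in this document; Sum is purely hypothetical:

    /* Illustrative sketch only; the real Vector.h is not reproduced in
     * this document.  Token pasting turns (double, double) into
     * doubleLength() and doubleSum(), each taking at least one array
     * of type TYPE. */
    #define TEST_FUNC_PROTOS(TYPE, SNAME)        \
      TYPE SNAME ## Length(TYPE vector[3]);      \
      TYPE SNAME ## Sum(TYPE* series, int size);

    TEST_FUNC_PROTOS(double, double)
    TEST_FUNC_PROTOS(unsigned int, uint)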
-
-

Testing Source Files

-

Vector.cxx is a C++ source file that implements compilable code for each of the function prototypes specified in Vector.h. It defines a C macro TEST_FUNCS that has the same arguments and works in the same way as TEST_FUNC_PROTOS does in Vector.h. TEST_FUNCS is implemented for each of the 12 data types as above.
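Continuing the hypothetical sketch from the previous section, the corresponding TEST_FUNCS macro would supply a definition for each prototype; the bodies shown here are illustrative only:

    /* Hypothetical companion to the TEST_FUNC_PROTOS sketch above:
     * one definition per prototype, expanded once per (TYPE, SNAME). */
    #include <math.h>

    #define TEST_FUNCS(TYPE, SNAME)                          \
      TYPE SNAME ## Length(TYPE vector[3]) {                 \
        double s = (double) (vector[0]*vector[0] +           \
                             vector[1]*vector[1] +           \
                             vector[2]*vector[2]);           \
        return (TYPE) sqrt(s);                               \
      }                                                      \
      TYPE SNAME ## Sum(TYPE* series, int size) {            \
        TYPE total = 0;                                      \
        for (int i = 0; i < size; ++i) total += series[i];   \
        return total;                                        \
      }

    TEST_FUNCS(double, double)
    TEST_FUNCS(unsigned int, uint)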

-
-
-

Testing SWIG Interface Files

-

Vector.i is a SWIG interface file that defines python module Vector. It follows the conventions for using numpy.i as described in the numpy.i documentation. It defines a SWIG macro %apply_numpy_typemaps that has a single argument TYPE. It uses the SWIG directive %apply as described in the numpy.i documentation to apply the provided typemaps to the argument signatures found in Vector.h. This macro is then implemented for all of the data types supported by numpy.i. It then does a %include "Vector.h" to wrap all of the function prototypes in Vector.h using the typemaps in numpy.i.
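The real Vector.i is not reproduced in this document, but a sketch of such a macro, using the hypothetical argument names from the Vector.h sketch above, might look like:

    %define %apply_numpy_typemaps(TYPE)
    %apply (TYPE IN_ARRAY1[ANY])       {(TYPE vector[3])};
    %apply (TYPE* IN_ARRAY1, int DIM1) {(TYPE* series, int size)};
    %enddef

    %apply_numpy_typemaps(double)
    %apply_numpy_typemaps(unsigned int)

    %include "Vector.h"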

-
-
-

Testing Python Scripts

-

After make is used to build the testing extension modules, testVector.py can be run to execute the tests. As with other scripts that use unittest to facilitate unit testing, testVector.py defines a class that inherits from unittest.TestCase:

-
-class VectorTestCase(unittest.TestCase):
-
-

However, this class is not run directly. Rather, it serves as a base class to several other python classes, each one specific to a particular data type. The VectorTestCase class stores two strings for typing information:

-
-
-
self.typeStr
-
A string that matches one of the SNAME prefixes used in Vector.h and Vector.cxx. For example, "double".
-
self.typeCode
-
A short (typically single-character) string that represents a data type in numpy and corresponds to self.typeStr. For example, if self.typeStr is "double", then self.typeCode should be "d".
-
-
-

Each test defined by the VectorTestCase class extracts the python function it is trying to test by accessing the Vector module's dictionary:

-
-length = Vector.__dict__[self.typeStr + "Length"]
-
-

In the case of double precision tests, this will return the python function Vector.doubleLength.

-

We then define a new test case class for each supported data type with a short definition such as:

-
-class doubleTestCase(VectorTestCase):
-    def __init__(self, methodName="runTest"):
-        VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
-        self.typeCode = "d"
-
-

Each of these 12 classes is collected into a unittest.TestSuite, which is then executed. Errors and failures are summed together and returned as the exit argument. Any non-zero result indicates that at least one test did not pass.

-
-
- - - diff --git a/numpy/doc/swig/doc/testing.pdf b/numpy/doc/swig/doc/testing.pdf deleted file mode 100644 index 9ffcf7575..000000000 Binary files a/numpy/doc/swig/doc/testing.pdf and /dev/null differ diff --git a/numpy/doc/swig/doc/testing.txt b/numpy/doc/swig/doc/testing.txt deleted file mode 100644 index bfd5218e8..000000000 --- a/numpy/doc/swig/doc/testing.txt +++ /dev/null @@ -1,173 +0,0 @@ -============================ -Testing the numpy.i Typemaps -============================ - -:Author: Bill Spotz -:Institution: Sandia National Laboratories -:Date: 6 April, 2007 - -.. contents:: - -Introduction -============ - -Writing tests for the ``numpy.i`` `SWIG `_ -interface file is a combinatorial headache. At present, 12 different -data types are supported, each with 23 different argument signatures, -for a total of 276 typemaps supported "out of the box". Each of these -typemaps, in turn, might require several unit tests in order to verify -expected behavior for both proper and improper inputs. Currently, -this results in 1,020 individual unit tests that are performed when -``make test`` is run in the ``numpy/docs/swig`` subdirectory. - -To facilitate this many similar unit tests, some high-level -programming techniques are employed, including C and `SWIG`_ macros, -as well as `python `_ inheritance. The -purpose of this document is to describe the testing infrastructure -employed to verify that the ``numpy.i`` typemaps are working as -expected. - -Testing Organization -==================== - -There are three indepedent testing frameworks supported, for one-, -two-, and three-dimensional arrays respectively. For one-dimensional -arrays, there are two C++ files, a header and a source, named:: - - Vector.h - Vector.cxx - -that contain prototypes and code for a variety of functions that have -one-dimensional arrays as function arguments. The file:: - - Vector.i - -is a `SWIG`_ interface file that defines a python module ``Vector`` -that wraps the functions in ``Vector.h`` while utilizing the typemaps -in ``numpy.i`` to correctly handle the C arrays. - -The ``Makefile`` calls ``swig`` to generate ``Vector.py`` and -``Vector_wrap.cxx``, and also executes the ``setup.py`` script that -compiles ``Vector_wrap.cxx`` and links together the extension module -``_Vector.so`` or ``_Vector.dylib``, depending on the platform. This -extension module and the proxy file ``Vector.py`` are both placed in a -subdirectory under the ``build`` directory. - -The actual testing takes place with a `python`_ script named:: - - testVector.py - -that uses the standard `python`_ library module ``unittest``, which -performs several tests of each function defined in ``Vector.h`` for -each data type supported. - -Two-dimensional arrays are tested in exactly the same manner. The -above description applies, but with ``Matrix`` substituted for -``Vector``. For three-dimensional tests, substitute ``Tensor`` for -``Vector``. For the descriptions that follow, we will reference the -``Vector`` tests, but the same information applies to ``Matrix`` and -``Tensor`` tests. - -The command ``make test`` will ensure that all of the test software is -built and then run all three test scripts. - -Testing Header Files -==================== - -``Vector.h`` is a C++ header file that defines a C macro called -``TEST_FUNC_PROTOS`` that takes two arguments: ``TYPE``, which is a -data type name such as ``unsigned int``; and ``SNAME``, which is a -short name for the same data type with no spaces, e.g. ``uint``. 
This -macro defines several function prototypes that have the prefix -``SNAME`` and have at least one argument that is an array of type -``TYPE``. Those functions that have return arguments return a -``TYPE`` value. - -``TEST_FUNC_PROTOS`` is then implemented for all of the data types -supported by ``numpy.i``: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -Testing Source Files -==================== - -``Vector.cxx`` is a C++ source file that implements compilable code -for each of the function prototypes specified in ``Vector.h``. It -defines a C macro ``TEST_FUNCS`` that has the same arguments and works -in the same way as ``TEST_FUNC_PROTOS`` does in ``Vector.h``. -``TEST_FUNCS`` is implemented for each of the 12 data types as above. - -Testing SWIG Interface Files -============================ - -``Vector.i`` is a `SWIG`_ interface file that defines python module -``Vector``. It follows the conventions for using ``numpy.i`` as -described in the `numpy.i documentation `_. It -defines a `SWIG`_ macro ``%apply_numpy_typemaps`` that has a single -argument ``TYPE``. It uses the `SWIG`_ directive ``%apply`` as -described in the `numpy.i documentation`_ to apply the provided -typemaps to the argument signatures found in ``Vector.h``. This macro -is then implemented for all of the data types supported by -``numpy.i``. It then does a ``%include "Vector.h"`` to wrap all of -the function prototypes in ``Vector.h`` using the typemaps in -``numpy.i``. - -Testing Python Scripts -====================== - -After ``make`` is used to build the testing extension modules, -``testVector.py`` can be run to execute the tests. As with other -scripts that use ``unittest`` to facilitate unit testing, -``testVector.py`` defines a class that inherits from -``unittest.TestCase``:: - - class VectorTestCase(unittest.TestCase): - -However, this class is not run directly. Rather, it serves as a base -class to several other python classes, each one specific to a -particular data type. The ``VectorTestCase`` class stores two strings -for typing information: - - **self.typeStr** - A string that matches one of the ``SNAME`` prefixes used in - ``Vector.h`` and ``Vector.cxx``. For example, ``"double"``. - - **self.typeCode** - A short (typically single-character) string that represents a - data type in numpy and corresponds to ``self.typeStr``. For - example, if ``self.typeStr`` is ``"double"``, then - ``self.typeCode`` should be ``"d"``. - -Each test defined by the ``VectorTestCase`` class extracts the python -function it is trying to test by accessing the ``Vector`` module's -dictionary:: - - length = Vector.__dict__[self.typeStr + "Length"] - -In the case of double precision tests, this will return the python -function ``Vector.doubleLength``. - -We then define a new test case class for each supported data type with -a short definition such as:: - - class doubleTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -Each of these 12 classes is collected into a ``unittest.TestSuite``, -which is then executed. Errors and failures are summed together and -returned as the exit argument. Any non-zero result indicates that at -least one test did not pass. 
diff --git a/numpy/doc/swig/numpy.i b/numpy/doc/swig/numpy.i deleted file mode 100644 index cb1ae6338..000000000 --- a/numpy/doc/swig/numpy.i +++ /dev/null @@ -1,1581 +0,0 @@ -/* -*- C -*- (not really, but good for syntax highlighting) */ -#ifdef SWIGPYTHON - -%{ -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -%} - -/**********************************************************************/ - -%fragment("NumPy_Backward_Compatibility", "header") -{ -/* Support older NumPy data type names -*/ -%#if NDARRAY_VERSION < 0x01000000 -%#define NPY_BOOL PyArray_BOOL -%#define NPY_BYTE PyArray_BYTE -%#define NPY_UBYTE PyArray_UBYTE -%#define NPY_SHORT PyArray_SHORT -%#define NPY_USHORT PyArray_USHORT -%#define NPY_INT PyArray_INT -%#define NPY_UINT PyArray_UINT -%#define NPY_LONG PyArray_LONG -%#define NPY_ULONG PyArray_ULONG -%#define NPY_LONGLONG PyArray_LONGLONG -%#define NPY_ULONGLONG PyArray_ULONGLONG -%#define NPY_FLOAT PyArray_FLOAT -%#define NPY_DOUBLE PyArray_DOUBLE -%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -%#define NPY_CFLOAT PyArray_CFLOAT -%#define NPY_CDOUBLE PyArray_CDOUBLE -%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -%#define NPY_OBJECT PyArray_OBJECT -%#define NPY_STRING PyArray_STRING -%#define NPY_UNICODE PyArray_UNICODE -%#define NPY_VOID PyArray_VOID -%#define NPY_NTYPES PyArray_NTYPES -%#define NPY_NOTYPE PyArray_NOTYPE -%#define NPY_CHAR PyArray_CHAR -%#define NPY_USERDEF PyArray_USERDEF -%#define npy_intp intp - -%#define NPY_MAX_BYTE MAX_BYTE -%#define NPY_MIN_BYTE MIN_BYTE -%#define NPY_MAX_UBYTE MAX_UBYTE -%#define NPY_MAX_SHORT MAX_SHORT -%#define NPY_MIN_SHORT MIN_SHORT -%#define NPY_MAX_USHORT MAX_USHORT -%#define NPY_MAX_INT MAX_INT -%#define NPY_MIN_INT MIN_INT -%#define NPY_MAX_UINT MAX_UINT -%#define NPY_MAX_LONG MAX_LONG -%#define NPY_MIN_LONG MIN_LONG -%#define NPY_MAX_ULONG MAX_ULONG -%#define NPY_MAX_LONGLONG MAX_LONGLONG -%#define NPY_MIN_LONGLONG MIN_LONGLONG -%#define NPY_MAX_ULONGLONG MAX_ULONGLONG -%#define NPY_MAX_INTP MAX_INTP -%#define NPY_MIN_INTP MIN_INTP - -%#define NPY_FARRAY FARRAY -%#define NPY_F_CONTIGUOUS F_CONTIGUOUS -%#endif -} - -/**********************************************************************/ - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it to fix some minor bugs, upgrade from Numeric to numpy (all - * versions), add some comments and functionality, and convert from - * direct code insertion to SWIG fragments. - */ - -%fragment("NumPy_Macros", "header") -{ -/* Macros to extract array attributes. - */ -%#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -%#define array_type(a) (int)(PyArray_TYPE(a)) -%#define array_numdims(a) (((PyArrayObject *)a)->nd) -%#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -%#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -%#define array_data(a) (((PyArrayObject *)a)->data) -%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -%#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) -%#define array_is_fortran(a) (PyArray_ISFORTRAN(a)) -} - -/**********************************************************************/ - -%fragment("NumPy_Utilities", "header") -{ - /* Given a PyObject, return a string describing its type. 
- */ - char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; - } - - /* Given a NumPy typecode, return a string describing the type. - */ - char* typecode_string(int typecode) { - static char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; - } - - /* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ - int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); - } -} - -/**********************************************************************/ - -%fragment("NumPy_Object_to_Array", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities") -{ - /* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. - */ - PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) - { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) - { - ary = (PyArrayObject*) input; - } - else if is_array(input) - { - char* desired_type = typecode_string(typecode); - char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else - { - char * desired_type = typecode_string(typecode); - char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; - } - - /* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ - PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) - { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) - { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else - { - py_obj = PyArray_FromObject(input, typecode, 0, 0); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; - } - - /* Given a PyArrayObject, check to see if it is contiguous. 
If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. - */ - PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) - { - PyArrayObject* result; - if (array_is_contiguous(ary)) - { - result = ary; - *is_new_object = 0; - } - else - { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; - } - - /* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ - PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) - { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, - &is_new1); - if (ary1) - { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) - { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; - } -} - -/**********************************************************************/ - -%fragment("NumPy_Array_Requirements", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros") -{ - /* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ - int require_contiguous(PyArrayObject* ary) - { - int contiguous = 1; - if (!array_is_contiguous(ary)) - { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; - } - - /* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. - */ - int require_native(PyArrayObject* ary) - { - int native = 1; - if (!array_is_native(ary)) - { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. " - "A byte-swapped array was given"); - native = 0; - } - return native; - } - - /* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. Otherwise, set the python error string and return 0. - */ - int require_dimensions(PyArrayObject* ary, int exact_dimensions) - { - int success = 1; - if (array_numdims(ary) != exact_dimensions) - { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; - } - - /* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. 
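/* A minimal sketch, not in the original file, of how the helpers above are
 * typically composed by the generated typemap code further below: convert the
 * input, validate it, use its data, then drop any temporary created during
 * conversion. The wrapped routine sum_values(double*, int) and the wrapper
 * name are hypothetical and serve only to illustrate the is_new_object
 * protocol that the freearg typemaps rely on. */
extern double sum_values(double* values, int length); /* hypothetical C routine */
static PyObject* wrap_sum_values(PyObject* self, PyObject* arg)
{
  int is_new_object = 0;
  double total;
  PyArrayObject* array =
    obj_to_array_contiguous_allow_conversion(arg, NPY_DOUBLE, &is_new_object);
  (void) self; /* unused */
  if (!array || !require_dimensions(array, 1) || !require_native(array))
  {
    /* error string already set by the helper that failed */
    if (is_new_object && array) { Py_DECREF(array); }
    return NULL;
  }
  total = sum_values((double*) array_data(array), (int) array_size(array, 0));
  if (is_new_object) { Py_DECREF(array); }
  return PyFloat_FromDouble(total);
}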
- */ - int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) - { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) - { - if (array_numdims(ary) == exact_dimensions[i]) - { - success = 1; - } - } - if (!success) - { - for (i = 0; i < n-1; i++) - { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; - } - - /* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. - */ - int require_size(PyArrayObject* ary, npy_intp* size, int n) - { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) - { - if (size[i] != -1 && size[i] != array_size(ary,i)) - { - success = 0; - } - } - if (!success) - { - for (i = 0; i < n; i++) - { - if (size[i] == -1) - { - sprintf(s, "*,"); - } - else - { - sprintf(s, "%ld,", (long int)size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) - { - sprintf(s, "%ld,", (long int)array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); - } - return success; - } - - /* Require the given PyArrayObject to be FORTRAN ordered. If the - * PyArrayObject is already FORTRAN ordered, do nothing. Else, - * set the FORTRAN ordering flag and recompute the strides.
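/* Sketch only, not part of the original file: the -1 entries accepted by
 * require_size above act as wildcards, so this hypothetical check accepts any
 * N x 3 array while rejecting other shapes. */
static int check_is_nx3(PyArrayObject* array)
{
  npy_intp size[2] = { -1, 3 };  /* any number of rows, exactly 3 columns */
  return require_dimensions(array, 2) && require_size(array, size, 2);
}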
- */ - int require_fortran(PyArrayObject* ary) - { - int success = 1; - int nd = array_numdims(ary); - int i; - if (array_is_fortran(ary)) return success; - /* Set the FORTRAN ordered flag */ - ary->flags = NPY_FARRAY; - /* Recompute the strides */ - ary->strides[0] = ary->strides[nd-1]; - for (i=1; i < nd; ++i) - ary->strides[i] = ary->strides[i-1] * array_size(ary,i-1); - return success; - } -} - -/* Combine all NumPy fragments into one for convenience */ -%fragment("NumPy_Fragments", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities", - fragment="NumPy_Object_to_Array", - fragment="NumPy_Array_Requirements") { } - -/* End John Hunter translation (with modifications by Bill Spotz) - */ - -/* %numpy_typemaps() macro - * - * This macro defines a family of 41 typemaps that allow C arguments - * of the form - * - * (DATA_TYPE IN_ARRAY1[ANY]) - * (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - * - * (DATA_TYPE IN_ARRAY2[ANY][ANY]) - * (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - * (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - * - * (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - * (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) - * (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) - * - * (DATA_TYPE INPLACE_ARRAY1[ANY]) - * (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - * - * (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - * (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - * (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - * - * (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - * (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) - * (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) - * - * (DATA_TYPE ARGOUT_ARRAY1[ANY]) - * (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - * - * (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - * - * (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) - * (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) - * (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) - * (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) - * - * where "DATA_TYPE" is any type supported by the NumPy module, and - * "DIM_TYPE" is any int-like type suitable for specifying dimensions. 
- * The difference between "ARRAY" typemaps and "FARRAY" typemaps is - * that the "FARRAY" typemaps expect FORTRAN ordering of - * multidimensional arrays. In python, the dimensions will not need - * to be specified (except for the "DATA_TYPE* ARGOUT_ARRAY1" - * typemaps). The IN_ARRAYs can be a numpy array or any sequence that - * can be converted to a numpy array of the specified type. The - * INPLACE_ARRAYs must be numpy arrays of the appropriate type. The - * ARGOUT_ARRAYs will be returned as new numpy arrays of the - * appropriate type. - * - * These typemaps can be applied to existing functions using the - * %apply directive. For example: - * - * %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)}; - * double prod(double* series, int length); - * - * %apply (int DIM1, int DIM2, double* INPLACE_ARRAY2) - * {(int rows, int cols, double* matrix )}; - * void floor(int rows, int cols, double* matrix, double f); - * - * %apply (double IN_ARRAY3[ANY][ANY][ANY]) - * {(double tensor[2][2][2] )}; - * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) - * {(double low[2][2][2] )}; - * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) - * {(double upp[2][2][2] )}; - * void luSplit(double tensor[2][2][2], - * double low[2][2][2], - * double upp[2][2][2] ); - * - * or directly with - * - * double prod(double* IN_ARRAY1, int DIM1); - * - * void floor(int DIM1, int DIM2, double* INPLACE_ARRAY2, double f); - * - * void luSplit(double IN_ARRAY3[ANY][ANY][ANY], - * double ARGOUT_ARRAY3[ANY][ANY][ANY], - * double ARGOUT_ARRAY3[ANY][ANY][ANY]); - */ - -%define %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -/************************/ -/* Input Array Typemaps */ -/************************/ - -/* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY1[ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY1[ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = { $1_dim0 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY1[ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = { -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, 
DATA_TYPE* IN_ARRAY1) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = {-1}; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY2[ANY][ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY2[ANY][ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { $1_dim0, $1_dim1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY2[ANY][ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = 
obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} -%typemap(freearg) - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* IN_ARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - 
fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3) | !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} -%typemap(freearg) - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* IN_FARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/***************************/ -/* In-Place Array Typemaps */ -/***************************/ - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY1[ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY1[ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[1] = { $1_dim0 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_size(array, size, 1) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* 
Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - (PyArrayObject* array=NULL, int i=1) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = 1; - for (i=0; i < array_numdims(array); ++i) $2 *= array_size(array,i); -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - (PyArrayObject* array=NULL, int i=0) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = 1; - for (i=0; i < array_numdims(array); ++i) $1 *= array_size(array,i); - $2 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[2] = { $1_dim0, $1_dim1 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_size(array, size, 2) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) || - !require_native(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) 
array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) - || !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) || - !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_size(array, size, 3) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) || - !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* INPLACE_ARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) - 
(PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) || - !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* INPLACE_FARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) - || !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} - -/*************************/ -/* Argout Array Typemaps */ -/*************************/ - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY1[ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[1] = { $1_dim0 }; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY1[ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - */ -%typemap(in,numinputs=1, - fragment="NumPy_Fragments") - (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - (PyObject * array = NULL) -{ - npy_intp dims[1]; - if (!PyInt_Check($input)) - { - char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. 
'%s' given.", - typestring); - SWIG_fail; - } - $2 = (DIM_TYPE) PyInt_AsLong($input); - dims[0] = (npy_intp) $2; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); -} -%typemap(argout) - (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - */ -%typemap(in,numinputs=1, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - (PyObject * array = NULL) -{ - npy_intp dims[1]; - if (!PyInt_Check($input)) - { - char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", - typestring); - SWIG_fail; - } - $1 = (DIM_TYPE) PyInt_AsLong($input); - dims[0] = (npy_intp) $1; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $2 = (DATA_TYPE*) array_data(array); -} -%typemap(argout) - (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[2] = { $1_dim0, $1_dim1 }; - array = PyArray_SimpleNew(2, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = PyArray_SimpleNew(3, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/*****************************/ -/* Argoutview Array Typemaps */ -/*****************************/ - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 ) - (DATA_TYPE* data_temp , DIM_TYPE dim_temp) -{ - $1 = &data_temp; - $2 = &dim_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) -{ - npy_intp dims[1] = { *$2 }; - PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DATA_TYPE** ARGOUTVIEW_ARRAY1) - (DIM_TYPE dim_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim_temp; - $2 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) -{ - npy_intp dims[1] = { *$1 }; - PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) - (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE 
dim2_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) -{ - npy_intp dims[2] = { *$2, *$3 }; - PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_ARRAY2) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) -{ - npy_intp dims[2] = { *$1, *$2 }; - PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) - (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) -{ - npy_intp dims[2] = { *$2, *$3 }; - PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_FARRAY2) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) -{ - npy_intp dims[2] = { *$1, *$2 }; - PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[3] = { *$2, *$3, *$4 }; - PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, - DATA_TYPE** ARGOUTVIEW_ARRAY3) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, 
DIM_TYPE dim3_temp, DATA_TYPE* data_temp) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) -{ - npy_intp dims[3] = { *$1, *$2, *$3 }; - PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[3] = { *$2, *$3, *$4 }; - PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, - DATA_TYPE** ARGOUTVIEW_FARRAY3) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) -{ - npy_intp dims[3] = { *$1, *$2, *$3 }; - PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -%enddef /* %numpy_typemaps() macro */ -/* *************************************************************** */ - -/* Concrete instances of the %numpy_typemaps() macro: Each invocation - * below applies all of the typemaps above to the specified data type. - */ -%numpy_typemaps(signed char , NPY_BYTE , int) -%numpy_typemaps(unsigned char , NPY_UBYTE , int) -%numpy_typemaps(short , NPY_SHORT , int) -%numpy_typemaps(unsigned short , NPY_USHORT , int) -%numpy_typemaps(int , NPY_INT , int) -%numpy_typemaps(unsigned int , NPY_UINT , int) -%numpy_typemaps(long , NPY_LONG , int) -%numpy_typemaps(unsigned long , NPY_ULONG , int) -%numpy_typemaps(long long , NPY_LONGLONG , int) -%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int) -%numpy_typemaps(float , NPY_FLOAT , int) -%numpy_typemaps(double , NPY_DOUBLE , int) - -/* *************************************************************** - * The following macro expansion does not work, because C++ bool is 4 - * bytes and NPY_BOOL is 1 byte - * - * %numpy_typemaps(bool, NPY_BOOL, int) - */ - -/* *************************************************************** - * On my Mac, I get the following warning for this macro expansion: - * 'swig/python detected a memory leak of type 'long double *', no destructor found.' 
- * - * %numpy_typemaps(long double, NPY_LONGDOUBLE, int) - */ - -/* *************************************************************** - * Swig complains about a syntax error for the following macro - * expansions: - * - * %numpy_typemaps(complex float, NPY_CFLOAT , int) - * - * %numpy_typemaps(complex double, NPY_CDOUBLE, int) - * - * %numpy_typemaps(complex long double, NPY_CLONGDOUBLE, int) - */ - -#endif /* SWIGPYTHON */ diff --git a/numpy/doc/swig/pyfragments.swg b/numpy/doc/swig/pyfragments.swg deleted file mode 100644 index 3c9502ff5..000000000 --- a/numpy/doc/swig/pyfragments.swg +++ /dev/null @@ -1,174 +0,0 @@ -/*-*- C -*-*/ - -/**********************************************************************/ - -/* For numpy versions prior to 1.0, the names of certain data types - * are different than in later versions. This fragment provides macro - * substitutions that allow us to support old and new versions of - * numpy. - */ - -%fragment("NumPy_Backward_Compatibility", "header") -{ -/* Support older NumPy data type names - */ -%#if NDARRAY_VERSION < 0x01000000 -%#define NPY_BOOL PyArray_BOOL -%#define NPY_BYTE PyArray_BYTE -%#define NPY_UBYTE PyArray_UBYTE -%#define NPY_SHORT PyArray_SHORT -%#define NPY_USHORT PyArray_USHORT -%#define NPY_INT PyArray_INT -%#define NPY_UINT PyArray_UINT -%#define NPY_LONG PyArray_LONG -%#define NPY_ULONG PyArray_ULONG -%#define NPY_LONGLONG PyArray_LONGLONG -%#define NPY_ULONGLONG PyArray_ULONGLONG -%#define NPY_FLOAT PyArray_FLOAT -%#define NPY_DOUBLE PyArray_DOUBLE -%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -%#define NPY_CFLOAT PyArray_CFLOAT -%#define NPY_CDOUBLE PyArray_CDOUBLE -%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -%#define NPY_OBJECT PyArray_OBJECT -%#define NPY_STRING PyArray_STRING -%#define NPY_UNICODE PyArray_UNICODE -%#define NPY_VOID PyArray_VOID -%#define NPY_NTYPES PyArray_NTYPES -%#define NPY_NOTYPE PyArray_NOTYPE -%#define NPY_CHAR PyArray_CHAR -%#define NPY_USERDEF PyArray_USERDEF -%#define npy_intp intp - -%#define NPY_MAX_BYTE MAX_BYTE -%#define NPY_MIN_BYTE MIN_BYTE -%#define NPY_MAX_UBYTE MAX_UBYTE -%#define NPY_MAX_SHORT MAX_SHORT -%#define NPY_MIN_SHORT MIN_SHORT -%#define NPY_MAX_USHORT MAX_USHORT -%#define NPY_MAX_INT MAX_INT -%#define NPY_MIN_INT MIN_INT -%#define NPY_MAX_UINT MAX_UINT -%#define NPY_MAX_LONG MAX_LONG -%#define NPY_MIN_LONG MIN_LONG -%#define NPY_MAX_ULONG MAX_ULONG -%#define NPY_MAX_LONGLONG MAX_LONGLONG -%#define NPY_MIN_LONGLONG MIN_LONGLONG -%#define NPY_MAX_ULONGLONG MAX_ULONGLONG -%#define NPY_MAX_INTP MAX_INTP -%#define NPY_MIN_INTP MIN_INTP - -%#define NPY_FARRAY FARRAY -%#define NPY_F_CONTIGUOUS F_CONTIGUOUS -%#endif -} - -/**********************************************************************/ - -/* Override the SWIG_AsVal_frag(long) fragment so that it also checks - * for numpy scalar array types. 
The code through the %#endif is - * essentially cut-and-paste from pyprimtype.swg - */ - -%fragment(SWIG_AsVal_frag(long), "header", - fragment="SWIG_CanCastAsInteger", - fragment="NumPy_Backward_Compatibility") -{ - SWIGINTERN int - SWIG_AsVal_dec(long)(PyObject * obj, long * val) - { - static PyArray_Descr * longDescr = PyArray_DescrNewFromType(NPY_LONG); - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -%#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -%#endif - if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; - PyArray_CastScalarToCtype(obj, (void*)val, longDescr); - return SWIG_OK; - } -} - - -/* Override the SWIG_AsVal_frag(unsigned long) fragment so that it - * also checks for numpy scalar array types. The code through the - * %#endif is essentially cut-and-paste from pyprimtype.swg - */ - -%fragment(SWIG_AsVal_frag(unsigned long),"header", - fragment="SWIG_CanCastAsInteger", - fragment="NumPy_Backward_Compatibility") -{ - SWIGINTERN int - SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val) - { - static PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG); - if (PyInt_Check(obj)) { - long v = PyInt_AsLong(obj); - if (v >= 0) { - if (val) *val = v; - return SWIG_OK; - } else { - return SWIG_OverflowError; - } - } else if (PyLong_Check(obj)) { - unsigned long v = PyLong_AsUnsignedLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -%#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - unsigned long v = PyLong_AsUnsignedLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, 0, ULONG_MAX)) { - if (val) *val = (unsigned long)(d); - return res; - } - } - } -%#endif - if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; - PyArray_CastScalarToCtype(obj, (void*)val, ulongDescr); - return SWIG_OK; - } -} diff --git a/numpy/doc/swig/test/Array.i b/numpy/doc/swig/test/Array.i deleted file mode 100644 index d56dd2d1c..000000000 --- a/numpy/doc/swig/test/Array.i +++ /dev/null @@ -1,107 +0,0 @@ -// -*- c++ -*- - -%module Array - -%{ -#define SWIG_FILE_WITH_INIT -#include "Array1.h" -#include "Array2.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - - // Get the STL typemaps -%include "stl.i" - -// Handle standard exceptions -%include "exception.i" -%exception -{ - try - { - $action - } - catch (const std::invalid_argument& e) - { - SWIG_exception(SWIG_ValueError, e.what()); - } - catch (const std::out_of_range& e) - { - SWIG_exception(SWIG_IndexError, e.what()); - } -} -%init %{ - import_array(); -%} - -// Global ignores -%ignore *::operator=; -%ignore *::operator[]; - -// Apply the 1D NumPy typemaps -%apply (int DIM1 , long* INPLACE_ARRAY1) - {(int length, long* data )}; -%apply (long** ARGOUTVIEW_ARRAY1, int* DIM1 ) - {(long** data , 
int* length)}; - -// Apply the 2D NumPy typemaps -%apply (int DIM1 , int DIM2 , long* INPLACE_ARRAY2) - {(int nrows, int ncols, long* data )}; -%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_ARRAY2) - {(int* nrows, int* ncols, long** data )}; -// Note: the %apply for INPLACE_ARRAY2 above gets successfully applied -// to the constructor Array2(int nrows, int ncols, long* data), but -// does not get applied to the method Array2::resize(int nrows, int -// ncols, long* data). I have no idea why. For this reason the test -// for Apply2.resize(numpy.ndarray) in testArray.py is commented out. - -// Array1 support -%include "Array1.h" -%extend Array1 -{ - void __setitem__(int i, long v) - { - self->operator[](i) = v; - } - - long __getitem__(int i) - { - return self->operator[](i); - } - - int __len__() - { - return self->length(); - } - - std::string __str__() - { - return self->asString(); - } -} - -// Array2 support -%include "Array2.h" -%extend Array2 -{ - void __setitem__(int i, Array1 & v) - { - self->operator[](i) = v; - } - - Array1 & __getitem__(int i) - { - return self->operator[](i); - } - - int __len__() - { - return self->nrows() * self->ncols(); - } - - std::string __str__() - { - return self->asString(); - } -} diff --git a/numpy/doc/swig/test/Array1.cxx b/numpy/doc/swig/test/Array1.cxx deleted file mode 100644 index 0c09e02f9..000000000 --- a/numpy/doc/swig/test/Array1.cxx +++ /dev/null @@ -1,131 +0,0 @@ -#include "Array1.h" -#include -#include - -// Default/length/array constructor -Array1::Array1(int length, long* data) : - _ownData(false), _length(0), _buffer(0) -{ - resize(length, data); -} - -// Copy constructor -Array1::Array1(const Array1 & source) : - _length(source._length) -{ - allocateMemory(); - *this = source; -} - -// Destructor -Array1::~Array1() -{ - deallocateMemory(); -} - -// Assignment operator -Array1 & Array1::operator=(const Array1 & source) -{ - int len = _length < source._length ? 
_length : source._length; - for (int i=0; i < len; ++i) - { - (*this)[i] = source[i]; - } - return *this; -} - -// Equals operator -bool Array1::operator==(const Array1 & other) const -{ - if (_length != other._length) return false; - for (int i=0; i < _length; ++i) - { - if ((*this)[i] != other[i]) return false; - } - return true; -} - -// Length accessor -int Array1::length() const -{ - return _length; -} - -// Resize array -void Array1::resize(int length, long* data) -{ - if (length < 0) throw std::invalid_argument("Array1 length less than 0"); - if (length == _length) return; - deallocateMemory(); - _length = length; - if (!data) - { - allocateMemory(); - } - else - { - _ownData = false; - _buffer = data; - } -} - -// Set item accessor -long & Array1::operator[](int i) -{ - if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); - return _buffer[i]; -} - -// Get item accessor -const long & Array1::operator[](int i) const -{ - if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); - return _buffer[i]; -} - -// String output -std::string Array1::asString() const -{ - std::stringstream result; - result << "["; - for (int i=0; i < _length; ++i) - { - result << " " << _buffer[i]; - if (i < _length-1) result << ","; - } - result << " ]"; - return result.str(); -} - -// Get view -void Array1::view(long** data, int* length) const -{ - *data = _buffer; - *length = _length; -} - -// Private methods - void Array1::allocateMemory() - { - if (_length == 0) - { - _ownData = false; - _buffer = 0; - } - else - { - _ownData = true; - _buffer = new long[_length]; - } - } - - void Array1::deallocateMemory() - { - if (_ownData && _length && _buffer) - { - delete [] _buffer; - } - _ownData = false; - _length = 0; - _buffer = 0; - } diff --git a/numpy/doc/swig/test/Array1.h b/numpy/doc/swig/test/Array1.h deleted file mode 100644 index 754c248fc..000000000 --- a/numpy/doc/swig/test/Array1.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef ARRAY1_H -#define ARRAY1_H - -#include -#include - -class Array1 -{ -public: - - // Default/length/array constructor - Array1(int length = 0, long* data = 0); - - // Copy constructor - Array1(const Array1 & source); - - // Destructor - ~Array1(); - - // Assignment operator - Array1 & operator=(const Array1 & source); - - // Equals operator - bool operator==(const Array1 & other) const; - - // Length accessor - int length() const; - - // Resize array - void resize(int length, long* data = 0); - - // Set item accessor - long & operator[](int i); - - // Get item accessor - const long & operator[](int i) const; - - // String output - std::string asString() const; - - // Get view - void view(long** data, int* length) const; - -private: - // Members - bool _ownData; - int _length; - long * _buffer; - - // Methods - void allocateMemory(); - void deallocateMemory(); -}; - -#endif diff --git a/numpy/doc/swig/test/Array2.cxx b/numpy/doc/swig/test/Array2.cxx deleted file mode 100644 index e3558f786..000000000 --- a/numpy/doc/swig/test/Array2.cxx +++ /dev/null @@ -1,168 +0,0 @@ -#include "Array2.h" -#include - -// Default constructor -Array2::Array2() : - _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) -{ } - -// Size/array constructor -Array2::Array2(int nrows, int ncols, long* data) : - _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) -{ - resize(nrows, ncols, data); -} - -// Copy constructor -Array2::Array2(const Array2 & source) : - _nrows(source._nrows), _ncols(source._ncols) -{ - _ownData = true; - allocateMemory(); - 
*this = source; -} - -// Destructor -Array2::~Array2() -{ - deallocateMemory(); -} - -// Assignment operator -Array2 & Array2::operator=(const Array2 & source) -{ - int nrows = _nrows < source._nrows ? _nrows : source._nrows; - int ncols = _ncols < source._ncols ? _ncols : source._ncols; - for (int i=0; i < nrows; ++i) - { - for (int j=0; j < ncols; ++j) - { - (*this)[i][j] = source[i][j]; - } - } - return *this; -} - -// Equals operator -bool Array2::operator==(const Array2 & other) const -{ - if (_nrows != other._nrows) return false; - if (_ncols != other._ncols) return false; - for (int i=0; i < _nrows; ++i) - { - for (int j=0; j < _ncols; ++j) - { - if ((*this)[i][j] != other[i][j]) return false; - } - } - return true; -} - -// Length accessors -int Array2::nrows() const -{ - return _nrows; -} - -int Array2::ncols() const -{ - return _ncols; -} - -// Resize array -void Array2::resize(int nrows, int ncols, long* data) -{ - if (nrows < 0) throw std::invalid_argument("Array2 nrows less than 0"); - if (ncols < 0) throw std::invalid_argument("Array2 ncols less than 0"); - if (nrows == _nrows && ncols == _ncols) return; - deallocateMemory(); - _nrows = nrows; - _ncols = ncols; - if (!data) - { - allocateMemory(); - } - else - { - _ownData = false; - _buffer = data; - allocateRows(); - } -} - -// Set item accessor -Array1 & Array2::operator[](int i) -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); - return _rows[i]; -} - -// Get item accessor -const Array1 & Array2::operator[](int i) const -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); - return _rows[i]; -} - -// String output -std::string Array2::asString() const -{ - std::stringstream result; - result << "[ "; - for (int i=0; i < _nrows; ++i) - { - if (i > 0) result << " "; - result << (*this)[i].asString(); - if (i < _nrows-1) result << "," << std::endl; - } - result << " ]" << std::endl; - return result.str(); -} - -// Get view -void Array2::view(int* nrows, int* ncols, long** data) const -{ - *nrows = _nrows; - *ncols = _ncols; - *data = _buffer; -} - -// Private methods -void Array2::allocateMemory() -{ - if (_nrows * _ncols == 0) - { - _ownData = false; - _buffer = 0; - _rows = 0; - } - else - { - _ownData = true; - _buffer = new long[_nrows*_ncols]; - allocateRows(); - } -} - -void Array2::allocateRows() -{ - _rows = new Array1[_nrows]; - for (int i=0; i < _nrows; ++i) - { - _rows[i].resize(_ncols, &_buffer[i*_ncols]); - } -} - -void Array2::deallocateMemory() -{ - if (_ownData && _nrows*_ncols && _buffer) - { - delete [] _rows; - delete [] _buffer; - } - _ownData = false; - _nrows = 0; - _ncols = 0; - _buffer = 0; - _rows = 0; -} diff --git a/numpy/doc/swig/test/Array2.h b/numpy/doc/swig/test/Array2.h deleted file mode 100644 index a6e5bfc30..000000000 --- a/numpy/doc/swig/test/Array2.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef ARRAY2_H -#define ARRAY2_H - -#include "Array1.h" -#include -#include - -class Array2 -{ -public: - - // Default constructor - Array2(); - - // Size/array constructor - Array2(int nrows, int ncols, long* data=0); - - // Copy constructor - Array2(const Array2 & source); - - // Destructor - ~Array2(); - - // Assignment operator - Array2 & operator=(const Array2 & source); - - // Equals operator - bool operator==(const Array2 & other) const; - - // Length accessors - int nrows() const; - int ncols() const; - - // Resize array - void resize(int ncols, int nrows, long* data=0); - - // Set item accessor - Array1 & operator[](int i); - - // 
Get item accessor - const Array1 & operator[](int i) const; - - // String output - std::string asString() const; - - // Get view - void view(int* nrows, int* ncols, long** data) const; - -private: - // Members - bool _ownData; - int _nrows; - int _ncols; - long * _buffer; - Array1 * _rows; - - // Methods - void allocateMemory(); - void allocateRows(); - void deallocateMemory(); -}; - -#endif diff --git a/numpy/doc/swig/test/Farray.cxx b/numpy/doc/swig/test/Farray.cxx deleted file mode 100644 index 3983c333b..000000000 --- a/numpy/doc/swig/test/Farray.cxx +++ /dev/null @@ -1,122 +0,0 @@ -#include "Farray.h" -#include - -// Size constructor -Farray::Farray(int nrows, int ncols) : - _nrows(nrows), _ncols(ncols), _buffer(0) -{ - allocateMemory(); -} - -// Copy constructor -Farray::Farray(const Farray & source) : - _nrows(source._nrows), _ncols(source._ncols) -{ - allocateMemory(); - *this = source; -} - -// Destructor -Farray::~Farray() -{ - delete [] _buffer; -} - -// Assignment operator -Farray & Farray::operator=(const Farray & source) -{ - int nrows = _nrows < source._nrows ? _nrows : source._nrows; - int ncols = _ncols < source._ncols ? _ncols : source._ncols; - for (int i=0; i < nrows; ++i) - { - for (int j=0; j < ncols; ++j) - { - (*this)(i,j) = source(i,j); - } - } - return *this; -} - -// Equals operator -bool Farray::operator==(const Farray & other) const -{ - if (_nrows != other._nrows) return false; - if (_ncols != other._ncols) return false; - for (int i=0; i < _nrows; ++i) - { - for (int j=0; j < _ncols; ++j) - { - if ((*this)(i,j) != other(i,j)) return false; - } - } - return true; -} - -// Length accessors -int Farray::nrows() const -{ - return _nrows; -} - -int Farray::ncols() const -{ - return _ncols; -} - -// Set item accessor -long & Farray::operator()(int i, int j) -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); - if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); - return _buffer[offset(i,j)]; -} - -// Get item accessor -const long & Farray::operator()(int i, int j) const -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); - if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); - return _buffer[offset(i,j)]; -} - -// String output -std::string Farray::asString() const -{ - std::stringstream result; - result << "[ "; - for (int i=0; i < _nrows; ++i) - { - if (i > 0) result << " "; - result << "["; - for (int j=0; j < _ncols; ++j) - { - result << " " << (*this)(i,j); - if (j < _ncols-1) result << ","; - } - result << " ]"; - if (i < _nrows-1) result << "," << std::endl; - } - result << " ]" << std::endl; - return result.str(); -} - -// Get view -void Farray::view(int* nrows, int* ncols, long** data) const -{ - *nrows = _nrows; - *ncols = _ncols; - *data = _buffer; -} - -// Private methods -void Farray::allocateMemory() -{ - if (_nrows <= 0) throw std::invalid_argument("Farray nrows <= 0"); - if (_ncols <= 0) throw std::invalid_argument("Farray ncols <= 0"); - _buffer = new long[_nrows*_ncols]; -} - -inline int Farray::offset(int i, int j) const -{ - return i + j * _nrows; -} diff --git a/numpy/doc/swig/test/Farray.h b/numpy/doc/swig/test/Farray.h deleted file mode 100644 index 4199a287c..000000000 --- a/numpy/doc/swig/test/Farray.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef FARRAY_H -#define FARRAY_H - -#include -#include - -class Farray -{ -public: - - // Size constructor - Farray(int nrows, int ncols); - - // Copy constructor - Farray(const 
Farray & source); - - // Destructor - ~Farray(); - - // Assignment operator - Farray & operator=(const Farray & source); - - // Equals operator - bool operator==(const Farray & other) const; - - // Length accessors - int nrows() const; - int ncols() const; - - // Set item accessor - long & operator()(int i, int j); - - // Get item accessor - const long & operator()(int i, int j) const; - - // String output - std::string asString() const; - - // Get view - void view(int* nrows, int* ncols, long** data) const; - -private: - // Members - int _nrows; - int _ncols; - long * _buffer; - - // Default constructor: not implemented - Farray(); - - // Methods - void allocateMemory(); - int offset(int i, int j) const; -}; - -#endif diff --git a/numpy/doc/swig/test/Farray.i b/numpy/doc/swig/test/Farray.i deleted file mode 100644 index 25f6cd025..000000000 --- a/numpy/doc/swig/test/Farray.i +++ /dev/null @@ -1,73 +0,0 @@ -// -*- c++ -*- - -%module Farray - -%{ -#define SWIG_FILE_WITH_INIT -#include "Farray.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - - // Get the STL typemaps -%include "stl.i" - -// Handle standard exceptions -%include "exception.i" -%exception -{ - try - { - $action - } - catch (const std::invalid_argument& e) - { - SWIG_exception(SWIG_ValueError, e.what()); - } - catch (const std::out_of_range& e) - { - SWIG_exception(SWIG_IndexError, e.what()); - } -} -%init %{ - import_array(); -%} - -// Global ignores -%ignore *::operator=; -%ignore *::operator(); - -// Apply the 2D NumPy typemaps -%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_FARRAY2) - {(int* nrows, int* ncols, long** data )}; - -// Farray support -%include "Farray.h" -%extend Farray -{ - PyObject * __setitem__(PyObject* index, long v) - { - int i, j; - if (!PyArg_ParseTuple(index, "ii:Farray___setitem__",&i,&j)) return NULL; - self->operator()(i,j) = v; - return Py_BuildValue(""); - } - - PyObject * __getitem__(PyObject * index) - { - int i, j; - if (!PyArg_ParseTuple(index, "ii:Farray___getitem__",&i,&j)) return NULL; - return SWIG_From_long(self->operator()(i,j)); - } - - int __len__() - { - return self->nrows() * self->ncols(); - } - - std::string __str__() - { - return self->asString(); - } -} diff --git a/numpy/doc/swig/test/Makefile b/numpy/doc/swig/test/Makefile deleted file mode 100644 index 86ba5e310..000000000 --- a/numpy/doc/swig/test/Makefile +++ /dev/null @@ -1,32 +0,0 @@ -# SWIG -INTERFACES = Array.i Farray.i Vector.i Matrix.i Tensor.i -WRAPPERS = $(INTERFACES:.i=_wrap.cxx) -PROXIES = $(INTERFACES:.i=.py ) - -# Default target: build the tests -.PHONY : all -all: $(WRAPPERS) Array1.cxx Array1.h Farray.cxx Farray.h Vector.cxx Vector.h \ - Matrix.cxx Matrix.h Tensor.cxx Tensor.h - ./setup.py build - -# Test target: run the tests -.PHONY : test -test: all - python testVector.py - python testMatrix.py - python testTensor.py - python testArray.py - python testFarray.py - -# Rule: %.i -> %_wrap.cxx -%_wrap.cxx: %.i %.h ../numpy.i - swig -c++ -python $< -%_wrap.cxx: %.i %1.h %2.h ../numpy.i - swig -c++ -python $< - -# Clean target -.PHONY : clean -clean: - $(RM) -r build - $(RM) $(WRAPPERS) - $(RM) $(PROXIES) diff --git a/numpy/doc/swig/test/Matrix.cxx b/numpy/doc/swig/test/Matrix.cxx deleted file mode 100644 index b953d7017..000000000 --- a/numpy/doc/swig/test/Matrix.cxx +++ /dev/null @@ -1,112 +0,0 @@ -#include -#include -#include -#include "Matrix.h" - -// The following macro defines a family of functions that work with 2D -// arrays with the forms -// -// TYPE SNAMEDet( TYPE matrix[2][2]); -// TYPE 
SNAMEMax( TYPE * matrix, int rows, int cols); -// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); -// void SNAMEScale( TYPE matrix[3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); -// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil); -// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 2D input arrays, hard-coded length -// * 2D input arrays -// * 2D input arrays, data last -// * 2D in-place arrays, hard-coded lengths -// * 2D in-place arrays -// * 2D in-place arrays, data last -// * 2D argout arrays, hard-coded length -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Det(TYPE matrix[2][2]) { \ - return matrix[0][0]*matrix[1][1] - matrix[0][1]*matrix[1][0]; \ -} \ -\ -TYPE SNAME ## Max(TYPE * matrix, int rows, int cols) { \ - int i, j, index; \ - TYPE result = matrix[0]; \ - for (j=0; j result) result = matrix[index]; \ - } \ - } \ - return result; \ -} \ -\ -TYPE SNAME ## Min(int rows, int cols, TYPE * matrix) { \ - int i, j, index; \ - TYPE result = matrix[0]; \ - for (j=0; j ceil) array[index] = ceil; \ - } \ - } \ -} \ -\ -void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]) { \ - for (int i=0; i<3; ++i) { \ - for (int j=0; j<3; ++j) { \ - if (i >= j) { \ - lower[i][j] = matrix[i][j]; \ - upper[i][j] = 0; \ - } else { \ - lower[i][j] = 0; \ - upper[i][j] = matrix[i][j]; \ - } \ - } \ - } \ -} - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy/doc/swig/test/Matrix.h b/numpy/doc/swig/test/Matrix.h deleted file mode 100644 index f37836cc4..000000000 --- a/numpy/doc/swig/test/Matrix.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef MATRIX_H -#define MATRIX_H - -// The following macro defines the prototypes for a family of -// functions that work with 2D arrays with the forms -// -// TYPE SNAMEDet( TYPE matrix[2][2]); -// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); -// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); -// void SNAMEScale( TYPE array[3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); -// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil ); -// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. 
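(Illustration only, not part of Matrix.h: the per-type functions this macro declares are what the SWIG-generated Matrix module exposes to Python, and testMatrix.py later in this patch drives them. A minimal sketch of that usage, assuming the extension has already been built and is importable::

    import numpy as N
    import Matrix      # SWIG-generated wrapper; assumes a prior "./setup.py build"

    print Matrix.doubleDet([[8, 7], [6, 9]])        # IN_ARRAY2[ANY][ANY]: 8*9 - 7*6 = 30.0
    print Matrix.doubleMax([[6, 5, 4], [3, 2, 1]])  # IN_ARRAY2 + DIM1/DIM2: 6.0

    m = N.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], 'd')
    Matrix.doubleScale(m, 4)                        # INPLACE_ARRAY2[ANY][ANY]: m scaled in place

)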
The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 2D input arrays, hard-coded lengths -// * 2D input arrays -// * 2D input arrays, data last -// * 2D in-place arrays, hard-coded lengths -// * 2D in-place arrays -// * 2D in-place arrays, data last -// * 2D argout arrays, hard-coded length -// -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## Det( TYPE matrix[2][2]); \ -TYPE SNAME ## Max( TYPE * matrix, int rows, int cols); \ -TYPE SNAME ## Min( int rows, int cols, TYPE * matrix); \ -void SNAME ## Scale( TYPE array[3][3], TYPE val); \ -void SNAME ## Floor( TYPE * array, int rows, int cols, TYPE floor); \ -void SNAME ## Ceil( int rows, int cols, TYPE * array, TYPE ceil ); \ -void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]); - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy/doc/swig/test/Matrix.i b/numpy/doc/swig/test/Matrix.i deleted file mode 100644 index e721397a0..000000000 --- a/numpy/doc/swig/test/Matrix.i +++ /dev/null @@ -1,45 +0,0 @@ -// -*- c++ -*- -%module Matrix - -%{ -#define SWIG_FILE_WITH_INIT -#include "Matrix.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE IN_ARRAY2[ANY][ANY]) {(TYPE matrix[ANY][ANY])}; -%apply (TYPE* IN_ARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; -%apply (int DIM1, int DIM2, TYPE* IN_ARRAY2) {(int rows, int cols, TYPE* matrix)}; - -%apply (TYPE INPLACE_ARRAY2[ANY][ANY]) {(TYPE array[3][3])}; -%apply (TYPE* INPLACE_ARRAY2, int DIM1, int DIM2) {(TYPE* array, int rows, int cols)}; -%apply (int DIM1, int DIM2, TYPE* INPLACE_ARRAY2) {(int rows, int cols, TYPE* array)}; - -%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE lower[3][3])}; -%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE upper[3][3])}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Matrix.h" diff --git a/numpy/doc/swig/test/Tensor.cxx b/numpy/doc/swig/test/Tensor.cxx deleted file mode 100644 index dce595291..000000000 --- a/numpy/doc/swig/test/Tensor.cxx +++ /dev/null @@ -1,131 +0,0 @@ -#include -#include -#include -#include "Tensor.h" - -// The following macro defines a family of functions that work with 3D -// arrays with the forms -// -// TYPE SNAMENorm( TYPE tensor[2][2][2]); -// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); -// TYPE SNAMEMin( int rows, int cols, int num, TYPE * tensor); -// void SNAMEScale( TYPE tensor[3][3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); -// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil); -// 
void SNAMELUSplit(TYPE in[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 3D input arrays, hard-coded length -// * 3D input arrays -// * 3D input arrays, data last -// * 3D in-place arrays, hard-coded lengths -// * 3D in-place arrays -// * 3D in-place arrays, data last -// * 3D argout arrays, hard-coded length -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Norm(TYPE tensor[2][2][2]) { \ - double result = 0; \ - for (int k=0; k<2; ++k) \ - for (int j=0; j<2; ++j) \ - for (int i=0; i<2; ++i) \ - result += tensor[i][j][k] * tensor[i][j][k]; \ - return (TYPE)sqrt(result/8); \ -} \ -\ -TYPE SNAME ## Max(TYPE * tensor, int rows, int cols, int num) { \ - int i, j, k, index; \ - TYPE result = tensor[0]; \ - for (k=0; k result) result = tensor[index]; \ - } \ - } \ - } \ - return result; \ -} \ -\ -TYPE SNAME ## Min(int rows, int cols, int num, TYPE * tensor) { \ - int i, j, k, index; \ - TYPE result = tensor[0]; \ - for (k=0; k ceil) array[index] = ceil; \ - } \ - } \ - } \ -} \ -\ -void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], \ - TYPE upper[2][2][2]) { \ - int sum; \ - for (int k=0; k<2; ++k) { \ - for (int j=0; j<2; ++j) { \ - for (int i=0; i<2; ++i) { \ - sum = i + j + k; \ - if (sum < 2) { \ - lower[i][j][k] = tensor[i][j][k]; \ - upper[i][j][k] = 0; \ - } else { \ - upper[i][j][k] = tensor[i][j][k]; \ - lower[i][j][k] = 0; \ - } \ - } \ - } \ - } \ -} - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy/doc/swig/test/Tensor.h b/numpy/doc/swig/test/Tensor.h deleted file mode 100644 index d60eb2d2e..000000000 --- a/numpy/doc/swig/test/Tensor.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef TENSOR_H -#define TENSOR_H - -// The following macro defines the prototypes for a family of -// functions that work with 3D arrays with the forms -// -// TYPE SNAMENorm( TYPE tensor[2][2][2]); -// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); -// TYPE SNAMEMin( int rows, int cols, int num, TYPE * tensor); -// void SNAMEScale( TYPE array[3][3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); -// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil ); -// void SNAMELUSplit(TYPE in[3][3][3], TYPE lower[3][3][3], TYPE upper[3][3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. 
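(Illustration only, not part of Tensor.h: this is the 3D counterpart of the Matrix.h family, exercised by testTensor.py later in this patch through the SWIG-generated Tensor module. A minimal sketch under the same build assumptions::

    import Tensor      # SWIG-generated wrapper; assumes a prior "./setup.py build"

    t = [[[0, 1], [2, 3]],
         [[3, 2], [1, 0]]]
    print Tensor.doubleNorm(t)                      # IN_ARRAY3[ANY][ANY][ANY]: sqrt(28/8)

    lower, upper = Tensor.doubleLUSplit([[[1, 1], [1, 1]],
                                         [[1, 1], [1, 1]]])   # two ARGOUT_ARRAY3 outputs

)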
The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 3D input arrays, hard-coded lengths -// * 3D input arrays -// * 3D input arrays, data last -// * 3D in-place arrays, hard-coded lengths -// * 3D in-place arrays -// * 3D in-place arrays, data last -// * 3D argout arrays, hard-coded length -// -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## Norm( TYPE tensor[2][2][2]); \ -TYPE SNAME ## Max( TYPE * tensor, int rows, int cols, int num); \ -TYPE SNAME ## Min( int rows, int cols, int num, TYPE * tensor); \ -void SNAME ## Scale( TYPE array[3][3][3], TYPE val); \ -void SNAME ## Floor( TYPE * array, int rows, int cols, int num, TYPE floor); \ -void SNAME ## Ceil( int rows, int cols, int num, TYPE * array, TYPE ceil ); \ -void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy/doc/swig/test/Tensor.i b/numpy/doc/swig/test/Tensor.i deleted file mode 100644 index a1198dc9e..000000000 --- a/numpy/doc/swig/test/Tensor.i +++ /dev/null @@ -1,49 +0,0 @@ -// -*- c++ -*- -%module Tensor - -%{ -#define SWIG_FILE_WITH_INIT -#include "Tensor.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE IN_ARRAY3[ANY][ANY][ANY]) {(TYPE tensor[ANY][ANY][ANY])}; -%apply (TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3) - {(TYPE* tensor, int rows, int cols, int num)}; -%apply (int DIM1, int DIM2, int DIM3, TYPE* IN_ARRAY3) - {(int rows, int cols, int num, TYPE* tensor)}; - -%apply (TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) {(TYPE array[3][3][3])}; -%apply (TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) - {(TYPE* array, int rows, int cols, int num)}; -%apply (int DIM1, int DIM2, int DIM3, TYPE* INPLACE_ARRAY3) - {(int rows, int cols, int num, TYPE* array)}; - -%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE lower[2][2][2])}; -%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE upper[2][2][2])}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Tensor.h" diff --git a/numpy/doc/swig/test/Vector.cxx b/numpy/doc/swig/test/Vector.cxx deleted file mode 100644 index 2c90404da..000000000 --- a/numpy/doc/swig/test/Vector.cxx +++ /dev/null @@ -1,100 +0,0 @@ -#include -#include -#include -#include "Vector.h" - -// The following macro defines a family of functions that work with 1D -// arrays with the forms -// -// TYPE SNAMELength( TYPE vector[3]); -// TYPE SNAMEProd( TYPE * series, int size); -// TYPE SNAMESum( int size, TYPE * series); -// void SNAMEReverse(TYPE array[3]); -// void SNAMEOnes( TYPE * array, 
int size); -// void SNAMEZeros( int size, TYPE * array); -// void SNAMEEOSplit(TYPE vector[3], TYPE even[3], odd[3]); -// void SNAMETwos( TYPE * twoVec, int size); -// void SNAMEThrees( int size, TYPE * threeVec); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 1D input arrays, hard-coded length -// * 1D input arrays -// * 1D input arrays, data last -// * 1D in-place arrays, hard-coded length -// * 1D in-place arrays -// * 1D in-place arrays, data last -// * 1D argout arrays, hard-coded length -// * 1D argout arrays -// * 1D argout arrays, data last -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Length(TYPE vector[3]) { \ - double result = 0; \ - for (int i=0; i<3; ++i) result += vector[i]*vector[i]; \ - return (TYPE)sqrt(result); \ -} \ -\ -TYPE SNAME ## Prod(TYPE * series, int size) { \ - TYPE result = 1; \ - for (int i=0; i>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7],[6,9]] - self.assertEquals(det(matrix), 30) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetBadList(self): - "Test det function with bad list" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7], ["e", "pi"]] - self.assertRaises(BadListError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetWrongDim(self): - "Test det function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [8,7] - self.assertRaises(TypeError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetWrongSize(self): - "Test det function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7,6], [5,4,3], [2,1,0]] - self.assertRaises(TypeError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetNonContainer(self): - "Test det function with non-container" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - self.assertRaises(TypeError, det, None) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMax(self): - "Test max function" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,5,4],[3,2,1]] - self.assertEquals(max(matrix), 6) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxBadList(self): - "Test max function with bad list" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,"five",4], ["three", 2, "one"]] - self.assertRaises(BadListError, max, matrix) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxNonContainer(self): - "Test max function with non-container" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, None) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxWrongDim(self): - "Test max function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - max = Matrix.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, 1, 2, 3]) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMin(self): - "Test min function" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [[9,8],[7,6],[5,4]] - self.assertEquals(min(matrix), 4) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinBadList(self): - "Test min function with bad list" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [["nine","eight"], ["seven","six"]] - self.assertRaises(BadListError, min, matrix) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinWrongDim(self): - "Test min function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [1,3,5,7,9]) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinNonContainer(self): - "Test min function with non-container" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, False) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScale(self): - "Test scale function" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = N.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode) - scale(matrix,4) - self.assertEquals((matrix == [[4,8,12],[8,4,8],[12,8,4]]).all(), True) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongDim(self): - "Test scale function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = N.array([1,2,2,1],self.typeCode) - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongSize(self): - "Test scale function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = N.array([[1,2],[2,1]],self.typeCode) - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongType(self): - "Test scale function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = N.array([[1,2,3],[2,1,2],[3,2,1]],'c') - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleNonArray(self): - "Test scale function with non-array" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = [[1,2,3],[2,1,2],[3,2,1]] - self.assertRaises(TypeError, scale, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloor(self): - "Test floor function" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = N.array([[6,7],[8,9]],self.typeCode) - floor(matrix,7) - N.testing.assert_array_equal(matrix, N.array([[7,7],[8,9]])) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorWrongDim(self): - "Test floor function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = N.array([6,7,8,9],self.typeCode) - self.assertRaises(TypeError, floor, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorWrongType(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = N.array([[6,7], [8,9]],'c') - self.assertRaises(TypeError, floor, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorNonArray(self): - "Test floor function with non-array" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = [[6,7], [8,9]] - self.assertRaises(TypeError, floor, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeil(self): - "Test ceil function" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = N.array([[1,2],[3,4]],self.typeCode) - ceil(matrix,3) - N.testing.assert_array_equal(matrix, N.array([[1,2],[3,3]])) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilWrongDim(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = N.array([1,2,3,4],self.typeCode) - self.assertRaises(TypeError, ceil, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilWrongType(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = N.array([[1,2], [3,4]],'c') - self.assertRaises(TypeError, ceil, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilNonArray(self): - "Test ceil function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = [[1,2], [3,4]] - self.assertRaises(TypeError, ceil, matrix) - - # Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap - def testLUSplit(self): - "Test luSplit function" - print >>sys.stderr, self.typeStr, "... 
", - luSplit = Matrix.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]]) - self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True) - self.assertEquals((upper == [[0,2,3],[0,0,6],[0,0,0]]).all(), True) - -###################################################################### - -class scharTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - -class ulongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( 
shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 2D Functions of Module Matrix" - print "NumPy version", N.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/swig/test/testTensor.py b/numpy/doc/swig/test/testTensor.py deleted file mode 100755 index f68e6b720..000000000 --- a/numpy/doc/swig/test/testTensor.py +++ /dev/null @@ -1,405 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -from math import sqrt -import os -import sys -import unittest - -# Import NumPy -import numpy as N -major, minor = [ int(d) for d in N.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -# Add the distutils-generated build directory to the python search path and then -# import the extension module -libDir = "lib.%s-%s" % (get_platform(), sys.version[:3]) -sys.path.insert(0,os.path.join("build", libDir)) -import Tensor - -###################################################################### - -class TensorTestCase(unittest.TestCase): - - def __init__(self, methodName="runTests"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - self.result = sqrt(28.0/8) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNorm(self): - "Test norm function" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1], [2,3]], - [[3,2], [1,0]]] - if isinstance(self.result, int): - self.assertEquals(norm(tensor), self.result) - else: - self.assertAlmostEqual(norm(tensor), self.result, 6) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormBadList(self): - "Test norm function with bad list" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,"one"],[2,3]], - [[3,"two"],[1,0]]] - self.assertRaises(BadListError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormWrongDim(self): - "Test norm function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[0,1,2,3], - [3,2,1,0]] - self.assertRaises(TypeError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormWrongSize(self): - "Test norm function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1,0], [2,3,2]], - [[3,2,3], [1,0,1]]] - self.assertRaises(TypeError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormNonContainer(self): - "Test norm function with non-container" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - self.assertRaises(TypeError, norm, None) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMax(self): - "Test max function" - print >>sys.stderr, self.typeStr, "... 
", - max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,2], [3,4]], - [[5,6], [7,8]]] - self.assertEquals(max(tensor), 8) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxBadList(self): - "Test max function with bad list" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,"two"], [3,4]], - [[5,"six"], [7,8]]] - self.assertRaises(BadListError, max, tensor) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxNonContainer(self): - "Test max function with non-container" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, None) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxWrongDim(self): - "Test max function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, -1, 2, -3]) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMin(self): - "Test min function" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] - self.assertEquals(min(tensor), 2) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinBadList(self): - "Test min function with bad list" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[["nine",8], [7,6]], - [["five",4], [3,2]]] - self.assertRaises(BadListError, min, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinNonContainer(self): - "Test min function with non-container" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, True) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinWrongDim(self): - "Test min function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [[1,3],[5,7]]) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScale(self): - "Test scale function" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = N.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],self.typeCode) - scale(tensor,4) - self.assertEquals((tensor == [[[4,0,4], [0,4,0], [4,0,4]], - [[0,4,0], [4,0,4], [0,4,0]], - [[4,0,4], [0,4,0], [4,0,4]]]).all(), True) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongType(self): - "Test scale function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = N.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],'c') - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongDim(self): - "Test scale function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = N.array([[1,0,1], [0,1,0], [1,0,1], - [0,1,0], [1,0,1], [0,1,0]],self.typeCode) - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongSize(self): - "Test scale function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = N.array([[[1,0], [0,1], [1,0]], - [[0,1], [1,0], [0,1]], - [[1,0], [0,1], [1,0]]],self.typeCode) - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleNonArray(self): - "Test scale function with non-array" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - self.assertRaises(TypeError, scale, True) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloor(self): - "Test floor function" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = N.array([[[1,2], [3,4]], - [[5,6], [7,8]]],self.typeCode) - floor(tensor,4) - N.testing.assert_array_equal(tensor, N.array([[[4,4], [4,4]], - [[5,6], [7,8]]])) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorWrongType(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = N.array([[[1,2], [3,4]], - [[5,6], [7,8]]],'c') - self.assertRaises(TypeError, floor, tensor) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorWrongDim(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = N.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode) - self.assertRaises(TypeError, floor, tensor) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorNonArray(self): - "Test floor function with non-array" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - self.assertRaises(TypeError, floor, object) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeil(self): - "Test ceil function" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = N.array([[[9,8], [7,6]], - [[5,4], [3,2]]],self.typeCode) - ceil(tensor,5) - N.testing.assert_array_equal(tensor, N.array([[[5,5], [5,5]], - [[5,4], [3,2]]])) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilWrongType(self): - "Test ceil function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = N.array([[[9,8], [7,6]], - [[5,4], [3,2]]],'c') - self.assertRaises(TypeError, ceil, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilWrongDim(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = N.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode) - self.assertRaises(TypeError, ceil, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilNonArray(self): - "Test ceil function with non-array" - print >>sys.stderr, self.typeStr, "... 
", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] - self.assertRaises(TypeError, ceil, tensor) - - # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap - def testLUSplit(self): - "Test luSplit function" - print >>sys.stderr, self.typeStr, "... ", - luSplit = Tensor.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[[1,1], [1,1]], - [[1,1], [1,1]]]) - self.assertEquals((lower == [[[1,1], [1,0]], - [[1,0], [0,0]]]).all(), True) - self.assertEquals((upper == [[[0,0], [0,1]], - [[0,1], [1,1]]]).all(), True) - -###################################################################### - -class scharTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - self.result = int(self.result) - -###################################################################### - -class ucharTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - self.result = int(self.result) - -###################################################################### - -class shortTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - self.result = int(self.result) - -###################################################################### - -class ushortTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - self.result = int(self.result) - -###################################################################### - -class intTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - self.result = int(self.result) - -###################################################################### - -class uintTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - self.result = int(self.result) - -###################################################################### - -class longTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - self.result = int(self.result) - -###################################################################### - -class ulongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - self.result = int(self.result) - -###################################################################### - -class longLongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - self.result = int(self.result) - -###################################################################### - -class ulongLongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - self.result = int(self.result) - -###################################################################### - -class floatTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - 
TensorTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 3D Functions of Module Tensor" - print "NumPy version", N.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/swig/test/testVector.py b/numpy/doc/swig/test/testVector.py deleted file mode 100755 index 82a922e25..000000000 --- a/numpy/doc/swig/test/testVector.py +++ /dev/null @@ -1,384 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -import os -import sys -import unittest - -# Import NumPy -import numpy as N -major, minor = [ int(d) for d in N.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -# Add the distutils-generated build directory to the python search path and then -# import the extension module -libDir = "lib.%s-%s" % (get_platform(), sys.version[:3]) -sys.path.insert(0,os.path.join("build", libDir)) -import Vector - -###################################################################### - -class VectorTestCase(unittest.TestCase): - - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLength(self): - "Test length function" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertEquals(length([5, 12, 0]), 13) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthBadList(self): - "Test length function with bad list" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(BadListError, length, [5, "twelve", 0]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthWrongSize(self): - "Test length function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [5, 12]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthWrongDim(self): - "Test length function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [[1,2], [3,4]]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthNonContainer(self): - "Test length function with non-container" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, None) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProd(self): - "Test prod function" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertEquals(prod([1,2,3,4]), 24) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdBadList(self): - "Test prod function with bad list" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]]) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdWrongDim(self): - "Test prod function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, [[1,2], [8,9]]) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdNonContainer(self): - "Test prod function with non-container" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, None) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSum(self): - "Test sum function" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertEquals(sum([5,6,7,8]), 26) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumBadList(self): - "Test sum function with bad list" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(BadListError, sum, [3,4, 5, "pi"]) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumWrongDim(self): - "Test sum function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, [[3,4], [5,6]]) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumNonContainer(self): - "Test sum function with non-container" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, True) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverse(self): - "Test reverse function" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = N.array([1,2,4],self.typeCode) - reverse(vector) - self.assertEquals((vector == [4,2,1]).all(), True) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongDim(self): - "Test reverse function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = N.array([[1,2], [3,4]],self.typeCode) - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongSize(self): - "Test reverse function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = N.array([9,8,7,6,5,4],self.typeCode) - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongType(self): - "Test reverse function with wrong type" - print >>sys.stderr, self.typeStr, "... 
", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = N.array([1,2,4],'c') - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseNonArray(self): - "Test reverse function with non-array" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - self.assertRaises(TypeError, reverse, [2,4,6]) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnes(self): - "Test ones function" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = N.zeros(5,self.typeCode) - ones(vector) - N.testing.assert_array_equal(vector, N.array([1,1,1,1,1])) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesWrongDim(self): - "Test ones function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = N.zeros((5,5),self.typeCode) - self.assertRaises(TypeError, ones, vector) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesWrongType(self): - "Test ones function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = N.zeros((5,5),'c') - self.assertRaises(TypeError, ones, vector) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesNonArray(self): - "Test ones function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - self.assertRaises(TypeError, ones, [2,4,6,8]) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZeros(self): - "Test zeros function" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = N.ones(5,self.typeCode) - zeros(vector) - N.testing.assert_array_equal(vector, N.array([0,0,0,0,0])) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosWrongDim(self): - "Test zeros function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = N.ones((5,5),self.typeCode) - self.assertRaises(TypeError, zeros, vector) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosWrongType(self): - "Test zeros function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = N.ones(6,'c') - self.assertRaises(TypeError, zeros, vector) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosNonArray(self): - "Test zeros function with non-array" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - self.assertRaises(TypeError, zeros, [1,3,5,7,9]) - - # Test the (type ARGOUT_ARRAY1[ANY]) typemap - def testEOSplit(self): - "Test eoSplit function" - print >>sys.stderr, self.typeStr, "... ", - eoSplit = Vector.__dict__[self.typeStr + "EOSplit"] - even, odd = eoSplit([1,2,3]) - self.assertEquals((even == [1,0,3]).all(), True) - self.assertEquals((odd == [0,2,0]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testTwos(self): - "Test twos function" - print >>sys.stderr, self.typeStr, "... ", - twos = Vector.__dict__[self.typeStr + "Twos"] - vector = twos(5) - self.assertEquals((vector == [2,2,2,2,2]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testTwosNonInt(self): - "Test twos function with non-integer dimension" - print >>sys.stderr, self.typeStr, "... 
", - twos = Vector.__dict__[self.typeStr + "Twos"] - self.assertRaises(TypeError, twos, 5.0) - - # Test the (int DIM1, type* ARGOUT_ARRAY1) typemap - def testThrees(self): - "Test threes function" - print >>sys.stderr, self.typeStr, "... ", - threes = Vector.__dict__[self.typeStr + "Threes"] - vector = threes(6) - self.assertEquals((vector == [3,3,3,3,3,3]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testThreesNonInt(self): - "Test threes function with non-integer dimension" - print >>sys.stderr, self.typeStr, "... ", - threes = Vector.__dict__[self.typeStr + "Threes"] - self.assertRaises(TypeError, threes, "threes") - -###################################################################### - -class scharTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - -class ulongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(VectorTestCase): - def __init__(self, 
methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 1D Functions of Module Vector" - print "NumPy version", N.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy/doc/ufuncs.txt b/numpy/doc/ufuncs.txt deleted file mode 100644 index fa107cc21..000000000 --- a/numpy/doc/ufuncs.txt +++ /dev/null @@ -1,103 +0,0 @@ -BUFFERED General Ufunc explanation -================================== - -.. note:: - - This was implemented already, but the notes are kept here for historical - and explanatory purposes. - -We need to optimize the section of ufunc code that handles mixed-type -and misbehaved arrays. In particular, we need to fix it so that items -are not copied into the buffer if they don't have to be. - -Right now, all data is copied into the buffers (even scalars are copied -multiple times into the buffers even if they are not going to be cast). - -Some benchmarks show that this results in a significant slow-down -(factor of 4) over similar numarray code. - -The approach is therefore, to loop over the largest-dimension (just like -the NO_BUFFER) portion of the code. All arrays will either have N or -1 in this last dimension (or their would be a mis-match error). The -buffer size is B. - -If N <= B (and only if needed), we copy the entire last-dimension into -the buffer as fast as possible using the single-stride information. - -Also we only copy into output arrays if needed as well (other-wise the -output arrays are used directly in the ufunc code). - -Call the function using the appropriate strides information from all the input -arrays. Only set the strides to the element-size for arrays that will be copied. - -If N > B, then we have to do the above operation in a loop (with an extra loop -at the end with a different buffer size). - -Both of these cases are handled with the following code:: - - Compute N = quotient * B + remainder. - quotient = N / B # integer math - (store quotient + 1) as the number of innerloops - remainder = N % B # integer remainder - -On the inner-dimension we will have (quotient + 1) loops where -the size of the inner function is B for all but the last when the niter size is -remainder. - -So, the code looks very similar to NOBUFFER_LOOP except the inner loop is -replaced with:: - - for(k=0; iobj gets set to 1. Then there are two cases: - -1) The loop function is an object loop: - - Inputs: - - castbuf starts as NULL and then gets filled with new references. 
- - function gets called and doesn't alter the reference count in castbuf - - on the next iteration (next value of k), the casting function will - DECREF what is present in castbuf already and place a new object. - - - At the end of the inner loop (for loop over k), the final new-references - in castbuf must be DECREF'd. If its a scalar then a single DECREF suffices - Otherwise, "bufsize" DECREF's are needed (unless there was only one - loop, then "remainder" DECREF's are needed). - - Outputs: - - castbuf contains a new reference as the result of the function call. This - gets converted to the type of interest and. This new reference in castbuf - will be DECREF'd by later calls to the function. Thus, only after the - inner most loop do we need to DECREF the remaining references in castbuf. - -2) The loop function is of a different type: - - Inputs: - - - The PyObject input is copied over to buffer which receives a "borrowed" - reference. This reference is then used but not altered by the cast - call. Nothing needs to be done. - - Outputs: - - - The buffer[i] memory receives the PyObject input after the cast. This is - a new reference which will be "stolen" as it is copied over into memory. - The only problem is that what is presently in memory must be DECREF'd first. - - - - - diff --git a/numpy/dual.py b/numpy/dual.py deleted file mode 100644 index c47f8f820..000000000 --- a/numpy/dual.py +++ /dev/null @@ -1,57 +0,0 @@ -# This module should be used for functions both in numpy and scipy if -# you want to use the numpy version if available but the scipy version -# otherwise. -# Usage --- from numpy.dual import fft, inv - -__all__ = ['fft','ifft','fftn','ifftn','fft2','ifft2', - 'norm','inv','svd','solve','det','eig','eigvals', - 'eigh','eigvalsh','lstsq', 'pinv','cholesky','i0'] - -import numpy.linalg as linpkg -import numpy.fft as fftpkg -from numpy.lib import i0 -import sys - - -fft = fftpkg.fft -ifft = fftpkg.ifft -fftn = fftpkg.fftn -ifftn = fftpkg.ifftn -fft2 = fftpkg.fft2 -ifft2 = fftpkg.ifft2 - -norm = linpkg.norm -inv = linpkg.inv -svd = linpkg.svd -solve = linpkg.solve -det = linpkg.det -eig = linpkg.eig -eigvals = linpkg.eigvals -eigh = linpkg.eigh -eigvalsh = linpkg.eigvalsh -lstsq = linpkg.lstsq -pinv = linpkg.pinv -cholesky = linpkg.cholesky - -_restore_dict = {} - -def register_func(name, func): - if name not in __all__: - raise ValueError, "%s not a dual function." % name - f = sys._getframe(0).f_globals - _restore_dict[name] = f[name] - f[name] = func - -def restore_func(name): - if name not in __all__: - raise ValueError, "%s not a dual function." % name - try: - val = _restore_dict[name] - except KeyError: - return - else: - sys._getframe(0).f_globals[name] = val - -def restore_all(): - for name in _restore_dict.keys(): - restore_func(name) diff --git a/numpy/f2py/BUGS.txt b/numpy/f2py/BUGS.txt deleted file mode 100644 index ee08863bb..000000000 --- a/numpy/f2py/BUGS.txt +++ /dev/null @@ -1,55 +0,0 @@ -December 1, 2002: - -C FILE: STRING.F - SUBROUTINE FOO - END -C END OF FILE STRING.F -does not build with - f2py -c -m string string.f -Cause: string is mapped to string_bn -************************************************************************** -August 16, 2001: -1) re in Python 2.x is **three** times slower than the re in Python 1.5. -************************************************************************** -HP-UX B.10.20 A 9000/780: -Fortran function returning character*(*) (id=7) ... failed(core dump) -Fortran function returning logical*8 (id=21) ... expected .true. 
but got 0 -Callback function returning real (id=45) ... expected 34.56 but got 14087495680.0 -Callback function returning real*4 (id=46) ... expected 34.56 but got 14087495680.0 -Callback function returning logical*8 (id=55) ... expected .true. but got 0 - C compiler: gcc ('gcc 2.x.x' 2.95.2) (from .f2py_get_compiler_CC) - Fortran compiler: g77 ('g77 2.x.x' 2.95.2) (from .f2py_get_compiler_FC) - Linker: ld ('HP-UX ld' 92453-07 linker linker ld B.10.24 961204) (from .f2py_get_compiler_LD) -************************************************************************** -Linux 2.2.13-0.9 #1 Thu Dec 9 17:03:57 EST 1999 alpha unknown: -Fortran function returning character*(*) (id=7) ... expected 'abcdefgh' but got 'abcdefgh \201' (o?k) -Callback function returning complex (id=48) ... failed(core dump) - Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump) -Callback function returning complex*8 (id=49) ... failed(core dump) - Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump) -Callback function returning complex*16 (id=50) ... failed(core dump) - Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump) - C compiler: cc ('Compaq C' V6.2-002) (from .f2py_get_compiler_CC) - Fortran compiler: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_FC) - Linker: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_LD) -************************************************************************** -Linux 2.2.14-15mdk #1 Tue Jan 4 22:24:20 CET 2000 i686 unknown: -Callback function returning logical*8 (id=55) ... failed - C compiler: cc ('gcc 2.x.x' 2.95.2) - Fortran compiler: f90 ('Absoft F90' 3.0) - Linker: ld ('GNU ld' 2.9.5) -************************************************************************** -IRIX64 6.5 04151556 IP30: -Testing integer, intent(inout) ...failed # not f2py problem -Testing integer, intent(inout,out) ...failed -Testing integer*1, intent(inout) ...failed -Testing integer*1, intent(inout,out) ...failed -Testing integer*8, intent(inout) ...failed -Testing integer*8, intent(inout,out) ...failed -cc-1140 cc: WARNING File = genmodule.c, Line = 114 - A value of type "void *" cannot be used to initialize an entity of type - "void (*)()". - {"foo",-1,{-1},0,(char *)F_FUNC(foo,FOO),(void *)gen_foo,doc_gen_foo}, - C compiler: cc ('MIPSpro 7 Compilers' 7.30) - Fortran compiler: f77 ('MIPSpro 7 Compilers' 7.30) - Linker: ld ('Linker for MIPSpro 7 Compilers' 7.30.) diff --git a/numpy/f2py/Makefile b/numpy/f2py/Makefile deleted file mode 100644 index 4e53ac471..000000000 --- a/numpy/f2py/Makefile +++ /dev/null @@ -1,173 +0,0 @@ -# Makefile for f2py2e -# -# Use GNU make for making. 
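# Informal summary (for orientation only) of the main targets defined below:
#   make install  - run `python setup.py install`
#   make test     - run the suite in tests/run_all.py
#   make latest   - build F2PY and numpy_distutils tarballs/installers under upload/tmp/
#   make upload   - push the built files and generated docs to the upload area
#   make clean    - remove build by-products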
-# $Revision: 1.46 $ -# $Date: 2005/01/30 17:22:55 $ -# Pearu Peterson - -PYTHON=python -MAJOR=2 -F2PY2E_CVSROOT=:pserver:anonymous@cens.ioc.ee:/home/cvs -SCIPY_CVSROOT=:pserver:anonymous@numpy.org:/home/cvsroot - -UPLOADCMD = scp -r -UPLOADDIR = pearu@kev.ioc.ee:/net/cens/home/www/unsecure/projects/f2py2e/ - -REV=`python -c 'from __version__ import *;print version'` -SCIPY_DISTUTILS_REV=`cd numpy_distutils && $(PYTHON) -c 'from numpy_distutils_version import *;print numpy_distutils_version' && cd ..` - -SRC_FILES = F2PY-$(MAJOR)-latest.tar.gz numpy_distutils-latest.tar.gz F2PY-$(MAJOR)-latest.win32.exe numpy_distutils-latest.win32.exe - -HTML_FILES = index.html FAQ.html HISTORY.html THANKS.html TESTING.html OLDNEWS.html -FAQ_DEPS = simple.f pytest.py pyforttest.pyf simple_session.dat -README_DEPS = hello.f -UG_FILES = index.html f2py_usersguide.pdf -UG_FILES_DEP = $(shell cd docs/usersguide && ls *.{f,f90,dat,pyf,py}) - -WWW_SRC_FILES = $(SRC_FILES:%=upload/www/$(MAJOR).x/%) -WWW_WEB_FILES = $(HTML_FILES:%=upload/www/%) $(README_DEPS:%=upload/www/%) -WWW_UG_FILES = $(UG_FILES:%=upload/www/usersguide/%) $(UG_FILES_DEP:%=upload/www/usersguide/%) - -TMP_WEB_FILES = $(HTML_FILES:%=upload/tmp/%) $(README_DEPS:%=upload/tmp/%) - -############################################################################## - -all: - @echo "Use 'make install' to install f2py" - @echo "Use 'make generate' to build f2py docs to upload/tmp" -install: - $(PYTHON) setup.py install -test: - cd tests && $(PYTHON) run_all.py - -############################################################################## -# Create F2PY tar-balls -############################################################################## -f2py2e: - test -d f2py2e && (cd f2py2e && cvs -d $(F2PY2E_CVSROOT) -z7 update -Pd && cd -) || cvs -d $(F2PY2E_CVSROOT) checkout f2py2e - -upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz: f2py2e - cd f2py2e && python setup.py sdist -f - mkdir -p upload/tmp/$(MAJOR).x - cp f2py2e/dist/F2PY-$(REV).tar.gz upload/tmp/$(MAJOR).x - ln -sf F2PY-$(REV).tar.gz F2PY-$(MAJOR)-latest.tar.gz - mv F2PY-$(MAJOR)-latest.tar.gz upload/tmp/$(MAJOR).x -upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe: f2py2e - cd f2py2e && python setup.py bdist_wininst - mkdir -p upload/tmp/$(MAJOR).x - cp f2py2e/dist/F2PY-$(REV).win32.exe upload/tmp/$(MAJOR).x - ln -sf F2PY-$(REV).win32.exe F2PY-$(MAJOR)-latest.win32.exe - mv F2PY-$(MAJOR)-latest.win32.exe upload/tmp/$(MAJOR).x -f2py2e_latest: upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe - -############################################################################## -# Create Scipy_distutils tar-balls -############################################################################## - -numpy_distutils: - test -d numpy_distutils && (cd numpy_distutils && cvs -d $(SCIPY_CVSROOT) -z7 update -Pd && cd -) || cvs -d $(SCIPY_CVSROOT) checkout numpy_distutils - -upload/tmp/$(MAJOR).x/numpy_distutils-latest.tar.gz: numpy_distutils - cd numpy_distutils && python setup.py sdist -f - mkdir -p upload/tmp/$(MAJOR).x - cp numpy_distutils/dist/numpy_distutils-$(SCIPY_DISTUTILS_REV).tar.gz upload/tmp/$(MAJOR).x - ln -sf numpy_distutils-$(SCIPY_DISTUTILS_REV).tar.gz numpy_distutils-latest.tar.gz - mv numpy_distutils-latest.tar.gz upload/tmp/$(MAJOR).x -upload/tmp/$(MAJOR).x/numpy_distutils-latest.win32.exe: numpy_distutils - cd numpy_distutils && python setup.py bdist_wininst - mkdir -p upload/tmp/$(MAJOR).x - cp 
numpy_distutils/dist/numpy_distutils-$(SCIPY_DISTUTILS_REV).win32.exe upload/tmp/$(MAJOR).x - ln -sf numpy_distutils-$(SCIPY_DISTUTILS_REV).win32.exe numpy_distutils-latest.win32.exe - mv numpy_distutils-latest.win32.exe upload/tmp/$(MAJOR).x - -numpy_distutils_latest: upload/tmp/$(MAJOR).x/numpy_distutils-latest.tar.gz upload/tmp/$(MAJOR).x/numpy_distutils-latest.win32.exe - -latest: f2py2e_latest numpy_distutils_latest - -############################################################################## -# Upload files. -############################################################################## - -upload/www/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz: upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz - -mkdir -p `dirname $@` - cp -P upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.tar.gz upload/www/$(MAJOR).x - $(UPLOADCMD) upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.tar.gz $(UPLOADDIR)/$(MAJOR).x/ -upload/www/$(MAJOR).x/numpy_distutils-latest.tar.gz: upload/tmp/$(MAJOR).x/numpy_distutils-latest.tar.gz - -mkdir -p `dirname $@` - cp -P upload/tmp/$(MAJOR).x/numpy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.tar.gz upload/www/$(MAJOR).x/ - $(UPLOADCMD) upload/tmp/$(MAJOR).x/numpy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.tar.gz $(UPLOADDIR)/$(MAJOR).x -upload/www/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe: upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe - -mkdir -p `dirname $@` - cp -P upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.win32.exe upload/www/$(MAJOR).x - $(UPLOADCMD) upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.win32.exe $(UPLOADDIR)/$(MAJOR).x/ -upload/www/$(MAJOR).x/numpy_distutils-latest.win32.exe: upload/tmp/$(MAJOR).x/numpy_distutils-latest.win32.exe - -mkdir -p `dirname $@` - cp -P upload/tmp/$(MAJOR).x/numpy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.win32.exe upload/www/$(MAJOR).x - $(UPLOADCMD) upload/tmp/$(MAJOR).x/numpy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.win32.exe $(UPLOADDIR)/$(MAJOR).x/ - -upload/tmp/usersguide/index.html: docs/usersguide/index.txt $(UG_FILES_DEP:%=upload/www/usersguide/%) - -mkdir -p upload/tmp/usersguide - rest2html $< $@ -upload/tmp/usersguide/f2py_usersguide.tex: docs/usersguide/index.txt $(UG_FILES_DEP:%=upload/www/usersguide/%) - -mkdir -p upload/tmp/usersguide - rest2latex $< $@ -upload/tmp/usersguide/f2py_usersguide.pdf: upload/tmp/usersguide/f2py_usersguide.tex - cd `dirname $@` && pdflatex `basename $<` -upload/tmp/usersguide/%.f: docs/usersguide/%.f - -mkdir -p upload/tmp/usersguide - cp $< $@ -upload/tmp/usersguide/%.f90: docs/usersguide/%.f90 - -mkdir -p upload/tmp/usersguide - cp $< $@ -upload/tmp/usersguide/%.dat: docs/usersguide/%.dat - -mkdir -p upload/tmp/usersguide - cp $< $@ -upload/tmp/usersguide/%.pyf: docs/usersguide/%.pyf - -mkdir -p upload/tmp/usersguide - cp $< $@ -upload/tmp/usersguide/%.py: docs/usersguide/%.py - -mkdir -p upload/tmp/usersguide - cp $< $@ -upload/www/usersguide/%: upload/tmp/usersguide/% - -mkdir -p `dirname $@` - cp -P $< $@ - $(UPLOADCMD) $@ $(UPLOADDIR)/usersguide - -upload/tmp/FAQ.html: docs/FAQ.txt $(FAQ_DEPS:%=docs/%) - -mkdir -p upload/tmp - rest2html $< $@ -upload/tmp/index.html: docs/README.txt $(README_DEPS:%=docs/%) - -mkdir -p upload/tmp - rest2html $< $@ -upload/tmp/%.f: docs/%.f - -mkdir -p upload/tmp - cp $< $@ -upload/tmp/%.html: docs/%.txt - -mkdir -p upload/tmp - rest2html $< $@ -upload/www/%: upload/tmp/% - -mkdir -p `dirname $@` - cp -P $< $@ - $(UPLOADCMD) $@ $(UPLOADDIR)/ - -upload_web: $(WWW_WEB_FILES) -upload_ug: $(WWW_UG_FILES) -upload_src: 
$(WWW_SRC_FILES) -upload: upload_src upload_ug upload_web - -generate_web: $(TMP_WEB_FILES) -generate: generate_web - -############################################################################## -# Clean up -############################################################################## -clean: - rm -f {tests/,tests/{f77,f90,mixed}/,docs/,docs/usersguide/,}*.{o,a,so,sl,pyc} - rm -f {tests/,tests/{f77,f90,mixed}/,docs/,docs/usersguide/,}*~ -distclean: clean - rm -f {tests/,src/,}*~ - rm -f tests/*.{f,f90} - rm -rf dist {docs/,docs/usersguide/,}build f2py2e numpy_distutils upload - rm -f MANIFEST f2py?.? f2py - -.PHONY: install test diff --git a/numpy/f2py/NEWS.txt b/numpy/f2py/NEWS.txt deleted file mode 100644 index a4a254405..000000000 --- a/numpy/f2py/NEWS.txt +++ /dev/null @@ -1,2 +0,0 @@ - -Read docs/HISTORY.txt \ No newline at end of file diff --git a/numpy/f2py/README.txt b/numpy/f2py/README.txt deleted file mode 100644 index ebe7e8c88..000000000 --- a/numpy/f2py/README.txt +++ /dev/null @@ -1,5 +0,0 @@ -====================================================================== - F2PY - Fortran to Python Interface Generator -====================================================================== - -Read docs/README.txt diff --git a/numpy/f2py/TODO.txt b/numpy/f2py/TODO.txt deleted file mode 100644 index 093f0119e..000000000 --- a/numpy/f2py/TODO.txt +++ /dev/null @@ -1,67 +0,0 @@ -Determine fixed/free format Fortran 90 dialect from the -contents of Fortran files. See numpy_distutils/command/build_flib.py. - -[DONE] -======================================================================== -Wrapping F90 code as follows: - -subroutine foo -print*,"In foo" -end subroutine foo -subroutine bar(func) - interface aa ! bug: this interface block is ignored - subroutine foo - end subroutine foo - end interface - !external foo - external func - call func(foo) -end subroutine bar -subroutine gun(a) - external a - call a() -end subroutine gun -subroutine fun - call bar(gun) -end subroutine fun - -========================================================================= -Users Guide needs major revision. - -[DONE] -========================================================================= -On Thu, 27 Sep 2001, José Luis Gómez Dans wrote: - -> Hi, -> just one question: does f2py supporte derived types in F90 code? -> Stuff like something%or and things like that. - -Not yet. - -========================================================================= -Date: Tue, 28 Aug 2001 22:23:04 -0700 -From: Patrick LeGresley -To: f2py-users@cens.ioc.ee -Subject: [f2py] Strange initialization of allocatable arrays - -I've noticed an odd behavior when setting an allocatable, multidimensional -array in a module. If the rank of the array is odd, the initialization is -fine. However, if the rank is even only the first element of the array is -set properly. See the attached sample code for example. - -========================================================================= -On Wed, 22 Aug 2001, Patrick LeGresley wrote: - -> I've noticed that if a parameter is defined in terms of another parameter, -> that the parameter is replaced not by a number but by another parameter -> (try the attached subroutine for example). Is there any way to have f2py -> automatically recognize the dependencies and generate a signature file -> without parameter variables ? - -It is certainly possible. In fact, f2py has only a basic support for -PARAMETER statements and it fails in your 'advanced' example to produce a -robust signature file. 
-I am sorry but you have to wait until I'll get back from my travel tour -(somewhere in the middle of September) and get a chance to work on it. - -[DONE] diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py deleted file mode 100644 index 09b0c6e65..000000000 --- a/numpy/f2py/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python - -__all__ = ['run_main','compile','f2py_testing'] - -import os -import sys -import commands - -from info import __doc__ - -import f2py2e -run_main = f2py2e.run_main -main = f2py2e.main -import f2py_testing - -def compile(source, - modulename = 'untitled', - extra_args = '', - verbose = 1, - source_fn = None - ): - ''' Build extension module from processing source with f2py. - Read the source of this function for more information. - ''' - from numpy.distutils.exec_command import exec_command - import tempfile - if source_fn is None: - fname = os.path.join(tempfile.mktemp()+'.f') - else: - fname = source_fn - - f = open(fname,'w') - f.write(source) - f.close() - - args = ' -c -m %s %s %s'%(modulename,fname,extra_args) - c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable,args) - s,o = exec_command(c) - if source_fn is None: - try: os.remove(fname) - except OSError: pass - return s diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py deleted file mode 100644 index 88a39f378..000000000 --- a/numpy/f2py/__version__.py +++ /dev/null @@ -1,8 +0,0 @@ -major = 2 - -try: - from __svn_version__ import version - version_info = (major, version) - version = '%s_%s' % version_info -except ImportError: - version = str(major) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py deleted file mode 100644 index e033e5630..000000000 --- a/numpy/f2py/auxfuncs.py +++ /dev/null @@ -1,664 +0,0 @@ -#!/usr/bin/env python -""" - -Auxiliary functions for f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) LICENSE. - - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/24 19:01:55 $ -Pearu Peterson -""" -__version__ = "$Revision: 1.65 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import pprint -import sys -import time -import types -import os -import cfuncs - - -errmess=sys.stderr.write -#outmess=sys.stdout.write -show=pprint.pprint - -options={} -debugoptions=[] -wrapfuncs = 1 - -def outmess(t): - if options.get('verbose',1): - sys.stdout.write(t) - -def debugcapi(var): - return 'capi' in debugoptions - -def _isstring(var): - return 'typespec' in var and var['typespec']=='character' and (not isexternal(var)) - -def isstring(var): - return _isstring(var) and not isarray(var) - -def ischaracter(var): - return isstring(var) and 'charselector' not in var - -def isstringarray(var): - return isarray(var) and _isstring(var) - -def isarrayofstrings(var): - # leaving out '*' for now so that - # `character*(*) a(m)` and `character a(m,*)` - # are treated differently. Luckily `character**` is illegal. 
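    # (Clarification: per the check below, only arrays whose *last* dimension
    # entry is the literal string '(*)' are counted -- presumably the
    # assumed-size form `character a(m,*)` -- whereas `character*(*) a(m)`,
    # where the '*' is the character length, is not.)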
- return isstringarray(var) and var['dimension'][-1]=='(*)' - -def isarray(var): - return 'dimension' in var and (not isexternal(var)) - -def isscalar(var): - return not (isarray(var) or isstring(var) or isexternal(var)) - -def iscomplex(var): - return isscalar(var) and var.get('typespec') in ['complex','double complex'] - -def islogical(var): - return isscalar(var) and var.get('typespec')=='logical' - -def isinteger(var): - return isscalar(var) and var.get('typespec')=='integer' - -def isreal(var): - return isscalar(var) and var.get('typespec')=='real' - -def get_kind(var): - try: - return var['kindselector']['*'] - except KeyError: - try: - return var['kindselector']['kind'] - except KeyError: - pass - -def islong_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') not in ['integer','logical']: - return 0 - return get_kind(var)=='8' - -def isunsigned_char(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-1' - -def isunsigned_short(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-2' - -def isunsigned(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-4' - -def isunsigned_long_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-8' - -def isdouble(var): - if not isscalar(var): - return 0 - if not var.get('typespec')=='real': - return 0 - return get_kind(var)=='8' - -def islong_double(var): - if not isscalar(var): - return 0 - if not var.get('typespec')=='real': - return 0 - return get_kind(var)=='16' - -def islong_complex(var): - if not iscomplex(var): - return 0 - return get_kind(var)=='32' - -def iscomplexarray(var): - return isarray(var) and var.get('typespec') in ['complex','double complex'] - -def isint1array(var): - return isarray(var) and var.get('typespec')=='integer' \ - and get_kind(var)=='1' - -def isunsigned_chararray(var): - return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-1' - -def isunsigned_shortarray(var): - return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-2' - -def isunsignedarray(var): - return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-4' - -def isunsigned_long_longarray(var): - return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-8' - -def isallocatable(var): - return 'attrspec' in var and 'allocatable' in var['attrspec'] - -def ismutable(var): - return not (not 'dimension' in var or isstring(var)) - -def ismoduleroutine(rout): - return 'modulename' in rout - -def ismodule(rout): - return ('block' in rout and 'module'==rout['block']) - -def isfunction(rout): - return ('block' in rout and 'function'==rout['block']) - -#def isfunction_wrap(rout): -# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout)) - -def isfunction_wrap(rout): - if isintent_c(rout): - return 0 - return wrapfuncs and isfunction(rout) and (not isexternal(rout)) - -def issubroutine(rout): - return ('block' in rout and 'subroutine'==rout['block']) - -def isroutine(rout): - return isfunction(rout) or issubroutine(rout) - -def islogicalfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islogical(rout['vars'][a]) - return 0 - -def islong_longfunction(rout): - if not 
isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islong_long(rout['vars'][a]) - return 0 - -def islong_doublefunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islong_double(rout['vars'][a]) - return 0 - -def iscomplexfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return iscomplex(rout['vars'][a]) - return 0 - -def iscomplexfunction_warn(rout): - if iscomplexfunction(rout): - outmess("""\ - ************************************************************** - Warning: code with a function returning complex value - may not work correctly with your Fortran compiler. - Run the following test before using it in your applications: - $(f2py install dir)/test-site/{b/runme_scalar,e/runme} - When using GNU gcc/g77 compilers, codes should work correctly. - **************************************************************\n""") - return 1 - return 0 - -def isstringfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return isstring(rout['vars'][a]) - return 0 - -def hasexternals(rout): - return 'externals' in rout and rout['externals'] - -def isthreadsafe(rout): - return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements'] - -def hasvariables(rout): - return 'vars' in rout and rout['vars'] - -def isoptional(var): - return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var) - -def isexternal(var): - return ('attrspec' in var and 'external' in var['attrspec']) - -def isrequired(var): - return not isoptional(var) and isintent_nothide(var) - -def isintent_in(var): - if 'intent' not in var: - return 1 - if 'hide' in var['intent']: - return 0 - if 'inplace' in var['intent']: - return 0 - if 'in' in var['intent']: - return 1 - if 'out' in var['intent']: - return 0 - if 'inout' in var['intent']: - return 0 - if 'outin' in var['intent']: - return 0 - return 1 - -def isintent_inout(var): - return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent'] - -def isintent_out(var): - return 'out' in var.get('intent',[]) - -def isintent_hide(var): - return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var))))) - -def isintent_nothide(var): - return not isintent_hide(var) - -def isintent_c(var): - return 'c' in var.get('intent',[]) - -# def isintent_f(var): -# return not isintent_c(var) - -def isintent_cache(var): - return 'cache' in var.get('intent',[]) - -def isintent_copy(var): - return 'copy' in var.get('intent',[]) - -def isintent_overwrite(var): - return 'overwrite' in var.get('intent',[]) - -def isintent_callback(var): - return 'callback' in var.get('intent',[]) - -def isintent_inplace(var): - return 'inplace' in var.get('intent',[]) - -def isintent_aux(var): - return 'aux' in var.get('intent',[]) - -isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT', - isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE', - isintent_cache:'INTENT_CACHE', - isintent_c:'INTENT_C',isoptional:'OPTIONAL', - isintent_inplace:'INTENT_INPLACE' - } - 
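# Illustrative sketch (hypothetical helper, not part of f2py itself): the
# predicates above operate on plain dictionaries of the form produced by
# crackfortran, keyed by 'typespec', 'kindselector', 'intent', 'attrspec',
# 'dimension', etc.
def _example_var_classification():
    # A hypothetical scalar argument: integer*8, intent(in), optional.
    var = {'typespec': 'integer',
           'kindselector': {'*': '8'},
           'intent': ['in'],
           'attrspec': ['optional']}
    assert isscalar(var)          # no 'dimension', not character, not external
    assert islong_long(var)       # integer with kind '*' == '8'
    assert isintent_in(var)       # 'in' listed and nothing hides it
    assert not isintent_out(var)  # no 'out' entry in the intent list
    assert isoptional(var)        # 'optional' in attrspec and not hidden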
-def isprivate(var): - return 'attrspec' in var and 'private' in var['attrspec'] - -def hasinitvalue(var): - return '=' in var - -def hasinitvalueasstring(var): - if not hasinitvalue(var): - return 0 - return var['='][0] in ['"',"'"] - -def hasnote(var): - return 'note' in var - -def hasresultnote(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return hasnote(rout['vars'][a]) - return 0 - -def hascommon(rout): - return 'common' in rout - -def containscommon(rout): - if hascommon(rout): - return 1 - if hasbody(rout): - for b in rout['body']: - if containscommon(b): - return 1 - return 0 - -def containsmodule(block): - if ismodule(block): - return 1 - if not hasbody(block): - return 0 - for b in block['body']: - if containsmodule(b): - return 1 - return 0 - -def hasbody(rout): - return 'body' in rout - -def hascallstatement(rout): - return getcallstatement(rout) is not None - -def istrue(var): - return 1 - -def isfalse(var): - return 0 - -class F2PYError(Exception): - pass - -class throw_error: - def __init__(self,mess): - self.mess = mess - def __call__(self,var): - mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess) - raise F2PYError,mess - -def l_and(*f): - l,l2='lambda v',[] - for i in range(len(f)): - l='%s,f%d=f[%d]'%(l,i,i) - l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l,' and '.join(l2))) - -def l_or(*f): - l,l2='lambda v',[] - for i in range(len(f)): - l='%s,f%d=f[%d]'%(l,i,i) - l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l,' or '.join(l2))) - -def l_not(f): - return eval('lambda v,f=f:not f(v)') - -def isdummyroutine(rout): - try: - return rout['f2pyenhancements']['fortranname']=='' - except KeyError: - return 0 - -def getfortranname(rout): - try: - name = rout['f2pyenhancements']['fortranname'] - if name=='': - raise KeyError - if not name: - errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements'])) - raise KeyError - except KeyError: - name = rout['name'] - return name - -def getmultilineblock(rout,blockname,comment=1,counter=0): - try: - r = rout['f2pyenhancements'].get(blockname) - except KeyError: - return - if not r: return - if counter>0 and type(r) is type(''): - return - if type(r) is type([]): - if counter>=len(r): return - r = r[counter] - if r[:3]=="'''": - if comment: - r = '\t/* start ' + blockname + ' multiline ('+`counter`+') */\n' + r[3:] - else: - r = r[3:] - if r[-3:]=="'''": - if comment: - r = r[:-3] + '\n\t/* end multiline ('+`counter`+')*/' - else: - r = r[:-3] - else: - errmess("%s multiline block should end with `'''`: %s\n" \ - % (blockname,repr(r))) - return r - -def getcallstatement(rout): - return getmultilineblock(rout,'callstatement') - -def getcallprotoargument(rout,cb_map={}): - r = getmultilineblock(rout,'callprotoargument',comment=0) - if r: return r - if hascallstatement(rout): - outmess('warning: callstatement is defined without callprotoargument\n') - return - from capi_maps import getctype - arg_types,arg_types2 = [],[] - if l_and(isstringfunction,l_not(isfunction_wrap))(rout): - arg_types.extend(['char*','size_t']) - for n in rout['args']: - var = rout['vars'][n] - if isintent_callback(var): - continue - if n in cb_map: - ctype = cb_map[n]+'_typedef' - else: - ctype = getctype(var) - if l_and(isintent_c,l_or(isscalar,iscomplex))(var): - pass - elif isstring(var): - pass - #ctype = 'void*' - else: - ctype = ctype+'*' - if isstring(var) or isarrayofstrings(var): - arg_types2.append('size_t') - arg_types.append(ctype) - - 
proto_args = ','.join(arg_types+arg_types2) - if not proto_args: - proto_args = 'void' - #print proto_args - return proto_args - -def getusercode(rout): - return getmultilineblock(rout,'usercode') - -def getusercode1(rout): - return getmultilineblock(rout,'usercode',counter=1) - -def getpymethoddef(rout): - return getmultilineblock(rout,'pymethoddef') - -def getargs(rout): - sortargs,args=[],[] - if 'args' in rout: - args=rout['args'] - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: sortargs=rout['args'] - return args,sortargs - -def getargs2(rout): - sortargs,args=[],rout.get('args',[]) - auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\ - and a not in args] - args = auxvars + args - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: sortargs=auxvars + rout['args'] - return args,sortargs - -def getrestdoc(rout): - if 'f2pymultilines' not in rout: - return None - k = None - if rout['block']=='python module': - k = rout['block'],rout['name'] - return rout['f2pymultilines'].get(k,None) - -def gentitle(name): - l=(80-len(name)-6)/2 - return '/*%s %s %s*/'%(l*'*',name,l*'*') - -def flatlist(l): - if type(l)==types.ListType: - return reduce(lambda x,y,f=flatlist:x+f(y),l,[]) - return [l] - -def stripcomma(s): - if s and s[-1]==',': return s[:-1] - return s - -def replace(str,dict,defaultsep=''): - if type(dict)==types.ListType: - return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),dict) - if type(str)==types.ListType: - return map(lambda s,f=replace,sep=defaultsep,d=dict:f(s,d,sep),str) - for k in 2*dict.keys(): - if k=='separatorsfor': - continue - if 'separatorsfor' in dict and k in dict['separatorsfor']: - sep=dict['separatorsfor'][k] - else: - sep=defaultsep - if type(dict[k])==types.ListType: - str=str.replace('#%s#'%(k),sep.join(flatlist(dict[k]))) - else: - str=str.replace('#%s#'%(k),dict[k]) - return str - -def dictappend(rd,ar): - if type(ar)==types.ListType: - for a in ar: - rd=dictappend(rd,a) - return rd - for k in ar.keys(): - if k[0]=='_': - continue - if k in rd: - if type(rd[k])==types.StringType: - rd[k]=[rd[k]] - if type(rd[k])==types.ListType: - if type(ar[k])==types.ListType: - rd[k]=rd[k]+ar[k] - else: - rd[k].append(ar[k]) - elif type(rd[k])==types.DictType: - if type(ar[k])==types.DictType: - if k=='separatorsfor': - for k1 in ar[k].keys(): - if k1 not in rd[k]: - rd[k][k1]=ar[k][k1] - else: - rd[k]=dictappend(rd[k],ar[k]) - else: - rd[k]=ar[k] - return rd - -def applyrules(rules,dict,var={}): - ret={} - if type(rules)==types.ListType: - for r in rules: - rr=applyrules(r,dict,var) - ret=dictappend(ret,rr) - if '_break' in rr: - break - return ret - if '_check' in rules and (not rules['_check'](var)): - return ret - if 'need' in rules: - res = applyrules({'needs':rules['need']},dict,var) - if 'needs' in res: - cfuncs.append_needs(res['needs']) - - for k in rules.keys(): - if k=='separatorsfor': - ret[k]=rules[k]; continue - if type(rules[k])==types.StringType: - ret[k]=replace(rules[k],dict) - elif type(rules[k])==types.ListType: - ret[k]=[] - for i in rules[k]: - ar=applyrules({k:i},dict,var) - if k in ar: - ret[k].append(ar[k]) - elif k[0]=='_': - continue - elif type(rules[k])==types.DictType: - ret[k]=[] - for k1 in rules[k].keys(): - if type(k1)==types.FunctionType and k1(var): - if type(rules[k][k1])==types.ListType: - 
for i in rules[k][k1]: - if type(i)==types.DictType: - res=applyrules({'supertext':i},dict,var) - if 'supertext' in res: - i=res['supertext'] - else: i='' - ret[k].append(replace(i,dict)) - else: - i=rules[k][k1] - if type(i)==types.DictType: - res=applyrules({'supertext':i},dict) - if 'supertext' in res: - i=res['supertext'] - else: i='' - ret[k].append(replace(i,dict)) - else: - errmess('applyrules: ignoring rule %s.\n'%`rules[k]`) - if type(ret[k])==types.ListType: - if len(ret[k])==1: - ret[k]=ret[k][0] - if ret[k]==[]: - del ret[k] - return ret diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py deleted file mode 100644 index 3dba288c6..000000000 --- a/numpy/f2py/capi_maps.py +++ /dev/null @@ -1,755 +0,0 @@ -#!/usr/bin/env python -""" - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.60 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import copy -import re -import os -from auxfuncs import * -from crackfortran import markoutercomma -import cb_rules - -# Numarray and Numeric users should set this False -using_newcore = True - -depargs=[] -lcb_map={} -lcb2_map={} -# forced casting: mainly caused by the fact that Python or Numeric -# C/APIs do not support the corresponding C types. -c2py_map={'double':'float', - 'float':'float', # forced casting - 'long_double':'float', # forced casting - 'char':'int', # forced casting - 'signed_char':'int', # forced casting - 'unsigned_char':'int', # forced casting - 'short':'int', # forced casting - 'unsigned_short':'int', # forced casting - 'int':'int', # (forced casting) - 'long':'int', - 'long_long':'long', - 'unsigned':'int', # forced casting - 'complex_float':'complex', # forced casting - 'complex_double':'complex', - 'complex_long_double':'complex', # forced casting - 'string':'string', - } -c2capi_map={'double':'PyArray_DOUBLE', - 'float':'PyArray_FLOAT', - 'long_double':'PyArray_DOUBLE', # forced casting - 'char':'PyArray_CHAR', - 'unsigned_char':'PyArray_UBYTE', - 'signed_char':'PyArray_SBYTE', - 'short':'PyArray_SHORT', - 'unsigned_short':'PyArray_USHORT', - 'int':'PyArray_INT', - 'unsigned':'PyArray_UINT', - 'long':'PyArray_LONG', - 'long_long':'PyArray_LONG', # forced casting - 'complex_float':'PyArray_CFLOAT', - 'complex_double':'PyArray_CDOUBLE', - 'complex_long_double':'PyArray_CDOUBLE', # forced casting - 'string':'PyArray_CHAR'} - -#These new maps aren't used anyhere yet, but should be by default -# unless building numeric or numarray extensions. 
-if using_newcore: - c2capi_map={'double':'PyArray_DOUBLE', - 'float':'PyArray_FLOAT', - 'long_double':'PyArray_LONGDOUBLE', - 'char':'PyArray_BYTE', - 'unsigned_char':'PyArray_UBYTE', - 'signed_char':'PyArray_BYTE', - 'short':'PyArray_SHORT', - 'unsigned_short':'PyArray_USHORT', - 'int':'PyArray_INT', - 'unsigned':'PyArray_UINT', - 'long':'PyArray_LONG', - 'unsigned_long':'PyArray_ULONG', - 'long_long':'PyArray_LONGLONG', - 'unsigned_long_long':'Pyarray_ULONGLONG', - 'complex_float':'PyArray_CFLOAT', - 'complex_double':'PyArray_CDOUBLE', - 'complex_long_double':'PyArray_CDOUBLE', - 'string':'PyArray_CHAR', # f2py 2e is not ready for PyArray_STRING (must set itemisize etc) - #'string':'PyArray_STRING' - - } -c2pycode_map={'double':'d', - 'float':'f', - 'long_double':'d', # forced casting - 'char':'1', - 'signed_char':'1', - 'unsigned_char':'b', - 'short':'s', - 'unsigned_short':'w', - 'int':'i', - 'unsigned':'u', - 'long':'l', - 'long_long':'L', - 'complex_float':'F', - 'complex_double':'D', - 'complex_long_double':'D', # forced casting - 'string':'c' - } -if using_newcore: - c2pycode_map={'double':'d', - 'float':'f', - 'long_double':'g', - 'char':'b', - 'unsigned_char':'B', - 'signed_char':'b', - 'short':'h', - 'unsigned_short':'H', - 'int':'i', - 'unsigned':'I', - 'long':'l', - 'unsigned_long':'L', - 'long_long':'q', - 'unsigned_long_long':'Q', - 'complex_float':'F', - 'complex_double':'D', - 'complex_long_double':'G', - 'string':'S'} -c2buildvalue_map={'double':'d', - 'float':'f', - 'char':'b', - 'signed_char':'b', - 'short':'h', - 'int':'i', - 'long':'l', - 'long_long':'L', - 'complex_float':'N', - 'complex_double':'N', - 'complex_long_double':'N', - 'string':'z'} -if using_newcore: - #c2buildvalue_map=??? - pass - -f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'}, - 'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long', - '-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned', - '-8':'unsigned_long_long'}, - 'complex':{'':'complex_float','8':'complex_float', - '16':'complex_double','24':'complex_long_double', - '32':'complex_long_double'}, - 'complexkind':{'':'complex_float','4':'complex_float', - '8':'complex_double','12':'complex_long_double', - '16':'complex_long_double'}, - 'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'}, - 'double complex':{'':'complex_double'}, - 'double precision':{'':'double'}, - 'byte':{'':'char'}, - 'character':{'':'string'} - } - -if os.path.isfile('.f2py_f2cmap'): - # User defined additions to f2cmap_all. - # .f2py_f2cmap must contain a dictionary of dictionaries, only. - # For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. - # This feature is useful for F90/95 users if they use PARAMETERSs - # in type specifications. 
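    # Worked example (informal, assuming the default tables above): a Fortran
    # declaration such as `real*8 x` resolves as
    #     f2cmap_all['real']['8'] -> 'double'
    #     c2capi_map['double']    -> 'PyArray_DOUBLE'   (array type)
    #     c2pycode_map['double']  -> 'd'                (type code)
    #     c2py_map['double']      -> 'float'            (Python-level type)
    # A .f2py_f2cmap file only extends the first step (Fortran type/kind ->
    # C type name); the remaining maps are keyed by the C type name.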
- try: - outmess('Reading .f2py_f2cmap ...\n') - f = open('.f2py_f2cmap','r') - d = eval(f.read(),{},{}) - f.close() - for k,d1 in d.items(): - for k1 in d1.keys(): - d1[k1.lower()] = d1[k1] - d[k.lower()] = d[k] - for k in d.keys(): - if k not in f2cmap_all: - f2cmap_all[k]={} - for k1 in d[k].keys(): - if d[k][k1] in c2py_map: - if k1 in f2cmap_all[k]: - outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k,k1,f2cmap_all[k][k1],d[k][k1])) - f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k,k1,d[k][k1])) - else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k,k1,d[k][k1],d[k][k1],c2py_map.keys())) - outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') - except: - errmess('Failed to apply user defined changes from .f2py_f2cmap. Skipping.\n') -cformat_map={'double':'%g', - 'float':'%g', - 'long_double':'%Lg', - 'char':'%d', - 'signed_char':'%d', - 'unsigned_char':'%hhu', - 'short':'%hd', - 'unsigned_short':'%hu', - 'int':'%d', - 'unsigned':'%u', - 'long':'%ld', - 'unsigned_long':'%lu', - 'long_long':'%ld', - 'complex_float':'(%g,%g)', - 'complex_double':'(%g,%g)', - 'complex_long_double':'(%Lg,%Lg)', - 'string':'%s', - } - -############### Auxiliary functions -def getctype(var): - """ - Determines C type - """ - ctype='void' - if isfunction(var): - if 'result' in var: - a=var['result'] - else: - a=var['name'] - if a in var['vars']: - return getctype(var['vars'][a]) - else: - errmess('getctype: function %s has no return value?!\n'%a) - elif issubroutine(var): - return ctype - elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: - typespec = var['typespec'].lower() - f2cmap=f2cmap_all[typespec] - ctype=f2cmap[''] # default type - if 'kindselector' in var: - if '*' in var['kindselector']: - try: - ctype=f2cmap[var['kindselector']['*']] - except KeyError: - errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'],'*',var['kindselector']['*'])) - elif 'kind' in var['kindselector']: - if typespec+'kind' in f2cmap_all: - f2cmap=f2cmap_all[typespec+'kind'] - try: - ctype=f2cmap[var['kindselector']['kind']] - except KeyError: - if typespec in f2cmap_all: - f2cmap=f2cmap_all[typespec] - try: - ctype=f2cmap[str(var['kindselector']['kind'])] - except KeyError: - errmess('getctype: "%s(kind=%s)" not supported (use .f2py_f2cmap).\n'\ - %(typespec,var['kindselector']['kind'])) - - else: - if not isexternal(var): - errmess('getctype: No C-type found in "%s", assuming void.\n'%var) - return ctype - -def getstrlength(var): - if isstringfunction(var): - if 'result' in var: - a=var['result'] - else: - a=var['name'] - if a in var['vars']: - return getstrlength(var['vars'][a]) - else: - errmess('getstrlength: function %s has no return value?!\n'%a) - if not isstring(var): - errmess('getstrlength: expected a signature of a string but got: %s\n'%(`var`)) - len='1' - if 'charselector' in var: - a=var['charselector'] - if '*' in a: - len=a['*'] - elif 'len' in a: - len=a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)',len) or re.match(r'([*]|[:])',len): - #if len in ['(*)','*','(:)',':']: - if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(`var`)) - len='-1' - return len - -def getarrdims(a,var,verbose=0): - global depargs - ret={} - if isstring(var) and not isarray(var): - ret['dims']=getstrlength(var) - ret['size']=ret['dims'] - ret['rank']='1' - elif isscalar(var): - ret['size']='1' - ret['rank']='0' - ret['dims']='' - elif isarray(var): -# if not 
isintent_c(var): -# var['dimension'].reverse() - dim=copy.copy(var['dimension']) - ret['size']='*'.join(dim) - try: ret['size']=`eval(ret['size'])` - except: pass - ret['dims']=','.join(dim) - ret['rank']=`len(dim)` - ret['rank*[-1]']=`len(dim)*[-1]`[1:-1] - for i in range(len(dim)): # solve dim for dependecies - v=[] - if dim[i] in depargs: v=[dim[i]] - else: - for va in depargs: - if re.match(r'.*?\b%s\b.*'%va,dim[i]): - v.append(va) - for va in v: - if depargs.index(va)>depargs.index(a): - dim[i]='*' - break - ret['setdims'],i='',-1 - for d in dim: - i=i+1 - if d not in ['*',':','(*)','(:)']: - ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'],i,d) - if ret['setdims']: ret['setdims']=ret['setdims'][:-1] - ret['cbsetdims'],i='',-1 - for d in var['dimension']: - i=i+1 - if d not in ['*',':','(*)','(:)']: - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,d) - elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \ - % (d)) - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,0) - elif verbose : - errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(`a`,`d`)) - if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1] -# if not isintent_c(var): -# var['dimension'].reverse() - return ret - -def getpydocsign(a,var): - global lcb_map - if isfunction(var): - if 'result' in var: - af=var['result'] - else: - af=var['name'] - if af in var['vars']: - return getpydocsign(af,var['vars'][af]) - else: - errmess('getctype: function %s has no return value?!\n'%af) - return '','' - sig,sigout=a,a - opt='' - if isintent_in(var): opt='input' - elif isintent_inout(var): opt='in/output' - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4]=='out=': - out_a = k[4:] - break - init='' - ctype=getctype(var) - - if hasinitvalue(var): - init,showinit=getinit(a,var) - init='= %s'%(showinit) - if isscalar(var): - if isintent_inout(var): - sig='%s :%s %s rank-0 array(%s,\'%s\')'%(a,init,opt,c2py_map[ctype], - c2pycode_map[ctype],) - else: - sig='%s :%s %s %s'%(a,init,opt,c2py_map[ctype]) - sigout='%s : %s'%(out_a,c2py_map[ctype]) - elif isstring(var): - if isintent_inout(var): - sig='%s :%s %s rank-0 array(string(len=%s),\'c\')'%(a,init,opt,getstrlength(var)) - else: - sig='%s :%s %s string(len=%s)'%(a,init,opt,getstrlength(var)) - sigout='%s : string(len=%s)'%(out_a,getstrlength(var)) - elif isarray(var): - dim=var['dimension'] - rank=`len(dim)` - sig='%s :%s %s rank-%s array(\'%s\') with bounds (%s)'%(a,init,opt,rank, - c2pycode_map[ctype], - ','.join(dim)) - if a==out_a: - sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\ - %(a,rank,c2pycode_map[ctype],','.join(dim)) - else: - sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - %(out_a,rank,c2pycode_map[ctype],','.join(dim),a) - elif isexternal(var): - ua='' - if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: - ua=lcb2_map[lcb_map[a]]['argname'] - if not ua==a: ua=' => %s'%ua - else: ua='' - sig='%s : call-back function%s'%(a,ua) - sigout=sig - else: - errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a) - return sig,sigout - -def getarrdocsign(a,var): - ctype=getctype(var) - if isstring(var) and (not isarray(var)): - sig='%s : rank-0 array(string(len=%s),\'c\')'%(a,getstrlength(var)) - elif isscalar(var): - sig='%s : rank-0 array(%s,\'%s\')'%(a,c2py_map[ctype], - c2pycode_map[ctype],) - elif isarray(var): - dim=var['dimension'] - 
rank=`len(dim)` - sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a,rank, - c2pycode_map[ctype], - ','.join(dim)) - return sig - -def getinit(a,var): - if isstring(var): init,showinit='""',"''" - else: init,showinit='','' - if hasinitvalue(var): - init=var['='] - showinit=init - if iscomplex(var) or iscomplexarray(var): - ret={} - - try: - v = var["="] - if ',' in v: - ret['init.r'],ret['init.i']=markoutercomma(v[1:-1]).split('@,@') - else: - v = eval(v,{},{}) - ret['init.r'],ret['init.i']=str(v.real),str(v.imag) - except: raise 'sign2map: expected complex number `(r,i)\' but got `%s\' as initial value of %s.'%(init,`a`) - if isarray(var): - init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'],ret['init.i']) - elif isstring(var): - if not init: init,showinit='""',"''" - if init[0]=="'": - init='"%s"'%(init[1:-1].replace('"','\\"')) - if init[0]=='"': showinit="'%s'"%(init[1:-1]) - return init,showinit - -def sign2map(a,var): - """ - varname,ctype,atype - init,init.r,init.i,pytype - vardebuginfo,vardebugshowvalue,varshowvalue - varrfromat - intent - """ - global lcb_map,cb_map - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4]=='out=': - out_a = k[4:] - break - ret={'varname':a,'outvarname':out_a} - ret['ctype']=getctype(var) - intent_flags = [] - for f,s in isintent_dict.items(): - if f(var): intent_flags.append('F2PY_%s'%s) - if intent_flags: - #XXX: Evaluate intent_flags here. - ret['intent'] = '|'.join(intent_flags) - else: - ret['intent'] = 'F2PY_INTENT_IN' - if isarray(var): ret['varrformat']='N' - elif ret['ctype'] in c2buildvalue_map: - ret['varrformat']=c2buildvalue_map[ret['ctype']] - else: ret['varrformat']='O' - ret['init'],ret['showinit']=getinit(a,var) - if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'],ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') - if isexternal(var): - ret['cbnamekey']=a - if a in lcb_map: - ret['cbname']=lcb_map[a] - ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs'] - ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs'] - ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr'] - ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr'] - else: - ret['cbname']=a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a,lcb_map.keys())) - if isstring(var): - ret['length']=getstrlength(var) - if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - dim=copy.copy(var['dimension']) - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - # Debug info - if debugcapi(var): - il=[isintent_in,'input',isintent_out,'output', - isintent_inout,'inoutput',isrequired,'required', - isoptional,'optional',isintent_hide,'hidden', - iscomplex,'complex scalar', - l_and(isscalar,l_not(iscomplex)),'scalar', - isstring,'string',isarray,'array', - iscomplexarray,'complex array',isstringarray,'string array', - iscomplexfunction,'complex function', - l_and(isfunction,l_not(iscomplexfunction)),'function', - isexternal,'callback', - isintent_callback,'callback', - isintent_aux,'auxiliary', - #ismutable,'mutable',l_not(ismutable),'immutable', - ] - rl=[] - for i in range(0,len(il),2): - if il[i](var): rl.append(il[i+1]) - if isstring(var): - rl.append('slen(%s)=%s'%(a,ret['length'])) - if isarray(var): -# if not isintent_c(var): -# var['dimension'].reverse() - ddim=','.join(map(lambda x,y:'%s|%s'%(x,y),var['dimension'],dim)) - rl.append('dims(%s)'%ddim) -# if not isintent_c(var): -# var['dimension'].reverse() - if isexternal(var): - 
ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a,ret['cbname'],','.join(rl)) - else: - ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'],a,ret['showinit'],','.join(rl)) - if isscalar(var): - if ret['ctype'] in cformat_map: - ret['vardebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']]) - if isstring(var): - ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - if isexternal(var): - ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a) - if ret['ctype'] in cformat_map: - ret['varshowvalue']='#name#:%s=%s'%(a,cformat_map[ret['ctype']]) - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isstring(var): - ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) - if hasnote(var): - ret['note']=var['note'] - return ret - -def routsign2map(rout): - """ - name,NAME,begintitle,endtitle - rname,ctype,rformat - routdebugshowvalue - """ - global lcb_map - name = rout['name'] - fname = getfortranname(rout) - ret={'name':name, - 'texname':name.replace('_','\\_'), - 'name_lower':name.lower(), - 'NAME':name.upper(), - 'begintitle':gentitle(name), - 'endtitle':gentitle('end of %s'%name), - 'fortranname':fname, - 'FORTRANNAME':fname.upper(), - 'callstatement':getcallstatement(rout) or '', - 'usercode':getusercode(rout) or '', - 'usercode1':getusercode1(rout) or '', - } - if '_' in fname: - ret['F_FUNC'] = 'F_FUNC_US' - else: - ret['F_FUNC'] = 'F_FUNC' - if '_' in name: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' - else: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' - lcb_map={} - if 'use' in rout: - for u in rout['use'].keys(): - if u in cb_rules.cb_map: - for un in cb_rules.cb_map[u]: - ln=un[0] - if 'map' in rout['use'][u]: - for k in rout['use'][u]['map'].keys(): - if rout['use'][u]['map'][k]==un[0]: ln=k;break - lcb_map[ln]=un[1] - #else: - # errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u)) - elif 'externals' in rout and rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'],`rout['externals']`)) - ret['callprotoargument'] = getcallprotoargument(rout,lcb_map) or '' - if isfunction(rout): - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - ret['rname']=a - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout) - ret['ctype']=getctype(rout['vars'][a]) - if hasresultnote(rout): - ret['resultnote']=rout['vars'][a]['note'] - rout['vars'][a]['note']=['See elsewhere.'] - if ret['ctype'] in c2buildvalue_map: - ret['rformat']=c2buildvalue_map[ret['ctype']] - else: - ret['rformat']='O' - errmess('routsign2map: no c2buildvalue key for type %s\n'%(`ret['ctype']`)) - if debugcapi(rout): - if ret['ctype'] in cformat_map: - ret['routdebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - if isstringfunction(rout): - ret['rlength']=getstrlength(rout['vars'][a]) - if ret['rlength']=='-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(`rout['name']`)) - ret['rlength']='10' - if hasnote(rout): - ret['note']=rout['note'] - rout['note']=['See elsewhere.'] - return ret - -def modsign2map(m): - """ - modulename - """ - if ismodule(m): - ret={'f90modulename':m['name'], - 'F90MODULENAME':m['name'].upper(), - 'texf90modulename':m['name'].replace('_','\\_')} - else: - ret={'modulename':m['name'], - 
'MODULENAME':m['name'].upper(), - 'texmodulename':m['name'].replace('_','\\_')} - ret['restdoc'] = getrestdoc(m) or [] - if hasnote(m): - ret['note']=m['note'] - #m['note']=['See elsewhere.'] - ret['usercode'] = getusercode(m) or '' - ret['usercode1'] = getusercode1(m) or '' - if m['body']: - ret['interface_usercode'] = getusercode(m['body'][0]) or '' - else: - ret['interface_usercode'] = '' - ret['pymethoddef'] = getpymethoddef(m) or '' - return ret - -def cb_sign2map(a,var): - ret={'varname':a} - ret['ctype']=getctype(var) - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) - if hasnote(var): - ret['note']=var['note'] - var['note']=['See elsewhere.'] - return ret - -def cb_routsign2map(rout,um): - """ - name,begintitle,endtitle,argname - ctype,rctype,maxnofargs,nofoptargs,returncptr - """ - ret={'name':'cb_%s_in_%s'%(rout['name'],um), - 'returncptr':''} - if isintent_callback(rout): - if '_' in rout['name']: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) - ret['static'] = 'extern' - else: - ret['callbackname'] = ret['name'] - ret['static'] = 'static' - ret['argname']=rout['name'] - ret['begintitle']=gentitle(ret['name']) - ret['endtitle']=gentitle('end of %s'%ret['name']) - ret['ctype']=getctype(rout) - ret['rctype']='void' - if ret['ctype']=='string': ret['rctype']='void' - else: - ret['rctype']=ret['ctype'] - if ret['rctype']!='void': - if iscomplexfunction(rout): - ret['returncptr'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -return_value= -#endif -""" - else: - ret['returncptr'] = 'return_value=' - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['strlength']=getstrlength(rout) - if isfunction(rout): - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if hasnote(rout['vars'][a]): - ret['note']=rout['vars'][a]['note'] - rout['vars'][a]['note']=['See elsewhere.'] - ret['rname']=a - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout) - if iscomplexfunction(rout): - ret['rctype']=""" -#ifdef F2PY_CB_RETURNCOMPLEX -#ctype# -#else -void -#endif -""" - else: - if hasnote(rout): - ret['note']=rout['note'] - rout['note']=['See elsewhere.'] - nofargs=0 - nofoptargs=0 - if 'args' in rout and 'vars' in rout: - for a in rout['args']: - var=rout['vars'][a] - if l_or(isintent_in,isintent_inout)(var): - nofargs=nofargs+1 - if isoptional(var): - nofoptargs=nofoptargs+1 - ret['maxnofargs']=`nofargs` - ret['nofoptargs']=`nofoptargs` - if hasnote(rout) and isfunction(rout) and 'result' in rout: - ret['routnote']=rout['note'] - rout['note']=['See elsewhere.'] - return ret - -def common_sign2map(a,var): # obsolute - ret={'varname':a} - ret['ctype']=getctype(var) - if isstringarray(var): - ret['ctype']='char' - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - elif isstring(var): - ret['size']=getstrlength(var) - ret['rank']='1' - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) - if hasnote(var): - ret['note']=var['note'] - var['note']=['See elsewhere.'] - ret['arrdocstr']=getarrdocsign(a,var) # for strings 
this returns 0-rank but actually is 1-rank - return ret diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py deleted file mode 100644 index 8f6ac101a..000000000 --- a/numpy/f2py/cb_rules.py +++ /dev/null @@ -1,541 +0,0 @@ -#!/usr/bin/env python -""" - -Build call-back mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/20 11:27:58 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.53 $"[10:-1] - -import __version__ -f2py_version = __version__.version - - -import pprint -import sys -import time -import types -import copy -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import capi_maps -#from capi_maps import * -import cfuncs - -################## Rules for callback function ############## - -cb_routine_rules={ - 'cbtypedefs':'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', - 'body':""" -#begintitle# -PyObject *#name#_capi = NULL;/*was Py_None*/ -PyTupleObject *#name#_args_capi = NULL; -int #name#_nofargs = 0; -jmp_buf #name#_jmpbuf; -/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ -#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { -\tPyTupleObject *capi_arglist = #name#_args_capi; -\tPyObject *capi_return = NULL; -\tPyObject *capi_tmp = NULL; -\tint capi_j,capi_i = 0; -\tint capi_longjmp_ok = 1; -#decl# -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_clock(); -#endif -\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); -\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); -\tif (#name#_capi==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); -\t} -\tif (#name#_capi==NULL) { -\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); -\t\tgoto capi_fail; -\t} -\tif (PyCObject_Check(#name#_capi)) { -\t#name#_typedef #name#_cptr; -\t#name#_cptr = PyCObject_AsVoidPtr(#name#_capi); -\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); -\t#return# -\t} -\tif (capi_arglist==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); -\t\tif (capi_tmp) { -\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); -\t\t\tif (capi_arglist==NULL) { -\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t} else { -\t\t\tPyErr_Clear(); -\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); -\t\t} -\t} -\tif (capi_arglist == NULL) { -\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); -\t\tgoto capi_fail; -\t} -#setdims# -#pyobjfrom# -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_call_clock(); -#endif -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_call_clock(); -#endif -\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); -\tif (capi_return == NULL) { -\t\tfprintf(stderr,\"capi_return is NULL\\n\"); -\t\tgoto capi_fail; -\t} -\tif (capi_return == Py_None) { 
-\t\tPy_DECREF(capi_return); -\t\tcapi_return = Py_BuildValue(\"()\"); -\t} -\telse if (!PyTuple_Check(capi_return)) { -\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); -\t} -\tcapi_j = PyTuple_Size(capi_return); -\tcapi_i = 0; -#frompyobj# -\tCFUNCSMESS(\"cb:#name#:successful\\n\"); -\tPy_DECREF(capi_return); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_clock(); -#endif -\tgoto capi_return_pt; -capi_fail: -\tfprintf(stderr,\"Call-back #name# failed.\\n\"); -\tPy_XDECREF(capi_return); -\tif (capi_longjmp_ok) -\t\tlongjmp(#name#_jmpbuf,-1); -capi_return_pt: -\t; -#return# -} -#endtitle# -""", - 'need':['setjmp.h','CFUNCSMESS'], - 'maxnofargs':'#maxnofargs#', - 'nofoptargs':'#nofoptargs#', - 'docstr':"""\ -\tdef #argname#(#docsignature#): return #docreturn#\\n\\ -#docstrsigns#""", - 'latexdocstr':""" -{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} -#routnote# - -#latexdocstrsigns#""", - 'docstrshort':'def #argname#(#docsignature#): return #docreturn#' - } -cb_rout_rules=[ - {# Init - 'separatorsfor':{'decl':'\n', - 'args':',','optargs':'','pyobjfrom':'\n','freemem':'\n', - 'args_td':',','optargs_td':'', - 'args_nm':',','optargs_nm':'', - 'frompyobj':'\n','setdims':'\n', - 'docstrsigns':'\\n"\n"', - 'latexdocstrsigns':'\n', - 'latexdocstrreq':'\n','latexdocstropt':'\n', - 'latexdocstrout':'\n','latexdocstrcbs':'\n', - }, - 'decl':'/*decl*/','pyobjfrom':'/*pyobjfrom*/','frompyobj':'/*frompyobj*/', - 'args':[],'optargs':'','return':'','strarglens':'','freemem':'/*freemem*/', - 'args_td':[],'optargs_td':'','strarglens_td':'', - 'args_nm':[],'optargs_nm':'','strarglens_nm':'', - 'noargs':'', - 'setdims':'/*setdims*/', - 'docstrsigns':'','latexdocstrsigns':'', - 'docstrreq':'\tRequired arguments:', - 'docstropt':'\tOptional arguments:', - 'docstrout':'\tReturn objects:', - 'docstrcbs':'\tCall-back functions:', - 'docreturn':'','docsign':'','docsignopt':'', - 'latexdocstrreq':'\\noindent Required arguments:', - 'latexdocstropt':'\\noindent Optional arguments:', - 'latexdocstrout':'\\noindent Return objects:', - 'latexdocstrcbs':'\\noindent Call-back functions:', - 'routnote':{hasnote:'--- #note#',l_not(hasnote):''}, - },{ # Function - 'decl':'\t#ctype# return_value;', - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', - {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} - ], - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},'GETSCALARFROMPYTUPLE'], - 'return':'\treturn return_value;', - '_check':l_and(isfunction,l_not(isstringfunction),l_not(iscomplexfunction)) - }, - {# String function - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, - 'args':'#ctype# return_value,int return_value_len', - 'args_nm':'return_value,&return_value_len', - 'args_td':'#ctype# ,int', - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", - {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} - ], - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'}, - 'string.h','GETSTRFROMPYTUPLE'], - 'return':'return;', - '_check':isstringfunction - }, - {# Complex function - 'optargs':""" -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# *return_value -#endif -""", - 'optargs_nm':""" 
-#ifndef F2PY_CB_RETURNCOMPLEX -return_value -#endif -""", - 'optargs_td':""" -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# * -#endif -""", - 'decl':""" -#ifdef F2PY_CB_RETURNCOMPLEX -\t#ctype# return_value; -#endif -""", - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, - """\ -\tif (capi_j>capi_i) -#ifdef F2PY_CB_RETURNCOMPLEX -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#else -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#endif -""", - {debugcapi:""" -#ifdef F2PY_CB_RETURNCOMPLEX -\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); -#else -\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); -#endif - -"""} - ], - 'return':""" -#ifdef F2PY_CB_RETURNCOMPLEX -\treturn return_value; -#else -\treturn; -#endif -""", - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'}, - 'string.h','GETSCALARFROMPYTUPLE','#ctype#'], - '_check':iscomplexfunction - }, - {'docstrout':'\t\t#pydocsignout#', - 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasnote:'--- #note#'}], - 'docreturn':'#rname#,', - '_check':isfunction}, - {'_check':issubroutine,'return':'return;'} - ] - -cb_arg_rules=[ - { # Doc - 'docstropt':{l_and(isoptional,isintent_nothide):'\t\t#pydocsign#'}, - 'docstrreq':{l_and(isrequired,isintent_nothide):'\t\t#pydocsign#'}, - 'docstrout':{isintent_out:'\t\t#pydocsignout#'}, - 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote,isintent_hide):'--- #note#', - l_and(hasnote,isintent_nothide):'--- See above.'}]}, - 'docsign':{l_and(isrequired,isintent_nothide):'#varname#,'}, - 'docsignopt':{l_and(isoptional,isintent_nothide):'#varname#,'}, - 'depend':'' - }, - { - 'args':{ - l_and (isscalar,isintent_c):'#ctype# #varname#', - l_and (isscalar,l_not(isintent_c)):'#ctype# *#varname#_cb_capi', - isarray:'#ctype# *#varname#', - isstring:'#ctype# #varname#' - }, - 'args_nm':{ - l_and (isscalar,isintent_c):'#varname#', - l_and (isscalar,l_not(isintent_c)):'#varname#_cb_capi', - isarray:'#varname#', - isstring:'#varname#' - }, - 'args_td':{ - l_and (isscalar,isintent_c):'#ctype#', - l_and (isscalar,l_not(isintent_c)):'#ctype# *', - isarray:'#ctype# *', - isstring:'#ctype#' - }, - 'strarglens':{isstring:',int #varname#_cb_len'}, # untested with multiple args - 'strarglens_td':{isstring:',int'}, # untested with multiple args - 'strarglens_nm':{isstring:',#varname#_cb_len'}, # untested with multiple args - }, - { # Scalars - 'decl':{l_not(isintent_c):'\t#ctype# #varname#=(*#varname#_cb_capi);'}, - 'error': {l_and(isintent_c,isintent_out, - throw_error('intent(c,out) is forbidden for callback scalar arguments')):\ - ''}, - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, - {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, - 
{l_and(debugcapi,l_and(l_not(iscomplex),isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname#);'}, - {l_and(debugcapi,l_and(l_not(iscomplex),l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname#_cb_capi);'}, - {l_and(debugcapi,l_and(iscomplex,isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname#).r,(#varname#).i);'}, - {l_and(debugcapi,l_and(iscomplex,l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname#_cb_capi).r,(*#varname#_cb_capi).i);'}, - ], - 'need':[{isintent_out:['#ctype#_from_pyobj','GETSCALARFROMPYTUPLE']}, - {debugcapi:'CFUNCSMESS'}], - '_check':isscalar - },{ - 'pyobjfrom':[{isintent_in:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname#))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname#_cb_capi))) -\t\t\tgoto capi_fail;"""}], - 'need':[{isintent_in:'pyobj_from_#ctype#1'}, - {isintent_inout:'pyarr_from_p_#ctype#1'}, - {iscomplex:'#ctype#'}], - '_check':l_and(isscalar,isintent_nothide), - '_optional':'' - },{# String - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname#,#varname#_cb_len);""", - {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname#,#varname#_cb_len);'}, - ], - 'need':['#ctype#','GETSTRFROMPYTUPLE', - {debugcapi:'CFUNCSMESS'},'string.h'], - '_check':l_and(isstring,isintent_out) - },{ - 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname#,#varname#_cb_len);'}, - {isintent_in:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname#))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout:"""\ -\tif (#name#_nofargs>capi_i) { -\t\tint #varname#_cb_dims[] = {#varname#_cb_len}; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname#,#varname#_cb_dims))) -\t\t\tgoto capi_fail; -\t}"""}], - 'need':[{isintent_in:'pyobj_from_#ctype#1'}, - {isintent_inout:'pyarr_from_p_#ctype#1'}], - '_check':l_and(isstring,isintent_nothide), - '_optional':'' - }, -# Array ... - { - 'decl':'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - 'setdims':'\t#cbsetdims#;', - '_check':isarray, - '_depend':'' - }, - { - 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, - {isintent_c:"""\ -\tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname#_Dims,#atype#,NULL,(char*)#varname#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ -""", - l_not(isintent_c):"""\ -\tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname#_Dims,#atype#,NULL,(char*)#varname#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? 
*/ -""", - }, - """ -\t\tif (tmp_arr==NULL) -\t\t\tgoto capi_fail; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) -\t\t\tgoto capi_fail; -}"""], - '_check':l_and(isarray,isintent_nothide,l_or(isintent_in,isintent_inout)), - '_optional':'', - },{ - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, - """\tif (capi_j>capi_i) { -\t\tPyArrayObject *rv_cb_arr = NULL; -\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; -\t\trv_cb_arr = array_from_pyobj(#atype#,#varname#_Dims,#rank#,F2PY_INTENT_IN""", - {isintent_c:'|F2PY_INTENT_C'}, - """,capi_tmp); -\t\tif (rv_cb_arr == NULL) { -\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tMEMCOPY(#varname#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr)); -\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { -\t\t\tPy_DECREF(rv_cb_arr); -\t\t} -\t}""", - {debugcapi:'\tfprintf(stderr,"<-.\\n");'}, - ], - 'need':['MEMCOPY',{iscomplexarray:'#ctype#'}], - '_check':l_and(isarray,isintent_out) - },{ - 'docreturn':'#varname#,', - '_check':isintent_out - } - ] - -################## Build call-back module ############# -cb_map={} -def buildcallbacks(m): - global cb_map - cb_map[m['name']]=[] - for bi in m['body']: - if bi['block']=='interface': - for b in bi['body']: - if b: - buildcallback(b,m['name']) - else: - errmess('warning: empty body for %s\n' % (m['name'])) - -def buildcallback(rout,um): - global cb_map - outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'],um)) - args,depargs=getargs(rout) - capi_maps.depargs=depargs - var=rout['vars'] - vrd=capi_maps.cb_routsign2map(rout,um) - rd=dictappend({},vrd) - cb_map[um].append([rout['name'],rd['name']]) - for r in cb_rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r,vrd,rout) - rd=dictappend(rd,ar) - savevrd={} - for a in args: - vrd=capi_maps.cb_sign2map(a,var[a]) - savevrd[a]=vrd - for r in cb_arg_rules: - if '_depend' in r: - continue - if '_optional' in r and isoptional(var[a]): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - for a in args: - vrd=savevrd[a] - for r in cb_arg_rules: - if '_depend' in r: - continue - if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - for a in depargs: - vrd=savevrd[a] - for r in cb_arg_rules: - if '_depend' not in r: - continue - if '_optional' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - if 'args' in rd and 'optargs' in rd: - if type(rd['optargs'])==type([]): - rd['optargs']=rd['optargs']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_nm']=rd['optargs_nm']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_td']=rd['optargs_td']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - if type(rd['docreturn'])==types.ListType: - rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']})) - optargs=stripcomma(replace('#docsignopt#', - {'docsignopt':rd['docsignopt']} - )) - if optargs=='': - rd['docsignature']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']})) - else: - rd['docsignature']=replace('#docsign#[#docsignopt#]', - 
{'docsign':rd['docsign'], - 'docsignopt':optargs, - }) - rd['latexdocsignature']=rd['docsignature'].replace('_','\\_') - rd['latexdocsignature']=rd['latexdocsignature'].replace(',',', ') - rd['docstrsigns']=[] - rd['latexdocstrsigns']=[] - for k in ['docstrreq','docstropt','docstrout','docstrcbs']: - if k in rd and type(rd[k])==types.ListType: - rd['docstrsigns']=rd['docstrsigns']+rd[k] - k='latex'+k - if k in rd and type(rd[k])==types.ListType: - rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ - ['\\begin{description}']+rd[k][1:]+\ - ['\\end{description}'] - if 'args' not in rd: - rd['args']='' - rd['args_td']='' - rd['args_nm']='' - if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): - rd['noargs'] = 'void' - - ar=applyrules(cb_routine_rules,rd) - cfuncs.callbacks[rd['name']]=ar['body'] - if type(ar['need'])==types.StringType: - ar['need']=[ar['need']] - - if 'need' in rd: - for t in cfuncs.typedefs.keys(): - if t in rd['need']: - ar['need'].append(t) - - cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs'] - ar['need'].append(rd['name']+'_typedef') - cfuncs.needs[rd['name']]=ar['need'] - - capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'], - 'nofoptargs':ar['nofoptargs'], - 'docstr':ar['docstr'], - 'latexdocstr':ar['latexdocstr'], - 'argname':rd['argname'] - } - outmess('\t %s\n'%(ar['docstrshort'])) - #print ar['body'] - return -################## Build call-back function ############# diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py deleted file mode 100644 index 8c6275ae2..000000000 --- a/numpy/f2py/cfuncs.py +++ /dev/null @@ -1,1156 +0,0 @@ -#!/usr/bin/env python -""" - -C declarations, CPP macros, and C functions for f2py2e. -Only required declarations/macros/functions will be used. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/05/06 11:42:34 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.75 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import types,sys,copy,os -errmess=sys.stderr.write - -##################### Definitions ################## - -outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[], - 'userincludes':[], - 'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[], - 'commonhooks':[]} -needs={} -includes0={'includes0':'/*need_includes0*/'} -includes={'includes':'/*need_includes*/'} -userincludes={'userincludes':'/*need_userincludes*/'} -typedefs={'typedefs':'/*need_typedefs*/'} -typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'} -cppmacros={'cppmacros':'/*need_cppmacros*/'} -cfuncs={'cfuncs':'/*need_cfuncs*/'} -callbacks={'callbacks':'/*need_callbacks*/'} -f90modhooks={'f90modhooks':'/*need_f90modhooks*/', - 'initf90modhooksstatic':'/*initf90modhooksstatic*/', - 'initf90modhooksdynamic':'/*initf90modhooksdynamic*/', - } -commonhooks={'commonhooks':'/*need_commonhooks*/', - 'initcommonhooks':'/*need_initcommonhooks*/', - } - -############ Includes ################### - -includes0['math.h']='#include ' -includes0['string.h']='#include ' -includes0['setjmp.h']='#include ' - -includes['Python.h']='#include "Python.h"' -needs['arrayobject.h']=['Python.h'] -includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "arrayobject.h"''' - -includes['arrayobject.h']='#include "fortranobject.h"' - -############# Type definitions ############### - -typedefs['unsigned_char']='typedef unsigned char unsigned_char;' -typedefs['unsigned_short']='typedef unsigned short unsigned_short;' -typedefs['unsigned_long']='typedef unsigned long unsigned_long;' -typedefs['signed_char']='typedef signed char signed_char;' -typedefs['long_long']="""\ -#ifdef _WIN32 -typedef __int64 long_long; -#else -typedef long long long_long; -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['insinged_long_long']="""\ -#ifdef _WIN32 -typedef __uint64 long_long; -#else -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['long_double']="""\ -#ifndef _LONG_DOUBLE -typedef long double long_double; -#endif -""" -typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float']='typedef struct {float r,i;} complex_float;' -typedefs['complex_double']='typedef struct {double r,i;} complex_double;' -typedefs['string']="""typedef char * string;""" - - -############### CPP macros #################### -cppmacros['CFUNCSMESS']="""\ -#ifdef DEBUGCFUNCS -#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); -#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); -#else -#define CFUNCSMESS(mess) -#define CFUNCSMESSPY(mess,obj) -#endif -""" -cppmacros['F_FUNC']="""\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F -#else -#define F_FUNC(f,F) _##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F##_ -#else -#define F_FUNC(f,F) _##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) -#else 
-#define F_FUNC_US(f,F) F_FUNC(f,F) -#endif -""" -cppmacros['F_WRAPPEDFUNC']="""\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) -#else -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) -#endif -""" -cppmacros['F_MODFUNC']="""\ -#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f -#else -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f -#else -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) f ## .in. ## m -#else -#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ -#endif -#endif -/* -#if defined(UPPERCASE_FORTRAN) -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) -#else -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) -#endif -*/ - -#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) -""" -cppmacros['SWAPUNSAFE']="""\ -#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) -""" -cppmacros['SWAP']="""\ -#define SWAP(a,b,t) {\\ -\tt *c;\\ -\tc = a;\\ -\ta = b;\\ -\tb = c;} -""" -#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)' -cppmacros['PRINTPYOBJERR']="""\ -#define PRINTPYOBJERR(obj)\\ -\tfprintf(stderr,\"#modulename#.error is related to \");\\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); -""" -cppmacros['MINMAX']="""\ -#ifndef MAX -#define MAX(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef MIN -#define MIN(a,b) ((a < b) ? 
(a) : (b)) -#endif -""" -cppmacros['len..']="""\ -#define rank(var) var ## _Rank -#define shape(var,dim) var ## _Dims[dim] -#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd) -#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim]) -#define fshape(var,dim) shape(var,rank(var)-dim-1) -#define len(var) shape(var,0) -#define flen(var) fshape(var,0) -#define size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) -/* #define index(i) capi_i ## i */ -#define slen(var) capi_ ## var ## _len -""" - -cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))' -cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))' -needs['pyobj_from_int1']=['signed_char'] -cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))' -cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))' -needs['pyobj_from_long_long1']=['long_long'] -cppmacros['pyobj_from_long_long1']="""\ -#ifdef HAVE_LONG_LONG -#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) -#else -#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. -#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) -#endif -""" -needs['pyobj_from_long_double1']=['long_double'] -cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' -cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' -cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' -needs['pyobj_from_complex_long_double1']=['complex_long_double'] -cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_double1']=['complex_double'] -cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_float1']=['complex_float'] -cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_string1']=['string'] -cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))' -needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR'] -cppmacros['TRYPYARRAYTEMPLATE']="""\ -/* New SciPy */ -#define TRYPYARRAYTEMPLATECHAR case PyArray_STRING: *(char *)(arr->data)=*v; break; -#define TRYPYARRAYTEMPLATELONG case PyArray_LONG: *(long *)(arr->data)=*v; break; -#define TRYPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break; - -#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\ - switch (arr->descr->type_num) {\\ - case PyArray_DOUBLE: *(double *)(arr->data)=*v; break;\\ - case PyArray_INT: *(int *)(arr->data)=*v; break;\\ - case PyArray_LONG: *(long *)(arr->data)=*v; break;\\ - case PyArray_FLOAT: *(float *)(arr->data)=*v; break;\\ - case PyArray_CDOUBLE: *(double *)(arr->data)=*v; break;\\ - case PyArray_CFLOAT: *(float *)(arr->data)=*v; break;\\ - case PyArray_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\ - case PyArray_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\ - case PyArray_BYTE: *(signed char *)(arr->data)=*v; break;\\ - case PyArray_SHORT: 
*(short *)(arr->data)=*v; break;\\ - case PyArray_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\ - case PyArray_UINT: *(npy_uint *)(arr->data)=*v; break;\\ - case PyArray_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\ - case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\ - case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\ - case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ - case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ - case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\ - default: return -2;\\ - };\\ - return 1 -""" - -needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR'] -cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\ -#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (arr->descr->type==typecode) {\\ - *(ctype *)(arr->data)=(*v).r;\\ - *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\ - return 1;\\ - }\\ - switch (arr->descr->type_num) {\\ - case PyArray_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\ - case PyArray_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\ - case PyArray_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\ - case PyArray_LONG: *(long *)(arr->data)=(*v).r; break;\\ - case PyArray_FLOAT: *(float *)(arr->data)=(*v).r; break;\\ - case PyArray_INT: *(int *)(arr->data)=(*v).r; break;\\ - case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\ - case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\ - case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\ - case PyArray_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0)); break;\\ - case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\ - case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\ - case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\ - case PyArray_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\ - case PyArray_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\ - case PyArray_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\ - case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\ - case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\ - case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\ - case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\ - case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\ - default: return -2;\\ - };\\ - return -1; -""" -## cppmacros['NUMFROMARROBJ']="""\ -## #define NUMFROMARROBJ(typenum,ctype) \\ -## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -## \tif (arr) {\\ -## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\ -## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} else {\\ -## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\ -## \t\t}\\ -## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -## \t\treturn 
1;\\ -## \t} -## """ -## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ -## cppmacros['CNUMFROMARROBJ']="""\ -## #define CNUMFROMARROBJ(typenum,ctype) \\ -## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -## \tif (arr) {\\ -## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\ -## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} else {\\ -## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\ -## \t\t}\\ -## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -## \t\treturn 1;\\ -## \t} -## """ - - -needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR'] -cppmacros['GETSTRFROMPYTUPLE']="""\ -#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ -\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ -\t\tif (rv_cb_str == NULL)\\ -\t\t\tgoto capi_fail;\\ -\t\tif (PyString_Check(rv_cb_str)) {\\ -\t\t\tstr[len-1]='\\0';\\ -\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ -\t\t} else {\\ -\t\t\tPRINTPYOBJERR(rv_cb_str);\\ -\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\ -\t\t\tgoto capi_fail;\\ -\t\t}\\ -\t} -""" -cppmacros['GETSCALARFROMPYTUPLE']="""\ -#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ -\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ -\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ -\t\t\tgoto capi_fail;\\ -\t} -""" - -cppmacros['FAILNULL']="""\\ -#define FAILNULL(p) do { \\ - if ((p) == NULL) { \\ - PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ - goto capi_fail; \\ - } \\ -} while (0) -""" -needs['MEMCOPY']=['string.h', 'FAILNULL'] -cppmacros['MEMCOPY']="""\ -#define MEMCOPY(to,from,n)\\ - do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) -""" -cppmacros['STRINGMALLOC']="""\ -#define STRINGMALLOC(str,len)\\ -\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ -\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ -\t\tgoto capi_fail;\\ -\t} else {\\ -\t\t(str)[len] = '\\0';\\ -\t} -""" -cppmacros['STRINGFREE']="""\ -#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) -""" -needs['STRINGCOPYN']=['string.h', 'FAILNULL'] -cppmacros['STRINGCOPYN']="""\ -#define STRINGCOPYN(to,from,buf_size) \\ - do { \\ - int _m = (buf_size); \\ - char *_to = (to); \\ - char *_from = (from); \\ - FAILNULL(_to); FAILNULL(_from); \\ - (void)strncpy(_to, _from, sizeof(char)*_m); \\ - _to[_m-1] = '\\0'; \\ - /* Padding with spaces instead of nulls */ \\ - for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ - _to[_m] = ' '; \\ - } \\ - } while (0) -""" -needs['STRINGCOPY']=['string.h', 'FAILNULL'] -cppmacros['STRINGCOPY']="""\ -#define STRINGCOPY(to,from)\\ - do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) -""" -cppmacros['CHECKGENERIC']="""\ -#define CHECKGENERIC(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKARRAY']="""\ -#define CHECKARRAY(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKSTRING']="""\ -#define CHECKSTRING(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for 
\"name);\\ -\t\tfprintf(stderr,show\"\\n\",slen(var),var);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKSCALAR']="""\ -#define CHECKSCALAR(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\tfprintf(stderr,show\"\\n\",var);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -## cppmacros['CHECKDIMS']="""\ -## #define CHECKDIMS(dims,rank) \\ -## \tfor (int i=0;i<(rank);i++)\\ -## \t\tif (dims[i]<0) {\\ -## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} -## """ -cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' -cppmacros['OLDPYNUM']="""\ -#ifdef OLDPYNUM -#error You need to intall Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369 -#endif -""" -################# C functions ############### - -cfuncs['calcarrindex']="""\ -static int calcarrindex(int *i,PyArrayObject *arr) { -\tint k,ii = i[0]; -\tfor (k=1; k < arr->nd; k++) -\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */ -\treturn ii; -}""" -cfuncs['calcarrindextr']="""\ -static int calcarrindextr(int *i,PyArrayObject *arr) { -\tint k,ii = i[arr->nd-1]; -\tfor (k=1; k < arr->nd; k++) -\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */ -\treturn ii; -}""" -cfuncs['forcomb']="""\ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { - int k; - if (dims==NULL) return 0; - if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - for (k=1;kdata,str,PyArray_NBYTES(arr)); } -\treturn 1; -capi_fail: -\tPRINTPYOBJERR(obj); -\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); -\treturn 0; -} -""" -needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN'] -cfuncs['string_from_pyobj']="""\ -static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { -\tPyArrayObject *arr = NULL; -\tPyObject *tmp = NULL; -#ifdef DEBUGCFUNCS -fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj); -#endif -\tif (obj == Py_None) { -\t\tif (*len == -1) -\t\t\t*len = strlen(inistr); /* Will this cause problems? 
*/ -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,inistr,*len+1); -\t\treturn 1; -\t} -\tif (PyArray_Check(obj)) { -\t\tif ((arr = (PyArrayObject *)obj) == NULL) -\t\t\tgoto capi_fail; -\t\tif (!ISCONTIGUOUS(arr)) { -\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tif (*len == -1) -\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr); -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,arr->data,*len+1); -\t\treturn 1; -\t} -\tif (PyString_Check(obj)) { -\t\ttmp = obj; -\t\tPy_INCREF(tmp); -\t} -\telse -\t\ttmp = PyObject_Str(obj); -\tif (tmp == NULL) goto capi_fail; -\tif (*len == -1) -\t\t*len = PyString_GET_SIZE(tmp); -\tSTRINGMALLOC(*str,*len); -\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); -\tPy_DECREF(tmp); -\treturn 1; -capi_fail: -\tPy_XDECREF(tmp); -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['char_from_pyobj']=['int_from_pyobj'] -cfuncs['char_from_pyobj']="""\ -static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (char)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char'] -cfuncs['signed_char_from_pyobj']="""\ -static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (signed_char)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['short_from_pyobj']=['int_from_pyobj'] -cfuncs['short_from_pyobj']="""\ -static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (short)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -cfuncs['int_from_pyobj']="""\ -static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = (int)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -cfuncs['long_from_pyobj']="""\ -static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['long_long_from_pyobj']=['long_long'] -cfuncs['long_long_from_pyobj']="""\ -static int 
long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyLong_Check(obj)) { -\t\t*v = PyLong_AsLongLong(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyInt_Check(obj)) { -\t\t*v = (long_long)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Long(obj); -\tif (tmp) { -\t\t*v = PyLong_AsLongLong(tmp); -\t\tPy_DECREF(tmp); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['long_double_from_pyobj']=['double_from_pyobj','long_double'] -cfuncs['long_double_from_pyobj']="""\ -static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { -\tdouble d=0; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, LongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_LONGDOUBLE) { -\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj)) -\t\t\treturn 1; -\t\t} -\t} -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (long_double)d; -\t\treturn 1; -\t} -\treturn 0; -} -""" -cfuncs['double_from_pyobj']="""\ -static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyFloat_Check(obj)) { -#ifdef __sgi -\t\t*v = PyFloat_AsDouble(obj); -#else -\t\t*v = PyFloat_AS_DOUBLE(obj); -#endif -\t\treturn 1; -\t} -\ttmp = PyNumber_Float(obj); -\tif (tmp) { -#ifdef __sgi -\t\t*v = PyFloat_AsDouble(tmp); -#else -\t\t*v = PyFloat_AS_DOUBLE(tmp); -#endif -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['float_from_pyobj']=['double_from_pyobj'] -cfuncs['float_from_pyobj']="""\ -static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { -\tdouble d=0.0; -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (float)d; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['complex_long_double_from_pyobj']=['complex_long_double','long_double', - 'complex_double_from_pyobj'] -cfuncs['complex_long_double_from_pyobj']="""\ -static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, CLongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_CLONGDOUBLE) { -\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; -\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; -\t\t\treturn 1; -\t\t} -\t} -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (long_double)cd.r; -\t\t(*v).i = (long_double)cd.i; -\t\treturn 1; -\t} 
-\treturn 0; -} -""" -needs['complex_double_from_pyobj']=['complex_double'] -cfuncs['complex_double_from_pyobj']="""\ -static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { -\tPy_complex c; -\tif (PyComplex_Check(obj)) { -\t\tc=PyComplex_AsCComplex(obj); -\t\t(*v).r=c.real, (*v).i=c.imag; -\t\treturn 1; -\t} -\tif (PyArray_IsScalar(obj, ComplexFloating)) { -\t\tif (PyArray_IsScalar(obj, CFloat)) { -\t\t\tnpy_cfloat new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse if (PyArray_IsScalar(obj, CLongDouble)) { -\t\t\tnpy_clongdouble new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */ -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t} -\t\treturn 1; -\t} -\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ -\t\tPyObject *arr; -\t\tif (PyArray_Check(obj)) { -\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, PyArray_CDOUBLE); -\t\t} -\t\telse { -\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(PyArray_CDOUBLE)); -\t\t} -\t\tif (arr==NULL) return 0; -\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; -\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; -\t\treturn 1; -\t} -\t/* Python does not provide PyNumber_Complex function :-( */ -\t(*v).i=0.0; -\tif (PyFloat_Check(obj)) { -#ifdef __sgi -\t\t(*v).r = PyFloat_AsDouble(obj); -#else -\t\t(*v).r = PyFloat_AS_DOUBLE(obj); -#endif -\t\treturn 1; -\t} -\tif (PyInt_Check(obj)) { -\t\t(*v).r = (double)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\tif (PyLong_Check(obj)) { -\t\t(*v).r = PyLong_AsDouble(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PySequence_Check(obj) && (!PyString_Check(obj))) { -\t\tPyObject *tmp = PySequence_GetItem(obj,0); -\t\tif (tmp) { -\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) { -\t\t\t\tPy_DECREF(tmp); -\t\t\t\treturn 1; -\t\t\t} -\t\t\tPy_DECREF(tmp); -\t\t} -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) -\t\t\terr = PyExc_TypeError; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj'] -cfuncs['complex_float_from_pyobj']="""\ -static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (float)cd.r; -\t\t(*v).i = (float)cd.i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','unsigned_char'] -cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char'] -cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' -needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n' -needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE'] 
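-# Scalar write-back helpers: each try_pyarr_from_<ctype> entry in this group
-# expands TRYPYARRAYTEMPLATE (or TRYCOMPLEXPYARRAYTEMPLATE for the complex
-# types) to copy a C scalar value back into a caller-supplied array object;
-# the matching needs[] lists name the macros and typedefs each helper requires.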
-cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n' -needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n' -needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long'] -cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' -needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n' -needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n' -needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float'] -cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' -needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double'] -cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' - -needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX'] -cfuncs['create_cb_arglist']="""\ -static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { -\tPyObject *tmp = NULL; -\tPyObject *tmp_fun = NULL; -\tint tot,opt,ext,siz,i,di=0; -\tCFUNCSMESS(\"create_cb_arglist\\n\"); -\ttot=opt=ext=siz=0; -\t/* Get the total number of arguments */ -\tif (PyFunction_Check(fun)) -\t\ttmp_fun = fun; -\telse { -\t\tdi = 1; -\t\tif (PyObject_HasAttrString(fun,\"im_func\")) { -\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\"); -\t\t} -\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) { -\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\"); -\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\")) -\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); -\t\t\telse { -\t\t\t\ttmp_fun = fun; /* built-in function */ -\t\t\t\ttot = maxnofargs; -\t\t\t\tif (xa != NULL) -\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\t} -\t\t\tPy_XDECREF(tmp); -\t\t} -\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\ttmp_fun = fun; -\t\t} -\t\telse if (PyCObject_Check(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\text = PyTuple_Size((PyObject *)xa); -\t\t\tif(ext>0) { -\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t\ttmp_fun = fun; -\t\t} -\t} -if (tmp_fun==NULL) { -fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":fun->ob_type->tp_name)); -goto capi_fail; -} -\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) -\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; 
-\t\tPy_XDECREF(tmp); -\t} -\t/* Get the number of optional arguments */ -\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) -\t\t\topt = PyTuple_Size(tmp); -\t\tPy_XDECREF(tmp); -\t/* Get the number of extra arguments */ -\tif (xa != NULL) -\t\text = PyTuple_Size((PyObject *)xa); -\t/* Calculate the size of call-backs argument list */ -\tsiz = MIN(maxnofargs+ext,tot); -\t*nofargs = MAX(0,siz-ext); -#ifdef DEBUGCFUNCS -\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); -#endif -\tif (siz0: - if outneeds[n][0] not in needs: - out.append(outneeds[n][0]) - del outneeds[n][0] - else: - flag=0 - for k in outneeds[n][1:]: - if k in needs[outneeds[n][0]]: - flag=1 - break - if flag: - outneeds[n]=outneeds[n][1:]+[outneeds[n][0]] - else: - out.append(outneeds[n][0]) - del outneeds[n][0] - if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])): - print n,saveout - errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') - out=out+saveout - break - saveout=copy.copy(outneeds[n]) - if out==[]: - out=[n] - res[n]=out - return res diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py deleted file mode 100644 index c4a7b5dab..000000000 --- a/numpy/f2py/common_rules.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -""" - -Build common block mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/05/06 10:57:33 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.19 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import pprint -import sys -import time -import types -import copy -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import capi_maps -import cfuncs -import func2subr -from crackfortran import rmbadname -############## - -def findcommonblocks(block,top=1): - ret = [] - if hascommon(block): - for n in block['common'].keys(): - vars={} - for v in block['common'][n]: - vars[v]=block['vars'][v] - ret.append((n,block['common'][n],vars)) - elif hasbody(block): - for b in block['body']: - ret=ret+findcommonblocks(b,0) - if top: - tret=[] - names=[] - for t in ret: - if t[0] not in names: - names.append(t[0]) - tret.append(t) - return tret - return ret - -def buildhooks(m): - ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']} - fwrap = [''] - def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0],line) - chooks = [''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line) - ihooks = [''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line) - doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line) - for (name,vnames,vars) in findcommonblocks(m): - lower_name = name.lower() - hnames,inames = [],[] - for n in vnames: - if isintent_hide(vars[n]): hnames.append(n) - else: inames.append(n) - if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name,','.join(inames),','.join(hnames))) - else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name,','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)'%name) - fadd('external setupfunc') - for n in vnames: - fadd(func2subr.var2fixfortran(vars,n)) - if name=='_BLNK_': - fadd('common %s'%(','.join(vnames))) - else: - fadd('common /%s/ %s'%(name,','.join(vnames))) - fadd('call setupfunc(%s)'%(','.join(inames))) - fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {'%(name)) - idims=[] - for n in inames: - ct = capi_maps.getctype(vars[n]) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n,vars[n]) - if dm['dims']: idims.append('(%s)'%(dm['dims'])) - else: idims.append('') - dms=dm['dims'].strip() - if not dms: dms='-1' - cadd('\t{\"%s\",%s,{{%s}},%s},'%(n,dm['rank'],dms,at)) - cadd('\t{NULL}\n};') - inames1 = rmbadname(inames) - inames1_tps = ','.join(map(lambda s:'char *'+s,inames1)) - cadd('static void f2py_setup_%s(%s) {'%(name,inames1_tps)) - cadd('\tint i_f2py=0;') - for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name,n)) - cadd('}') - if '_' in lower_name: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\ - %(F_FUNC,lower_name,name.upper(), - ','.join(['char*']*len(inames1)))) - cadd('static void f2py_init_%s(void) {'%name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC,lower_name,name.upper(),name)) - cadd('}\n') - iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name,name,name)) - tname = name.replace('_','\\_') - dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname)) - dadd('\\begin{description}') - for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,vars[n]))) - if hasnote(vars[n]): - note = vars[n]['note'] - if type(note) is type([]): note='\n'.join(note) - dadd('--- %s'%(note)) - dadd('\\end{description}') - ret['docs'].append('"\t/%s/ 
%s\\n"'%(name,','.join(map(lambda v,d:v+d,inames,idims)))) - ret['commonhooks']=chooks - ret['initcommonhooks']=ihooks - ret['latexdoc']=doc[0] - if len(ret['docs'])<=1: ret['docs']='' - return ret,fwrap[0] diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py deleted file mode 100755 index 0e7dbd44c..000000000 --- a/numpy/f2py/crackfortran.py +++ /dev/null @@ -1,2750 +0,0 @@ -#!/usr/bin/env python -""" -crackfortran --- read fortran (77,90) code and extract declaration information. - Usage is explained in the comment block below. - -Copyright 1999-2004 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/09/27 07:13:49 $ -Pearu Peterson -""" -__version__ = "$Revision: 1.177 $"[10:-1] - -import __version__ -import string -f2py_version = __version__.version - -""" - Usage of crackfortran: - ====================== - Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h - -m ,--ignore-contains - Functions: crackfortran, crack2fortran - The following Fortran statements/constructions are supported - (or will be if needed): - block data,byte,call,character,common,complex,contains,data, - dimension,double complex,double precision,end,external,function, - implicit,integer,intent,interface,intrinsic, - logical,module,optional,parameter,private,public, - program,real,(sequence?),subroutine,type,use,virtual, - include,pythonmodule - Note: 'virtual' is mapped to 'dimension'. - Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). - Note: code after 'contains' will be ignored until its scope ends. - Note: 'common' statement is extended: dimensions are moved to variable definitions - Note: f2py directive: f2py is read as - Note: pythonmodule is introduced to represent Python module - - Usage: - `postlist=crackfortran(files,funcs)` - `postlist` contains declaration information read from the list of files `files`. 
-   `crack2fortran(postlist)` returns a Fortran code to be saved to a pyf-file.
-
-   `postlist` has the following structure:
-  *** it is a list of dictionaries containing `blocks':
-      B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
-           'implicit','externals','interfaced','common','sortvars',
-           'commonvars','note']}
-      B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
-                   'program' | 'block data' | 'type' | 'pythonmodule'
-      B['body'] --- list containing `subblocks' with the same structure as `blocks'
-      B['parent_block'] --- dictionary of a parent block:
-                            C['body'][<index>]['parent_block'] is C
-      B['vars'] --- dictionary of variable definitions
-      B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
-      B['name'] --- name of the block (not if B['block']=='interface')
-      B['prefix'] --- prefix string (only if B['block']=='function')
-      B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
-      B['result'] --- name of the return value (only if B['block']=='function')
-      B['implicit'] --- dictionary {'a':<implicit rule>,'b':...} | None
-      B['externals'] --- list of variables being external
-      B['interfaced'] --- list of variables being external and defined
-      B['common'] --- dictionary of common blocks (list of objects)
-      B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
-      B['from'] --- string showing the 'parents' of the current block
-      B['use'] --- dictionary of modules used in the current block:
-          {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
-      B['note'] --- list of LaTeX comments on the block
-      B['f2pyenhancements'] --- optional dictionary
-           {'threadsafe':'','fortranname':<name>,
-            'callstatement':<C-expr>|<multi-line block>,
-            'callprotoargument':<C-expr-list>,
-            'usercode':<multi-line block>|<list of multi-line blocks>,
-            'pymethoddef:<multi-line block>'
-            }
-      B['entry'] --- dictionary {entryname:argslist,..}
-      B['varnames'] --- list of variable names given in the order of reading the
-                        Fortran code, useful for derived types
-  *** Variable definition is a dictionary
-      D = B['vars'][<variable name>] =
-      {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
-      D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
-                      'double precision' | 'integer' | 'logical' | 'real' | 'type'
-      D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
-                        'external','intent(in|out|inout|hide|c|callback|cache)',
-                        'optional','required', etc.)
-      K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
-          'complex' | 'integer' | 'logical' | 'real' )
-      C = D['charselector'] = {['*','len','kind']}
-                              (only if D['typespec']=='character')
-      D['='] --- initialization expression string
-      D['typename'] --- name of the type if D['typespec']=='type'
-      D['dimension'] --- list of dimension bounds
-      D['intent'] --- list of intent specifications
-      D['depend'] --- list of variable names on which the current variable depends
-      D['check'] --- list of C-expressions; if a C-expr returns zero, an exception is raised
-      D['note'] --- list of LaTeX comments on the variable
-  *** Meaning of kind/char selectors (a few examples):
-      D['typespec']*K['*']
-      D['typespec'](kind=K['kind'])
-      character*C['*']
-      character(len=C['len'],kind=C['kind'])
-      (see also the Fortran type declaration statement formats below)
-
- Fortran 90 type declaration statement format (F77 is a subset of F90)
-====================================================================
- (Main source: IBM XL Fortran 5.1 Language Reference Manual)
- type declaration = <typespec> [[<attrspec>]::] <entitydecl>
- <typespec> = byte                      |
-              character[<charselector>] |
-              complex[<kindselector>]   |
-              double complex            |
-              double precision          |
-              integer[<kindselector>]   |
-              logical[<kindselector>]   |
-              real[<kindselector>]      |
-              type(<typename>)
- <charselector> = * <charlen>                |
-              ([len=]<len>[,[kind=]<kind>])  |
-              (kind=<kind>[,len=<len>])
- <kindselector> = * <intlen>                 |
-              ([kind=]<kind>)
- <attrspec> = comma separated list of attributes.
-              Only the following attributes are used in
-              building up the interface:
-                 external
-                 (parameter --- affects '=' key)
-                 optional
-                 intent
-              Other attributes are ignored.
- <intentattr> = in | out | inout
- <arrayspec> = comma separated list of dimension bounds
- <entitydecl> = <name> [[*][<len>] | [(<arrayspec>)]*<len>]
-                [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
-
- In addition, the following attributes are used: check,depend,note
-
- TODO:
-   * Apply the 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-     -> 'real x(2)')
-     The above may be solved by creating an appropriate preprocessor program, for example.
-"""
-#
-import sys
-import string
-import fileinput
-import re
-import pprint
-import os
-import copy
-from auxfuncs import *
-
-# Global flags:
-strictf77=1           # Ignore `!' comments unless line[0]=='!'
-sourcecodeform='fix'  # 'fix','free'
-quiet=0               # Be verbose if 0 (Obsolete: not used any more)
-verbose=1             # Be quiet if 0, extra verbose if > 1.
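To make the B/D dictionaries documented above concrete, here is a hand-written, abridged example of what crackfortran might record for a small routine. Both the Fortran snippet and the exact set of keys shown are illustrative assumptions, not a literal dump.

# Hypothetical input:
#       subroutine fib(a, n)
#       integer n
#       real*8 a(n)
#       end
# Abridged, hand-written sketch of the corresponding block dictionary B:
B = {
    'block': 'subroutine',
    'name': 'fib',
    'args': ['a', 'n'],
    'body': [],
    'vars': {
        'a': {'typespec': 'real',            # D['typespec']
              'kindselector': {'*': '8'},    # from real*8
              'attrspec': ['dimension(n)'],  # dimensions move into the variable entry
              'depend': ['n']},
        'n': {'typespec': 'integer'},
    },
}
print(B['vars']['a']['attrspec'])   # ['dimension(n)']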
-tabchar=4*' ' -pyffilename='' -f77modulename='' -skipemptyends=0 # for old F77 programs without 'program' statement -ignorecontains=1 -dolowercase=1 -debug=[] -## do_analyze = 1 - -###### global variables - -## use reload(crackfortran) to reset these variables - -groupcounter=0 -grouplist={groupcounter:[]} -neededmodule=-1 -expectbegin=1 -skipblocksuntil=-1 -usermodules=[] -f90modulevars={} -gotnextfile=1 -filepositiontext='' -currentfilename='' -skipfunctions=[] -skipfuncs=[] -onlyfuncs=[] -include_paths=[] -previous_context = None - -###### Some helper functions -def show(o,f=0):pprint.pprint(o) -errmess=sys.stderr.write -def outmess(line,flag=1): - global filepositiontext - if not verbose: return - if not quiet: - if flag:sys.stdout.write(filepositiontext) - sys.stdout.write(line) -re._MAXCACHE=50 -defaultimplicitrules={} -for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'} -for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'} -del c -badnames={} -invbadnames={} -for n in ['int','double','float','char','short','long','void','case','while', - 'return','signed','unsigned','if','for','typedef','sizeof','union', - 'struct','static','register','new','break','do','goto','switch', - 'continue','else','inline','extern','delete','const','auto', - 'len','rank','shape','index','slen','size','_i', - 'flen','fshape', - 'string','complex_double','float_double','stdin','stderr','stdout', - 'type','default']: - badnames[n]=n+'_bn' - invbadnames[n+'_bn']=n -def rmbadname1(name): - if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name,badnames[name])) - return badnames[name] - return name -def rmbadname(names): return map(rmbadname1,names) - -def undo_rmbadname1(name): - if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\ - %(name,invbadnames[name])) - return invbadnames[name] - return name -def undo_rmbadname(names): return map(undo_rmbadname1,names) - -def getextension(name): - i=name.rfind('.') - if i==-1: return '' - if '\\' in name[i:]: return '' - if '/' in name[i:]: return '' - return name[i+1:] - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search -_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - f = open(file,'r') - line = f.readline() - n = 15 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - if line[0]!='!' and line.strip(): - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - - -####### Read fortran (77,90) code -def readfortrancode(ffile,dowithline=show,istop=1): - """ - Read fortran codes from files and - 1) Get rid of comments, line continuations, and empty lines; lower cases. - 2) Call dowithline(line) on every line. - 3) Recursively call itself when statement \"include ''\" is met. 
- """ - global gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase,include_paths - if not istop: - saveglobals=gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase - if ffile==[]: return - localdolowercase = dolowercase - cont=0 - finalline='' - ll='' - commentline=re.compile(r'(?P([^"]*"[^"]*"[^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!]*))!{1}(?P.*)') - includeline=re.compile(r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")',re.I) - cont1=re.compile(r'(?P.*)&\s*\Z') - cont2=re.compile(r'(\s*&|)(?P.*)') - mline_mark = re.compile(r".*?'''") - if istop: dowithline('',-1) - ll,l1='','' - spacedigits=[' ']+map(str,range(10)) - filepositiontext='' - fin=fileinput.FileInput(ffile) - while 1: - l=fin.readline() - if not l: break - if fin.isfirstline(): - filepositiontext='' - currentfilename=fin.filename() - gotnextfile=1 - l1=l - strictf77=0 - sourcecodeform='fix' - ext = os.path.splitext(currentfilename)[1] - if is_f_file(currentfilename) and \ - not (_has_f90_header(l) or _has_fix_header(l)): - strictf77=1 - elif is_free_format(currentfilename) and not _has_fix_header(l): - sourcecodeform='free' - if strictf77: beginpattern=beginpattern77 - else: beginpattern=beginpattern90 - outmess('\tReading file %s (format:%s%s)\n'\ - %(`currentfilename`,sourcecodeform, - strictf77 and ',strict' or '')) - - l=l.expandtabs().replace('\xa0',' ') - while not l=='': # Get rid of newline characters - if l[-1] not in "\n\r\f": break - l=l[:-1] - if not strictf77: - r=commentline.match(l) - if r: - l=r.group('line')+' ' # Strip comments starting with `!' - rl=r.group('rest') - if rl[:4].lower()=='f2py': # f2py directive - l = l + 4*' ' - r=commentline.match(rl[4:]) - if r: l=l+r('line') - else: l = l + rl[4:] - if l.strip()=='': # Skip empty line - cont=0 - continue - if sourcecodeform=='fix': - if l[0] in ['*','c','!','C','#']: - if l[1:5].lower()=='f2py': # f2py directive - l=' '+l[5:] - else: # Skip comment line - cont=0 - continue - elif strictf77: - if len(l)>72: l=l[:72] - if not (l[0] in spacedigits): - raise 'readfortrancode: Found non-(space,digit) char in the first column.\n\tAre you sure that this code is in fix form?\n\tline=%s'%`l` - - if (not cont or strictf77) and (len(l)>5 and not l[5]==' '): - # Continuation of a previous line - ll=ll+l[6:] - finalline='' - origfinalline='' - else: - if not strictf77: - # F90 continuation - r=cont1.match(l) - if r: l=r.group('line') # Continuation follows .. - if cont: - ll=ll+cont2.match(l).group('line') - finalline='' - origfinalline='' - else: - l=' '+l[5:] # clean up line beginning from possible digits. - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline=ll - ll=l - cont=(r is not None) - else: - l=' '+l[5:] # clean up line beginning from possible digits. - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline =ll - ll=l - - elif sourcecodeform=='free': - if not cont and ext=='.pyf' and mline_mark.match(l): - l = l + '\n' - while 1: - lc = fin.readline() - if not lc: - errmess('Unexpected end of file when reading multiline\n') - break - l = l + lc - if mline_mark.match(lc): - break - l = l.rstrip() - r=cont1.match(l) - if r: l=r.group('line') # Continuation follows .. 
- if cont: - ll=ll+cont2.match(l).group('line') - finalline='' - origfinalline='' - else: - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline =ll - ll=l - cont=(r is not None) - else: - raise ValueError,"Flag sourcecodeform must be either 'fix' or 'free': %s"%`sourcecodeform` - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1) - m=includeline.match(origfinalline) - if m: - fn=m.group('name') - if os.path.isfile(fn): - readfortrancode(fn,dowithline=dowithline,istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir,fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1,dowithline=dowithline,istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s. Ignoring.\n'%(`fn`)) - else: - dowithline(finalline) - l1=ll - if localdolowercase: - finalline=ll.lower() - else: finalline=ll - origfinalline = ll - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1) - m=includeline.match(origfinalline) - if m: - fn=m.group('name') - fn1=os.path.join(os.path.dirname(currentfilename),fn) - if os.path.isfile(fn): - readfortrancode(fn,dowithline=dowithline,istop=0) - elif os.path.isfile(fn1): - readfortrancode(fn1,dowithline=dowithline,istop=0) - else: - outmess('readfortrancode: could not find include file %s. Ignoring.\n'%(`fn`)) - else: - dowithline(finalline) - filepositiontext='' - fin.close() - if istop: dowithline('',1) - else: - gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase=saveglobals - -########### Crack line -beforethisafter=r'\s*(?P%s(?=\s*(\b(%s)\b)))'+ \ - r'\s*(?P(\b(%s)\b))'+ \ - r'\s*(?P%s)\s*\Z' -## -fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' -typespattern=re.compile(beforethisafter%('',fortrantypes,fortrantypes,'.*'),re.I),'type' -typespattern4implicit=re.compile(beforethisafter%('',fortrantypes+'|static|automatic|undefined',fortrantypes+'|static|automatic|undefined','.*'),re.I) -# -functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)','function','function','.*'),re.I),'begin' -subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?','subroutine','subroutine','.*'),re.I),'begin' -#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' -# -groupbegins77=r'program|block\s*data' -beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin' -groupbegins90=groupbegins77+r'|module|python\s*module|interface|type(?!\s*\()' -beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin' -groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' -endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end' -#endifs='end\s*(if|do|where|select|while|forall)' -endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' -endifpattern=re.compile(beforethisafter%('[\w]*?',endifs,endifs,'[\w\s]*'),re.I),'endif' -# -implicitpattern=re.compile(beforethisafter%('','implicit','implicit','.*'),re.I),'implicit' -dimensionpattern=re.compile(beforethisafter%('','dimension|virtual','dimension|virtual','.*'),re.I),'dimension' -externalpattern=re.compile(beforethisafter%('','external','external','.*'),re.I),'external' 
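All of the statement-classifying patterns above are built from the same beforethisafter template; its named groups ('before', 'this', 'after') are the ones read back elsewhere in this module via m.group('this'), m.group('after'), and m.group('before'). The self-contained rendition below (reconstructed for illustration) applies it to a dimension statement.

import re

# Reconstructed for illustration: the named groups 'before', 'this' and 'after'
# match how the rest of this module reads the match object back.
beforethisafter = (r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'
                   r'\s*(?P<this>(\b(%s)\b))'
                   r'\s*(?P<after>%s)\s*\Z')

dimensionpattern = re.compile(
    beforethisafter % ('', 'dimension|virtual', 'dimension|virtual', '.*'), re.I)

m = dimensionpattern.match('dimension a(10), b(n)')
print(m.group('this'))    # 'dimension'
print(m.group('after'))   # 'a(10), b(n)'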
-optionalpattern=re.compile(beforethisafter%('','optional','optional','.*'),re.I),'optional' -requiredpattern=re.compile(beforethisafter%('','required','required','.*'),re.I),'required' -publicpattern=re.compile(beforethisafter%('','public','public','.*'),re.I),'public' -privatepattern=re.compile(beforethisafter%('','private','private','.*'),re.I),'private' -intrisicpattern=re.compile(beforethisafter%('','intrisic','intrisic','.*'),re.I),'intrisic' -intentpattern=re.compile(beforethisafter%('','intent|depend|note|check','intent|depend|note|check','\s*\(.*?\).*'),re.I),'intent' -parameterpattern=re.compile(beforethisafter%('','parameter','parameter','\s*\(.*'),re.I),'parameter' -datapattern=re.compile(beforethisafter%('','data','data','.*'),re.I),'data' -callpattern=re.compile(beforethisafter%('','call','call','.*'),re.I),'call' -entrypattern=re.compile(beforethisafter%('','entry','entry','.*'),re.I),'entry' -callfunpattern=re.compile(beforethisafter%('','callfun','callfun','.*'),re.I),'callfun' -commonpattern=re.compile(beforethisafter%('','common','common','.*'),re.I),'common' -usepattern=re.compile(beforethisafter%('','use','use','.*'),re.I),'use' -containspattern=re.compile(beforethisafter%('','contains','contains',''),re.I),'contains' -formatpattern=re.compile(beforethisafter%('','format','format','.*'),re.I),'format' -## Non-fortran and f2py-specific statements -f2pyenhancementspattern=re.compile(beforethisafter%('','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','.*'),re.I|re.S),'f2pyenhancements' -multilinepattern = re.compile(r"\s*(?P''')(?P.*?)(?P''')\s*\Z",re.S),'multiline' -## - -def _simplifyargs(argsline): - a = [] - for n in markoutercomma(argsline).split('@,@'): - for r in '(),': - n = n.replace(r,'_') - a.append(n) - return ','.join(a) - -crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+[\w]*\b)\s*[=].*',re.I) -def crackline(line,reset=0): - """ - reset=-1 --- initialize - reset=0 --- crack the line - reset=1 --- final check if mismatch of blocks occured - - Cracked data is saved in grouplist[0]. - """ - global beginpattern,groupcounter,groupname,groupcache,grouplist,gotnextfile,\ - filepositiontext,currentfilename,neededmodule,expectbegin,skipblocksuntil,\ - skipemptyends,previous_context - if ';' in line and not (f2pyenhancementspattern[0].match(line) or - multilinepattern[0].match(line)): - for l in line.split(';'): - assert reset==0,`reset` # XXX: non-zero reset values need testing - crackline(l,reset) - return - if reset<0: - groupcounter=0 - groupname={groupcounter:''} - groupcache={groupcounter:{}} - grouplist={groupcounter:[]} - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['block']='' - groupcache[groupcounter]['name']='' - neededmodule=-1 - skipblocksuntil=-1 - return - if reset>0: - fl=0 - if f77modulename and neededmodule==groupcounter: fl=2 - while groupcounter>fl: - outmess('crackline: groupcounter=%s groupname=%s\n'%(`groupcounter`,`groupname`)) - outmess('crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 - if f77modulename and neededmodule==groupcounter: - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end interface - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end module - neededmodule=-1 - return - if line=='': return - flag=0 - for pat in [dimensionpattern,externalpattern,intentpattern,optionalpattern, - requiredpattern, - parameterpattern,datapattern,publicpattern,privatepattern, - intrisicpattern, - endifpattern,endpattern, - formatpattern, - beginpattern,functionpattern,subroutinepattern, - implicitpattern,typespattern,commonpattern, - callpattern,usepattern,containspattern, - entrypattern, - f2pyenhancementspattern, - multilinepattern - ]: - m = pat[0].match(line) - if m: - break - flag=flag+1 - if not m: - re_1 = crackline_re_1 - if 0<=skipblocksuntil<=groupcounter:return - if 'externals' in groupcache[groupcounter]: - for name in groupcache[groupcounter]['externals']: - if name in invbadnames: - name=invbadnames[name] - if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: - continue - m1=re.match(r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z'%name,markouterparen(line),re.I) - if m1: - m2 = re_1.match(m1.group('before')) - a = _simplifyargs(m1.group('args')) - if m2: - line='callfun %s(%s) result (%s)'%(name,a,m2.group('result')) - else: line='callfun %s(%s)'%(name,a) - m = callfunpattern[0].match(line) - if not m: - outmess('crackline: could not resolve function call for line=%s.\n'%`line`) - return - analyzeline(m,'callfun',line) - return - if verbose>1: - previous_context = None - outmess('crackline:%d: No pattern for line\n'%(groupcounter)) - return - elif pat[1]=='end': - if 0<=skipblocksuntil(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z',re.I) -nameargspattern=re.compile(r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*(result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))*\s*\Z',re.I) -callnameargspattern=re.compile(r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z',re.I) -real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') -real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') - -_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b',re.I) -def _is_intent_callback(vdecl): - for a in vdecl.get('attrspec',[]): - if _intentcallbackpattern.match(a): - return 1 - return 0 - -def _resolvenameargspattern(line): - line = markouterparen(line) - m1=nameargspattern.match(line) - if m1: return m1.group('name'),m1.group('args'),m1.group('result') - m1=callnameargspattern.match(line) - if m1: return m1.group('name'),m1.group('args'),None - return None,[],None - -def analyzeline(m,case,line): - global groupcounter,groupname,groupcache,grouplist,filepositiontext,\ - currentfilename,f77modulename,neededinterface,neededmodule,expectbegin,\ - gotnextfile,previous_context - block=m.group('this') - if case != 'multiline': - previous_context = None - if expectbegin and case not in ['begin','call','callfun','type'] \ - and not skipemptyends and groupcounter<1: - 
newname=os.path.basename(currentfilename).split('.')[0] - outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname) - gotnextfile=0 - groupcounter=groupcounter+1 - groupname[groupcounter]='program' - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['block']='program' - groupcache[groupcounter]['name']=newname - groupcache[groupcounter]['from']='fromsky' - expectbegin=0 - if case in ['begin','call','callfun']: - # Crack line => block,name,args,result - block = block.lower() - if re.match(r'block\s*data',block,re.I): block='block data' - if re.match(r'python\s*module',block,re.I): block='python module' - name,args,result = _resolvenameargspattern(m.group('after')) - if name is None: - if block=='block data': - name = '_BLOCK_DATA_' - else: - name = '' - if block not in ['interface','block data']: - outmess('analyzeline: No name/args pattern found for line.\n') - - previous_context = (block,name,groupcounter) - if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) - else: args=[] - if '' in args: - while '' in args: - args.remove('') - outmess('analyzeline: argument list is malformed (missing argument).\n') - - # end of crack line => block,name,args,result - needmodule=0 - needinterface=0 - - if case in ['call','callfun']: - needinterface=1 - if 'args' not in groupcache[groupcounter]: - return - if name not in groupcache[groupcounter]['args']: - return - for it in grouplist[groupcounter]: - if it['name']==name: - return - if name in groupcache[groupcounter]['interfaced']: - return - block={'call':'subroutine','callfun':'function'}[case] - if f77modulename and neededmodule==-1 and groupcounter<=1: - neededmodule=groupcounter+2 - needmodule=1 - needinterface=1 - # Create new block(s) - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - if needmodule: - if verbose>1: - outmess('analyzeline: Creating module block %s\n'%`f77modulename`,0) - groupname[groupcounter]='module' - groupcache[groupcounter]['block']='python module' - groupcache[groupcounter]['name']=f77modulename - groupcache[groupcounter]['from']='' - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - if needinterface: - if verbose>1: - outmess('analyzeline: Creating additional interface block.\n',0) - groupname[groupcounter]='interface' - groupcache[groupcounter]['block']='interface' - groupcache[groupcounter]['name']='unknown_interface' - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name']) - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - groupname[groupcounter]=block - groupcache[groupcounter]['block']=block - if not name: name='unknown_'+block - groupcache[groupcounter]['prefix']=m.group('before') - groupcache[groupcounter]['name']=rmbadname1(name) - groupcache[groupcounter]['result']=result - if groupcounter==1: - groupcache[groupcounter]['from']=currentfilename - else: - if f77modulename and groupcounter==3: - 
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename) - else: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name']) - for k in groupcache[groupcounter].keys(): - if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] - groupcache[groupcounter]['args']=args - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['entry']={} - # end of creation - if block=='type': - groupcache[groupcounter]['varnames'] = [] - - if case in ['call','callfun']: # set parents variables - if name not in groupcache[groupcounter-2]['externals']: - groupcache[groupcounter-2]['externals'].append(name) - groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars']) - #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']] - #except: pass - try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] - except: pass - if block in ['function','subroutine']: # set global attributes - try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter-2]['vars']['']) - except: pass - if case=='callfun': # return type - if result and result in groupcache[groupcounter]['vars']: - if not name==result: - groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter]['vars'][result]) - #if groupcounter>1: # name is interfaced - try: groupcache[groupcounter-2]['interfaced'].append(name) - except: pass - if block=='function': - t=typespattern[0].match(m.group('before')+' '+name) - if t: - typespec,selector,attr,edecl=cracktypespec0(t.group('this'),t.group('after')) - updatevars(typespec,selector,attr,edecl) - if case in ['call','callfun']: - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end routine - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end interface - elif case=='entry': - name,args,result=_resolvenameargspattern(m.group('after')) - if name is not None: - if args: - args=rmbadname([x.strip() for x in markoutercomma(args).strip('@,@')]) - else: args=[] - assert result is None,`result` - groupcache[groupcounter]['entry'][name] = args - previous_context = ('entry',name,groupcounter) - elif case=='type': - typespec,selector,attr,edecl=cracktypespec0(block,m.group('after')) - last_name = updatevars(typespec,selector,attr,edecl) - if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case in ['dimension','intent','optional','required','external','public','private','intrisic']: - edecl=groupcache[groupcounter]['vars'] - ll=m.group('after').strip() - i=ll.find('::') - if i<0 and case=='intent': - i=markouterparen(ll).find('@)@')-2 - ll=ll[:i+1]+'::'+ll[i+1:] - i=ll.find('::') - if ll[i:]=='::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n'%(m.group('this'),ll[:i])) - ll = ll + ','.join(groupcache[groupcounter]['args']) - if i<0:i=0;pl='' - else: pl=ll[:i].strip();ll=ll[i+2:] - ch = markoutercomma(pl).split('@,@') - if len(ch)>1: - pl 
= ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:]))) - last_name = None - for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: - m1=namepattern.match(e) - if not m1: - if case in ['public','private']: k='' - else: - print m.groupdict() - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,`e`)) - continue - else: - k=rmbadname1(m1.group('name')) - if k not in edecl: - edecl[k]={} - if case=='dimension': - ap=case+m1.group('after') - if case=='intent': - ap=m.group('this')+pl - if _intentcallbackpattern.match(ap): - if k not in groupcache[groupcounter]['args']: - if groupcounter>1 and \ - '__user__' in groupcache[groupcounter-2]['name']: - outmess('analyzeline: appending intent(callback) %s'\ - ' to %s arguments\n' % (k,groupcache[groupcounter]['name'])) - groupcache[groupcounter]['args'].append(k) - else: - errmess('analyzeline: intent(callback) %s is already'\ - ' in argument list' % (k)) - if case in ['optional','required','public','external','private','intrisic']: - ap=case - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append(ap) - else: - edecl[k]['attrspec']=[ap] - if case=='external': - if groupcache[groupcounter]['block']=='program': - outmess('analyzeline: ignoring program arguments\n') - continue - if k not in groupcache[groupcounter]['args']: - #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`)) - continue - if 'externals' not in groupcache[groupcounter]: - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['externals'].append(k) - last_name = k - groupcache[groupcounter]['vars']=edecl - if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case=='parameter': - edecl=groupcache[groupcounter]['vars'] - ll=m.group('after').strip()[1:-1] - last_name = None - for e in markoutercomma(ll).split('@,@'): - try: - k,initexpr=[x.strip() for x in e.split('=')] - except: - outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e,ll));continue - params = get_parameters(edecl) - k=rmbadname1(k) - if k not in edecl: - edecl[k]={} - if '=' in edecl[k] and (not edecl[k]['=']==initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k,edecl[k]['='],initexpr)) - t = determineexprtype(initexpr,params) - if t: - if t.get('typespec')=='real': - tt = list(initexpr) - for m in real16pattern.finditer(initexpr): - tt[m.start():m.end()] = list(\ - initexpr[m.start():m.end()].lower().replace('d', 'e')) - initexpr = ''.join(tt) - elif t.get('typespec')=='complex': - initexpr = initexpr[1:].lower().replace('d','e').\ - replace(',','+1j*(') - try: - v = eval(initexpr,{},params) - except (SyntaxError,NameError),msg: - errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n'\ - % (initexpr, msg)) - continue - edecl[k]['='] = repr(v) - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append('parameter') - else: edecl[k]['attrspec']=['parameter'] - last_name = k - groupcache[groupcounter]['vars']=edecl - if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case=='implicit': - if m.group('after').strip().lower()=='none': - groupcache[groupcounter]['implicit']=None - elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl=groupcache[groupcounter]['implicit'] - else: impl={} - if impl is None: - outmess('analyzeline: Overwriting earlier "implicit none" statement.\n') - impl={} - for e in markoutercomma(m.group('after')).split('@,@'): - decl={} - m1=re.match(r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z',e,re.I) - if not m1: - outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue - m2=typespattern4implicit.match(m1.group('this')) - if not m2: - outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue - typespec,selector,attr,edecl=cracktypespec0(m2.group('this'),m2.group('after')) - kindselect,charselect,typename=cracktypespec(typespec,selector) - decl['typespec']=typespec - decl['kindselector']=kindselect - decl['charselector']=charselect - decl['typename']=typename - for k in decl.keys(): - if not decl[k]: del decl[k] - for r in markoutercomma(m1.group('after')).split('@,@'): - if '-' in r: - try: begc,endc=[x.strip() for x in r.split('-')] - except: - outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement\n'%r);continue - else: begc=endc=r.strip() - if not len(begc)==len(endc)==1: - outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n'%r);continue - for o in range(ord(begc),ord(endc)+1): - impl[chr(o)]=decl - groupcache[groupcounter]['implicit']=impl - elif case=='data': - ll=[] - dl='';il='';f=0;fc=1;inp=0 - for c in m.group('after'): - if not inp: - if c=="'": fc=not fc - if c=='/' and fc: f=f+1;continue - if c=='(': inp = inp + 1 - elif c==')': inp = inp - 1 - if f==0: dl=dl+c - elif f==1: il=il+c - elif f==2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl,il]) - dl=c;il='';f=0 - if f==2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl,il]) - vars={} - if 'vars' in groupcache[groupcounter]: - vars=groupcache[groupcounter]['vars'] - last_name = None - for l in ll: - l=[x.strip() for x in l] - if l[0][0]==',':l[0]=l[0][1:] - if l[0][0]=='(': - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0]) - continue - #if '(' in l[0]: - # #outmess('analyzeline: ignoring this data statement.\n') - # continue - i=0;j=0;llen=len(l[1]) - for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): - if v[0]=='(': - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v) - # XXX: subsequent init expressions may get wrong values. - # Ignoring since data statements are irrelevant for wrapping. - continue - fc=0 - while (i=3: - bn = bn.strip() - if not bn: bn='_BLNK_' - cl.append([bn,ol]) - f=f-2;bn='';ol='' - if f%2: bn=bn+c - else: ol=ol+c - bn = bn.strip() - if not bn: bn='_BLNK_' - cl.append([bn,ol]) - commonkey={} - if 'common' in groupcache[groupcounter]: - commonkey=groupcache[groupcounter]['common'] - for c in cl: - if c[0] in commonkey: - outmess('analyzeline: previously defined common block encountered. 
Skipping.\n') - continue - commonkey[c[0]]=[] - for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: - if i: commonkey[c[0]].append(i) - groupcache[groupcounter]['common']=commonkey - previous_context = ('common',bn,groupcounter) - elif case=='use': - m1=re.match(r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z',m.group('after'),re.I) - if m1: - mm=m1.groupdict() - if 'use' not in groupcache[groupcounter]: - groupcache[groupcounter]['use']={} - name=m1.group('name') - groupcache[groupcounter]['use'][name]={} - isonly=0 - if 'list' in mm and mm['list'] is not None: - if 'notonly' in mm and mm['notonly'] is None: - isonly=1 - groupcache[groupcounter]['use'][name]['only']=isonly - ll=[x.strip() for x in mm['list'].split(',')] - rl={} - for l in ll: - if '=' in l: - m2=re.match(r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z',l,re.I) - if m2: rl[m2.group('local').strip()]=m2.group('use').strip() - else: - outmess('analyzeline: Not local=>use pattern found in %s\n'%`l`) - else: - rl[l]=l - groupcache[groupcounter]['use'][name]['map']=rl - else: - pass - - else: - print m.groupdict() - outmess('analyzeline: Could not crack the use statement.\n') - elif case in ['f2pyenhancements']: - if 'f2pyenhancements' not in groupcache[groupcounter]: - groupcache[groupcounter]['f2pyenhancements'] = {} - d = groupcache[groupcounter]['f2pyenhancements'] - if m.group('this')=='usercode' and 'usercode' in d: - if type(d['usercode']) is type(''): - d['usercode'] = [d['usercode']] - d['usercode'].append(m.group('after')) - else: - d[m.group('this')] = m.group('after') - elif case=='multiline': - if previous_context is None: - if verbose: - outmess('analyzeline: No context for multiline block.\n') - return - gc = groupcounter - #gc = previous_context[2] - appendmultiline(groupcache[gc], - previous_context[:2], - m.group('this')) - else: - if verbose>1: - print m.groupdict() - outmess('analyzeline: No code implemented for line.\n') - -def appendmultiline(group, context_name,ml): - if 'f2pymultilines' not in group: - group['f2pymultilines'] = {} - d = group['f2pymultilines'] - if context_name not in d: - d[context_name] = [] - d[context_name].append(ml) - return - -def cracktypespec0(typespec,ll): - selector=None - attr=None - if re.match(r'double\s*complex',typespec,re.I): typespec='double complex' - elif re.match(r'double\s*precision',typespec,re.I): typespec='double precision' - else: typespec=typespec.strip().lower() - m1=selectpattern.match(markouterparen(ll)) - if not m1: - outmess('cracktypespec0: no kind/char_selector pattern found for line.\n') - return - d=m1.groupdict() - for k in d.keys(): d[k]=unmarkouterparen(d[k]) - if typespec in ['complex','integer','logical','real','character','type']: - selector=d['this'] - ll=d['after'] - i=ll.find('::') - if i>=0: - attr=ll[:i].strip() - ll=ll[i+2:] - return typespec,selector,attr,ll -##### -namepattern=re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z',re.I) -kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z',re.I) -charselector=re.compile(r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z',re.I) -lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z',re.I) -lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z',re.I) -def removespaces(expr): - expr=expr.strip() - if len(expr)<=1: return expr - expr2=expr[0] - 
for i in range(1,len(expr)-1): - if expr[i]==' ' and \ - ((expr[i+1] in "()[]{}= ") or (expr[i-1] in "()[]{}= ")): continue - expr2=expr2+expr[i] - expr2=expr2+expr[-1] - return expr2 -def markinnerspaces(line): - l='';f=0 - cc='\'' - cc1='"' - cb='' - for c in line: - if cb=='\\' and c in ['\\','\'','"']: - l=l+c; - cb=c - continue - if f==0 and c in ['\'','"']: cc=c; cc1={'\'':'"','"':'\''}[c] - if c==cc:f=f+1 - elif c==cc:f=f-1 - elif c==' ' and f==1: l=l+'@_@'; continue - l=l+c;cb=c - return l -def updatevars(typespec,selector,attrspec,entitydecl): - global groupcache,groupcounter - last_name = None - kindselect,charselect,typename=cracktypespec(typespec,selector) - if attrspec: - attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')] - l = [] - c = re.compile(r'(?P[a-zA-Z]+)') - for a in attrspec: - m = c.match(a) - if m: - s = m.group('start').lower() - a = s + a[len(s):] - l.append(a) - attrspec = l - el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')] - el1=[] - for e in el: - for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)),comma=' ').split('@ @')]: - if e1: el1.append(e1.replace('@_@',' ')) - for e in el1: - m=namepattern.match(e) - if not m: - outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(`e`)) - continue - ename=rmbadname1(m.group('name')) - edecl={} - if ename in groupcache[groupcounter]['vars']: - edecl=groupcache[groupcounter]['vars'][ename].copy() - not_has_typespec = 'typespec' not in edecl - if not_has_typespec: - edecl['typespec']=typespec - elif typespec and (not typespec==edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typespec'],typespec)) - if 'kindselector' not in edecl: - edecl['kindselector']=copy.copy(kindselect) - elif kindselect: - for k in kindselect.keys(): - if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k])) - else: edecl['kindselector'][k]=copy.copy(kindselect[k]) - if 'charselector' not in edecl and charselect: - if not_has_typespec: - edecl['charselector']=charselect - else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \ - %(ename,charselect)) - elif charselect: - for k in charselect.keys(): - if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k])) - else: edecl['charselector'][k]=copy.copy(charselect[k]) - if 'typename' not in edecl: - edecl['typename']=typename - elif typename and (not edecl['typename']==typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename,edecl['typename'],typename)) - if 'attrspec' not in edecl: - edecl['attrspec']=copy.copy(attrspec) - elif attrspec: - for a in attrspec: - if a not in edecl['attrspec']: - edecl['attrspec'].append(a) - else: - edecl['typespec']=copy.copy(typespec) - edecl['kindselector']=copy.copy(kindselect) - edecl['charselector']=copy.copy(charselect) - edecl['typename']=typename - edecl['attrspec']=copy.copy(attrspec) - if m.group('after'): - m1=lenarraypattern.match(markouterparen(m.group('after'))) - if m1: - d1=m1.groupdict() - for lk in ['len','array','init']: - if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2'] - for k in d1.keys(): - if d1[k] is not None: d1[k]=unmarkouterparen(d1[k]) - else: del d1[k] - if 'len' in d1 and 'array' in d1: - if d1['len']=='': - d1['len']=d1['array'] - del d1['array'] - else: - d1['array']=d1['array']+','+d1['len'] - del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec,e,typespec,ename,d1['array'])) - if 'array' in d1: - dm = 'dimension(%s)'%d1['array'] - if 'attrspec' not in edecl or (not edecl['attrspec']): - edecl['attrspec']=[dm] - else: - edecl['attrspec'].append(dm) - for dm1 in edecl['attrspec']: - if dm1[:9]=='dimension' and dm1!=dm: - del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \ - % (ename,dm1,dm)) - break - - if 'len' in d1: - if typespec in ['complex','integer','logical','real']: - if ('kindselector' not in edecl) or (not edecl['kindselector']): - edecl['kindselector']={} - edecl['kindselector']['*']=d1['len'] - elif typespec == 'character': - if ('charselector' not in edecl) or (not edecl['charselector']): - edecl['charselector']={} - if 'len' in edecl['charselector']: - del edecl['charselector']['len'] - edecl['charselector']['*']=d1['len'] - if 'init' in d1: - if '=' in edecl and (not edecl['=']==d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['='],d1['init'])) - else: - edecl['=']=d1['init'] - else: - outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n'%(ename+m.group('after'))) - for k in edecl.keys(): - if not edecl[k]: - del edecl[k] - groupcache[groupcounter]['vars'][ename]=edecl - if 'varnames' in groupcache[groupcounter]: - groupcache[groupcounter]['varnames'].append(ename) - last_name = ename - return last_name - -def cracktypespec(typespec,selector): - kindselect=None - charselect=None - typename=None - if selector: - if typespec in ['complex','integer','logical','real']: - kindselect=kindselector.match(selector) - if not kindselect: - outmess('cracktypespec: no kindselector pattern found for %s\n'%(`selector`)) - return - kindselect=kindselect.groupdict() - kindselect['*']=kindselect['kind2'] - del kindselect['kind2'] - for k in kindselect.keys(): - if not kindselect[k]: del kindselect[k] - for k,i in kindselect.items(): - kindselect[k] = rmbadname1(i) - elif typespec=='character': - charselect=charselector.match(selector) - if not charselect: - outmess('cracktypespec: no charselector pattern found for %s\n'%(`selector`)) - return - charselect=charselect.groupdict() - charselect['*']=charselect['charlen'] - del charselect['charlen'] - if charselect['lenkind']: - lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind'])) - lenkind=lenkind.groupdict() - for lk in ['len','kind']: - if lenkind[lk+'2']: - lenkind[lk]=lenkind[lk+'2'] - charselect[lk]=lenkind[lk] - del lenkind[lk+'2'] - del charselect['lenkind'] - for k in charselect.keys(): - if not charselect[k]: del charselect[k] - for k,i in charselect.items(): - charselect[k] = rmbadname1(i) - elif typespec=='type': - typename=re.match(r'\s*\(\s*(?P\w+)\s*\)',selector,re.I) - if typename: typename=typename.group('name') - else: outmess('cracktypespec: no typename found in %s\n'%(`typespec+selector`)) - else: - outmess('cracktypespec: no selector used for %s\n'%(`selector`)) - return kindselect,charselect,typename -###### -def setattrspec(decl,attr,force=0): - if not decl: - decl={} - if not attr: - return decl - if 'attrspec' not in decl: - decl['attrspec']=[attr] - return decl - if force: decl['attrspec'].append(attr) - if attr in decl['attrspec']: return decl - if attr=='static' and 'automatic' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr=='automatic' and 'static' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr=='public' and 'private' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr=='private' and 'public' not in decl['attrspec']: - decl['attrspec'].append(attr) - else: - decl['attrspec'].append(attr) - return decl - -def setkindselector(decl,sel,force=0): - if not decl: - decl={} - if not sel: - return decl - if 'kindselector' not in decl: - decl['kindselector']=sel - return decl - for k in sel.keys(): - if force or k not in decl['kindselector']: - decl['kindselector'][k]=sel[k] - return decl - -def setcharselector(decl,sel,force=0): - if not decl: - decl={} - if not sel: - return decl - if 'charselector' not in decl: - decl['charselector']=sel - return decl - for k in sel.keys(): - if force or k not in decl['charselector']: - decl['charselector'][k]=sel[k] - return decl - -def getblockname(block,unknown='unknown'): - if 'name' in block: - return block['name'] - return unknown - -###### post processing - -def setmesstext(block): - global filepositiontext - try: - filepositiontext='In: %s:%s\n'%(block['from'],block['name']) - except: - pass - -def get_usedict(block): - usedict = {} - if 'parent_block' in block: - usedict = get_usedict(block['parent_block']) - if 'use' in block: - 
usedict.update(block['use']) - return usedict - -def get_useparameters(block, param_map=None): - global f90modulevars - if param_map is None: - param_map = {} - usedict = get_usedict(block) - if not usedict: - return param_map - for usename,mapping in usedict.items(): - usename = usename.lower() - if usename not in f90modulevars: - continue - mvars = f90modulevars[usename] - params = get_parameters(mvars) - if not params: - continue - # XXX: apply mapping - if mapping: - errmess('get_useparameters: mapping for %s not impl.' % (mapping)) - for k,v in params.items(): - if k in param_map: - outmess('get_useparameters: overriding parameter %s with'\ - ' value from module %s' % (`k`,`usename`)) - param_map[k] = v - return param_map - -def postcrack2(block,tab='',param_map=None): - global f90modulevars - if not f90modulevars: - return block - if type(block)==types.ListType: - ret = [] - for g in block: - g = postcrack2(g,tab=tab+'\t',param_map=param_map) - ret.append(g) - return ret - setmesstext(block) - outmess('%sBlock: %s\n'%(tab,block['name']),0) - - if param_map is None: - param_map = get_useparameters(block) - - if param_map is not None and 'vars' in block: - vars = block['vars'] - for n in vars.keys(): - var = vars[n] - if 'kindselector' in var: - kind = var['kindselector'] - if 'kind' in kind: - val = kind['kind'] - if val in param_map: - kind['kind'] = param_map[val] - new_body = [] - for b in block['body']: - b = postcrack2(b,tab=tab+'\t',param_map=param_map) - new_body.append(b) - block['body'] = new_body - - return block - -def postcrack(block,args=None,tab=''): - """ - TODO: - function return values - determine expression types if in argument list - """ - global usermodules,onlyfunctions - if type(block)==types.ListType: - gret=[] - uret=[] - for g in block: - setmesstext(g) - g=postcrack(g,tab=tab+'\t') - if 'name' in g and '__user__' in g['name']: # sort user routines to appear first - uret.append(g) - else: - gret.append(g) - return uret+gret - setmesstext(block) - if (not type(block)==types.DictType) and 'block' not in block: - raise 'postcrack: Expected block dictionary instead of ',block - if 'name' in block and not block['name']=='unknown_interface': - outmess('%sBlock: %s\n'%(tab,block['name']),0) - blocktype=block['block'] - block=analyzeargs(block) - block=analyzecommon(block) - block['vars']=analyzevars(block) - block['sortvars']=sortvarnames(block['vars']) - if 'args' in block and block['args']: - args=block['args'] - block['body']=analyzebody(block,args,tab=tab) - - userisdefined=[] -## fromuser = [] - if 'use' in block: - useblock=block['use'] - for k in useblock.keys(): - if '__user__' in k: - userisdefined.append(k) -## if 'map' in useblock[k]: -## for n in useblock[k]['map'].values(): -## if n not in fromuser: fromuser.append(n) - else: useblock={} - name='' - if 'name' in block: - name=block['name'] - if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module - interfaced=[] - if 'interfaced' in block: - interfaced=block['interfaced'] - mvars=copy.copy(block['vars']) - if name: - mname=name+'__user__routines' - else: - mname='unknown__user__routines' - if mname in userisdefined: - i=1 - while '%s_%i'%(mname,i) in userisdefined: i=i+1 - mname='%s_%i'%(mname,i) - interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'} - for e in block['externals']: -## if e in fromuser: -## outmess(' Skipping %s that is defined explicitly in another use statement\n'%(`e`)) -## continue - if e in interfaced: - edef=[] - 
j=-1 - for b in block['body']: - j=j+1 - if b['block']=='interface': - i=-1 - for bb in b['body']: - i=i+1 - if 'name' in bb and bb['name']==e: - edef=copy.copy(bb) - del b['body'][i] - break - if edef: - if not b['body']: del block['body'][j] - del interfaced[interfaced.index(e)] - break - interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e]=mvars[e] - if interface['vars'] or interface['body']: - block['interfaced']=interfaced - mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']} - useblock[mname]={} - usermodules.append(mblock) - if useblock: - block['use']=useblock - return block - -def sortvarnames(vars): - indep = [] - dep = [] - for v in vars.keys(): - if 'depend' in vars[v] and vars[v]['depend']: - dep.append(v) - #print '%s depends on %s'%(v,vars[v]['depend']) - else: indep.append(v) - n = len(dep) - i = 0 - while dep: #XXX: How to catch dependence cycles correctly? - v = dep[0] - fl = 0 - for w in dep[1:]: - if w in vars[v]['depend']: - fl = 1 - break - if fl: - dep = dep[1:]+[v] - i = i + 1 - if i>n: - errmess('sortvarnames: failed to compute dependencies because' - ' of cyclic dependencies between ' - +', '.join(dep)+'\n') - indep = indep + dep - break - else: - indep.append(v) - dep = dep[1:] - n = len(dep) - i = 0 - #print indep - return indep - -def analyzecommon(block): - if not hascommon(block): return block - commonvars=[] - for k in block['common'].keys(): - comvars=[] - for e in block['common'][k]: - m=re.match(r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z',e,re.I) - if m: - dims=[] - if m.group('dims'): - dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')] - n=m.group('name').strip() - if n in block['vars']: - if 'attrspec' in block['vars'][n]: - block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims))) - else: - block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))] - else: - if dims: - block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]} - else: block['vars'][n]={} - if n not in commonvars: commonvars.append(n) - else: - n=e - errmess('analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n'%(e,k)) - comvars.append(n) - block['common'][k]=comvars - if 'commonvars' not in block: - block['commonvars']=commonvars - else: - block['commonvars']=block['commonvars']+commonvars - return block - -def analyzebody(block,args,tab=''): - global usermodules,skipfuncs,onlyfuncs,f90modulevars - setmesstext(block) - body=[] - for b in block['body']: - b['parent_block'] = block - if b['block'] in ['function','subroutine']: - if args is not None and b['name'] not in args: - continue - else: - as_=b['args'] - if b['name'] in skipfuncs: - continue - if onlyfuncs and b['name'] not in onlyfuncs: - continue - else: as_=args - b=postcrack(b,as_,tab=tab+'\t') - if b['block']=='interface' and not b['body']: - if 'f2pyenhancements' not in b: - continue - if b['block'].replace(' ','')=='pythonmodule': - usermodules.append(b) - else: - if b['block']=='module': - f90modulevars[b['name']] = b['vars'] - body.append(b) - return body - -def buildimplicitrules(block): - setmesstext(block) - implicitrules=defaultimplicitrules - attrrules={} - if 'implicit' in block: - if block['implicit'] is None: - implicitrules=None - if verbose>1: - outmess('buildimplicitrules: no implicit rules for routine %s.\n'%`block['name']`) - else: - for k in block['implicit'].keys(): - if block['implicit'][k].get('typespec') not in ['static','automatic']: - 
implicitrules[k]=block['implicit'][k] - else: - attrrules[k]=block['implicit'][k]['typespec'] - return implicitrules,attrrules - -def myeval(e,g=None,l=None): - r = eval(e,g,l) - if type(r) in [type(0),type(0.0)]: - return r - raise ValueError,'r=%r' % (r) - -getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I) -def getlincoef(e,xset): # e = a*x+b ; x in xset - try: - c = int(myeval(e,{},{})) - return 0,c,None - except: pass - if getlincoef_re_1.match(e): - return 1,0,e - len_e = len(e) - for x in xset: - if len(x)>len_e: continue - re_1 = re.compile(r'(?P.*?)\b'+x+r'\b(?P.*)',re.I) - m = re_1.match(e) - if m: - try: - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s'%(m1.group('before'),0,m1.group('after')) - m1 = re_1.match(ee) - b = myeval(ee,{},{}) - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s'%(m1.group('before'),1,m1.group('after')) - m1 = re_1.match(ee) - a = myeval(ee,{},{}) - b - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s'%(m1.group('before'),0.5,m1.group('after')) - m1 = re_1.match(ee) - c = myeval(ee,{},{}) - if (a*0.5+b==c): - return a,b,x - except: pass - break - return None,None,None - -_varname_match = re.compile(r'\A[a-z]\w*\Z').match -def getarrlen(dl,args,star='*'): - edl = [] - try: edl.append(myeval(dl[0],{},{})) - except: edl.append(dl[0]) - try: edl.append(myeval(dl[1],{},{})) - except: edl.append(dl[1]) - if type(edl[0]) is type(0): - p1 = 1-edl[0] - if p1==0: d = str(dl[1]) - elif p1<0: d = '%s-%s'%(dl[1],-p1) - else: d = '%s+%s'%(dl[1],p1) - elif type(edl[1]) is type(0): - p1 = 1+edl[1] - if p1==0: d='-(%s)' % (dl[0]) - else: d='%s-(%s)' % (p1,dl[0]) - else: d = '%s-(%s)+1'%(dl[1],dl[0]) - try: return `myeval(d,{},{})`,None,None - except: pass - d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args) - if None not in [d1[0],d2[0]]: - if (d1[0],d2[0])==(0,0): - return `d2[1]-d1[1]+1`,None,None - b = d2[1] - d1[1] + 1 - d1 = (d1[0],0,d1[2]) - d2 = (d2[0],b,d2[2]) - if d1[0]==0 and d2[2] in args: - if b<0: return '%s * %s - %s'%(d2[0],d2[2],-b),d2[2],'+%s)/(%s)'%(-b,d2[0]) - elif b: return '%s * %s + %s'%(d2[0],d2[2],b),d2[2],'-%s)/(%s)'%(b,d2[0]) - else: return '%s * %s'%(d2[0],d2[2]),d2[2],')/(%s)'%(d2[0]) - if d2[0]==0 and d1[2] in args: - - if b<0: return '%s * %s - %s'%(-d1[0],d1[2],-b),d1[2],'+%s)/(%s)'%(-b,-d1[0]) - elif b: return '%s * %s + %s'%(-d1[0],d1[2],b),d1[2],'-%s)/(%s)'%(b,-d1[0]) - else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0]) - if d1[2]==d2[2] and d1[2] in args: - a = d2[0] - d1[0] - if not a: return `b`,None,None - if b<0: return '%s * %s - %s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a) - elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a) - else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a) - if d1[0]==d2[0]==1: - c = str(d1[2]) - if c not in args: - if _varname_match(c): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) - c = '(%s)'%c - if b==0: d='%s-%s' % (d2[2],c) - elif b<0: d='%s-%s-%s' % (d2[2],c,-b) - else: d='%s-%s+%s' % (d2[2],c,b) - elif d1[0]==0: - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)'%c2 - if d2[0]==1: pass - elif d2[0]==-1: c2='-%s' %c2 - else: c2='%s*%s'%(d2[0],c2) - - if b==0: d=c2 - elif b<0: d='%s-%s' % (c2,-b) - else: d='%s+%s' % (c2,b) - elif d2[0]==0: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)'%c1 - if d1[0]==1: c1='-%s'%c1 - elif d1[0]==-1: c1='+%s'%c1 - elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1) - else: c1 = 
'-%s*%s' % (d1[0],c1) - - if b==0: d=c1 - elif b<0: d='%s-%s' % (c1,-b) - else: d='%s+%s' % (c1,b) - else: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)'%c1 - if d1[0]==1: c1='-%s'%c1 - elif d1[0]==-1: c1='+%s'%c1 - elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1) - else: c1 = '-%s*%s' % (d1[0],c1) - - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)'%c2 - if d2[0]==1: pass - elif d2[0]==-1: c2='-%s' %c2 - else: c2='%s*%s'%(d2[0],c2) - - if b==0: d='%s%s' % (c2,c1) - elif b<0: d='%s%s-%s' % (c2,c1,-b) - else: d='%s%s+%s' % (c2,c1,b) - return d,None,None - -word_pattern = re.compile(r'\b[a-z][\w$]*\b',re.I) - -def _get_depend_dict(name, vars, deps): - if name in vars: - words = vars[name].get('depend',[]) - - if '=' in vars[name] and not isstring(vars[name]): - for word in word_pattern.findall(vars[name]['=']): - if word not in words and word in vars: - words.append(word) - for word in words[:]: - for w in deps.get(word,[]) \ - or _get_depend_dict(word, vars, deps): - if w not in words: - words.append(w) - else: - outmess('_get_depend_dict: no dependence info for %s\n' % (`name`)) - words = [] - deps[name] = words - return words - -def _calc_depend_dict(vars): - names = vars.keys() - depend_dict = {} - for n in names: - _get_depend_dict(n, vars, depend_dict) - return depend_dict - -def get_sorted_names(vars): - """ - """ - depend_dict = _calc_depend_dict(vars) - names = [] - for name in depend_dict.keys(): - if not depend_dict[name]: - names.append(name) - del depend_dict[name] - while depend_dict: - for name, lst in depend_dict.items(): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - return [name for name in names if name in vars] - -def _kind_func(string): - #XXX: return something sensible. - if string[0] in "'\"": - string = string[1:-1] - if real16pattern.match(string): - return 16 - elif real8pattern.match(string): - return 8 - return 'kind('+string+')' - -def _selected_int_kind_func(r): - #XXX: This should be processor dependent - m = 10**r - if m<=2**8: return 1 - if m<=2**16: return 2 - if m<=2**32: return 4 - if m<=2**64: return 8 - if m<=2**128: return 16 - return -1 - -def get_parameters(vars, global_params={}): - params = copy.copy(global_params) - g_params = copy.copy(global_params) - for name,func in [('kind',_kind_func), - ('selected_int_kind',_selected_int_kind_func), - ]: - if name not in g_params: - g_params[name] = func - param_names = [] - for n in get_sorted_names(vars): - if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: - param_names.append(n) - kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)',re.I) - selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)',re.I) - for n in param_names: - if '=' in vars[n]: - v = vars[n]['='] - if islogical(vars[n]): - v = v.lower() - for repl in [ - ('.false.','False'), - ('.true.','True'), - #TODO: test .eq., .neq., etc replacements. 
- ]: - v = v.replace(*repl) - v = kind_re.sub(r'kind("\1")',v) - v = selected_int_kind_re.sub(r'selected_int_kind(\1)',v) - if isinteger(vars[n]) and not selected_int_kind_re.match(v): - v = v.split('_')[0] - if isdouble(vars[n]): - tt = list(v) - for m in real16pattern.finditer(v): - tt[m.start():m.end()] = list(\ - v[m.start():m.end()].lower().replace('d', 'e')) - v = ''.join(tt) - if iscomplex(vars[n]): - if v[0]=='(' and v[-1]==')': - l = markoutercomma(v[1:-1]).split('@,@') - print n,params - try: - params[n] = eval(v,g_params,params) - except Exception,msg: - params[n] = v - #print params - outmess('get_parameters: got "%s" on %s\n' % (msg,`v`)) - if isstring(vars[n]) and type(params[n]) is type(0): - params[n] = chr(params[n]) - nl = n.lower() - if nl!=n: - params[nl] = params[n] - else: - print vars[n] - outmess('get_parameters:parameter %s does not have value?!\n'%(`n`)) - return params - -def _eval_length(length,params): - if length in ['(:)','(*)','*']: - return '(*)' - return _eval_scalar(length,params) - -_is_kind_number = re.compile('\d+_').match - -def _eval_scalar(value,params): - if _is_kind_number(value): - value = value.split('_')[0] - try: - value = str(eval(value,{},params)) - except (NameError, SyntaxError): - return value - except Exception,msg: - errmess('"%s" in evaluating %r '\ - '(available names: %s)\n' \ - % (msg,value,params.keys())) - return value - -def analyzevars(block): - global f90modulevars - setmesstext(block) - implicitrules,attrrules=buildimplicitrules(block) - vars=copy.copy(block['vars']) - if block['block']=='function' and block['name'] not in vars: - vars[block['name']]={} - if '' in block['vars']: - del vars[''] - if 'attrspec' in block['vars']['']: - gen=block['vars']['']['attrspec'] - for n in vars.keys(): - for k in ['public','private']: - if k in gen: - vars[n]=setattrspec(vars[n],k) - svars=[] - args = block['args'] - for a in args: - try: - vars[a] - svars.append(a) - except KeyError: - pass - for n in vars.keys(): - if n not in args: svars.append(n) - - params = get_parameters(vars, get_useparameters(block)) - - dep_matches = {} - name_match = re.compile(r'\w[\w\d_$]*').match - for v in vars.keys(): - m = name_match(v) - if m: - n = v[m.start():m.end()] - try: - dep_matches[n] - except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match - for n in svars: - if n[0] in attrrules.keys(): - vars[n]=setattrspec(vars[n],attrrules[n[0]]) - if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): - if implicitrules: - ln0 = n[0].lower() - for k in implicitrules[ln0].keys(): - if k=='typespec' and implicitrules[ln0][k]=='undefined': - continue - if k not in vars[n]: - vars[n][k]=implicitrules[ln0][k] - elif k=='attrspec': - for l in implicitrules[ln0][k]: - vars[n]=setattrspec(vars[n],l) - elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(`n`,block['name'])) - - if 'charselector' in vars[n]: - if 'len' in vars[n]['charselector']: - l = vars[n]['charselector']['len'] - try: - l = str(eval(l,{},params)) - except: - pass - vars[n]['charselector']['len'] = l - - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - l = vars[n]['kindselector']['kind'] - try: - l = str(eval(l,{},params)) - except: - pass - vars[n]['kindselector']['kind'] = l - - savelindims = {} - if 'attrspec' in vars[n]: - attr=vars[n]['attrspec'] - attr.reverse() - vars[n]['attrspec']=[] - dim,intent,depend,check,note=None,None,None,None,None - for a 
in attr: - if a[:9]=='dimension': dim=(a[9:].strip())[1:-1] - elif a[:6]=='intent': intent=(a[6:].strip())[1:-1] - elif a[:6]=='depend': depend=(a[6:].strip())[1:-1] - elif a[:5]=='check': check=(a[5:].strip())[1:-1] - elif a[:4]=='note': note=(a[4:].strip())[1:-1] - else: vars[n]=setattrspec(vars[n],a) - if intent: - if 'intent' not in vars[n]: - vars[n]['intent']=[] - for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: - if not c in vars[n]['intent']: - vars[n]['intent'].append(c) - intent=None - if note: - note=note.replace('\\n\\n','\n\n') - note=note.replace('\\n ','\n') - if 'note' not in vars[n]: - vars[n]['note']=[note] - else: - vars[n]['note'].append(note) - note=None - if depend is not None: - if 'depend' not in vars[n]: - vars[n]['depend']=[] - for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): - if c not in vars[n]['depend']: - vars[n]['depend'].append(c) - depend=None - if check is not None: - if 'check' not in vars[n]: - vars[n]['check']=[] - for c in [x.strip() for x in markoutercomma(check).split('@,@')]: - if not c in vars[n]['check']: - vars[n]['check'].append(c) - check=None - if dim and 'dimension' not in vars[n]: - vars[n]['dimension']=[] - for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): - star = '*' - if d==':': - star=':' - if d in params: - d = str(params[d]) - for p in params.keys(): - m = re.match(r'(?P.*?)\b'+p+r'\b(?P.*)',d,re.I) - if m: - #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`)) - d = m.group('before')+str(params[p])+m.group('after') - if d==star: - dl = [star] - else: - dl=markoutercomma(d,':').split('@:@') - if len(dl)==2 and '*' in dl: # e.g. dimension(5:*) - dl = ['*'] - d = '*' - if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]] - if len(dl)==2: - d,v,di = getarrlen(dl,block['vars'].keys()) - if d[:4] == '1 * ': d = d[4:] - if di and di[-4:] == '/(1)': di = di[:-4] - if v: savelindims[d] = v,di - vars[n]['dimension'].append(d) - if 'dimension' in vars[n]: - if isintent_c(vars[n]): - shape_macro = 'shape' - else: - shape_macro = 'shape'#'fshape' - if isstringarray(vars[n]): - if 'charselector' in vars[n]: - d = vars[n]['charselector'] - if '*' in d: - d = d['*'] - errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\ - %(d,n, - ','.join(vars[n]['dimension']), - n,','.join(vars[n]['dimension']+[d]))) - vars[n]['dimension'].append(d) - del vars[n]['charselector'] - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - if 'c' not in vars[n]['intent']: - vars[n]['intent'].append('c') - else: - errmess("analyzevars: charselector=%r unhandled." 
% (d)) - if 'check' not in vars[n] and 'args' in block and n in block['args']: - flag = 'depend' not in vars[n] - if flag: - vars[n]['depend']=[] - vars[n]['check']=[] - if 'dimension' in vars[n]: - #/----< no check - #vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension']))) - i=-1; ni=len(vars[n]['dimension']) - for d in vars[n]['dimension']: - ddeps=[] # dependecies of 'd' - ad='' - pd='' - #origd = d - if d not in vars: - if d in savelindims: - pd,ad='(',savelindims[d][1] - d = savelindims[d][0] - else: - for r in block['args']: - #for r in block['vars'].keys(): - if r not in vars: - continue - if re.match(r'.*?\b'+r+r'\b',d,re.I): - ddeps.append(r) - if d in vars: - if 'attrspec' in vars[d]: - for aa in vars[d]['attrspec']: - if aa[:6]=='depend': - ddeps += aa[6:].strip()[1:-1].split(',') - if 'depend' in vars[d]: - ddeps=ddeps+vars[d]['depend'] - i=i+1 - if d in vars and ('depend' not in vars[d]) \ - and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ - and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): - vars[d]['depend']=[n] - if ni>1: - vars[d]['=']='%s%s(%s,%s)%s'% (pd,shape_macro,n,i,ad) - else: - vars[d]['=']='%slen(%s)%s'% (pd,n,ad) - # /---< no check - if 1 and 'check' not in vars[d]: - if ni>1: - vars[d]['check']=['%s%s(%s,%i)%s==%s'\ - %(pd,shape_macro,n,i,ad,d)] - else: - vars[d]['check']=['%slen(%s)%s>=%s'%(pd,n,ad,d)] - if 'attrspec' not in vars[d]: - vars[d]['attrspec']=['optional'] - if ('optional' not in vars[d]['attrspec']) and\ - ('required' not in vars[d]['attrspec']): - vars[d]['attrspec'].append('optional') - elif d not in ['*',':']: - #/----< no check - #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d)) - #else: vars[n]['check'].append('len(%s)>=%s'%(n,d)) - if flag: - if d in vars: - if n not in ddeps: - vars[n]['depend'].append(d) - else: - vars[n]['depend'] = vars[n]['depend'] + ddeps - elif isstring(vars[n]): - length='1' - if 'charselector' in vars[n]: - if '*' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['*'], - params) - vars[n]['charselector']['*']=length - elif 'len' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['len'], - params) - del vars[n]['charselector']['len'] - vars[n]['charselector']['*']=length - - if not vars[n]['check']: - del vars[n]['check'] - if flag and not vars[n]['depend']: - del vars[n]['depend'] - if '=' in vars[n]: - if 'attrspec' not in vars[n]: - vars[n]['attrspec']=[] - if ('optional' not in vars[n]['attrspec']) and \ - ('required' not in vars[n]['attrspec']): - vars[n]['attrspec'].append('optional') - if 'depend' not in vars[n]: - vars[n]['depend']=[] - for v,m in dep_matches.items(): - if m(vars[n]['=']): vars[n]['depend'].append(v) - if not vars[n]['depend']: del vars[n]['depend'] - if isscalar(vars[n]): - vars[n]['='] = _eval_scalar(vars[n]['='],params) - - for n in vars.keys(): - if n==block['name']: # n is block name - if 'note' in vars[n]: - block['note']=vars[n]['note'] - if block['block']=='function': - if 'result' in block and block['result'] in vars: - vars[n]=appenddecl(vars[n],vars[block['result']]) - if 'prefix' in block: - pr=block['prefix']; ispure=0; isrec=1 - pr1=pr.replace('pure','') - ispure=(not pr==pr1) - pr=pr1.replace('recursive','') - isrec=(not pr==pr1) - m=typespattern[0].match(pr) - if m: - typespec,selector,attr,edecl=cracktypespec0(m.group('this'),m.group('after')) - kindselect,charselect,typename=cracktypespec(typespec,selector) - vars[n]['typespec']=typespec - if kindselect: - if 'kind' in 
kindselect: - try: - kindselect['kind'] = eval(kindselect['kind'],{},params) - except: - pass - vars[n]['kindselector']=kindselect - if charselect: vars[n]['charselector']=charselect - if typename: vars[n]['typename']=typename - if ispure: vars[n]=setattrspec(vars[n],'pure') - if isrec: vars[n]=setattrspec(vars[n],'recursive') - else: - outmess('analyzevars: prefix (%s) were not used\n'%`block['prefix']`) - if not block['block'] in ['module','pythonmodule','python module','block data']: - if 'commonvars' in block: - neededvars=copy.copy(block['args']+block['commonvars']) - else: - neededvars=copy.copy(block['args']) - for n in vars.keys(): - if l_or(isintent_callback,isintent_aux)(vars[n]): - neededvars.append(n) - if 'entry' in block: - neededvars.extend(block['entry'].keys()) - for k in block['entry'].keys(): - for n in block['entry'][k]: - if n not in neededvars: - neededvars.append(n) - if block['block']=='function': - if 'result' in block: - neededvars.append(block['result']) - else: - neededvars.append(block['name']) - if block['block'] in ['subroutine','function']: - name = block['name'] - if name in vars and 'intent' in vars[name]: - block['intent'] = vars[name]['intent'] - if block['block'] == 'type': - neededvars.extend(vars.keys()) - for n in vars.keys(): - if n not in neededvars: - del vars[n] - return vars - -analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z',re.I) -def analyzeargs(block): - setmesstext(block) - implicitrules,attrrules=buildimplicitrules(block) - if 'args' not in block: - block['args']=[] - args=[] - re_1 = analyzeargs_re_1 - for a in block['args']: - if not re_1.match(a): # `a` is an expression - at=determineexprtype(a,block['vars'],implicitrules) - na='e_' - for c in a: - if c not in string.lowercase+string.digits: c='_' - na=na+c - if na[-1]=='_': na=na+'e' - else: na=na+'_e' - a=na - while a in block['vars'] or a in block['args']: - a=a+'r' - block['vars'][a]=at - args.append(a) - if a not in block['vars']: - block['vars'][a]={} - if 'externals' in block and a in block['externals']+block['interfaced']: - block['vars'][a]=setattrspec(block['vars'][a],'external') - block['args']=args - - if 'entry' in block: - for k,args1 in block['entry'].items(): - for a in args1: - if a not in block['vars']: - block['vars'][a]={} - - for b in block['body']: - if b['name'] in args: - if 'externals' not in block: - block['externals']=[] - if b['name'] not in block['externals']: - block['externals'].append(b['name']) - if 'result' in block and block['result'] not in block['vars']: - block['vars'][block['result']]={} - return block - -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z',re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P[\w]+)|)\Z',re.I) -determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P[\w]+)|)\Z',re.I) -determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I) -determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z',re.I) -def _ensure_exprdict(r): - if type(r) is type(0): - return {'typespec':'integer'} - if type(r) is type(0.0): - return {'typespec':'real'} - if type(r) is type(0j): - return {'typespec':'complex'} - assert type(r) is type({}),`r` - return r - -def determineexprtype(expr,vars,rules={}): - if expr in vars: - return _ensure_exprdict(vars[expr]) - expr=expr.strip() - if determineexprtype_re_1.match(expr): - return {'typespec':'complex'} - m=determineexprtype_re_2.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`) 
- return {'typespec':'integer'} - m = determineexprtype_re_3.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`) - return {'typespec':'real'} - for op in ['+','-','*','/']: - for e in [x.strip() for x in markoutercomma(expr,comma=op).split('@'+op+'@')]: - if e in vars: - return _ensure_exprdict(vars[e]) - t={} - if determineexprtype_re_4.match(expr): # in parenthesis - t=determineexprtype(expr[1:-1],vars,rules) - else: - m = determineexprtype_re_5.match(expr) - if m: - rn=m.group('name') - t=determineexprtype(m.group('name'),vars,rules) - if t and 'attrspec' in t: - del t['attrspec'] - if not t: - if rn[0] in rules: - return _ensure_exprdict(rules[rn[0]]) - if expr[0] in '\'"': - return {'typespec':'character','charselector':{'*':'*'}} - if not t: - outmess('determineexprtype: could not determine expressions (%s) type.\n'%(`expr`)) - return t - -###### -def crack2fortrangen(block,tab='\n'): - global skipfuncs, onlyfuncs - setmesstext(block) - ret='' - if type(block) is type([]): - for g in block: - if g['block'] in ['function','subroutine']: - if g['name'] in skipfuncs: - continue - if onlyfuncs and g['name'] not in onlyfuncs: - continue - ret=ret+crack2fortrangen(g,tab) - return ret - prefix='' - name='' - args='' - blocktype=block['block'] - if blocktype=='program': return '' - al=[] - if 'name' in block: - name=block['name'] - if 'args' in block: - vars = block['vars'] - al = [a for a in block['args'] if not isintent_callback(vars[a])] - if block['block']=='function' or al: - args='(%s)'%','.join(al) - f2pyenhancements = '' - if 'f2pyenhancements' in block: - for k in block['f2pyenhancements'].keys(): - f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k]) - intent_lst = block.get('intent',[])[:] - if blocktype=='function' and 'callback' in intent_lst: - intent_lst.remove('callback') - if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s'%\ - (f2pyenhancements,tab+tabchar, - ','.join(intent_lst),name) - use='' - if 'use' in block: - use=use2fortran(block['use'],tab+tabchar) - common='' - if 'common' in block: - common=common2fortran(block['common'],tab+tabchar) - if name=='unknown_interface': name='' - result='' - if 'result' in block: - result=' result (%s)'%block['result'] - if block['result'] not in al: - al.append(block['result']) - #if 'prefix' in block: - # prefix=block['prefix']+' ' - body=crack2fortrangen(block['body'],tab+tabchar) - vars=vars2fortran(block,block['vars'],al,tab+tabchar) - mess='' - if 'from' in block: - mess='! 
in %s'%block['from'] - if 'entry' in block: - entry_stmts = '' - for k,i in block['entry'].items(): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts,tab+tabchar,k,','.join(i)) - body = body + entry_stmts - if blocktype=='block data' and name=='_BLOCK_DATA_': - name = '' - ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab,prefix,blocktype,name,args,result,mess,f2pyenhancements,use,vars,common,body,tab,blocktype,name) - return ret - -def common2fortran(common,tab=''): - ret='' - for k in common.keys(): - if k=='_BLNK_': - ret='%s%scommon %s'%(ret,tab,','.join(common[k])) - else: - ret='%s%scommon /%s/ %s'%(ret,tab,k,','.join(common[k])) - return ret - -def use2fortran(use,tab=''): - ret='' - for m in use.keys(): - ret='%s%suse %s,'%(ret,tab,m) - if use[m]=={}: - if ret and ret[-1]==',': ret=ret[:-1] - continue - if 'only' in use[m] and use[m]['only']: - ret='%s,only:'%(ret) - if 'map' in use[m] and use[m]['map']: - c=' ' - for k in use[m]['map'].keys(): - if k==use[m]['map'][k]: - ret='%s%s%s'%(ret,c,k); c=',' - else: - ret='%s%s%s=>%s'%(ret,c,k,use[m]['map'][k]); c=',' - if ret and ret[-1]==',': ret=ret[:-1] - return ret - -def true_intent_list(var): - lst = var['intent'] - ret = [] - for intent in lst: - try: - exec('c = isintent_%s(var)' % intent) - except NameError: - c = 0 - if c: - ret.append(intent) - return ret - -def vars2fortran(block,vars,args,tab=''): - """ - TODO: - public sub - ... - """ - setmesstext(block) - ret='' - nout=[] - for a in args: - if a in block['vars']: - nout.append(a) - if 'commonvars' in block: - for a in block['commonvars']: - if a in vars: - if a not in nout: - nout.append(a) - else: - errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a) - if 'varnames' in block: - nout.extend(block['varnames']) - for a in vars.keys(): - if a not in nout: - nout.append(a) - for a in nout: - if 'depend' in vars[a]: - for d in vars[a]['depend']: - if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: - errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a,d)) - if 'externals' in block and a in block['externals']: - if isintent_callback(vars[a]): - ret='%s%sintent(callback) %s'%(ret,tab,a) - ret='%s%sexternal %s'%(ret,tab,a) - if isoptional(vars[a]): - ret='%s%soptional %s'%(ret,tab,a) - if a in vars and 'typespec' not in vars[a]: - continue - cont=1 - for b in block['body']: - if a==b['name'] and b['block']=='function': - cont=0;break - if cont: - continue - if a not in vars: - show(vars) - outmess('vars2fortran: No definition for argument "%s".\n'%a) - continue - if a==block['name'] and not block['block']=='function': - continue - if 'typespec' not in vars[a]: - if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: - if a in args: - ret='%s%sexternal %s'%(ret,tab,a) - continue - show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n'%a) - continue - vardef=vars[a]['typespec'] - if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef,vars[a]['typename']) - selector={} - if 'kindselector' in vars[a]: - selector=vars[a]['kindselector'] - elif 'charselector' in vars[a]: - selector=vars[a]['charselector'] - if '*' in selector: - if selector['*'] in ['*',':']: - vardef='%s*(%s)'%(vardef,selector['*']) - else: - vardef='%s*%s'%(vardef,selector['*']) - else: - if 'len' in selector: - vardef='%s(len=%s'%(vardef,selector['len']) - if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef,selector['kind']) - else: - vardef='%s)'%(vardef) - elif 'kind' in selector: - 
vardef='%s(kind=%s)'%(vardef,selector['kind']) - c=' ' - if 'attrspec' in vars[a]: - attr=[] - for l in vars[a]['attrspec']: - if l not in ['external']: - attr.append(l) - if attr: - vardef='%s %s'%(vardef,','.join(attr)) - c=',' - if 'dimension' in vars[a]: -# if not isintent_c(vars[a]): -# vars[a]['dimension'].reverse() - vardef='%s%sdimension(%s)'%(vardef,c,','.join(vars[a]['dimension'])) - c=',' - if 'intent' in vars[a]: - lst = true_intent_list(vars[a]) - if lst: - vardef='%s%sintent(%s)'%(vardef,c,','.join(lst)) - c=',' - if 'check' in vars[a]: - vardef='%s%scheck(%s)'%(vardef,c,','.join(vars[a]['check'])) - c=',' - if 'depend' in vars[a]: - vardef='%s%sdepend(%s)'%(vardef,c,','.join(vars[a]['depend'])) - c=',' - if '=' in vars[a]: - v = vars[a]['='] - if vars[a]['typespec'] in ['complex','double complex']: - try: - v = eval(v) - v = '(%s,%s)' % (v.real,v.imag) - except: - pass - vardef='%s :: %s=%s'%(vardef,a,v) - else: - vardef='%s :: %s'%(vardef,a) - ret='%s%s%s'%(ret,tab,vardef) - return ret -###### - -def crackfortran(files): - global usermodules - outmess('Reading fortran codes...\n',0) - readfortrancode(files,crackline) - outmess('Post-processing...\n',0) - usermodules=[] - postlist=postcrack(grouplist[0]) - outmess('Post-processing (stage 2)...\n',0) - postlist=postcrack2(postlist) - return usermodules+postlist - -def crack2fortran(block): - global f2py_version - pyf=crack2fortrangen(block)+'\n' - header="""! -*- f90 -*- -! Note: the context of this file is case sensitive. -""" - footer=""" -! This file was auto-generated with f2py (version:%s). -! See http://cens.ioc.ee/projects/f2py2e/ -"""%(f2py_version) - return header+pyf+footer - -if __name__ == "__main__": - files=[] - funcs=[] - f=1;f2=0;f3=0 - showblocklist=0 - for l in sys.argv[1:]: - if l=='': pass - elif l[0]==':': - f=0 - elif l=='-quiet': - quiet=1 - verbose=0 - elif l=='-verbose': - verbose=2 - quiet=0 - elif l=='-fix': - if strictf77: - outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n',0) - skipemptyends=1 - sourcecodeform='fix' - elif l=='-skipemptyends': - skipemptyends=1 - elif l=='--ignore-contains': - ignorecontains=1 - elif l=='-f77': - strictf77=1 - sourcecodeform='fix' - elif l=='-f90': - strictf77=0 - sourcecodeform='free' - skipemptyends=1 - elif l=='-h': - f2=1 - elif l=='-show': - showblocklist=1 - elif l=='-m': - f3=1 - elif l[0]=='-': - errmess('Unknown option %s\n'%`l`) - elif f2: - f2=0 - pyffilename=l - elif f3: - f3=0 - f77modulename=l - elif f: - try: - open(l).close() - files.append(l) - except IOError,detail: - errmess('IOError: %s\n'%str(detail)) - else: - funcs.append(l) - if not strictf77 and f77modulename and not skipemptyends: - outmess("""\ - Warning: You have specifyied module name for non Fortran 77 code - that should not need one (expect if you are scanning F90 code - for non module blocks but then you should use flag -skipemptyends - and also be sure that the files do not contain programs without program statement). 
-""",0) - - postlist=crackfortran(files,funcs) - if pyffilename: - outmess('Writing fortran code to file %s\n'%`pyffilename`,0) - pyf=crack2fortran(postlist) - f=open(pyffilename,'w') - f.write(pyf) - f.close() - if showblocklist: - show(postlist) diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py deleted file mode 100644 index c270c597c..000000000 --- a/numpy/f2py/diagnose.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env python - -import os,sys,tempfile - -def run_command(cmd): - print 'Running %r:' % (cmd) - s = os.system(cmd) - print '------' -def run(): - _path = os.getcwd() - os.chdir(tempfile.gettempdir()) - print '------' - print 'os.name=%r' % (os.name) - print '------' - print 'sys.platform=%r' % (sys.platform) - print '------' - print 'sys.version:' - print sys.version - print '------' - print 'sys.prefix:' - print sys.prefix - print '------' - print 'sys.path=%r' % (':'.join(sys.path)) - print '------' - try: - import Numeric - has_Numeric = 1 - except ImportError: - print 'Failed to import Numeric:',sys.exc_value - has_Numeric = 0 - try: - import numarray - has_numarray = 1 - except ImportError: - print 'Failed to import numarray:',sys.exc_value - has_numarray = 0 - try: - import numpy - has_newnumpy = 1 - except ImportError: - print 'Failed to import new numpy:', sys.exc_value - has_newnumpy = 0 - try: - import f2py2e - has_f2py2e = 1 - except ImportError: - print 'Failed to import f2py2e:',sys.exc_value - has_f2py2e = 0 - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError: - print 'Failed to import numpy_distutils:',sys.exc_value - has_numpy_distutils = 0 - if has_Numeric: - try: - print 'Found Numeric version %r in %s' % \ - (Numeric.__version__,Numeric.__file__) - except Exception,msg: - print 'error:',msg - print '------' - if has_numarray: - try: - print 'Found numarray version %r in %s' % \ - (numarray.__version__,numarray.__file__) - except Exception,msg: - print 'error:',msg - print '------' - if has_newnumpy: - try: - print 'Found new numpy version %r in %s' % \ - (numpy.__version__, numpy.__file__) - except Exception,msg: - print 'error:', msg - print '------' - if has_f2py2e: - try: - print 'Found f2py2e version %r in %s' % \ - (f2py2e.__version__.version,f2py2e.__file__) - except Exception,msg: - print 'error:',msg - print '------' - if has_numpy_distutils: - try: - if has_numpy_distutils==2: - print 'Found numpy.distutils version %r in %r' % (\ - numpy.distutils.__version__, - numpy.distutils.__file__) - else: - print 'Found numpy_distutils version %r in %r' % (\ - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__) - print '------' - except Exception,msg: - print 'error:',msg - print '------' - try: - if has_numpy_distutils==1: - print 'Importing numpy_distutils.command.build_flib ...', - import numpy_distutils.command.build_flib as build_flib - print 'ok' - print '------' - try: - print 'Checking availability of supported Fortran compilers:' - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print '------' - except Exception,msg: - print 'error:',msg - print '------' - except Exception,msg: - print 'error:',msg,'(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)' - print '------' - try: - if has_numpy_distutils==2: - print 'Importing numpy.distutils.fcompiler ...', - import numpy.distutils.fcompiler as fcompiler - else: - print 'Importing 
numpy_distutils.fcompiler ...', - import numpy_distutils.fcompiler as fcompiler - print 'ok' - print '------' - try: - print 'Checking availability of supported Fortran compilers:' - fcompiler.show_fcompilers() - print '------' - except Exception,msg: - print 'error:',msg - print '------' - except Exception,msg: - print 'error:',msg - print '------' - try: - if has_numpy_distutils==2: - print 'Importing numpy.distutils.cpuinfo ...', - from numpy.distutils.cpuinfo import cpuinfo - print 'ok' - print '------' - else: - try: - print 'Importing numpy_distutils.command.cpuinfo ...', - from numpy_distutils.command.cpuinfo import cpuinfo - print 'ok' - print '------' - except Exception,msg: - print 'error:',msg,'(ignore it)' - print 'Importing numpy_distutils.cpuinfo ...', - from numpy_distutils.cpuinfo import cpuinfo - print 'ok' - print '------' - cpu = cpuinfo() - print 'CPU information:', - for name in dir(cpuinfo): - if name[0]=='_' and name[1]!='_' and getattr(cpu,name[1:])(): - print name[1:], - print '------' - except Exception,msg: - print 'error:',msg - print '------' - os.chdir(_path) -if __name__ == "__main__": - run() diff --git a/numpy/f2py/doc/Makefile b/numpy/f2py/doc/Makefile deleted file mode 100644 index 2f241da0a..000000000 --- a/numpy/f2py/doc/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -# Makefile for compiling f2py2e documentation (dvi, ps, html) -# Pearu Peterson - -REL=4 -TOP = usersguide -LATEXSRC = bugs.tex commands.tex f2py2e.tex intro.tex notes.tex signaturefile.tex -MAINLATEX = f2py2e - -LATEX = latex -PDFLATEX = pdflatex - -COLLECTINPUT = ./collectinput.py -INSTALLDATA = install -m 644 -c - -TTH = tth -TTHFILTER = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -L$(MAINLATEX) -i -TTHFILTER2 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lpython9 -i -TTHFILTER3 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lfortranobject -i -TTHMISSING = "\ -***************************************************************\n\ -Warning: Could not find tth (a TeX to HTML translator) \n\ - or an error arised was by tth\n\ -You can download tth from http://hutchinson.belmont.ma.us/tth/ \n\ -or\n\ -use your favorite LaTeX to HTML translator on file tmp_main.tex\n\ -***************************************************************\ -" - -all: dvi ps html clean -$(MAINLATEX).dvi: $(LATEXSRC) - $(LATEX) $(MAINLATEX).tex - $(LATEX) $(MAINLATEX).tex - $(LATEX) $(MAINLATEX).tex - $(PDFLATEX) $(MAINLATEX).tex -$(TOP).dvi: $(MAINLATEX).dvi - cp -f $(MAINLATEX).dvi $(TOP).dvi - mv -f $(MAINLATEX).pdf $(TOP).pdf -$(TOP).ps: $(TOP).dvi - dvips $(TOP).dvi -o -$(TOP).html: $(LATEXSRC) - $(COLLECTINPUT) < $(MAINLATEX).tex > tmp_$(MAINLATEX).tex - @test `which $(TTH)` && cat tmp_$(MAINLATEX).tex | $(TTHFILTER) > $(TOP).html\ - || echo -e $(TTHMISSING) -dvi: $(TOP).dvi -ps: $(TOP).ps - gzip -f $(TOP).ps -html: $(TOP).html - -python9: - cp -f python9.tex f2python9-final/src/ - cd f2python9-final && mk_html.sh - cd f2python9-final && mk_ps.sh - cd f2python9-final && mk_pdf.sh -pyfobj: - $(LATEX) fortranobject.tex - $(LATEX) fortranobject.tex - $(LATEX) fortranobject.tex - @test `which $(TTH)` && cat fortranobject.tex | $(TTHFILTER3) > pyfobj.html\ - || echo -e $(TTHMISSING) - dvips fortranobject.dvi -o pyfobj.ps - gzip -f pyfobj.ps - pdflatex fortranobject.tex - mv fortranobject.pdf pyfobj.pdf - -WWWDIR=/net/cens/home/www/unsecure/projects/f2py2e/ -wwwpage: all - $(INSTALLDATA) index.html $(TOP).html $(TOP).ps.gz $(TOP).dvi $(TOP).pdf \ - 
Release-$(REL).x.txt ../NEWS.txt win32_notes.txt $(WWWDIR) - $(INSTALLDATA) pyfobj.{ps.gz,pdf,html} $(WWWDIR) - $(INSTALLDATA) f2python9-final/f2python9.{ps.gz,pdf,html} f2python9-final/{flow,structure,aerostructure}.jpg $(WWWDIR) -clean: - rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc} -distclean: - rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc} - rm -f $(TOP).{ps,dvi,html,pdf,ps.gz} - rm -f *~ diff --git a/numpy/f2py/doc/Release-1.x.txt b/numpy/f2py/doc/Release-1.x.txt deleted file mode 100644 index 46d6fbf09..000000000 --- a/numpy/f2py/doc/Release-1.x.txt +++ /dev/null @@ -1,27 +0,0 @@ - -I am pleased to announce the first public release of f2py 1.116: - -Writing Python C/API wrappers for Fortran routines can be a very -tedious task, especially if a Fortran routine takes more than 20 -arguments but only few of them are relevant for the problems that they -solve. - -The Fortran to Python Interface Generator, or FPIG for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77 routines, accessing common blocks from Python, and -calling Python functions from Fortran (call-backs). - -The tool can be downloaded from - - http://cens.ioc.ee/projects/f2py2e/ - -where you can find also information about f2py features and its User's -Guide. - -f2py is released under the LGPL license. - -With regards, - Pearu Peterson - -
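To make the workflow described in the announcement above concrete, here is a minimal, hypothetical sketch of how such a generated module would be used from Python; the module name `hello`, the routine `foo`, and the exact command-line spelling are illustrative assumptions, not taken from this release:

    # Build step (assumed spelling; see the User's Guide of that release):
    #   f2py -m hello hello.f
    # then, from Python:
    import hello                    # the generated Python C/API extension module
    print hello.foo.__doc__         # f2py attaches an auto-generated docstring
    print hello.foo(3)              # call the wrapped Fortran 77 routine

The same generate-compile-import pattern is what the later releases below refine.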

f2py 1.116 - The -Fortran to Python Interface Generator (25-Jan-00) diff --git a/numpy/f2py/doc/Release-2.x.txt b/numpy/f2py/doc/Release-2.x.txt deleted file mode 100644 index 807eb0ca8..000000000 --- a/numpy/f2py/doc/Release-2.x.txt +++ /dev/null @@ -1,77 +0,0 @@ - -FPIG - Fortran to Python Interface Generator - -I am pleased to announce the second public release of f2py -(version 2.264): - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is a command line tool for binding Python and Fortran codes. It -scans Fortran 77/90/95 codes and generates a Python C/API module that -makes it possible to call Fortran routines from Python. No Fortran or -C expertise is required for using this tool. - -Features include: - - *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision, - complex[ | *8 | *16 | *32 ] - - *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: - | : | * | : - - *** Supported attributes: - intent([ in | inout | out | hide | in,out | inout,out ]) - dimension() - depend([]) - check([]) - note() - optional, required, external - - *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module routines. Internal initialization of - optional arguments. - - *** Accessing COMMON blocks from Python. Accessing Fortran 90/95 - module data coming soon. - - *** Call-back functions: calling Python functions from Fortran with - very flexible hooks. - - *** In Python, arguments of the interfaced functions may be of - different type - necessary type conversations are done - internally in C level. - - *** Automatically generates documentation (__doc__,LaTeX) for - interface functions. - - *** Automatically generates signature files --- user has full - control over the interface constructions. Automatically - detects the signatures of call-back functions, solves argument - dependencies, etc. - - *** Automatically generates Makefile for compiling Fortran and C - codes and linking them to a shared module. Many compilers are - supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft - F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha - Linux, HP-UX, IRIX64. - - *** Complete User's Guide in various formats (html,ps,pdf,dvi). - - *** f2py users list is available for support, feedback, etc. - -More information about f2py, see - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is released under the LGPL license. - -Sincerely, - Pearu Peterson - September 12, 2000 - -
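As a rough Python-side illustration of the attributes listed above (hidden arguments dropped from the call, intent(out) arguments returned, type conversions done at the C level), consider the following hypothetical session; the module `flib`, the routine `solve`, and its signature are invented for illustration only:

    import flib                       # hypothetical f2py-generated module
    # Assume the wrapped Fortran routine was declared with
    #   intent(hide) n, intent(out) y, dimension(n) x, y
    # so only x remains a required argument on the Python side:
    x = [1.0, 2.0, 3.0, 4.0]          # a plain Python list is accepted;
    y = flib.solve(x)                 # conversion to the Fortran types happens in C
    print y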

f2py 2.264 - The -Fortran to Python Interface Generator (12-Sep-00) diff --git a/numpy/f2py/doc/Release-3.x.txt b/numpy/f2py/doc/Release-3.x.txt deleted file mode 100644 index 940771015..000000000 --- a/numpy/f2py/doc/Release-3.x.txt +++ /dev/null @@ -1,87 +0,0 @@ - -F2PY - Fortran to Python Interface Generator - -I am pleased to announce the third public release of f2py -(version 2.3.321): - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is a command line tool for binding Python and Fortran codes. It -scans Fortran 77/90/95 codes and generates a Python C/API module that -makes it possible to call Fortran subroutines from Python. No Fortran or -C expertise is required for using this tool. - -Features include: - - *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision, - complex[ | *8 | *16 | *32 ] - - *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: - | : | * | : - - *** Supported attributes and statements: - intent([ in | inout | out | hide | in,out | inout,out ]) - dimension() - depend([]) - check([]) - note() - optional, required, external -NEW: intent(c), threadsafe, fortranname - - *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module subroutines are supported. Internal - initialization of optional arguments. - - *** Accessing COMMON blocks from Python. -NEW: Accessing Fortran 90/95 module data. - - *** Call-back functions: calling Python functions from Fortran with - very flexible hooks. - - *** In Python, arguments of the interfaced functions may be of - different type - necessary type conversations are done - internally in C level. - - *** Automatically generates documentation (__doc__,LaTeX) for - interfaced functions. - - *** Automatically generates signature files --- user has full - control over the interface constructions. Automatically - detects the signatures of call-back functions, solves argument - dependencies, etc. - -NEW: * Automatically generates setup_.py for building - extension modules using tools from distutils and - fortran_support module (SciPy). - - *** Automatically generates Makefile for compiling Fortran and C - codes and linking them to a shared module. Many compilers are - supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft - F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha - Linux, HP-UX, IRIX64. - - *** Complete User's Guide in various formats (html,ps,pdf,dvi). - - *** f2py users list is available for support, feedback, etc. - -NEW: * Installation with distutils. - - *** And finally, many bugs are fixed. - -More information about f2py, see - - http://cens.ioc.ee/projects/f2py2e/ - -LICENSE: - f2py is released under the LGPL. - -Sincerely, - Pearu Peterson - December 4, 2001 - -
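The items above about accessing COMMON blocks and Fortran 90/95 module data can be sketched from the Python side roughly as follows; `flib`, the block name `params`, and the member `tol` are invented names, and the exact attribute layout is an assumption based on the behaviour described in these notes:

    import flib                       # hypothetical f2py-generated module
    # A COMMON block (or an F90/95 module) named `params` shows up as an
    # attribute of the extension module; its members can be read and assigned:
    print flib.params.tol
    flib.params.tol = 1e-8            # the new value is seen by the Fortran code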

f2py 2.3.321 - The -Fortran to Python Interface Generator (04-Dec-01) diff --git a/numpy/f2py/doc/Release-4.x.txt b/numpy/f2py/doc/Release-4.x.txt deleted file mode 100644 index ed071a0cb..000000000 --- a/numpy/f2py/doc/Release-4.x.txt +++ /dev/null @@ -1,91 +0,0 @@ - -F2PY - Fortran to Python Interface Generator - -I am pleased to announce the fourth public release of f2py -(version 2.4.366): - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is a command line tool for binding Python and Fortran codes. It -scans Fortran 77/90/95 codes and generates a Python C/API module that -makes it possible to call Fortran subroutines from Python. No Fortran or -C expertise is required for using this tool. - -New features: - *** Win32 support. - *** Better Python C/API generated code (-Wall is much less verbose). - -Features include: - - *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision, - complex[ | *8 | *16 | *32 ] - - *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: - | : | * | : - - *** Supported attributes and statements: - intent([ in | inout | out | hide | in,out | inout,out ]) - dimension() - depend([]) - check([]) - note() - optional, required, external - intent(c), threadsafe, fortranname - - *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module subroutines are supported. Internal - initialization of optional arguments. - - *** Accessing COMMON blocks from Python. - Accessing Fortran 90/95 module data. - - *** Call-back functions: calling Python functions from Fortran with - very flexible hooks. - - *** In Python, arguments of the interfaced functions may be of - different type - necessary type conversations are done - internally in C level. - - *** Automatically generates documentation (__doc__,LaTeX) for - interfaced functions. - - *** Automatically generates signature files --- user has full - control over the interface constructions. Automatically - detects the signatures of call-back functions, solves argument - dependencies, etc. - - *** Automatically generates setup_.py for building - extension modules using tools from distutils and - fortran_support module (SciPy). - - *** Automatically generates Makefile for compiling Fortran and C - codes and linking them to a shared module. Many compilers are - supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft - F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha - Linux, HP-UX, IRIX64. - - *** Complete User's Guide in various formats (html,ps,pdf,dvi). - - *** f2py users list is available for support, feedback, etc. - - *** Installation with distutils. - - *** And finally, many bugs are fixed. - -More information about f2py, see - - http://cens.ioc.ee/projects/f2py2e/ - -LICENSE: - f2py is released under the LGPL. - -Sincerely, - Pearu Peterson - December 17, 2001 - -
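The call-back feature listed in all of the announcements above (calling Python functions from Fortran) looks roughly like this on the Python side; `flib`, `quad`, and the argument list are illustrative assumptions:

    import flib                       # hypothetical f2py-generated module
    # A wrapped routine that declares an EXTERNAL argument accepts any
    # Python callable in its place; the Fortran code then calls back into Python:
    def f(x):
        return x*x                    # integrand evaluated from Fortran
    print flib.quad(f, 0.0, 1.0)      # e.g. a quadrature routine using call-backs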

f2py 2.4.366 - The -Fortran to Python Interface Generator (17-Dec-01) diff --git a/numpy/f2py/doc/apps.tex b/numpy/f2py/doc/apps.tex deleted file mode 100644 index 513c048bd..000000000 --- a/numpy/f2py/doc/apps.tex +++ /dev/null @@ -1,71 +0,0 @@ - -\section{Applications} -\label{sec:apps} - - -\subsection{Example: wrapping C library \texttt{fftw}} -\label{sec:wrapfftw} - -Here follows a simple example how to use \fpy to generate a wrapper -for C functions. Let us create a FFT code using the functions in FFTW -library. I'll assume that the library \texttt{fftw} is configured with -\texttt{-{}-enable-shared} option. - -Here is the wrapper for the typical usage of FFTW: -\begin{verbatim} -/* File: wrap_dfftw.c */ -#include - -extern void dfftw_one(fftw_complex *in,fftw_complex *out,int *n) { - fftw_plan p; - p = fftw_create_plan(*n,FFTW_FORWARD,FFTW_ESTIMATE); - fftw_one(p,in,out); - fftw_destroy_plan(p); -} -\end{verbatim} -and here follows the corresponding siganture file (created manually): -\begin{verbatim} -!%f90 -! File: fftw.f90 -module fftw - interface - subroutine dfftw_one(in,out,n) - integer n - complex*16 in(n),out(n) - intent(out) out - intent(hide) n - end subroutine dfftw_one - end interface -end module fftw -\end{verbatim} - -Now let us generate the Python C/API module with \fpy: -\begin{verbatim} -f2py fftw.f90 -\end{verbatim} -and compile it -\begin{verbatim} -gcc -shared -I/numeric/include -I`f2py -I` -L/numeric/lib -ldfftw \ - -o fftwmodule.so -DNO_APPEND_FORTRAN fftwmodule.c wrap_dfftw.c -\end{verbatim} - -In Python: -\begin{verbatim} ->>> from Numeric import * ->>> from fftw import * ->>> print dfftw_one.__doc__ -Function signature: - out = dfftw_one(in) -Required arguments: - in : input rank-1 array('D') with bounds (n) -Return objects: - out : rank-1 array('D') with bounds (n) ->>> print dfftw_one([1,2,3,4]) -[ 10.+0.j -2.+2.j -2.+0.j -2.-2.j] ->>> -\end{verbatim} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff --git a/numpy/f2py/doc/bugs.tex b/numpy/f2py/doc/bugs.tex deleted file mode 100644 index 699ecf530..000000000 --- a/numpy/f2py/doc/bugs.tex +++ /dev/null @@ -1,109 +0,0 @@ - -\section{Bugs, Plans, and Feedback} -\label{sec:bugs} - -Currently no bugs have found that I was not able to fix. I will be -happy to receive bug reports from you (so that I could fix them and -keep the first sentence of this paragraph as true as possible ;-). -Note that \fpy is developed to work properly with gcc/g77 -compilers. -\begin{description} -\item[NOTE:] Wrapping callback functions returning \texttt{COMPLEX} - may fail on some systems. Workaround: avoid it by using callback - subroutines. -\end{description} - -Here follows a list of things that I plan to implement in (near) future: -\begin{enumerate} -\item recognize file types by their extension (signatures: - \texttt{*.pyf}, Fortran 77, Fortran 90 fixed: \texttt{*.f, *.for, *.F, *.FOR}, - Fortran 90 free: \texttt{*.F90, *.f90, *.m, *.f95, *.F95}); [DONE] -\item installation using \texttt{distutils} (when it will be stable); -\item put out to the web examples of \fpy usages in real situations: - wrapping \texttt{vode}, for example; -\item implement support for \texttt{PARAMETER} statement; [DONE] -\item rewrite test-site; -\item ... 
-\end{enumerate} -and here are things that I plan to do in future: -\begin{enumerate} -\item implement \texttt{intent(cache)} attribute for an optional work - arrays with a feature of allocating additional memory if needed; -\item use \fpy for wrapping Fortran 90/95 codes. \fpy should scan - Fortran 90/95 codes with no problems, what needs to be done is find - out how to call a Fortran 90/95 function (from a module) from - C. Anybody there willing to test \fpy with Fortran 90/95 modules? [DONE] -\item implement support for Fortran 90/95 module data; [DONE] -\item implement support for \texttt{BLOCK DATA} blocks (if needed); -\item test/document \fpy for \texttt{CHARACTER} arrays; -\item decide whether internal transposition of multi-dimensional - arrays is reasonable (need efficient code then), even if this is - controlled by the user trough some additional keyword; need - consistent and safe policy here; -\item use \fpy for generating wrapper functions also for C programs (a - kind of SWIG, only between Python and C). For that \fpy needs a - command line switch to inform itself that C scalars are passed in by - their value, not by their reference, for instance; -\item introduce a counter that counts the number of inefficient usages - of wrapper functions (copying caused by type-casting, non-contiguous - arrays); -\item if needed, make \texttt{DATA} statement to work properly for - arrays; -\item rewrite \texttt{COMMON} wrapper; [DONE] -\item ... -\end{enumerate} -I'll appreciate any feedback that will improve \fpy (bug reports, -suggestions, etc). If you find a correct Fortran code that fails with -\fpy, try to send me a minimal version of it so that I could track -down the cause of the failure. Note also that there is no sense to -send me files that are auto-generated with \fpy (I can generate them -myself); the version of \fpy that you are using (run \texttt{\fpy\ - -v}), and the relevant fortran codes or modified signature files -should be enough information to fix the bugs. Also add some -information on compilers and linkers that you use to the bug report. - - -\section{History of \fpy} -\label{sec:history} - -\begin{enumerate} -\item I was driven to start developing a tool such as \fpy after I had - wrote several Python C/API modules for interfacing various Fortran - routines from the Netlib. This work was tedious (some of functions - had more than 20 arguments, only few of them made sense for the - problems that they solved). I realized that most of the writing - could be done automatically. -\item On 9th of July, 1999, the first lines of the tool was written. A - prototype of the tool was ready to use in only three weeks. During - this time Travis Oliphant joined to the project and shared his - valuable knowledge and experience; the call-back mechanism is his - major contribution. Then I gave the tool to public under the name - FPIG --- \emph{Fortran to Python Interface Generator}. The tool contained - only one file \texttt{f2py.py}. -\item By autumn, it was clear that a better implementation was needed - as the debugging process became very tedious. So, I reserved some - time and rewrote the tool from scratch. The most important result of - this rewriting was the code that reads real Fortran codes and - determines the signatures of the Fortran routines. The main - attention was payed in particular to this part so that the tool - could read arbitrary Fortran~77/90/95 codes. 
As a result, the other - side of the tools task, that is, generating Python C/API functions, - was not so great. In public, this version of the tool was called - \texttt{f2py2e} --- \emph{Fortran to Python C/API generator, the - Second Edition}. -\item So, a month before The New Year 2000, I started the third - iteration of the \fpy development. Now the main attention was to - have a good C/API module constructing code. By 21st of January, - 2000, the tool of generating wrapper functions for Fortran routines - was ready. It had many new features and was more robust than ever. -\item In 25th of January, 2000, the first public release of \fpy was - announced (version 1.116). -\item In 12th of September, 2000, the second public release of \fpy was - announced (version 2.264). It now has among other changes a support - for Fortran 90/95 module routines. -\end{enumerate} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff --git a/numpy/f2py/doc/collectinput.py b/numpy/f2py/doc/collectinput.py deleted file mode 100755 index 54a908fcc..000000000 --- a/numpy/f2py/doc/collectinput.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -""" -collectinput - Collects all files that are included to a main Latex document - with \input or \include commands. These commands must be - in separate lines. - -Copyright 1999 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -Pearu Peterson - -Usage: - collectinput - collectinput # =inputless_ - collectinput # in and out are stdin and stdout -""" - -__version__ = "0.0" - -stdoutflag=0 -import sys -import os -import fileinput -import re -import commands - -try: fn=sys.argv[2] -except: - try: fn='inputless_'+sys.argv[1] - except: stdoutflag=1 -try: fi=sys.argv[1] -except: fi=() -if not stdoutflag: - sys.stdout=open(fn,'w') - -nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)' -input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?') -comment=re.compile(r'[^%]*%') - -for l in fileinput.input(fi): - l=l[:-1] - l1='' - if comment.match(l): - m=comment.match(l) - l1=l[m.end()-1:] - l=l[:m.end()-1] - m=input.match(l) - if m: - l=l.strip() - if l[-1]=='}': l=l[:-1] - i=m.end()-2 - sys.stderr.write('>>>>>>') - while i>-1 and (l[i] not in [' ','{']): i=i-1 - if i>-1: - fn=l[i+1:] - try: f=open(fn,'r'); flag=1; f.close() - except: - try: f=open(fn+'.tex','r'); flag=1;fn=fn+'.tex'; f.close() - except: flag=0 - if flag==0: - sys.stderr.write('Could not open a file: '+fn+'\n') - print l+l1 - continue - elif flag==1: - sys.stderr.write(fn+'\n') - print '%%%%% Begin of '+fn - print commands.getoutput(sys.argv[0]+' < '+fn) - print '%%%%% End of '+fn - else: - sys.stderr.write('Could not extract a file name from: '+l) - print l+l1 - else: - print l+l1 -sys.stdout.close() diff --git a/numpy/f2py/doc/commands.tex b/numpy/f2py/doc/commands.tex deleted file mode 100644 index 5101a9ff5..000000000 --- a/numpy/f2py/doc/commands.tex +++ /dev/null @@ -1,20 +0,0 @@ -\usepackage{xspace} -\usepackage{verbatim} - -%%tth:\newcommand{\xspace}{ } - -\newcommand{\fpy}{\texttt{f2py}\xspace} - -\newcommand{\bs}{\symbol{`\\}} -% need bs here: -%%tth:\newcommand{\bs}{\texttt{}} - -\newcommand{\shell}[1]{\hspace*{1em}\texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: - - diff --git 
a/numpy/f2py/doc/ex1/arr.f b/numpy/f2py/doc/ex1/arr.f deleted file mode 100644 index c4e49988f..000000000 --- a/numpy/f2py/doc/ex1/arr.f +++ /dev/null @@ -1,4 +0,0 @@ - subroutine arr(l,m,n,a) - integer l,m,n - real*8 a(l,m,n) - end diff --git a/numpy/f2py/doc/ex1/bar.f b/numpy/f2py/doc/ex1/bar.f deleted file mode 100644 index c723b5af1..000000000 --- a/numpy/f2py/doc/ex1/bar.f +++ /dev/null @@ -1,4 +0,0 @@ - function bar(a,b) - integer a,b,bar - bar = a + b - end diff --git a/numpy/f2py/doc/ex1/foo.f b/numpy/f2py/doc/ex1/foo.f deleted file mode 100644 index cdcac4103..000000000 --- a/numpy/f2py/doc/ex1/foo.f +++ /dev/null @@ -1,5 +0,0 @@ - subroutine foo(a) - integer a -cf2py intent(in,out) :: a - a = a + 5 - end diff --git a/numpy/f2py/doc/ex1/foobar-smart.f90 b/numpy/f2py/doc/ex1/foobar-smart.f90 deleted file mode 100644 index 61385a685..000000000 --- a/numpy/f2py/doc/ex1/foobar-smart.f90 +++ /dev/null @@ -1,24 +0,0 @@ -!%f90 -module foobar ! in - note(This module contains two examples that are used in & - \texttt{f2py} documentation.) foobar - interface ! in :foobar - subroutine foo(a) ! in :foobar:foo.f - note(Example of a wrapper function of a Fortran subroutine.) foo - integer intent(inout),& - note(5 is added to the variable {{}\verb@a@{}} ``in place''.) :: a - end subroutine foo - function bar(a,b) result (ab) ! in :foobar:bar.f - integer :: a - integer :: b - integer :: ab - note(The first value.) a - note(The second value.) b - note(Add two values.) bar - note(The result.) ab - end function bar - end interface -end module foobar - -! This file was auto-generated with f2py (version:0.95). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/ex1/foobar.f90 b/numpy/f2py/doc/ex1/foobar.f90 deleted file mode 100644 index 53ac5b506..000000000 --- a/numpy/f2py/doc/ex1/foobar.f90 +++ /dev/null @@ -1,16 +0,0 @@ -!%f90 -module foobar ! in - interface ! in :foobar - subroutine foo(a) ! in :foobar:foo.f - integer intent(inout) :: a - end subroutine foo - function bar(a,b) ! in :foobar:bar.f - integer :: a - integer :: b - integer :: bar - end function bar - end interface -end module foobar - -! This file was auto-generated with f2py (version:0.95). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/ex1/foobarmodule.tex b/numpy/f2py/doc/ex1/foobarmodule.tex deleted file mode 100644 index 32411ec03..000000000 --- a/numpy/f2py/doc/ex1/foobarmodule.tex +++ /dev/null @@ -1,36 +0,0 @@ -% This file is auto-generated with f2py (version:2.266) -\section{Module \texttt{foobar}} - -This module contains two examples that are used in \texttt{f2py} documentation. - -\subsection{Wrapper function \texttt{foo}} - - -\noindent{{}\verb@foo@{}}\texttt{(a)} ---- Example of a wrapper function of a Fortran subroutine. - -\noindent Required arguments: -\begin{description} -\item[]{{}\verb@a : in/output rank-0 array(int,'i')@{}} ---- 5 is added to the variable {{}\verb@a@{}} ``in place''. -\end{description} - -\subsection{Wrapper function \texttt{bar}} - - -\noindent{{}\verb@bar = bar@{}}\texttt{(a, b)} ---- Add two values. - -\noindent Required arguments: -\begin{description} -\item[]{{}\verb@a : input int@{}} ---- The first value. -\item[]{{}\verb@b : input int@{}} ---- The second value. -\end{description} -\noindent Return objects: -\begin{description} -\item[]{{}\verb@bar : int@{}} ---- See elsewhere. 
-\end{description} - diff --git a/numpy/f2py/doc/ex1/runme b/numpy/f2py/doc/ex1/runme deleted file mode 100755 index 2aac6158e..000000000 --- a/numpy/f2py/doc/ex1/runme +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -f2py2e='python ../../f2py2e.py' -PYINC=`$f2py2e -pyinc` -$f2py2e foobar-smart.pyf --short-latex --overwrite-makefile -makefile foo.f bar.f -gmake -f Makefile-foobar -#gcc -O3 -I$PYINC -I$PYINC/Numeric -shared -o foobarmodule.so foobarmodule.c foo.f bar.f -python -c ' -import foobar -print foobar.__doc__ -print foobar.bar(2,3) -from Numeric import * -a=array(3) -print a,foobar.foo(a),a -print foobar.foo.__doc__ -print foobar.bar.__doc__ -print "ok" -' diff --git a/numpy/f2py/doc/f2py2e.tex b/numpy/f2py/doc/f2py2e.tex deleted file mode 100644 index 6e3e9d68c..000000000 --- a/numpy/f2py/doc/f2py2e.tex +++ /dev/null @@ -1,50 +0,0 @@ -\documentclass{article} -\usepackage{a4wide} - -\input commands - -\title{\fpy\\Fortran to Python Interface Generator\\{\large Second Edition}} -\author{Pearu Peterson \texttt{}} -\date{$Revision: 1.16 $\\\today} -\begin{document} -\special{html: If equations does not show Greek letters or large - brackets correctly, then your browser configuration needs some - adjustment. Read the notes for Enabling Symbol - Fonts in Netscape under X . In addition, the browser must be set - to use document fonts. -} - -\maketitle -\begin{abstract} - \fpy is a Python program that generates Python C/API modules for - wrapping Fortran~77/90/95 codes to Python. The user can influence the - process by modifying the signature files that \fpy generates when - scanning the Fortran codes. This document describes the syntax of - the signature files and the ways how the user can dictate the tool - to produce wrapper functions with desired Python signatures. Also - how to call the wrapper functions from Python is discussed. - - See \texttt{http://cens.ioc.ee/projects/f2py2e/} for updates of this - document and the tool. -\end{abstract} - -\tableofcontents - -\input intro -\input signaturefile -\input notes -\input options -\input bugs - -\appendix -\input ex1/foobarmodule -\input apps -\end{document} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: t -%%% End: - - diff --git a/numpy/f2py/doc/f2python9-final/README.txt b/numpy/f2py/doc/f2python9-final/README.txt deleted file mode 100644 index b907216b6..000000000 --- a/numpy/f2py/doc/f2python9-final/README.txt +++ /dev/null @@ -1,38 +0,0 @@ - -This directory contains the source of the paper - - "Fortran to Python Interface Generator with an Application - to Aerospace Engineering" - -by - Pearu Peterson (the corresponding author) - Joaquim R. R. A. Martins - Juan J. Alonso - -for The 9th International Python Conference, March 5-8, 2001, Long Beach, California. - -The paper is provided here is in the HTML format: - - f2python9.html (size=48151 bytes) - -Note that this file includes the following JPG images - - flow.jpg (size=13266) - structure.jpg (size=17860) - aerostructure.jpg (size=72247) - -PS: -The HTML file f2python9.html is generated using TTH (http://hutchinson.belmont.ma.us/tth/) -from the LaTeX source file `python9.tex'. The source can be found in the - src/ -directory. This directory contains also the following EPS files - flow.eps - structure.eps - aerostructure.eps -and the text files - examples/{exp1.f,exp1mess.txt,exp1session.txt,foo.pyf,foom.pyf} -that are used by the LaTeX source python9.tex. 
- -Regards, - Pearu -January 15, 2001 diff --git a/numpy/f2py/doc/f2python9-final/aerostructure.jpg b/numpy/f2py/doc/f2python9-final/aerostructure.jpg deleted file mode 100644 index 896ad6e12..000000000 Binary files a/numpy/f2py/doc/f2python9-final/aerostructure.jpg and /dev/null differ diff --git a/numpy/f2py/doc/f2python9-final/flow.jpg b/numpy/f2py/doc/f2python9-final/flow.jpg deleted file mode 100644 index cfe0f85f3..000000000 Binary files a/numpy/f2py/doc/f2python9-final/flow.jpg and /dev/null differ diff --git a/numpy/f2py/doc/f2python9-final/mk_html.sh b/numpy/f2py/doc/f2python9-final/mk_html.sh deleted file mode 100755 index 944110e93..000000000 --- a/numpy/f2py/doc/f2python9-final/mk_html.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -cd src - -test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps -test -f flow.eps || convert ../flow.jpg flow.eps -test -f structure.eps || convert ../structure.jpg structure.eps - -latex python9.tex -latex python9.tex -latex python9.tex - -test `which tth` && cat python9.tex | sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | tth -Lpython9 -i > ../f2python9.html -cd .. diff --git a/numpy/f2py/doc/f2python9-final/mk_pdf.sh b/numpy/f2py/doc/f2python9-final/mk_pdf.sh deleted file mode 100755 index b773028b7..000000000 --- a/numpy/f2py/doc/f2python9-final/mk_pdf.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -cd src - -test -f aerostructure.pdf || convert ../aerostructure.jpg aerostructure.pdf -test -f flow.pdf || convert ../flow.jpg flow.pdf -test -f structure.pdf || convert ../structure.jpg structure.pdf - -cat python9.tex | sed -e "s/eps,/pdf,/g" > python9pdf.tex -pdflatex python9pdf.tex -pdflatex python9pdf.tex -pdflatex python9pdf.tex - -mv python9pdf.pdf ../f2python9.pdf \ No newline at end of file diff --git a/numpy/f2py/doc/f2python9-final/mk_ps.sh b/numpy/f2py/doc/f2python9-final/mk_ps.sh deleted file mode 100755 index 4b0863fcd..000000000 --- a/numpy/f2py/doc/f2python9-final/mk_ps.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -cd src - -test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps -test -f flow.eps || convert ../flow.jpg flow.eps -test -f structure.eps || convert ../structure.jpg structure.eps - -latex python9.tex -latex python9.tex -latex python9.tex - -dvips python9.dvi -o ../f2python9.ps -cd .. -gzip -f f2python9.ps diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1.f b/numpy/f2py/doc/f2python9-final/src/examples/exp1.f deleted file mode 100644 index 36bee50b0..000000000 --- a/numpy/f2py/doc/f2python9-final/src/examples/exp1.f +++ /dev/null @@ -1,26 +0,0 @@ - subroutine exp1(l,u,n) -C Input: n is number of iterations -C Output: l,u are such that -C l(1)/l(2) < exp(1) < u(1)/u(2) -C -Cf2py integer*4 :: n = 1 -Cf2py intent(out) l,u - integer*4 n,i - real*8 l(2),u(2),t,t1,t2,t3,t4 - l(2) = 1 - l(1) = 0 - u(2) = 0 - u(1) = 1 - do 10 i=0,n - t1 = 4 + 32*(1+i)*i - t2 = 11 + (40+32*i)*i - t3 = 3 + (24+32*i)*i - t4 = 8 + 32*(1+i)*i - t = u(1) - u(1) = l(1)*t1 + t*t2 - l(1) = l(1)*t3 + t*t4 - t = u(2) - u(2) = l(2)*t1 + t*t2 - l(2) = l(2)*t3 + t*t4 - 10 continue - end diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt b/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt deleted file mode 100644 index ae1545718..000000000 --- a/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt +++ /dev/null @@ -1,17 +0,0 @@ -Reading fortran codes... - Reading file 'exp1.f' -Post-processing... - Block: foo - Block: exp1 -Creating 'Makefile-foo'... 
- Linker: ld ('GNU ld' 2.9.5) - Fortran compiler: f77 ('g77 2.x.x' 2.95.2) - C compiler: cc ('gcc 2.x.x' 2.95.2) -Building modules... - Building module "foo"... - Constructing wrapper function "exp1"... - l,u = exp1([n]) - Wrote C/API module "foo" to file "foomodule.c" - Documentation is saved to file "foomodule.tex" -Run GNU make to build shared modules: - gmake -f Makefile- [test] diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt b/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt deleted file mode 100644 index 9bec9198e..000000000 --- a/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt +++ /dev/null @@ -1,20 +0,0 @@ ->>> import foo,Numeric ->>> print foo.exp1.__doc__ -exp1 - Function signature: - l,u = exp1([n]) -Optional arguments: - n := 1 input int -Return objects: - l : rank-1 array('d') with bounds (2) - u : rank-1 array('d') with bounds (2) - ->>> l,u = foo.exp1() ->>> print l,u -[ 1264. 465.] [ 1457. 536.] ->>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1] -2.71827956989 2.25856657199e-06 ->>> l,u = foo.exp1(2) ->>> print l,u -[ 517656. 190435.] [ 566827. 208524.] ->>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1] -2.71828182845 1.36437527942e-11 \ No newline at end of file diff --git a/numpy/f2py/doc/f2python9-final/src/examples/foo.pyf b/numpy/f2py/doc/f2python9-final/src/examples/foo.pyf deleted file mode 100644 index 516bb292f..000000000 --- a/numpy/f2py/doc/f2python9-final/src/examples/foo.pyf +++ /dev/null @@ -1,13 +0,0 @@ -!%f90 -*- f90 -*- -python module foo - interface - subroutine exp1(l,u,n) - real*8 dimension(2) :: l - real*8 dimension(2) :: u - integer*4 :: n - end subroutine exp1 - end interface -end python module foo -! This file was auto-generated with f2py -! (version:2.298). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/f2python9-final/src/examples/foom.pyf b/numpy/f2py/doc/f2python9-final/src/examples/foom.pyf deleted file mode 100644 index 6392ebc95..000000000 --- a/numpy/f2py/doc/f2python9-final/src/examples/foom.pyf +++ /dev/null @@ -1,14 +0,0 @@ -!%f90 -*- f90 -*- -python module foo - interface - subroutine exp1(l,u,n) - real*8 dimension(2) :: l - real*8 dimension(2) :: u - intent(out) l,u - integer*4 optional :: n = 1 - end subroutine exp1 - end interface -end python module foo -! This file was auto-generated with f2py -! (version:2.298) and modified by pearu. -! 
See http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/f2python9-final/structure.jpg b/numpy/f2py/doc/f2python9-final/structure.jpg deleted file mode 100644 index 9aa691339..000000000 Binary files a/numpy/f2py/doc/f2python9-final/structure.jpg and /dev/null differ diff --git a/numpy/f2py/doc/fortranobject.tex b/numpy/f2py/doc/fortranobject.tex deleted file mode 100644 index dbb244cdd..000000000 --- a/numpy/f2py/doc/fortranobject.tex +++ /dev/null @@ -1,574 +0,0 @@ -\documentclass{article} - -\headsep=0pt -\topmargin=0pt -\headheight=0pt -\oddsidemargin=0pt -\textwidth=6.5in -\textheight=9in - -\usepackage{xspace} -\usepackage{verbatim} -\newcommand{\fpy}{\texttt{f2py}\xspace} -\newcommand{\bs}{\symbol{`\\}} -\newcommand{\email}[1]{\special{html:}\texttt{<#1>}\special{html:}} -\title{\texttt{PyFortranObject} --- example usages} -\author{ -\large Pearu Peterson\\ -\small \email{pearu@cens.ioc.ee} -} - -\begin{document} - -\maketitle - -\special{html: Other formats of this document: -Gzipped PS, -PDF -} - -\tableofcontents - -\section{Introduction} -\label{sec:intro} - -Fortran language defines the following concepts that we would like to -access from Python: functions, subroutines, data in \texttt{COMMON} blocks, -F90 module functions and subroutines, F90 module data (both static and -allocatable arrays). - -In the following we shall assume that we know the signatures (full -specifications of routine arguments and variables) of these concepts -from their Fortran source codes. Now, in order to call or use them -from C, one needs to have pointers to the corresponding objects. The -pointers to Fortran 77 objects (routines, data in \texttt{COMMON} -blocks) are readily available to C codes (there are various sources -available about mixing Fortran 77 and C codes). On the other hand, F90 -module specifications are highly compiler dependent and sometimes it -is not even possible to access F90 module objects from C (at least, -not directly, see remark about MIPSPro 7 Compilers). But using some -tricks (described below), the pointers to F90 module objects can be -determined in runtime providing a compiler independent solution. - -To use Fortran objects from Python in unified manner, \fpy introduces -\texttt{PyFortranObject} to hold pointers of the Fortran objects and -the corresponing wrapper functions. In fact, \texttt{PyFortranObject} -does much more: it generates documentation strings in run-time (for -items in \texttt{COMMON} blocks and data in F90 modules), provides -methods for accessing Fortran data and for calling Fortran routines, -etc. 
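Before going into the details, the following Python session sketches how these
objects appear once an extension module has been built by \fpy. The module name
\texttt{foo} and its attributes are assumed here only for illustration; they
mirror the examples worked out in the sections below (data in \texttt{COMMON}
blocks are exposed in the same attribute style).
\begin{verbatim}
>>> import foo              # extension module generated by f2py (name assumed)
>>> foo.bar()               # call a wrapped Fortran 77 subroutine
>>> print foo.bar.__doc__   # documentation string generated in run-time
>>> foo.fun.i = 4           # set a datum in the F90 module fun
>>> foo.fun.r = [1,2,3,4]   # assign to an allocatable F90 module array
\end{verbatim}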
- -\section{\texttt{PyFortranObject}} -\label{sec:pyfortobj} - -\texttt{PyFortranObject} is defined as follows -\begin{verbatim} -typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ -} PyFortranObject; -\end{verbatim} -where \texttt{FortranDataDef} is -\begin{verbatim} -typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {int d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ - int type; /* PyArray_ || not used */ - char *data; /* pointer to array || Fortran routine */ - void (*func)(); /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ -} FortranDataDef; -\end{verbatim} -In the following we demonstrate typical usages of -\texttt{PyFortranObject}. Just relevant code fragments will be given. - - -\section{Fortran 77 subroutine} -\label{sec:f77subrout} - -Consider Fortran 77 subroutine -\begin{verbatim} -subroutine bar() -end -\end{verbatim} -The corresponding \texttt{PyFortranObject} is defined in C as follows: -\begin{verbatim} -static char doc_bar[] = "bar()"; -static PyObject *c_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - static char *capi_kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist)) - return NULL; - (*f2py_func)(); - return Py_BuildValue(""); -} -extern void F_FUNC(bar,BAR)(); -static FortranDataDef f2py_routines_def[] = { - {"bar",-1, {-1}, 0, (char *)F_FUNC(bar,BAR),(void*)c_bar,doc_bar}, - {NULL} -}; -void initfoo() { - - d = PyModule_GetDict(m); - PyDict_SetItemString(d, f2py_routines_def[0].name, - PyFortranObject_NewAsAttr(&f2py_routines_def[0])); -} -\end{verbatim} -where CPP macro \texttt{F\_FUNC} defines how Fortran 77 routines are -seen in C. -In Python, Fortran subroutine \texttt{bar} is called as follows -\begin{verbatim} ->>> import foo ->>> foo.bar() -\end{verbatim} - -\section{Fortran 77 function} -\label{sec:f77func} -Consider Fortran 77 function -\begin{verbatim} -function bar() -complex bar -end -\end{verbatim} -The corresponding \texttt{PyFortranObject} is defined in C as in -previous example but with the following changes: -\begin{verbatim} -static char doc_bar[] = "bar = bar()"; -static PyObject *c_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - complex_float bar; - static char *capi_kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist)) - return NULL; - (*f2py_func)(&bar); - return Py_BuildValue("O",pyobj_from_complex_float1(bar)); -} -extern void F_WRAPPEDFUNC(bar,BAR)(); -static FortranDataDef f2py_routines_def[] = { - {"bar",-1,{-1},0,(char *)F_WRAPPEDFUNC(bar,BAR),(void *)c_bar,doc_bar}, - {NULL} -}; -\end{verbatim} -where CPP macro \texttt{F\_WRAPPEDFUNC} gives the pointer to the following -Fortran 77 subroutine: -\begin{verbatim} -subroutine f2pywrapbar (barf2pywrap) -external bar -complex bar, barf2pywrap -barf2pywrap = bar() -end -\end{verbatim} -With these hooks, calling Fortran functions returning composed types -becomes platform/compiler independent. 
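A hypothetical Python session using this wrapper looks the same as in the
subroutine case; only the return value differs (the module name \texttt{foo} is
again assumed for illustration):
\begin{verbatim}
>>> import foo
>>> z = foo.bar()   # the Fortran COMPLEX result is returned as a Python complex
\end{verbatim}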
- - -\section{\texttt{COMMON} block data} -\label{sec:commondata} - -Consider Fortran 77 \texttt{COMMON} block -\begin{verbatim} -integer i -COMMON /bar/ i -\end{verbatim} -In order to access the variable \texttt{i} from Python, -\texttt{PyFortranObject} is defined as follows: -\begin{verbatim} -static FortranDataDef f2py_bar_def[] = { - {"i",0,{-1},PyArray_INT}, - {NULL} -}; -static void f2py_setup_bar(char *i) { - f2py_bar_def[0].data = i; -} -extern void F_FUNC(f2pyinitbar,F2PYINITBAR)(); -static void f2py_init_bar() { - F_FUNC(f2pyinitbar,F2PYINITBAR)(f2py_setup_bar); -} -void initfoo() { - - PyDict_SetItemString(d, "bar", PyFortranObject_New(f2py_bar_def,f2py_init_bar)); -} -\end{verbatim} -where auxiliary Fortran function \texttt{f2pyinitbar} is defined as follows -\begin{verbatim} -subroutine f2pyinitbar(setupfunc) -external setupfunc -integer i -common /bar/ i -call setupfunc(i) -end -\end{verbatim} -and it is called in \texttt{PyFortranObject\_New}. - - -\section{Fortran 90 module subroutine} -\label{sec:f90modsubrout} - -Consider -\begin{verbatim} -module fun - subroutine bar() - end subroutine bar -end module fun -\end{verbatim} -\texttt{PyFortranObject} is defined as follows -\begin{verbatim} -static char doc_fun_bar[] = "fun.bar()"; -static PyObject *c_fun_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - static char *kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist)) - return NULL; - (*f2py_func)(); - return Py_BuildValue(""); -} -static FortranDataDef f2py_fun_def[] = { - {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar}, - {NULL} -}; -static void f2py_setup_fun(char *bar) { - f2py_fun_def[0].data = bar; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo () { - - PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where auxiliary Fortran function \texttt{f2pyinitfun} is defined as -follows -\begin{verbatim} -subroutine f2pyinitfun(f2pysetupfunc) -use fun -external f2pysetupfunc -call f2pysetupfunc(bar) -end subroutine f2pyinitfun -\end{verbatim} -The following Python session demonstrates how to call Fortran 90 -module function \texttt{bar}: -\begin{verbatim} ->>> import foo ->>> foo.fun.bar() -\end{verbatim} - -\section{Fortran 90 module function} -\label{sec:f90modfunc} - -Consider -\begin{verbatim} -module fun - function bar() - complex bar - end subroutine bar -end module fun -\end{verbatim} -\texttt{PyFortranObject} is defined as follows -\begin{verbatim} -static char doc_fun_bar[] = "bar = fun.bar()"; -static PyObject *c_fun_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - complex_float bar; - static char *kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist)) - return NULL; - (*f2py_func)(&bar); - return Py_BuildValue("O",pyobj_from_complex_float1(bar)); -} -static FortranDataDef f2py_fun_def[] = { - {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar}, - {NULL} -}; -static void f2py_setup_fun(char *bar) { - f2py_fun_def[0].data = bar; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo() { - - PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where -\begin{verbatim} -subroutine f2pywrap_fun_bar (barf2pywrap) -use fun -complex barf2pywrap -barf2pywrap = bar() -end 
- -subroutine f2pyinitfun(f2pysetupfunc) -external f2pysetupfunc,f2pywrap_fun_bar -call f2pysetupfunc(f2pywrap_fun_bar) -end -\end{verbatim} - - -\section{Fortran 90 module data} -\label{sec:f90moddata} - -Consider -\begin{verbatim} -module fun - integer i -end module fun -\end{verbatim} -Then -\begin{verbatim} -static FortranDataDef f2py_fun_def[] = { - {"i",0,{-1},PyArray_INT}, - {NULL} -}; -static void f2py_setup_fun(char *i) { - f2py_fun_def[0].data = i; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo () { - - PyDict_SetItemString(d, "fun", - PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where -\begin{verbatim} -subroutine f2pyinitfun(f2pysetupfunc) -use fun -external f2pysetupfunc -call f2pysetupfunc(i) -end subroutine f2pyinitfun -\end{verbatim} -Example usage in Python: -\begin{verbatim} ->>> import foo ->>> foo.fun.i = 4 -\end{verbatim} - -\section{Fortran 90 module allocatable array} -\label{sec:f90modallocarr} - -Consider -\begin{verbatim} -module fun - real, allocatable :: r(:) -end module fun -\end{verbatim} -Then -\begin{verbatim} -static FortranDataDef f2py_fun_def[] = { - {"r",1,{-1},PyArray_FLOAT}, - {NULL} -}; -static void f2py_setup_fun(void (*r)()) { - f2py_fun_def[0].func = r; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo () { - - PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where -\begin{verbatim} -subroutine f2py_fun_getdims_r(r,s,f2pysetdata) -use fun, only: d => r -external f2pysetdata -logical ns -integer s(*),r,i,j -ns = .FALSE. -if (allocated(d)) then - do i=1,r - if ((size(d,r-i+1).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. - end if - end do - if (ns) then - deallocate(d) - end if -end if -if ((.not.allocated(d)).and.(s(1).ge.1)) then - allocate(d(s(1))) -end if -if (allocated(d)) then - do i=1,r - s(i) = size(d,r-i+1) - end do -end if -call f2pysetdata(d,allocated(d)) -end subroutine f2py_fun_getdims_r - -subroutine f2pyinitfun(f2pysetupfunc) -use fun -external f2pysetupfunc,f2py_fun_getdims_r -call f2pysetupfunc(f2py_fun_getdims_r) -end subroutine f2pyinitfun -\end{verbatim} -Usage in Python: -\begin{verbatim} ->>> import foo ->>> foo.fun.r = [1,2,3,4] -\end{verbatim} - -\section{Callback subroutine} -\label{sec:cbsubr} - -Thanks to Travis Oliphant for working out the basic idea of the -following callback mechanism. 
- -Consider -\begin{verbatim} -subroutine fun(bar) -external bar -call bar(1) -end -\end{verbatim} -Then -\begin{verbatim} -static char doc_foo8_fun[] = " -Function signature: - fun(bar,[bar_extra_args]) -Required arguments: - bar : call-back function -Optional arguments: - bar_extra_args := () input tuple -Call-back functions: - def bar(e_1_e): return - Required arguments: - e_1_e : input int"; -static PyObject *foo8_fun(PyObject *capi_self, PyObject *capi_args, - PyObject *capi_keywds, void (*f2py_func)()) { - PyObject *capi_buildvalue = NULL; - PyObject *bar_capi = Py_None; - PyTupleObject *bar_xa_capi = NULL; - PyTupleObject *bar_args_capi = NULL; - jmp_buf bar_jmpbuf; - int bar_jmpbuf_flag = 0; - int bar_nofargs_capi = 0; - static char *capi_kwlist[] = {"bar","bar_extra_args",NULL}; - - if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\ - "O!|O!:foo8.fun",\ - capi_kwlist,&PyFunction_Type,&bar_capi,&PyTuple_Type,&bar_xa_capi)) - goto capi_fail; - - bar_nofargs_capi = cb_bar_in_fun__user__routines_nofargs; - if (create_cb_arglist(bar_capi,bar_xa_capi,1,0, - &cb_bar_in_fun__user__routines_nofargs,&bar_args_capi)) { - if ((PyErr_Occurred())==NULL) - PyErr_SetString(foo8_error,"failed in processing argument list for call-back bar." ); - goto capi_fail; - } - - SWAP(bar_capi,cb_bar_in_fun__user__routines_capi,PyObject); - SWAP(bar_args_capi,cb_bar_in_fun__user__routines_args_capi,PyTupleObject); - memcpy(&bar_jmpbuf,&cb_bar_in_fun__user__routines_jmpbuf,sizeof(jmp_buf)); - bar_jmpbuf_flag = 1; - - if ((setjmp(cb_bar_in_fun__user__routines_jmpbuf))) { - if ((PyErr_Occurred())==NULL) - PyErr_SetString(foo8_error,"Failure of a callback function"); - goto capi_fail; - } else - (*f2py_func)(cb_bar_in_fun__user__routines); - - capi_buildvalue = Py_BuildValue(""); -capi_fail: - - if (bar_jmpbuf_flag) { - cb_bar_in_fun__user__routines_capi = bar_capi; - Py_DECREF(cb_bar_in_fun__user__routines_args_capi); - cb_bar_in_fun__user__routines_args_capi = bar_args_capi; - cb_bar_in_fun__user__routines_nofargs = bar_nofargs_capi; - memcpy(&cb_bar_in_fun__user__routines_jmpbuf,&bar_jmpbuf,sizeof(jmp_buf)); - bar_jmpbuf_flag = 0; - } - return capi_buildvalue; -} -extern void F_FUNC(fun,FUN)(); -static FortranDataDef f2py_routine_defs[] = { - {"fun",-1,{-1},0,(char *)F_FUNC(fun,FUN),(void *)foo8_fun,doc_foo8_fun}, - {NULL} -}; -void initfoo8 () { - - PyDict_SetItemString(d, f2py_routine_defs[0].name, - PyFortranObject_NewAsAttr(&f2py_routine_defs[0])); -} -\end{verbatim} -where -\begin{verbatim} -PyObject *cb_bar_in_fun__user__routines_capi = Py_None; -PyTupleObject *cb_bar_in_fun__user__routines_args_capi = NULL; -int cb_bar_in_fun__user__routines_nofargs = 0; -jmp_buf cb_bar_in_fun__user__routines_jmpbuf; -static void cb_bar_in_fun__user__routines (int *e_1_e_cb_capi) { - PyTupleObject *capi_arglist = cb_bar_in_fun__user__routines_args_capi; - PyObject *capi_return = NULL; - PyObject *capi_tmp = NULL; - int capi_j,capi_i = 0; - - int e_1_e=(*e_1_e_cb_capi); - if (capi_arglist == NULL) - goto capi_fail; - if (cb_bar_in_fun__user__routines_nofargs>capi_i) - if (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_int1(e_1_e))) - goto capi_fail; - - capi_return = PyEval_CallObject(cb_bar_in_fun__user__routines_capi, - (PyObject *)capi_arglist); - - if (capi_return == NULL) - goto capi_fail; - if (capi_return == Py_None) { - Py_DECREF(capi_return); - capi_return = Py_BuildValue("()"); - } - else if (!PyTuple_Check(capi_return)) { - capi_tmp = capi_return; - capi_return = 
Py_BuildValue("(O)",capi_tmp); - Py_DECREF(capi_tmp); - } - capi_j = PyTuple_Size(capi_return); - capi_i = 0; - goto capi_return_pt; -capi_fail: - fprintf(stderr,"Call-back cb_bar_in_fun__user__routines failed.\n"); - Py_XDECREF(capi_return); - longjmp(cb_bar_in_fun__user__routines_jmpbuf,-1); -capi_return_pt: - ; -} -\end{verbatim} -Usage in Python: -\begin{verbatim} ->>> import foo8 as foo ->>> def bar(i): print 'In bar i=',i -... ->>> foo.fun(bar) -In bar i= 1 -\end{verbatim} - -\end{document} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: t -%%% End: diff --git a/numpy/f2py/doc/index.html b/numpy/f2py/doc/index.html deleted file mode 100644 index e162ed41a..000000000 --- a/numpy/f2py/doc/index.html +++ /dev/null @@ -1,264 +0,0 @@ - - - - - - -F2PY - Fortran to Python Interface Generator - - - - - -

F2PY - Fortran to Python Interface Generator

by Pearu Peterson

What's new?

See NEWS.txt for the latest changes in f2py.
July ??, 2002
    Implemented prototype calculator, complete tests for scalar F77
    functions, --help-compiler option. Fixed a number of bugs and
    removed obsolete features.
April 4, 2002
    Fixed a nasty bug in copying one-dimensional non-contiguous arrays.
    (Thanks to Travis O. for pointing this out.)
March 26, 2002
    Bug fixes; turned off F2PY_REPORT_ATEXIT by default.
March 13, 2002
    Mac support, fixed an incomplete dependency calculator, minor bug fixes.
March 3, 2002
    Fixed a memory leak and the copying of multi-dimensional complex arrays.
Old news.
Introduction

Writing Python C/API wrappers for Fortran routines can be a very
tedious task, especially if a Fortran routine takes more than 20
arguments but only a few of them are relevant to the problem being
solved. So I have developed a tool that generates C/API modules
containing wrapper functions for Fortran routines. I call this tool
F2PY - Fortran to Python Interface Generator. It is written entirely
in Python and can be called from the command line as f2py.
F2PY (in NumPy) is released under the terms of the NumPy License.
f2py, Second Edition

The development of f2py started in the summer of 1999. By now
(January 2000) it has reached the stage of being a complete tool: it
scans real Fortran code, creates a signature file that the user can
modify, constructs a C/API module that can be compiled and imported
into Python, and creates LaTeX documentation for the wrapper
functions. Below is a longer list of f2py features:

  1. f2py scans real Fortran codes and produces signature files. The
     syntax of the signature files is borrowed from the Fortran 90/95
     language specification with some extensions.
  2. f2py generates a GNU Makefile that can be used for building
     shared modules (see below for a list of supported
     platforms/compilers). Starting from the third release, f2py
     generates setup_modulename.py for building extension modules
     using distutils tools.
  3. f2py uses the signature files to produce wrappers for Fortran 77
     routines and their COMMON blocks.
  4. For external arguments f2py constructs a very flexible call-back
     mechanism so that Python functions can be called from Fortran
     (see the sketch after this list).
  5. You can pass in almost arbitrary Python objects to wrapper
     functions. If needed, f2py takes care of type-casting and
     non-contiguous arrays.
  6. You can modify the signature files so that f2py will generate
     wrapper functions with the desired signatures. The depend()
     attribute is introduced to control the initialization order of
     the variables. f2py introduces the intent(hide) attribute to
     remove a particular argument from the argument list of the
     wrapper function, and intent(c), which is useful for wrapping C
     libraries. In addition, the optional and required attributes are
     introduced and employed.
  7. f2py supports almost all standard Fortran 77/90/95 constructs and
     understands all basic Fortran types, including (multi-dimensional,
     complex) arrays and character strings with adjustable and assumed
     sizes/lengths.
  8. f2py generates a LaTeX document containing the documentation of
     the wrapped functions (argument types, dimensions, etc.). The
     user can easily add human-readable text to the documentation by
     inserting a note(<LaTeX text>) attribute into the definition of a
     routine signature.
  9. With f2py one can also access Fortran 90/95 module subroutines
     from Python.

For more information, see the User's Guide of the tool. Windows users
should also take a look at the f2py HOWTO for Win32 (its latest
version can be found here).
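As a small sketch of the call-back feature from item 4, assume an extension
module foo that wraps a Fortran routine fun taking an external argument (the
names are illustrative; the full pattern is worked out in the PyFortranObject
notes elsewhere in these documents):

    >>> import foo
    >>> def bar(i): print 'In bar i=', i
    ...
    >>> foo.fun(bar)
    In bar i= 1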

Requirements

  1. You'll need Python (1.5.2 or later, 2.2 is recommended) to run
     f2py (because it uses the re module). To build the generated
     extension modules with the distutils setup script, you'll need
     Python 2.x.
  2. You'll need Numerical Python (version 13 or later, 20.3 is
     recommended) to compile the C/API modules (because they use the
     function PyArray_FromDimsAndDataAndDescr).
Download

User's Guide:
    usersguide.html, usersguide.pdf, usersguide.ps.gz, usersguide.dvi.
Snapshots of the fifth public release:
    2.x/F2PY-2-latest.tar.gz
Snapshots of earlier releases:
    rel-5.x, rel-4.x, rel-3.x, rel-2.x, rel-1.x, rel-0.x
Installation

Unpack the source file, change to the directory f2py-?-??? and run
python setup.py install. That's it!
Platform/Compiler Related Notes

f2py has been successfully tested on a number of platform/compiler
combinations and will probably run on other UN*X systems as well.
Additions to the list of platforms/compilers where f2py has been
successfully used are most welcome.

Note: Using the Compaq Fortran compiler on Alpha Linux is successful
except when wrapping Fortran callback functions returning COMPLEX.
The same applies to IRIX64.

Note: Fortran 90/95 module support is currently tested with the
Absoft F90, VAST/f90, and Intel F90 compilers on Linux (MD7.0,
Debian woody).
Mailing list

There is a mailing list, f2py-users, available for users of the f2py
program; it is open for discussion, questions, and answers. You can
subscribe to the list here.
CVS Repository

f2py is being developed under CVS, and those who are interested in
the very latest (possibly unstable) version can get it from the
repository as follows:

  1. First you need to log in (the password is guest):
       > cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login
  2. Then do the checkout:
       > cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e
  3. In the directory f2py2e you can get updates by running
       > cvs -z6 update -P -d

You can browse the f2py CVS repository here.
Related sites

  1. Numerical Python.
  2. Pyfort -- The Python-Fortran connection tool.
  3. Scientific Python.
  4. SciPy -- Scientific tools for Python (includes Multipack).
  5. The Fortran Company.
  6. Fortran Standards.
  7. American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978.
  8. Mathtools.net -- A technical computing portal for all scientific and engineering needs.
Pearu Peterson <pearu(at)ioc.ee>
Last modified: Fri Jan 20 14:55:12 MST 2006
- - - - - - - - - - diff --git a/numpy/f2py/doc/intro.tex b/numpy/f2py/doc/intro.tex deleted file mode 100644 index d9625b09c..000000000 --- a/numpy/f2py/doc/intro.tex +++ /dev/null @@ -1,158 +0,0 @@ - -\section{Introduction} -\label{sec:intro} - -\fpy is a command line tool that generates Python C/API modules for -interfacing Fortran~77/90/95 codes and Fortran~90/95 modules from -Python. In general, using \fpy an -interface is produced in three steps: -\begin{itemize} -\item[(i)] \fpy scans Fortran sources and creates the so-called - \emph{signature} file; the signature file contains the signatures of - Fortran routines; the signatures are given in the free format of the - Fortran~90/95 language specification. Latest version of \fpy - generates also a make file for building shared module. - About currently supported compilers see the \fpy home page -\item[(ii)] Optionally, the signature files can be modified manually - in order to dictate how the Fortran routines should be called or - seemed from the Python environment. -\item[(iii)] \fpy reads the signature files and generates Python C/API - modules that can be compiled and imported to Python code. In - addition, a LaTeX document is generated that contains the - documentation of wrapped functions. -\end{itemize} -(Note that if you are satisfied with the default signature that \fpy -generates in step (i), all three steps can be covered with just -one call to \fpy --- by not specifying `\texttt{-h}' flag). -Latest versions of \fpy support so-called \fpy directive that allows -inserting various information about wrapping directly to Fortran -source code as comments (\texttt{f2py }). - -The following diagram illustrates the usage of the tool: -\begin{verbatim} -! Fortran file foo.f: - subroutine foo(a) - integer a - a = a + 5 - end -\end{verbatim} -\begin{verbatim} -! Fortran file bar.f: - function bar(a,b) - integer a,b,bar - bar = a + b - end -\end{verbatim} -\begin{itemize} -\item[(i)] \shell{\fpy foo.f bar.f -m foobar -h foobar.pyf} -\end{itemize} -\begin{verbatim} -!%f90 -! Signature file: foobar.pyf -python module foobar ! in - interface ! in :foobar - subroutine foo(a) ! in :foobar:foo.f - integer intent(inout) :: a - end subroutine foo - function bar(a,b) ! in :foobar:bar.f - integer :: a - integer :: b - integer :: bar - end function bar - end interface -end python module foobar -\end{verbatim} -\begin{itemize} -\item[(ii)] Edit the signature file (here I made \texttt{foo}s - argument \texttt{a} to be \texttt{intent(inout)}, see - Sec.~\ref{sec:attributes}). -\item[(iii)] \shell{\fpy foobar.pyf} -\end{itemize} -\begin{verbatim} -/* Python C/API module: foobarmodule.c */ -... -\end{verbatim} -\begin{itemize} -\item[(iv)] \shell{make -f Makefile-foobar} -%\shell{gcc -shared -I/usr/include/python1.5/ foobarmodule.c\bs\\ -%foo.f bar.f -o foobarmodule.so} -\end{itemize} -\begin{verbatim} -Python shared module: foobarmodule.so -\end{verbatim} -\begin{itemize} -\item[(v)] Usage in Python: -\end{itemize} -\vspace*{-4ex} -\begin{verbatim} ->>> import foobar ->>> print foobar.__doc__ -This module 'foobar' is auto-generated with f2py (version:1.174). -The following functions are available: - foo(a) - bar = bar(a,b) -. ->>> print foobar.bar(2,3) -5 ->>> from Numeric import * ->>> a = array(3) ->>> print a,foobar.foo(a),a -3 None 8 -\end{verbatim} -Information about how to call \fpy (steps (i) and (iii)) can be -obtained by executing\\ -\shell{\fpy}\\ -This will print the usage instructions. 
- Step (iv) is system dependent -(compiler and the locations of the header files \texttt{Python.h} and -\texttt{arrayobject.h}), and so you must know how to compile a shared -module for Python in you system. - -The next Section describes the step (ii) in more detail in order to -explain how you can influence to the process of interface generation -so that the users can enjoy more writing Python programs using your -wrappers that call Fortran routines. Step (v) is covered in -Sec.~\ref{sec:notes}. - - -\subsection{Features} -\label{sec:features} - -\fpy has the following features: -\begin{enumerate} -\item \fpy scans real Fortran codes and produces the signature files. - The syntax of the signature files is borrowed from the Fortran~90/95 - language specification with some extensions. -\item \fpy uses the signature files to produce the wrappers for - Fortran~77 routines and their \texttt{COMMON} blocks. -\item For \texttt{external} arguments \fpy constructs a very flexible - call-back mechanism so that Python functions can be called from - Fortran. -\item You can pass in almost arbitrary Python objects to wrapper - functions. If needed, \fpy takes care of type-casting and - non-contiguous arrays. -\item You can modify the signature files so that \fpy will generate - wrapper functions with desired signatures. \texttt{depend()} - attribute is introduced to control the initialization order of the - variables. \fpy introduces \texttt{intent(hide)} attribute to remove - the particular argument from the argument list of the wrapper - function. In addition, \texttt{optional} and \texttt{required} - attributes are introduced and employed. -\item \fpy supports almost all standard Fortran~77/90/95 constructs - and understands all basic Fortran types, including - (multi-dimensional, complex) arrays and character strings with - adjustable and assumed sizes/lengths. -\item \fpy generates a LaTeX document containing the - documentations of the wrapped functions (argument types, dimensions, - etc). The user can easily add some human readable text to the - documentation by inserting \texttt{note()} attribute to - the definition of routine signatures. -\item \fpy generates a GNU make file that can be used for building - shared modules calling Fortran functions. -\item \fpy supports wrapping Fortran 90/95 module routines. -\end{enumerate} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff --git a/numpy/f2py/doc/multiarray/array_from_pyobj.c b/numpy/f2py/doc/multiarray/array_from_pyobj.c deleted file mode 100644 index 7e0de9a74..000000000 --- a/numpy/f2py/doc/multiarray/array_from_pyobj.c +++ /dev/null @@ -1,323 +0,0 @@ -/* - * File: array_from_pyobj.c - * - * Description: - * ------------ - * Provides array_from_pyobj function that returns a contigious array - * object with the given dimensions and required storage order, either - * in row-major (C) or column-major (Fortran) order. The function - * array_from_pyobj is very flexible about its Python object argument - * that can be any number, list, tuple, or array. - * - * array_from_pyobj is used in f2py generated Python extension - * modules. 
- * - * Author: Pearu Peterson - * Created: 13-16 January 2002 - * $Id: array_from_pyobj.c,v 1.1 2002/01/16 18:57:33 pearu Exp $ - */ - - -#define ARR_IS_NULL(arr,mess) \ -if (arr==NULL) { \ - fprintf(stderr,"array_from_pyobj:" mess); \ - return NULL; \ -} - -#define CHECK_DIMS_DEFINED(rank,dims,mess) \ -if (count_nonpos(rank,dims)) { \ - fprintf(stderr,"array_from_pyobj:" mess); \ - return NULL; \ -} - -#define HAS_PROPER_ELSIZE(arr,type_num) \ - ((PyArray_DescrFromType(type_num)->elsize) == (arr)->descr->elsize) - -/* static */ -/* void f2py_show_args(const int type_num, */ -/* const int *dims, */ -/* const int rank, */ -/* const int intent) { */ -/* int i; */ -/* fprintf(stderr,"array_from_pyobj:\n\ttype_num=%d\n\trank=%d\n\tintent=%d\n",\ */ -/* type_num,rank,intent); */ -/* for (i=0;i1)) { - lazy_transpose(arr); - arr->flags &= ~CONTIGUOUS; - } - Py_INCREF(arr); - } - return arr; - } - - if (PyArray_Check(obj)) { /* here we have always intent(in) or - intent(inout) */ - - PyArrayObject *arr = (PyArrayObject *)obj; - int is_cont = (intent & F2PY_INTENT_C) ? - (ISCONTIGUOUS(arr)) : (array_has_column_major_storage(arr)); - - if (check_and_fix_dimensions(arr,rank,dims)) - return NULL; /*XXX: set exception */ - - if ((intent & F2PY_INTENT_COPY) - || (! (is_cont - && HAS_PROPER_ELSIZE(arr,type_num) - && PyArray_CanCastSafely(arr->descr->type_num,type_num)))) { - PyArrayObject *tmp_arr = NULL; - if (intent & F2PY_INTENT_INOUT) { - ARR_IS_NULL(NULL,"intent(inout) array must be contiguous and" - " with a proper type and size.\n") - } - if ((rank>1) && (! (intent & F2PY_INTENT_C))) - lazy_transpose(arr); - if (PyArray_CanCastSafely(arr->descr->type_num,type_num)) { - tmp_arr = (PyArrayObject *)PyArray_CopyFromObject(obj,type_num,0,0); - ARR_IS_NULL(arr,"CopyFromObject failed: array.\n"); - } else { - tmp_arr = (PyArrayObject *)PyArray_FromDims(arr->nd, - arr->dimensions, - type_num); - ARR_IS_NULL(tmp_arr,"FromDims failed: array with unsafe cast.\n"); - if (copy_ND_array(arr,tmp_arr)) - ARR_IS_NULL(NULL,"copy_ND_array failed: array with unsafe cast.\n"); - } - if ((rank>1) && (! (intent & F2PY_INTENT_C))) { - lazy_transpose(arr); - lazy_transpose(tmp_arr); - tmp_arr->flags &= ~CONTIGUOUS; - } - arr = tmp_arr; - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - return arr; - } - - if ((obj==Py_None) && (intent & F2PY_OPTIONAL)) { - PyArrayObject *arr = NULL; - CHECK_DIMS_DEFINED(rank,dims,"optional must have defined dimensions.\n"); - arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num); - ARR_IS_NULL(arr,"FromDims failed: optional.\n"); - if (intent & F2PY_INTENT_OUT) { - if ((!(intent & F2PY_INTENT_C)) && (rank>1)) { - lazy_transpose(arr); - arr->flags &= ~CONTIGUOUS; - } - Py_INCREF(arr); - } - return arr; - } - - if (intent & F2PY_INTENT_INOUT) { - ARR_IS_NULL(NULL,"intent(inout) argument must be an array.\n"); - } - - { - PyArrayObject *arr = (PyArrayObject *) \ - PyArray_ContiguousFromObject(obj,type_num,0,0); - ARR_IS_NULL(arr,"ContiguousFromObject failed: not a sequence.\n"); - if (check_and_fix_dimensions(arr,rank,dims)) - return NULL; /*XXX: set exception */ - if ((rank>1) && (! 
(intent & F2PY_INTENT_C))) { - PyArrayObject *tmp_arr = NULL; - lazy_transpose(arr); - arr->flags &= ~CONTIGUOUS; - tmp_arr = (PyArrayObject *) PyArray_CopyFromObject((PyObject *)arr,type_num,0,0); - Py_DECREF(arr); - arr = tmp_arr; - ARR_IS_NULL(arr,"CopyFromObject(Array) failed: intent(fortran)\n"); - lazy_transpose(arr); - arr->flags &= ~CONTIGUOUS; - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - return arr; - } - -} - - /*****************************************/ - /* Helper functions for array_from_pyobj */ - /*****************************************/ - -static -int array_has_column_major_storage(const PyArrayObject *ap) { - /* array_has_column_major_storage(a) is equivalent to - transpose(a).iscontiguous() but more efficient. - - This function can be used in order to decide whether to use a - Fortran or C version of a wrapped function. This is relevant, for - example, in choosing a clapack or flapack function depending on - the storage order of array arguments. - */ - int sd; - int i; - sd = ap->descr->elsize; - for (i=0;ind;++i) { - if (ap->dimensions[i] == 0) return 1; - if (ap->strides[i] != sd) return 0; - sd *= ap->dimensions[i]; - } - return 1; -} - -static -void lazy_transpose(PyArrayObject* arr) { - /* - Changes the order of array strides and dimensions. This - corresponds to the lazy transpose of a Numeric array in-situ. - Note that this function is assumed to be used even times for a - given array. Otherwise, the caller should set flags &= ~CONTIGUOUS. - */ - int rank,i,s,j; - rank = arr->nd; - if (rank < 2) return; - - for(i=0,j=rank-1;istrides[i]; - arr->strides[i] = arr->strides[j]; - arr->strides[j] = s; - s = arr->dimensions[i]; - arr->dimensions[i] = arr->dimensions[j]; - arr->dimensions[j] = s; - } -} - -static -int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims) { - /* - This function fills in blanks (that are -1's) in dims list using - the dimensions from arr. It also checks that non-blank dims will - match with the corresponding values in arr dimensions. - */ - const int arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1; - - if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ - int new_size = 1; - int free_axe = -1; - int i; - /* Fill dims where -1 or 0; check dimensions; calc new_size; */ - for(i=0;ind;++i) { - if (dims[i] >= 0) { - if (dims[i]!=arr->dimensions[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n", - i,dims[i],arr->dimensions[i]); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else { - dims[i] = arr->dimensions[i] ? 
arr->dimensions[i] : 1; - } - new_size *= dims[i]; - } - for(i=arr->nd;i1) { - fprintf(stderr,"%d-th dimension must be %d but got 0 (not defined).\n", - i,dims[i]); - return 1; - } else if (free_axe<0) - free_axe = i; - else - dims[i] = 1; - if (free_axe>=0) { - dims[free_axe] = arr_size/new_size; - new_size *= dims[free_axe]; - } - if (new_size != arr_size) { - fprintf(stderr,"confused: new_size=%d, arr_size=%d (maybe too many free" - " indices)\n",new_size,arr_size); - return 1; - } - } else { - int i; - for (i=rank;ind;++i) - if (arr->dimensions[i]>1) { - fprintf(stderr,"too many axes: %d, expected rank=%d\n",arr->nd,rank); - return 1; - } - for (i=0;i=0) { - if (arr->dimensions[i]!=dims[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n", - i,dims[i],arr->dimensions[i]); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else - dims[i] = arr->dimensions[i]; - } - return 0; -} - -/* End of file: array_from_pyobj.c */ diff --git a/numpy/f2py/doc/multiarray/bar.c b/numpy/f2py/doc/multiarray/bar.c deleted file mode 100644 index 350636ea6..000000000 --- a/numpy/f2py/doc/multiarray/bar.c +++ /dev/null @@ -1,15 +0,0 @@ - -#include - -void bar(int *a,int m,int n) { - int i,j; - printf("C:"); - printf("m=%d, n=%d\n",m,n); - for (i=0;id ==> generate tiling loop for index i with step size of - * e ==> generate tiling loop for index j with step size of - * i ==> generate loop for index i with unrolling factor of - * j ==> generate loop for index j with unrolling factor of - * ; ==> input terminator (required) - * rules are: - * i,j tokens must appear - * if d appears, it must appear before i - * if e appears, it must appear before j - * ; must appear - * matrix size is controlled by #define N in this program. - * - * this code was adapted from mmgen.c v1.2 and extended to generate pre- - * condition loops for unrolling factors that do not evenly divide the - * matrix size (or the tiling step size for loop nests with a tiling loop). - * note that this program only provides a preconditioning loop for the - * innermost loop. unrolling factors for non-innermost loops that do not - * evenly divide the matrix size (or step size) are not supported. - * - * my interest in this program generator is to hook it to a sentence - * generator and a minimum execution time finder, that is - * while((sentence=sgen())!=NULL){ - * genprogram=tpgen(sentence); - * system("cc -O4 genprogram.c"); - * system("a.out >> tpresults"); - * } - * findmintime(tpresults); - * this will find the optimum algorithm for the host system via an - * exhaustive search. - * - * please report bugs and suggestions for enhancements to me. 
- */ - -#include -#include -#include -#define N 500 - -#define ALLOC1 temp1=(struct line *)malloc(sizeof(struct line));\ -temp1->indentcnt=indentcnt; - -#define LINK1 temp1->next=insertbefore;\ -insertafter->next=temp1;\ -insertafter=temp1; - -#define INSERT1 temp1->next=start;\ -start=temp1; - -#define ALLOC2 temp1=(struct line *)malloc(sizeof(struct line));\ -temp2=(struct line *)malloc(sizeof(struct line));\ -temp1->indentcnt=indentcnt;\ -temp2->indentcnt=indentcnt++; - -#define LINK2 temp1->next=temp2;\ -temp2->next=insertbefore;\ -insertafter->next=temp1;\ -insertafter=temp1;\ -insertbefore=temp2; - -struct line{ int indentcnt; char line[256]; struct line *next; }; - -int indentcnt; -int iflag,jflag; -int ijflag,jiflag; -int dflag,eflag; -int counter; -int iistep,jjstep; -int iunroll,junroll; -int precond; - -char c; -int i,ttp,nt; -char *p0; -char tptype[80]; -char number[10]; - -struct line *start,*head,*insertafter,*insertbefore,*temp1,*temp2; - -void processloop(); -void processstmt(); - -main(){ - - indentcnt=0; - iflag=jflag=0; - ijflag=jiflag=0; - dflag=eflag=0; - iunroll=junroll=0; - counter=1; - precond=0; - ttp=0; - - start=NULL; - ALLOC2 - sprintf(temp1->line,"/* begin */\nt_start=second();\n"); - sprintf(temp2->line,"/* end */\nt_end = second();\n"); - head=temp1; temp1->next=temp2; temp2->next=NULL; - insertafter=temp1; insertbefore=temp2; - - while((c=getchar())!=';'){ - tptype[ttp++]=c; - if(isdigit(c)){ - nt=0; - while(isdigit(c)){ - number[nt++]=c; - c=getchar(); - if(c==';'){ fprintf(stderr,"unexpected ;!\n"); exit(1); } - tptype[ttp++]=c; - } - number[nt]='\0'; - sscanf(number,"%d",&counter); - } - switch(c){ - case 'd': - if(iflag){ fprintf(stderr,"d cannot appear after i!\n"); exit(1); } - dflag++; - ALLOC1 - sprintf(temp1->line,"#define IISTEP %d\n",counter); - INSERT1 - iistep=counter; - counter=1; - ALLOC2 - sprintf(temp1->line,"for(ii=0;ii<%d;ii+=IISTEP){\n",N); - sprintf(temp2->line,"}\n",N); - LINK2 - ALLOC1 - sprintf(temp1->line,"it=min(ii+IISTEP,%d);\n",N); - LINK1 - break; - case 'e': - if(jflag){ fprintf(stderr,"e cannot appear after j!\n"); exit(1); } - eflag++; - ALLOC1 - sprintf(temp1->line,"#define JJSTEP %d\n",counter); - INSERT1 - jjstep=counter; - counter=1; - ALLOC2 - sprintf(temp1->line,"for(jj=0;jj<%d;jj+=JJSTEP){\n",N); - sprintf(temp2->line,"}\n",N); - LINK2 - ALLOC1 - sprintf(temp1->line,"jt=min(jj+JJSTEP,%d);\n",N); - LINK1 - break; - case 'i': - iunroll=counter; - counter=1; - iflag++; if(jflag) jiflag++; - if(dflag) precond=iistep%iunroll; else precond=N%iunroll; - if(precond&&(jiflag==0)){ - fprintf(stderr,"unrolling factor for outer loop i\n"); - fprintf(stderr," does not evenly divide matrix/step size!\n"); - exit(1); - } - if(dflag&&(iunroll>1)&&(N%iistep)){ - fprintf(stderr,"with unrolling of i, step size for tiled loop ii\n"); - fprintf(stderr," does not evenly divide matrix size!\n"); - exit(1); - } - processloop('i',dflag,iunroll,precond,junroll); - break; - case 'j': - junroll=counter; - counter=1; - jflag++; if(iflag) ijflag++; - if(eflag) precond=jjstep%junroll; else precond=N%junroll; - if(precond&&(ijflag==0)){ - fprintf(stderr,"unrolling factor for outer loop j\n"); - fprintf(stderr," does not evenly divide matrix/step size!\n"); - exit(1); - } - if(eflag&&(junroll>1)&&(N%jjstep)){ - fprintf(stderr,"with unrolling of j, step size for tiled loop jj\n"); - fprintf(stderr," does not evenly divide matrix size!\n"); - exit(1); - } - processloop('j',eflag,junroll,precond,iunroll); - break; - default: break; - } - } - 
processstmt(); - - tptype[ttp++]=c; - - if((iflag==0)||(jflag==0)){ - fprintf(stderr, - "one of the loops (i,j) was not specified!\n"); - exit(1); - } - - temp1=start; - while(temp1!=NULL){ - printf("%s",temp1->line); - temp1=temp1->next; - } - printf("#include \n"); - printf("#include \n"); - printf("#include \n"); - if(dflag|eflag) printf("#define min(a,b) ((a)<=(b)?(a):(b))\n"); - printf("double second();\n"); - printf("double t_start,t_end,t_total;\n"); - printf("int times;\n"); - printf("\ndouble b[%d][%d],dummy[10000],bt[%d][%d];\n\nmain(){\n" - ,N,N,N,N); - if(precond) printf(" int i,j,n;\n"); else printf(" int i,j;\n"); - if(dflag) printf(" int ii,it;\n"); - if(eflag) printf(" int jj,jt;\n"); - printf("/* set coefficients so that result matrix should have \n"); - printf(" * column entries equal to column index\n"); - printf(" */\n"); - printf(" for (i=0;i<%d;i++){\n",N); - printf(" for (j=0;j<%d;j++){\n",N); - printf(" b[i][j] = (double) i;\n"); - printf(" }\n"); - printf(" }\n"); - printf("\n t_total=0.0;\n for(times=0;times<10;times++){\n\n",N); - printf("/* try to flush cache */\n"); - printf(" for(i=0;i<10000;i++){\n",N); - printf(" dummy[i] = 0.0;\n"); - printf(" }\n"); - printf("%s",head->line); - temp1=head->next; - while(temp1!=NULL){ - for(i=0;iindentcnt;i++) printf(" "); - while((p0=strstr(temp1->line,"+0"))!=NULL){ - *p0++=' '; *p0=' '; - } - printf("%s",temp1->line); - temp1=temp1->next; - } - printf("\n t_total+=t_end-t_start;\n }\n"); - printf("/* check result */\n"); - printf(" for (j=0;j<%d;j++){\n",N); - printf(" for (i=0;i<%d;i++){\n",N); - printf(" if (bt[i][j]!=((double)j)){\n"); - printf(" fprintf(stderr,\"error in bt[%cd][%cd]",'%','%'); - printf("\\n\",i,j);\n"); - printf(" fprintf(stderr,\" for %s\\n\");\n",tptype); - printf(" exit(1);\n"); - printf(" }\n"); - printf(" }\n"); - printf(" }\n"); - tptype[ttp]='\0'; - printf(" printf(\"%c10.2f secs\",t_total);\n",'%'); - printf(" printf(\" for 10 runs of %s\\n\");\n",tptype); - printf("}\n"); - printf("double second(){\n"); - printf(" void getrusage();\n"); - printf(" struct rusage ru;\n"); - printf(" double t;\n"); - printf(" getrusage(RUSAGE_SELF,&ru);\n"); - printf(" t = ((double)ru.ru_utime.tv_sec) +\n"); - printf(" ((double)ru.ru_utime.tv_usec)/1.0e6;\n"); - printf(" return t;\n"); - printf("}\n"); - -} - -void processloop(index,flag,unroll,precond,unroll2) -char index; -int flag,unroll,precond,unroll2; -{ - char build[80],temp[40]; - int n; - if(precond){ - ALLOC1 - sprintf(temp1->line,"/* preconditioning loop for unrolling factor */\n"); - LINK1 - if(unroll2==1){ - build[0]='\0'; - if(flag){ - if(index='i') - sprintf(temp,"n=IISTEP%c%d; ",'%',unroll); - else - sprintf(temp,"n=JJSTEP%c%d; ",'%',unroll); - strcat(build,temp); - sprintf(temp,"for(%c=%c%c;%c<%c%c+n;%c++) ",index,index,index, - index,index,index,index); - strcat(build,temp); - }else{ - sprintf(temp,"n=%d%c%d; ",N,'%',unroll); - strcat(build,temp); - sprintf(temp,"for(%c=0;%cline,"%s\n",build); - LINK1 - }else{ - if(flag){ - ALLOC1 - if(index=='i') - sprintf(temp1->line,"n=IISTEP%c%d;\n",'%',unroll); - else - sprintf(temp1->line,"n=JJSTEP%c%d;\n",'%',unroll); - LINK1 - ALLOC1 - sprintf(temp1->line,"for(%c=%c%c;%c<%c%c+n;%c++){\n",index,index,index, - index,index,index,index); - LINK1 - }else{ - ALLOC1 - sprintf(temp1->line,"n=%d%c%d;\n",N,'%',unroll); - LINK1 - ALLOC1 - sprintf(temp1->line,"for(%c=0;%cline," bt[i][j+%d]=b[j+%d][i];\n",n,n); - LINK1 - } - }else{ - for(n=0;nline," bt[i+%d][j]=b[j][i+%d];\n",n,n); - LINK1 - } - } - ALLOC1 - 
sprintf(temp1->line,"}\n"); - LINK1 - } - ALLOC2 - if(flag){ - sprintf(temp1->line,"for(%c=%c%c+n;%c<%ct;%c+=%d){\n",index,index,index, - index,index,index,unroll); - }else{ - sprintf(temp1->line,"for(%c=n;%c<%d;%c+=%d){\n",index,index,N,index, - unroll); - } - sprintf(temp2->line,"}\n",N); - LINK2 - }else{ - ALLOC2 - if(unroll==1){ - if(flag){ - sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c++){\n",index,index,index, - index,index,index); - }else{ - sprintf(temp1->line,"for(%c=0;%c<%d;%c++){\n",index,index,N,index); - } - }else{ - if(flag){ - sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c+=%d){\n",index,index,index, - index,index,index,unroll); - }else{ - sprintf(temp1->line,"for(%c=0;%c<%d;%c+=%d){\n",index,index,N,index, - unroll); - } - } - sprintf(temp2->line,"}\n",N); - LINK2 - } -} - -void processstmt() -{ - int i,j; - for(i=0;iline,"bt[i+%d][j+%d]=b[j+%d][i+%d];\n",i,j,j,i); - LINK1 - } - } -} --- -Mark Smotherman, Computer Science Dept., Clemson University, Clemson, SC - -======================================================================= -From: has (h.genceli@bre.com) - Subject: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -If I have a matrix nrows x ncols, I can store it in a vector. -so A(i,j) is really a[i*ncols+j]. So really TRANS of A -(say B) is really is also a vector B where - -0<=i b[j*nrows+i] wrote: -> If I have a matrix nrows x ncols, I can store it in a vector. -> so A(i,j) is really a[i*ncols+j]. So really TRANS of A -> (say B) is really is also a vector B where - -[snip] - -Hey, if you just want to do a transpose-matrix vector multiply, there is -no need to explicitly store the transpose matrix in another array and -doubling the storage! - -W.C. --- - - From: Robin Becker (robin@jessikat.fsnet.co.uk) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -In article , has -writes ->If I have a matrix nrows x ncols, I can store it in a vector. ->so A(i,j) is really a[i*ncols+j]. So really TRANS of A ->(say B) is really is also a vector B where -> ->0<=i b[j*nrows+i] b[j*nrows+i] = a[i*ncols+j]. -> ->Fine but I want to use only one array a to do this transformation. -> ->i.e a[j*nrows+i] = a[i*ncols+j]. this will itself ->erase some elements so each time a swap is necessary in a loop. -> ->temp = a[j*nrows+i] ->a[j*nrows+i] = a[i*ncols+j] ->a[i*ncols+j] = temp -> ->but still this will lose some info as it is, so indexing ->should have more intelligence in it ???? anybody ->can give me a lead here, thanks. -> ->Has -> -> -> - -void dmx_transpose(unsigned n, unsigned m, double* a, double* b) -{ - unsigned size = m*n; - if(b!=a){ - real *bmn, *aij, *anm; - bmn = b + size; /*b+n*m*/ - anm = a + size; - while(b3){ - unsigned i,row,column,current; - for(i=1, size -= 2;ii) { - real temp = a[i]; - a[i] = a[current]; - a[current] = temp; - } - } - } -} --- -Robin Becker - - From: E. Robert Tisdale (edwin@netwood.net) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -Take a look at -The C++ Scalar, Vector, Matrix and Tensor class library - - http://www.netwood.net/~edwin/svmt/ - -SubVector& - SubVector::transpose(Extent p, Extent q) { - SubVector& - v = *this; - if (1 < p && 1 < q) { - // A vector v of extent n = qp is viewed as a q by p matrix U and - // a p by q matrix V where U_{ij} = v_{p*i+j} and V_{ij} = v_{q*i+j}. 
- // The vector v is modified in-place so that V is the transpose of U. - // The algorithm searches for every sequence k_s of S indices - // such that a circular shift of elements v_{k_s} <-- v_{k_{s+1}} - // and v_{k_{S-1}} <-- v_{k_0} effects an in-place transpose. - Extent n = q*p; - Extent m = 0; // count up to n-2 - Offset l = 0; // 1 <= l <= n-2 - while (++l < n-1 && m < n-2) { - Offset k = l; - Offset j = k; - while (l < (k = (j%p)*q + j/p)) { // Search backward for k < l. - j = k; - } - // If a sequence of indices beginning with l has any index k < l, - // it has already been transposed. The sequence length S = 1 - // and diagonal element v_k is its own transpose if k = j. - // Skip every index sequence that has already been transposed. - if (k == l) { // a new sequence - if (k < j) { // with 1 < S - TYPE x = v[k]; // save v_{k_0} - do { - v[k] = v[j]; // v_{k_{s}} <-- v_{k_{s+1}} - k = j; - ++m; - } while (l < (j = (k%q)*p + k/q)); - v[k] = x; // v_{k_{S-1}} <-- v_{k_0} - } - ++m; - } - } - } return v; - } - - - -SubVector& - -Read the rest of this message... (50 more lines) - - From: Victor Eijkhout (eijkhout@disco.cs.utk.edu) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -"Alan Miller" writes: - -> The attached routine does an in situ transpose. -> begin 666 Dtip.f90 -> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM - -Hm. F90? You're not silently allocating a temporary I hope? - -(Why did you have to encode this? Now I have to save, this decode, ... -and all for plain ascii?) - --- -Victor Eijkhout -"When I was coming up, [..] we knew exactly who the they were. It was us -versus them, and it was clear who the them was were. Today, we are not -so sure who the they are, but we know they're there." [G.W. Bush] - - From: Alan Miller (amiller_@_vic.bigpond.net.au) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -Victor Eijkhout wrote in message ... ->"Alan Miller" writes: -> ->> The attached routine does an in situ transpose. ->> begin 666 Dtip.f90 ->> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM -> ->Hm. F90? You're not silently allocating a temporary I hope? -> ->(Why did you have to encode this? Now I have to save, this decode, ... ->and all for plain ascii?) -> - -I know the problem. -I sometimes use a Unix system, and have to use decode64 to read -attachments. On the other hand, Windows wraps lines around, -formats then and generally makes the code unreadable. - -The straight code for dtip (double transpose in place) is attached -this time. - ->-- ->Victor Eijkhout - - --- -Alan Miller, Retired Scientist (Statistician) -CSIRO Mathematical & Information Sciences -Alan.Miller -at- vic.cmis.csiro.au -http://www.ozemail.com.au/~milleraj -http://users.bigpond.net.au/amiller/ - - -================================================================= - -From: Darran Edmundson (dedmunds@sfu.ca) - Subject: array reordering algorithm? - Newsgroups: sci.math.num-analysis - Date: 1995/04/30 - - -A code I've written refers to a complex array as two separate real arrays. -However, I have a canned subroutine which expects a single array where the -real and imaginary values alternate. Essentially I have a case of mismatched -data structures, yet for reasons that I'd rather not go into, I'm stuck with them. 
- -Assuming that the two real arrays A and B are sequential in memory, and -that the single array of alternating real/imaginary values C shares the same -space, what I need is a porting subroutine that remaps the data from one format -to the other - using as little space as possible. - -I think of the problem as follows. Imagine an array of dimension 10 containing -the values 1,3,5,7,9,2,4,6,8,10 in this order. - - A(1) / 1 \ C(1) - A(2) | 3 | C(2) - A(3) | 5 | C(3) - A(4) | 7 | C(4) - A(5) \ 9 | C(5) - | - B(1) / 2 | C(6) - B(2) | 4 | C(7) - B(3) | 6 | C(8) - B(4) | 8 | C(9) - B(5) \ 10 / C(10) - -Given that I know this initial pattern, I want to sort the array C in-place *without -making comparisons*. That is, the algorithm can only depend on the initial -knowledge of the pattern. Do you see what a sort is going to do? It will -make the A and B arrays alternate, i.e. C(1)=A(1), C(2)=B(1), C(3)=A(2), -C(4)=B(2), etc. It's not a real sort though because I can't actually refer to the -values above (i.e. no comparisons) because A and B will be holding real data, -not this contrived pattern. The pattern above exists though - it's the -natural ordering in memory of A and B. - -Either pair swapping only or a small amount of workspace can be used. The -in-place is important - imagine scaling this problem up to an -array of 32 or 64 million double precision values and you can easily see how -duplicating the array is not a feasible solution. - -Any ideas? I've been stumped on this for a day and a half now. - -Darran Edmundson -dedmunds@sfu.ca - - From: Roger Critchlow (rec@elf115.elf.org) - Subject: Re: array reordering algorithm? - Newsgroups: sci.math.num-analysis - Date: 1995/04/30 - - - Any ideas? I've been stumped on this for a day and a half now. - -Here's some code for in situ permutations of arrays that I wrote -a few years ago. It all started from the in situ transposition -algorithms in the Collected Algorithms of the ACM, the references -for which always get lost during the decryption from fortran. - -This is the minimum space algorithm. All you need to supply is -a function which computes the new order array index from the old -order array index. - -If you can spare n*m bits to record the indexes of elements which -have been permuted, then you can speed things up. - --- rec -- - ------------------------------------------------------------------------- -/* -** Arbitrary in situ permutations of an m by n array of base type TYPE. -** Copyright 1995 by Roger E Critchlow Jr, rec@elf.org, San Francisco, CA. -** Fair use permitted, caveat emptor. 
-*/ -typedef int TYPE; - -int transposition(int ij, int m, int n) /* transposition about diagonal from upper left to lower right */ -{ return ((ij%m)*n+ (ij/m)); } - -int countertrans(int ij, int m, int n) /* transposition about diagonal from upper right to lower left */ -{ return ((m-1-(ij%m))*n+ (n-1-(ij/m))); } - -int rotate90cw(int ij, int m, int n) /* 90 degree clockwise rotation */ -{ return ((m-1-(ij%m))*n+ (ij/m)); } - -int rotate90ccw(int ij, int m, int n) /* 90 degree counter clockwise rotation */ -{ return ((ij%m)*n+ (n-1-(ij/m))); } - -int rotate180(int ij, int m, int n) /* 180 degree rotation */ -{ return ((m-1-(ij/n))*n+ (n-1-(ij%n))); } - -int reflecth(int ij, int m, int n) /* reflection across horizontal plane */ -{ return ((m-1-(ij/n))*n+ (ij%n)); } - -int reflectv(int ij, int m, int n) /* reflection across vertical plane */ -{ return ((ij/n)*n+ (n-1-(ij%n))); } - -int in_situ_permutation(TYPE a[], int m, int n, int (*origination)(int ij, int m, int n)) -{ - int ij, oij, dij, n_to_do; - TYPE b; - n_to_do = m*n; - for (ij = 0; ij < m*n && n_to_do > 0; ij += 1) { - /* Test for previously permuted */ - for (oij = origination(ij,m,n); oij > ij; oij = origination(oij,m,n)) - ; - if (oij < ij) - continue; - /* Chase the cycle */ - dij = ij; - b = a[ij]; - for (oij = origination(dij,m,n); oij != ij; oij = origination(dij,m,n)) { - a[dij] = a[oij]; - dij = oij; - n_to_do -= 1; - } - a[dij] = b; - n_to_do -= 1; - } return 0; -} - -#define TESTING 1 -#if TESTING - -/* fill a matrix with sequential numbers, row major ordering */ -void fill_matrix_rows(a, m, n) TYPE *a; int m, n; -{ - int i, j; - for (i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - a[i*n+j] = i*n+j; -} - -/* fill a matrix with sequential numbers, column major ordering */ -void fill_matrix_cols(a, m, n) TYPE *a; int m, n; -{ - int i, j; - for (i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - a[i*n+j] = j*m+i; -} - -/* test a matrix for sequential numbers, row major ordering */ -int test_matrix_rows(a, m, n) TYPE *a; int m, n; -{ - int i, j, o; - for (o = i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - o += a[i*n+j] != i*n+j; - return o; -} - -/* test a matrix for sequential numbers, column major ordering */ -int test_matrix_cols(a, m, n) TYPE *a; int m, n; -{ - int i, j, o; - for (o = i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - o += a[i*n+j] != j*m+i; - return o; -} - -/* print a matrix */ -void print_matrix(a, m, n) TYPE *a; int m, n; -{ - char *format; - int i, j; - if (m*n < 10) format = "%2d"; - if (m*n < 100) format = "%3d"; - if (m*n < 1000) format = "%4d"; - if (m*n < 10000) format = "%5d"; - for (i = 0; i < m; i += 1) { - for (j = 0; j < n; j += 1) - printf(format, a[i*n+j]); - printf("\n"); - } -} - -#if TEST_TRANSPOSE -#define MAXSIZE 1000 - -main() -{ - int i, j, m, n, o; - TYPE a[MAXSIZE]; - for (m = 1; m < sizeof(a)/sizeof(a[0]); m += 1) - for (n = 1; m*n < sizeof(a)/sizeof(a[0]); n += 1) { - fill_matrix_rows(a, m, n); /* {0 1} {2 3} */ - if (o = transpose(a, m, n)) - printf(">> transpose returned %d for a[%d][%d], row major\n", o, m, n); - if ((o = test_matrix_cols(a, n, m)) != 0) /* {0 2} {1 3} */ - printf(">> transpose made %d mistakes for a[%d][%d], row major\n", o, m, n); - /* column major */ - fill_matrix_rows(a, m, n); - if (o = transpose(a, m, n)) - printf(">> transpose returned %d for a[%d][%d], column major\n", o, m, n); - if ((o = test_matrix_cols(a, n, m)) != 0) - printf(">> transpose made %d mistakes for a[%d][%d], column major\n", o, m, n); - } return 0; -} -#endif /* 
TEST_TRANSPOSE */ - - -#define TEST_DISPLAY 1 -#if TEST_DISPLAY -main(argc, argv) int argc; char *argv[]; -{ - TYPE *a; - int m = 5, n = 5; - extern void *malloc(); - if (argc > 1) { - m = atoi(argv[1]); - if (argc > 2) - n = atoi(argv[2]); - } - a = malloc(m*n*sizeof(TYPE)); - - printf("matrix\n"); - fill_matrix_rows(a, m, n); - print_matrix(a, m, n); - printf("transposition\n"); - in_situ_permutation(a, m, n, transposition); - print_matrix(a, n, m); - - printf("counter transposition\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, countertrans); - print_matrix(a, n, m); - - printf("rotate 90 degrees clockwise\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, rotate90cw); - print_matrix(a, n, m); - - printf("rotate 90 degrees counterclockwise\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, rotate90ccw); - print_matrix(a, n, m); - - printf("rotate 180 degrees\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, rotate180); - print_matrix(a, m, n); - - printf("reflect across horizontal\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, reflecth); - print_matrix(a, m, n); - - printf("reflect across vertical\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, reflectv); - print_matrix(a, m, n); - - return 0; -} - -#endif -#endif - diff --git a/numpy/f2py/doc/multiarrays.txt b/numpy/f2py/doc/multiarrays.txt deleted file mode 100644 index 704208976..000000000 --- a/numpy/f2py/doc/multiarrays.txt +++ /dev/null @@ -1,120 +0,0 @@ -From pearu@ioc.ee Thu Dec 30 09:58:01 1999 -Date: Fri, 26 Nov 1999 12:02:42 +0200 (EET) -From: Pearu Peterson -To: Users of f2py2e -- Curtis Jensen , - Vladimir Janku , - Travis Oliphant -Subject: Multidimensional arrays in f2py2e - - -Hi! - -Below I will describe how f2py2e wraps Fortran multidimensional arrays as -it constantly causes confusion. As for example, consider Fortran code - - subroutine foo(l,m,n,a) - integer l,m,n - real*8 a(l,m,n) - .. - end -Running f2py2e with -h flag, it generates the following signature - -subroutine foo(l,m,n,a) - integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2) - integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1) - integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0) - real*8 dimension(l,m,n),check(rank(a)==3) :: a -end subroutine foo - -where parameters l,m,n are considered optional and they are initialized in -Python C/API code using the array a. Note that a can be also a proper -list, that is, asarray(a) should result in a rank-3 array. But then there -is an automatic restriction that elements of a (in Python) are not -changeable (in place) even if Fortran subroutine changes the array a (in -C,Fortran). - -Hint: you can attribute the array a with 'intent(out)' which causes foo to -return the array a (in Python) if you are to lazy to define a=asarray(a) -before the call to foo (in Python). - -Calling f2py2e without the switch -h, a Python C/API module will be -generated. After compiling it and importing it to Python ->>> print foo.__doc__ -shows -None = foo(a,l=shape(a,2),m=shape(a,1),n=shape(a,0)) - -You will notice that f2py2e has changed the order of arguments putting the -optional ones at the end of the argument list. -Now, you have to be careful when specifying the parameters l,m,n (though -situations where you need this should be rare). 
A proper definition -of the array a should be, say - - a = zeros(n,m,l) - -Note that the dimensions l,m,n are in reverse, that is, the array a should -be transposed when feeding it to the wrapper. - -Hint (and a performance hit): To be always consistent with fortran -arrays, you can define, for example - a = zeros(l,m,n) -and call from Python - foo(transpose(a),l,m,n) -which is equivalent with the given Fortran call - call foo(l,m,n,a) - -Another hint (not recommended, though): If you don't like optional -arguments feature at all and want to be strictly consistent with Fortran -signature, that is, you want to call foo from Python as - foo(l,m,n,a) -then you should edit the signature to -subroutine foo(l,m,n,a) - integer :: l - integer :: m - integer :: n - real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), & - check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a -end -Important! Note that now the array a should depend on l,m,n -so that the checks can be performed in the proper order. -(you cannot check, say, shape(a,2)==l before initializing a or l) -(There are other ways to edit the signature in order to get the same -effect but they are not so safe and I will not discuss about them here). - -Hint: If the array a should be a work array (as used frequently in -Fortran) and you a too lazy (its good lazyness;) to provide it (in Python) -then you can define it as optional by ediding the signature: -subroutine foo(l,m,n,a) - integer :: l - integer :: m - integer :: n - real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), & - check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a - optional a -end -Note again that the array a must depend on l,m,n. Then the array a will be -allocated in the Python C/API module. Not also that ->>> print foo.__doc__ -shows then -None = foo(l,m,n,a=) -Performance hint: If you call the given foo lots of times from Python then -you don't want to allocate/deallocate the memory in each call. So, it is -then recommended to define a temporary array in Python, for instance ->>> tmp = zeros(n,m,l) ->>> for i in ...: ->>> foo(l,m,n,a=tmp) - -Important! It is not good at all to define - >>> tmp = transpose(zeros(l,m,n)) -because tmp will be then a noncontiguous array and there will be a -huge performance hit as in Python C/API a new array will be allocated and -also a copying of arrays will be performed elementwise! -But - >>> tmp = asarray(transpose(zeros(l,m,n))) -is still ok. - -I hope that the above answers lots of your (possible) questions about -wrapping Fortran multidimensional arrays with f2py2e. - -Regards, - Pearu - diff --git a/numpy/f2py/doc/notes.tex b/numpy/f2py/doc/notes.tex deleted file mode 100644 index 2746b049d..000000000 --- a/numpy/f2py/doc/notes.tex +++ /dev/null @@ -1,310 +0,0 @@ - -\section{Calling wrapper functions from Python} -\label{sec:notes} - -\subsection{Scalar arguments} -\label{sec:scalars} - -In general, for scalar argument you can pass in in -addition to ordinary Python scalars (like integers, floats, complex -values) also arbitrary sequence objects (lists, arrays, strings) --- -then the first element of a sequence is passed in to the Fortran routine. - -It is recommended that you always pass in scalars of required type. This -ensures the correctness as no type-casting is needed. -However, no exception is raised if type-casting would produce -inaccurate or incorrect results! 
For example, in place of an expected -complex value you can give an integer, or vice-versa (in the latter case only -a rounded real part of the complex value will be used). - -If the argument is \texttt{intent(inout)} then Fortran routine can change the -value ``in place'' only if you pass in a sequence object, for -instance, rank-0 array. Also make sure that the type of an array is of -correct type. Otherwise type-casting will be performed and you may -get inaccurate or incorrect results. The following example illustrates this -\begin{verbatim} ->>> a = array(0) ->>> calculate_pi(a) ->>> print a -3 -\end{verbatim} - -If you pass in an ordinary Python scalar in place of -\texttt{intent(inout)} variable, it will be used as an input argument -since -Python -scalars cannot not be changed ``in place'' (all Python scalars -are immutable objects). - -\subsection{String arguments} -\label{sec:strings} - -You can pass in strings of arbitrary length. If the length is greater than -required, only a required part of the string is used. If the length -is smaller than required, additional memory is allocated and fulfilled -with `\texttt{\bs0}'s. - -Because Python strings are immutable, \texttt{intent(inout)} argument -expects an array version of a string --- an array of chars: -\texttt{array("")}. -Otherwise, the change ``in place'' has no effect. - - -\subsection{Array arguments} -\label{sec:arrays} - -If the size of an array is relatively large, it is \emph{highly - recommended} that you pass in arrays of required type. Otherwise, -type-casting will be performed which includes the creation of new -arrays and their copying. If the argument is also -\texttt{intent(inout)}, the wasted time is doubled. So, pass in arrays -of required type! - -On the other hand, there are situations where it is perfectly all -right to ignore this recommendation: if the size of an array is -relatively small or the actual time spent in Fortran routine takes -much longer than copying an array. Anyway, if you want to optimize -your Python code, start using arrays of required types. - -Another source of performance hit is when you use non-contiguous -arrays. The performance hit will be exactly the same as when using -incorrect array types. This is because a contiguous copy is created -to be passed in to the Fortran routine. - -\fpy provides a feature such that the ranks of array arguments need -not to match --- only the correct total size matters. For example, if -the wrapper function expects a rank-1 array \texttt{array([...])}, -then it is correct to pass in rank-2 (or higher) arrays -\texttt{array([[...],...,[...]])} assuming that the sizes will match. -This is especially useful when the arrays should contain only one -element (size is 1). Then you can pass in arrays \texttt{array(0)}, -\texttt{array([0])}, \texttt{array([[0]])}, etc and all cases are -handled correctly. In this case it is correct to pass in a Python -scalar in place of an array (but then ``change in place'' is ignored, -of course). - -\subsubsection{Multidimensional arrays} - -If you are using rank-2 or higher rank arrays, you must always -remember that indexing in Fortran starts from the lowest dimension -while in Python (and in C) the indexing starts from the highest -dimension (though some compilers have switches to change this). As a -result, if you pass in a 2-dimensional array then the Fortran routine -sees it as the transposed version of the array (in multi-dimensional -case the indexes are reversed). 
- -You must take this matter into account also when modifying the -signature file and interpreting the generated Python signatures: - -\begin{itemize} -\item First, when initializing an array using \texttt{init\_expr}, the index -vector \texttt{\_i[]} changes accordingly to Fortran convention. -\item Second, the result of CPP-macro \texttt{shape(,0)} - corresponds to the last dimension of the Fortran array, etc. -\end{itemize} -Let me illustrate this with the following example:\\ -\begin{verbatim} -! Fortran file: arr.f - subroutine arr(l,m,n,a) - integer l,m,n - real*8 a(l,m,n) - ... - end -\end{verbatim} -\fpy will generate the following signature file:\\ -\begin{verbatim} -!%f90 -! Signature file: arr.f90 -python module arr ! in - interface ! in :arr - subroutine arr(l,m,n,a) ! in :arr:arr.f - integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2) - integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1) - integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0) - real*8 dimension(l,m,n) :: a - end subroutine arr - end interface -end python module arr -\end{verbatim} -and the following wrapper function will be produced -\begin{verbatim} -None = arr(a,l=shape(a,2),m=shape(a,1),n=shape(a,0)) -\end{verbatim} - -In general, I would suggest not to specify the given optional -variables \texttt{l,m,n} when calling the wrapper function --- let the -interface find the values of the variables \texttt{l,m,n}. But there -are occasions when you need to specify the dimensions in Python. - -So, in Python a proper way to create an array from the given -dimensions is -\begin{verbatim} ->>> a = zeros(n,m,l,'d') -\end{verbatim} -(note that the dimensions are reversed and correct type is specified), -and then a complete call to \texttt{arr} is -\begin{verbatim} ->>> arr(a,l,m,n) -\end{verbatim} - -From the performance point of view, always be consistent with Fortran -indexing convention, that is, use transposed arrays. But if you do the -following -\begin{verbatim} ->>> a = transpose(zeros(l,m,n,'d')) ->>> arr(a) -\end{verbatim} -then you will get a performance hit! The reason is that here the -transposition is not actually performed. Instead, the array \texttt{a} -will be non-contiguous which means that before calling a Fortran -routine, internally a contiguous array is created which -includes memory allocation and copying. In addition, if -the argument array is also \texttt{intent(inout)}, the results are -copied back to the initial array which doubles the -performance hit! - -So, to improve the performance: always pass in -arrays that are contiguous. - -\subsubsection{Work arrays} - -Often Fortran routines use the so-called work arrays. The -corresponding arguments can be declared as optional arguments, but be -sure that all dimensions are specified (bounded) and defined before -the initialization (dependence relations). - -On the other hand, if you call the Fortran routine many times then you -don't want to allocate/deallocate the memory of the work arrays on -every call. In this case it is recommended that you create temporary -arrays with proper sizes in Python and use them as work arrays. But be -careful when specifying the required type and be sure that the -temporary arrays are contiguous. Otherwise the performance hit would -be even harder than the hit when not using the temporary arrays from -Python! - - - -\subsection{Call-back arguments} -\label{sec:cbargs} - -\fpy builds a very flexible call-back mechanisms for call-back -arguments. 
If the wrapper function expects a call-back function \texttt{fun} -with the following Python signature to be passed in -\begin{verbatim} -def fun(a_1,...,a_n): - ... - return x_1,...,x_k -\end{verbatim} -but the user passes in a function \texttt{gun} with the signature -\begin{verbatim} -def gun(b_1,...,b_m): - ... - return y_1,...,y_l -\end{verbatim} -and the following extra arguments (specified as additional optional -argument for the wrapper function): -\begin{verbatim} -fun_extra_args = (e_1,...,e_p) -\end{verbatim} -then the actual call-back is constructed accordingly to the following rules: -\begin{itemize} -\item if \texttt{p==0} then \texttt{gun(a\_1,...,a\_q)}, where - \texttt{q=min(m,n)}; -\item if \texttt{n+p<=m} then \texttt{gun(a\_1,...,a\_n,e\_1,...,e\_p)}; -\item if \texttt{p<=mm} then \texttt{gun(e\_1,...,e\_m)}; -\item if \texttt{n+p} is less than the number of required arguments - of the function \texttt{gun}, an exception is raised. -\end{itemize} - -A call-back function \texttt{gun} may return any number of objects as a tuple: -if \texttt{kl}, then only objects \texttt{x\_1,...,x\_l} are set. - - -\subsection{Obtaining information on wrapper functions} -\label{sec:info} - -From the previous sections we learned that it is useful for the -performance to pass in arguments of expected type, if possible. To -know what are the expected types, \fpy generates a complete -documentation strings for all wrapper functions. You can read them -from Python by printing out \texttt{\_\_doc\_\_} attributes of the -wrapper functions. For the example in Sec.~\ref{sec:intro}: -\begin{verbatim} ->>> print foobar.foo.__doc__ -Function signature: - foo(a) -Required arguments: - a : in/output rank-0 array(int,'i') ->>> print foobar.bar.__doc__ -Function signature: - bar = bar(a,b) -Required arguments: - a : input int - b : input int -Return objects: - bar : int -\end{verbatim} - -In addition, \fpy generates a LaTeX document -(\texttt{module.tex}) containing a bit more information on -the wrapper functions. See for example Appendix that contains a result -of the documentation generation for the example module -\texttt{foobar}. Here the file \texttt{foobar-smart.f90} (modified -version of \texttt{foobar.f90}) is used --- it contains -\texttt{note()} attributes for specifying some additional -information. - -\subsection{Wrappers for common blocks} -\label{sec:wrapcomblock} - -[See examples \texttt{test-site/e/runme*}] - -What follows is obsolute for \fpy version higher that 2.264. - -\fpy generates wrapper functions for common blocks. For every common -block with a name \texttt{} a function -\texttt{get\_()} is constructed that takes no arguments -and returns a dictionary. The dictionary represents maps between the -names of common block fields and the arrays containing the common -block fields (multi-dimensional arrays are transposed). So, in order -to access to the common block fields, you must first obtain the -references -\begin{verbatim} -commonblock = get_() -\end{verbatim} -and then the fields are available through the arrays -\texttt{commonblock[""]}. -To change the values of common block fields, you can use for scalars -\begin{verbatim} -commonblock[""][0] = -\end{verbatim} -and for arrays -\begin{verbatim} -commonblock[""][:] = -\end{verbatim} -for example. - -For more information on the particular common block wrapping, see -\texttt{get\_.\_\_doc\_\_}. 
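As a purely illustrative sketch of the access pattern described above
(the common block name \texttt{cdata} and its fields \texttt{n} and
\texttt{x} are hypothetical, and this follows the pre-2.264 scheme
documented in this subsection):
\begin{verbatim}
>>> commonblock = get_cdata()   # hypothetical wrapper for COMMON /cdata/
>>> commonblock["n"][0] = 10    # change a scalar field in place
>>> commonblock["x"][:] = 0.0   # overwrite an array field in place
\end{verbatim}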
- -\subsection{Wrappers for F90/95 module data and routines} -\label{sec:wrapf90modules} - -[See example \texttt{test-site/mod/runme\_mod}] - -\subsection{Examples} -\label{sec:examples} - -Examples on various aspects of wrapping Fortran routines to Python can -be found in directories \texttt{test-site/d/} and -\texttt{test-site/e/}: study the shell scripts \texttt{runme\_*}. See -also files in \texttt{doc/ex1/}. - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff --git a/numpy/f2py/doc/oldnews.html b/numpy/f2py/doc/oldnews.html deleted file mode 100644 index 0e09c032f..000000000 --- a/numpy/f2py/doc/oldnews.html +++ /dev/null @@ -1,121 +0,0 @@ - - - - - - -F2PY - Fortran to Python Interface Generator - - - - -

F2PY old news.

- -
-
February 23, 2002 -
Fixed a bug causing incorrect shapes of multi-dimensional arrays - when returning from a Fortran routine (thanks to Eric for pointing - this out). - F2PY_REPORT_ATEXIT is disabled by default under Win32.
February 14, 2002 -
Introduced the callprotoargument statement so that - proper prototypes can be specified (this fixes SEGFAULTs when - wrapping C functions with f2py; see NEWS.txt for more details). Updated for the - latest numpy_distutils. Fixed a few bugs.
February 3, 2002 -
Introduced the intent(overwrite) and intent(out=name) - attributes and the callstatement C-expr; statement, and - reviewed reference counting in the callback mechanism. Fixed bugs.
January 18, 2002 -
Introduced extra keyword argument copy_#varname#=1 - for intent(copy) variables, - -DF2PY_REPORT_ATEXIT for reporting f2py - performance, - has_column_major_storage member function for generated - modules, and dmalloc support. -
January 16, 2002 -
BREAKING NEWS! Solved the long-standing dilemma of wrapping - multi-dimensional arrays where the different - storage orders in C and Fortran come into play. From now on - this difference is handled automatically by the f2py-generated - module, and in a very efficient way. For example, the corresponding - element A(i,j) of a Fortran array can be accessed in Python as - A[i,j].
January 13, 2002 -
Fifth Public Release is coming soon; a snapshot is available - for download, now with updates.
December 17, 2001 -
Fourth Public Release: Win32 support. -
Making f2py2e a module. Currently it has only one - member function run_main(comline_list). -
Removed command line arguments -fix,-f90,-f77 - and introduced many new ones. See NEWS.txt. -
intent(..) statement with empty name list defines - default intent(..) attribute for all routine arguments. -
Refinements in Win32 support. Eric Jones has provided an f2py - HOWTO for Windows users. See win32_notes.txt.
Major rewrite of the code generator to achieve - a higher quality of generated C/API modules (-Wall messages are - considerably reduced, especially for callback functions).
Many bugs were fixed. -
December 12, 2001 -
Win32 support (thanks to Eric Jones and Tiffany Kamm). Minor - cleanups and fixes. -
December 4, 2001 -
Third Public Release: f2py supports distutils. It can be - installed with one and it generates setup_modulename.py - to be used for building Python extension modules. -
Introduced threadsafe, fortranname, - and intent(c) statements. -
August 13, 2001 -
Changed the name FPIG to F2PY to avoid confusion with other project names.
Updated f2py for use with Numeric version 20.x. -
January 12, 2001 -
Example usages of PyFortranObject. - Fixed bugs. Updated the - Python 9 Conference paper (F2PY paper). -
December 9, 2000 -
Implemented support for PARAMETER statement. -
November 6, 2000 -
Submitted a paper for 9th Python Conference (accepted). It is available in html, PDF, - and Gzipped PS formats. -
September 17, 2000 -
Support for F90/95 module data and routines. COMMON block - wrapping is rewritten. New signature file syntax: - python module. Signature files generated with - f2py-2.264 or earlier are incompatible (replace - module with - python module).
September 12, 2000 -
The second public release of f2py is out. See Release notes. -
September 11, 2000 -
Now f2py supports wrapping Fortran 90/95 module routines - (support for F90/95 module data coming soon) -
June 12, 2000 -
Now f2py has a mailing list f2py-users open for discussion. - -
- - - -
-
-Pearu Peterson -<pearu(at)ioc.ee>
- -Last modified: Mon Dec 3 19:40:26 EET 2001 - -
- - - - - - - - diff --git a/numpy/f2py/doc/options.tex b/numpy/f2py/doc/options.tex deleted file mode 100644 index 84d9410f8..000000000 --- a/numpy/f2py/doc/options.tex +++ /dev/null @@ -1,63 +0,0 @@ - -\section{\fpy command line options} -\label{sec:opts} - -\fpy has the following command line syntax (run \fpy without arguments -to get up to date options!!!): -\begin{verbatim} -f2py [] [[[only:]||[skip:]] ]\ - [: ...] -\end{verbatim} -where -\begin{description} -\item[\texttt{}] --- the following options are available: - \begin{description} - \item[\texttt{-f77}] --- \texttt{} are in Fortran~77 - fixed format (default). - \item[\texttt{-f90}] --- \texttt{} are in - Fortran~90/95 free format (default for signature files). - \item[\texttt{-fix}] --- \texttt{} are in - Fortran~90/95 fixed format. - \item[\texttt{-h }] --- after scanning the - \texttt{} write the signatures of Fortran routines - to file \texttt{} and exit. If \texttt{} - exists, \fpy quits without overwriting the file. Use - \texttt{-{}-overwrite-signature} to overwrite. - \item[\texttt{-m }] --- specify the name of the module - when scanning Fortran~77 codes for the first time. \fpy will - generate Python C/API module source \texttt{module.c}. - \item[\texttt{-{}-lower/-{}-no-lower}] --- lower/do not lower the cases - when scanning the \texttt{}. Default when - \texttt{-h} flag is specified/unspecified (that is for Fortran~77 - codes/signature files). - \item[\texttt{-{}-short-latex}] --- use this flag when you want to - include the generated LaTeX document to another LaTeX document. - \item[\texttt{-{}-debug-capi}] --- create a very verbose C/API - code. Useful for debbuging. -% \item[\texttt{-{}-h-force}] --- if \texttt{-h } is used then -% overwrite the file \texttt{} (if it exists) and continue -% with constructing the C/API module source. - \item[\texttt{-makefile }] --- run \fpy without arguments - for more information. - \item[\texttt{-{}-use-libs}] --- see \texttt{-makefile}. - \item[\texttt{-{}-overwrite-makefile}] --- overwrite existing - \texttt{Makefile-}. - \item[\texttt{-v}] --- print \fpy version number and exit. - \item[\texttt{-pyinc}] --- print Python include path and exit. - \end{description} -\item[\texttt{}] --- are the paths to Fortran files or - to signature files that will be scanned for \texttt{} in order to determine their signatures. -\item[\texttt{}] --- are the names of Fortran - routines for which Python C/API wrapper functions will be generated. - Default is all that are found in \texttt{}. -\item[\texttt{only:}/\texttt{skip:}] --- are flags for filtering - in/out the names of fortran routines to be wrapped. Run \fpy without - arguments for more information about the usage of these flags. 
-\end{description} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff --git a/numpy/f2py/doc/python9.tex b/numpy/f2py/doc/python9.tex deleted file mode 100644 index cda3cd18b..000000000 --- a/numpy/f2py/doc/python9.tex +++ /dev/null @@ -1,1046 +0,0 @@ -\documentclass[twocolumn]{article} -\usepackage{epsfig} -\usepackage{xspace} -\usepackage{verbatim} - - -\headsep=0pt -\topmargin=0pt -\headheight=0pt -\oddsidemargin=0pt -\textwidth=6.5in -\textheight=9in -%%tth:\newcommand{\xspace}{ } -\newcommand{\fpy}{\texttt{f2py}\xspace} -\newcommand{\bs}{\symbol{`\\}} -% need bs here: -%%tth:\newcommand{\bs}{\texttt{}} - -\newcommand{\tthhide}[1]{#1} -\newcommand{\latexhide}[1]{} -%%tth:\newcommand{\tthhide}[1]{} -%%tth:\newcommand{\latexhide}[1]{#1} - -\newcommand{\shell}[1]{ -\latexhide{ - \special{html: -
-
-sh> #1
-
-
} -} -\tthhide{ - \\[1ex] - \hspace*{1em} - \texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}\\[1ex] -} -} - -\newcommand{\email}[1]{\special{html:}\texttt{<#1>}\special{html:}} -\newcommand{\wwwsite}[1]{\special{html:}{#1}\special{html:}} -\title{Fortran to Python Interface Generator with -an Application to Aerospace Engineering} -\author{ -\large Pearu Peterson\\ -\small \email{pearu@cens.ioc.ee}\\ -\small Center of Nonlinear Studies\\ -\small Institute of Cybernetics at TTU\\ -\small Akadeemia Rd 21, 12618 Tallinn, ESTONIA\\[2ex] -\large Joaquim R. R. A. Martins and Juan J. Alonso\\ -\small \email{joaquim.martins@stanford.edu}, \email{jjalonso@stanford.edu}\\ -\small Department of Aeronautics and Astronautics\\ -\small Stanford University, CA -} -\date{$Revision: 1.17 $\\\today} -\begin{document} - -\maketitle - -\special{html: Other formats of this document: -Gzipped PS, -PDF -} - -\begin{abstract} - FPIG --- Fortran to Python Interface Generator --- is a tool for - generating Python C/API extension modules that interface - Fortran~77/90/95 codes with Python. This tool automates the process - of interface generation by scanning the Fortran source code to - determine the signatures of Fortran routines and creating a - Python C/API module that contains the corresponding interface - functions. FPIG also attempts to find dependence relations between - the arguments of a Fortran routine call (e.g. an array and its - dimensions) and constructs interface functions with potentially - fewer arguments. The tool is extremely flexible since the user has - control over the generation process of the interface by specifying the - desired function signatures. The home page for FPIG can be found at - \wwwsite{http://cens.ioc.ee/projects/f2py2e/}. - - FPIG has been used successfully to wrap a large number of Fortran - programs and libraries. Advances in computational science have led - to large improvements in the modeling of physical systems which are - often a result of the coupling of a variety of physical models that - were typically run in isolation. Since a majority of the available - physical models have been previously written in Fortran, the - importance of FPIG in accomplishing these couplings cannot be - understated. In this paper, we present an application of FPIG to - create an object-oriented framework for aero-structural analysis and - design of aircraft. -\end{abstract} - -%%tth: -\tableofcontents - -\section{Preface} -\label{sec:preface} - -The use of high-performance computing has made it possible to tackle -many important problems and discover new physical phenomena in science -and engineering. These accomplishments would not have been achieved -without the computer's ability to process large amounts of data in a -reasonably short time. It can safely be said that the computer has -become an essential tool for scientists and engineers. However, the -diversity of problems in science and engineering has left its mark as -computer programs have been developed in different programming -languages, including languages developed to describe certain specific -classes of problems. - -In interdisciplinary fields it is not uncommon for scientists and -engineers to face problems that have already been solved in a -different programming environment from the one they are familiar with. -Unfortunately, researchers may not have the time or willingness to -learn a new programming language and typically end up developing the -corresponding tools in the language that they normally use. 
This -approach to the development of new software can substantially impact -the time to develop and the quality of the resulting product: firstly, -it usually takes longer to develop and test a new tool than to learn a -new programming environment, and secondly it is very unlikely that a -non-specialist in a given field can produce a program that is more -efficient than more established tools. - -To avoid situations such as the one described above, one alternative -would be to provide automatic or semi-automatic interfaces between programming -languages. Another possibility would be to provide language -translators, but these obviously require more work than interface -generators --- a translator must understand all language constructs -while an interface generator only needs to understand a subset of these -constructs. With an automatic interface between two languages, scientists or -engineers can effectively use programs written in other programming -languages without ever having to learn them. - -Although it is clear that it is impossible to interface arbitrary programming -languages with each other, there is no reason for doing so. Low-level languages such as C and Fortran are well known for -their speed and are therefore suitable for applications where -performance is critical. High-level scripting languages, on the other -hand, are generally slower but much easier to learn and use, -especially when performing interactive analysis. Therefore, it makes -sense to create interfaces only in one direction: from lower-level -languages to higher-level languages. - -In an ideal world, scientists and engineers would use higher-level -languages for the manipulation of the mathematical formulas in a problem -rather than having to struggle with tedious programming details. For tasks -that are computationally demanding, they would use interfaces to -high-performance routines that are written in a lower-level language -optimized for execution speed. - - -\section{Introduction} -\label{sec:intro} - -This paper presents a tool that has been developed for the creation of -interfaces between Fortran and Python. - - -The Fortran language is popular in -scientific computing, and is used mostly in applications that use -extensive matrix manipulations (e.g. linear algebra). Since Fortran - has been the standard language among scientists and engineers for - at least three decades, there is a large number of legacy codes available that - perform a variety of tasks using very sophisticated algorithms (see -e.g. \cite{netlib}). - -The Python language \cite{python}, on the other hand, is a relatively -new programming language. It is a very high-level scripting language -that supports object-oriented programming. What makes Python -especially appealing is its very clear and natural syntax, which makes it -easy to learn and use. With Python one can implement relatively -complicated algorithms and tasks in a short time with very compact -source code. - -Although there are ongoing projects for extending Python's usage in -scientific computation, it lacks reliable tools that are common in -scientific and engineering such as ODE integrators, equation solvers, -tools for FEM, etc. The implementation of all of these tools in Python -would be not only too time-consuming but also inefficient. On the -other hand, these tools are already developed in other, -computationally more efficient languages such as Fortran or C. 
-Therefore, the perfect role for Python in the context of scientific -computing would be that of a ``gluing'' language. That is, the role -of providing high-level interfaces to C, C++ and Fortran libraries. - -There are a number of widely-used tools that can be used for interfacing -software libraries to Python. For binding C libraries with various -scripting languages, including Python, the tool most often used is -SWIG \cite{swig}. Wrapping Fortran routines with Python is less -popular, mainly because there are many platform and compiler-specific -issues that need to be addressed. Nevertheless, there is great -interest in interfacing Fortran libraries because they provide -invaluable tools for scientific computing. At LLNL, for example, a tool -called PyFort has been developed for connecting Fortran and -Python~\cite{pyfort}. - -The tools mentioned above require an input file describing signatures -of functions to be interfaced. To create these input files, one needs -to have a good knowledge of either C or Fortran. In addition, -binding libraries that have thousands of routines can certainly constitute a -very tedious task, even with these tools. - -The tool that is introduced in this paper, FPIG (Fortran to Python -Interface Generator)~\cite{fpig}, automatically generates interfaces -between Fortran and Python. It is different from the tools mentioned -above in that FPIG can create signature files automatically by -scanning the source code of the libraries and then construct Python -C/API extension modules. Note that the user need not be experienced -in C or even Fortran. In addition, FPIG is designed to wrap large -Fortran libraries containing many routines with only one or two -commands. This process is very flexible since one can always modify -the generated signature files to insert additional attributes in order -to achieve more sophisticated interface functions such as taking care -of optional arguments, predicting the sizes of array arguments and -performing various checks on the correctness of the input arguments. - -The organization of this paper is as follows. First, a simple example -of FPIG usage is given. Then FPIG's basic features are described and -solutions to platform and compiler specific issues are discussed. -Unsolved problems and future work on FPIG's development are also -addressed. Finally, an application to a large aero-structural solver -is presented as real-world example of FPIG's usage. - -\section{Getting Started} -\label{sec:getstart} - -To get acquainted with FPIG, let us consider the simple Fortran~77 -subroutine shown in Fig. \ref{fig:exp1.f}. -\begin{figure}[htb] - \latexhide{\label{fig:exp1.f}} - \special{html:
} - \verbatiminput{examples/exp1.f} - \special{html:
} - \caption{Example Fortran code \texttt{exp1.f}. This routine calculates - the simplest rational lower and upper approximations to $e$ (for - details of - the algorithm see \cite{graham-etal}, p.122)} - \tthhide{\label{fig:exp1.f}} -\end{figure} -In the sections that follow, two ways of creating interfaces to this -Fortran subroutine are described. The first and simplest way is -suitable for Fortran codes that are developed in connection with \fpy. -The second and not much more difficult method, is suitable for -interfacing existing Fortran libraries which might have been developed -by other programmers. - -Numerical Python~\cite{numpy} is needed in order to compile extension -modules generated by FPIG. - -\subsection{Interfacing Simple Routines} -\label{sec:example1} - -In order to call the Fortran routine \texttt{exp1} from Python, let us -create an interface to it by using \fpy (FPIG's front-end program). In -order to do this, we issue the following command, \shell{f2py -m foo -exp1.f} where the option \texttt{-m foo} sets the name of the Python -C/API extension module that \fpy will create to -\texttt{foo}. To learn more about the \fpy command line options, run \fpy -without arguments. - -The output messages in Fig. \ref{fig:f2pyoutmess} -illustrate the procedure followed by \fpy: - (i) it scans the Fortran source code specified in the command line, - (ii) it analyses and determines the routine signatures, - (iii) it constructs the corresponding Python C/API extension modules, - (iv) it writes documentation to a LaTeX file, and - (v) it creates a GNU Makefile for building the shared modules. -\begin{figure}[htb] - \latexhide{\label{fig:f2pyoutmess}} - \special{html:
} - {\tthhide{\small} - \verbatiminput{examples/exp1mess.txt} - } - \special{html:
} - \caption{Output messages of \texttt{f2py -m foo exp1.f}.} - \tthhide{\label{fig:f2pyoutmess}} -\end{figure} - -Now we can build the \texttt{foo} module: -\shell{make -f Makefile-foo} - -Figure \ref{fig:exp1session} illustrates a sample session for - calling the Fortran routine \texttt{exp1} from Python. -\begin{figure}[htb] - \latexhide{\label{fig:exp1session}} - \special{html:
} - \verbatiminput{examples/exp1session.txt} - \special{html:
} - \caption{Calling Fortran routine \texttt{exp1} from Python. Here - \texttt{l[0]/l[1]} gives an estimate to $e$ with absolute error - less than \texttt{u[0]/u[1]-l[0]/l[1]} (this value may depend on - the platform and compiler used).} - \tthhide{\label{fig:exp1session}} -\end{figure} - -Note the difference between the signatures of the Fortran routine -\texttt{exp1(l,u,n)} and the corresponding wrapper function -\texttt{l,u=exp1([n])}. Clearly, the later is more informative to -the user: \texttt{exp1} takes one optional argument \texttt{n} and it -returns \texttt{l}, \texttt{u}. This exchange of signatures is -achieved by special comment lines (starting with \texttt{Cf2py}) in -the Fortran source code --- these lines are interpreted by \fpy as -normal Fortran code. Therefore, in the given example the line \texttt{Cf2py - integer*4 :: n = 1} informs \fpy that the variable \texttt{n} is -optional with a default value equal to one. The line \texttt{Cf2py - intent(out) l,u} informs \fpy that the variables \texttt{l,u} are to be -returned to Python after calling Fortran function \texttt{exp1}. - -\subsection{Interfacing Libraries} -\label{sec:example2} - -In our example the Fortran source \texttt{exp1.f} contains \fpy -specific information, though only as comments. When interfacing -libraries from other parties, it is not recommended to modify their -source. Instead, one should use a special auxiliary file to collect -the signatures of all Fortran routines and insert \fpy specific -declaration and attribute statements in that file. This auxiliary file -is called a \emph{signature file} and is identified by the extension -\texttt{.pyf}. - -We can use \fpy to generate these signature files by using the -\texttt{-h .pyf} option. -In our example, \fpy could have been called as follows, -\shell{f2py -m foo -h foo.pyf exp1.f} -where the option \texttt{-h foo.pyf} requests \fpy to read the -routine signatures, save them to the file \texttt{foo.pyf}, and then -exit. -If \texttt{exp1.f} in Fig.~\ref{fig:exp1.f} were to -contain no lines starting with \texttt{Cf2py}, the corresponding -signature file \texttt{foo.pyf} would be as shown in Fig.~\ref{fig:foo.pyf}. -In order to obtain the exchanged and more convenient signature -\texttt{l,u=foo.exp1([n])}, we would edit \texttt{foo.pyf} as shown in -Fig.~\ref{fig:foom.pyf}. -The Python C/API extension module \texttt{foo} can be constructed by -applying \fpy to the signature file with the following command: -\shell{f2py foo.pyf} -The procedure for building the corresponding shared module and using -it in Python is identical to the one described in the previous section. - -\begin{figure}[htb] - \latexhide{\label{fig:foo.pyf}} - \special{html:
} - \verbatiminput{examples/foo.pyf} - \special{html:
} - \caption{Raw signature file \texttt{foo.pyf} generated with - \texttt{f2py -m foo -h foo.pyf exp1.f}} - \tthhide{\label{fig:foo.pyf}} -\end{figure} -\begin{figure}[htb] - \latexhide{\label{fig:foom.pyf}} - \special{html:
} - \verbatiminput{examples/foom.pyf} - \special{html:
} - \caption{Modified signature file \texttt{foo.pyf}} - \tthhide{\label{fig:foom.pyf}} -\end{figure} - -As we can see, the syntax of the signature file is an -extension of the Fortran~90/95 syntax. This means that only a few new -constructs are introduced for \fpy in addition to all standard Fortran -constructs; signature files can even be written in fixed form. A -complete set of constructs that are used when creating interfaces, is -described in the \fpy User's Guide \cite{f2py-ug}. - - -\section{Basic Features} -\label{sec:features} - -In this section a short overview of \fpy features is given. -\begin{enumerate} -\item All basic Fortran types are supported. They include -the following type specifications: -\begin{verbatim} -integer[ | *1 | *2 | *4 | *8 ] -logical[ | *1 | *2 | *4 | *8 ] -real[ | *4 | *8 | *16 ] -complex[ | *8 | *16 | *32 ] -double precision, double complex -character[ |*(*)|*1|*2|*3|...] -\end{verbatim} -In addition, they can all be in the kind-selector form -(e.g. \texttt{real(kind=8)}) or char-selector form -(e.g. \texttt{character(len=5)}). -\item Arrays of all basic types are supported. Dimension - specifications can be of form \texttt{} or - \texttt{:}. In addition, \texttt{*} and \texttt{:} - dimension specifications can be used for input arrays. - Dimension specifications may contain also \texttt{PARAMETER}'s. -\item The following attributes are supported: - \begin{itemize} - \item - \texttt{intent(in)}: used for input-only arguments. - \item - \texttt{intent(inout)}: used for arguments that are changed in - place. - \item - \texttt{intent(out)}: used for return arguments. - \item - \texttt{intent(hide)}: used for arguments to be removed from - the signature of the Python function. - \item - \texttt{intent(in,out)}, \texttt{intent(inout,out)}: used for - arguments with combined behavior. - \item - \texttt{dimension()} - \item - \texttt{depend([])}: used - for arguments that depend on other arguments in \texttt{}. - \item - \texttt{check([])}: used for checking the - correctness of input arguments. - \item - \texttt{note()}: used for - adding notes to the module documentation. - \item - \texttt{optional}, \texttt{required} - \item - \texttt{external}: used for call-back arguments. - \item - \texttt{allocatable}: used for Fortran 90/95 allocatable arrays. - \end{itemize} -\item Using \fpy one can call arbitrary Fortran~77/90/95 subroutines - and functions from Python, including Fortran 90/95 module routines. -\item Using \fpy one can access data in Fortran~77 COMMON blocks and - variables in Fortran 90/95 modules, including allocatable arrays. -\item Using \fpy one can call Python functions from Fortran (call-back - functions). \fpy supports very flexible hooks for call-back functions. -\item Wrapper functions perform the necessary type conversations for their - arguments resulting in contiguous Numeric arrays that are suitable for - passing to Fortran routines. -\item \fpy generates documentation strings -for \texttt{\_\_doc\_\_} attributes of the wrapper functions automatically. -\item \fpy scans Fortran codes and creates the signature - files. It automatically detects the signatures of call-back functions, - solves argument dependencies, decides the order of initialization of - optional arguments, etc. -\item \fpy automatically generates GNU Makefiles for compiling Fortran - and C codes, and linking them to a shared module. - \fpy detects available Fortran and C compilers. 
The - supported compilers include the GNU project C Compiler (gcc), Compaq - Fortran, VAST/f90 Fortran, Absoft F77/F90, and MIPSpro 7 Compilers, etc. - \fpy has been tested to work on the following platforms: Intel/Alpha - Linux, HP-UX, IRIX64. -\item Finally, the complete \fpy User's Guide is available in various - formats (ps, pdf, html, dvi). A mailing list, - \email{f2py-users@cens.ioc.ee}, is open for support and feedback. See - the FPIG's home page for more information \cite{fpig}. -\end{enumerate} - - -\section{Implementation Issues} -\label{sec:impl} - -The Fortran to Python interface can be thought of as a three layer -``sandwich'' of different languages: Python, C, and Fortran. This -arrangement has two interfaces: Python-C and C-Fortran. Since Python -itself is written in C, there are no basic difficulties in -implementing the Python-C interface~\cite{python-doc:ext}. The C-Fortran -interface, on the other hand, results in many platform and compiler specific -issues that have to be dealt with. We will now discuss these issues -in some detail and describe how they are solved in FPIG. - -\subsection{Mapping Fortran Types to C Types} -\label{sec:mapF2Ctypes} - -Table \ref{tab:mapf2c} defines how Fortran types are mapped to C types -in \fpy. -\begin{table}[htb] - \begin{center} - \begin{tabular}[c]{l|l} - Fortran type & C type \\\hline - \texttt{integer *1} & \texttt{char}\\ - \texttt{byte} & \texttt{char}\\ - \texttt{integer *2} & \texttt{short}\\ - \texttt{integer[ | *4]} & \texttt{int}\\ - \texttt{integer *8} & \texttt{long long}\\ - \texttt{logical *1} & \texttt{char}\\ - \texttt{logical *2} & \texttt{short}\\ - \texttt{logical[ | *4]} & \texttt{int}\\ - \texttt{logical *8} & \texttt{int}\\ - \texttt{real[ | *4]} & \texttt{float}\\ - \texttt{real *8} & \texttt{double}\\ - \texttt{real *16} & \texttt{long double}\\ - \texttt{complex[ | *8]} & \texttt{struct \{float r,i;\}}\\ - \texttt{complex *16} & \texttt{struct \{double r,i;\}}\\ - \texttt{complex *32} & \texttt{struct \{long double r,i;\}}\\ - \texttt{character[*...]} & \texttt{char *}\\ - \end{tabular} - \caption{Mapping Fortran types to C types.} - \label{tab:mapf2c} - \end{center} -\end{table} -Users may redefine these mappings by creating a \texttt{.f2py\_f2cmap} -file in the working directory. This file should contain a Python -dictionary of dictionaries, e.g. \texttt{\{'real':\{'low':'float'\}\}}, -that informs \fpy to map Fortran type \texttt{real(low)} -to C type \texttt{float} (here \texttt{PARAMETER low = ...}). - - -\subsection{Calling Fortran (Module) Routines} -\label{sec:callrout} - -When mixing Fortran and C codes, one has to know how function names -are mapped to low-level symbols in their object files. Different -compilers may use different conventions for this purpose. For example, gcc -appends the underscore \texttt{\_} to a Fortran routine name. Other -compilers may use upper case names, prepend or append different -symbols to Fortran routine names or both. In any case, if the -low-level symbols corresponding to Fortran routines are valid for the -C language specification, compiler specific issues can be solved by -using CPP macro features. - -Unfortunately, there are Fortran compilers that use symbols in -constructing low-level routine names that are not valid for C. For -example, the (IRIX64) MIPSpro 7 Compilers use `\$' character in the -low-level names of module routines which makes it impossible (at -least directly) to call such routines from C when using the MIPSpro 7 -C Compiler. 
- -In order to overcome this difficulty, FPIG introduces an unique -solution: instead of using low-level symbols for calling Fortran -module routines from C, the references to such routines are determined -at run-time by using special wrappers. These wrappers are called once -during the initialization of an extension module. They are simple -Fortran subroutines that use a Fortran module and call another C -function with Fortran module routines as arguments in order to save -their references to C global variables that are later used for calling -the corresponding Fortran module routines. This arrangement is -set up as follows. Consider the following Fortran 90 module with the -subroutine \texttt{bar}: -\special{html:
} -\begin{verbatim} -module fun - subroutine bar() - end -end -\end{verbatim} -\special{html:
} -Figure \ref{fig:capi-sketch} illustrates a Python C/API extension -module for accessing the F90 module subroutine \texttt{bar} from Python. -When the Python module \texttt{foo} is loaded, \texttt{finitbar} is -called. \texttt{finitbar} calls \texttt{init\_bar} by passing the -reference of the Fortran 90 module subroutine \texttt{bar} to C where it is -saved to the variable \texttt{bar\_ptr}. Now, when one executes \texttt{foo.bar()} -from Python, \texttt{bar\_ptr} is used in \texttt{bar\_capi} to call -the F90 module subroutine \texttt{bar}. -\begin{figure}[htb] - \latexhide{\label{fig:capi-sketch}} - \special{html:
} -\begin{verbatim} -#include "Python.h" -... -char *bar_ptr; -void init_bar(char *bar) { - bar_ptr = bar; -} -static PyObject * -bar_capi(PyObject *self,PyObject *args) { - ... - (*((void *)bar_ptr))(); - ... -} -static PyMethodDef -foo_module_methods[] = { - {"bar",bar_capi,METH_VARARGS}, - {NULL,NULL} -}; -extern void finitbar_; /* GCC convention */ -void initfoo() { - ... - finitbar_(init_bar); - Py_InitModule("foo",foo_module_methods); - ... -} -\end{verbatim} - \special{html:
} - \caption{Sketch of Python C/API for accessing F90 module subroutine - \texttt{bar}. The Fortran function \texttt{finitbar} is defined in - Fig.~\ref{fig:wrapbar}.} - \tthhide{\label{fig:capi-sketch}} -\end{figure} -\begin{figure}[ht] - \latexhide{\label{fig:wrapbar}} -\special{html:
} -\begin{verbatim} - subroutine finitbar(cinit) - use fun - extern cinit - call cinit(bar) - end -\end{verbatim} -\special{html:
} - \caption{Wrapper for passing the reference of \texttt{bar} to C code.} - \tthhide{\label{fig:wrapbar}} -\end{figure} - -Surprisingly, mixing C code and Fortran modules in this way is as -portable and compiler independent as mixing C and ordinary Fortran~77 -code. - -Note that extension modules generated by \fpy actually use -\texttt{PyFortranObject} that implements above described scheme with -exchanged functionalities (see Section \ref{sec:PFO}). - - -\subsection{Wrapping Fortran Functions} -\label{sec:wrapfunc} - -The Fortran language has two types of routines: subroutines and -functions. When a Fortran function returns a composed type such as -\texttt{COMPLEX} or \texttt{CHARACTER}-array then calling this -function directly from C may not work for all compilers, as C -functions are not supposed to return such references. In order to -avoid this, FPIG constructs an additional Fortran wrapper subroutine -for each such Fortran function. These wrappers call just the -corresponding functions in the Fortran layer and return the result to -C through its first argument. - - -\subsection{Accessing Fortran Data} -\label{sec:accsdata} - -In Fortran one can use \texttt{COMMON} blocks and Fortran module -variables to save data that is accessible from other routines. Using -FPIG, one can also access these data containers from Python. To achieve -this, FPIG uses special wrapper functions (similar to the ones used -for wrapping Fortran module routines) to save the references to these -data containers so that they can later be used from C. - -FPIG can also handle \texttt{allocatable} arrays. For example, if a -Fortran array is not yet allocated, then by assigning it in Python, -the Fortran to Python interface will allocate and initialize the -array. For example, the F90 module allocatable array \texttt{bar} -defined in -\special{html:
} -\begin{verbatim} -module fun - integer, allocatable :: bar(:) -end module -\end{verbatim} -\special{html:
} -can be allocated from Python as follows -\special{html:
} -\begin{verbatim} ->>> import foo ->>> foo.fun.bar = [1,2,3,4] -\end{verbatim} -\special{html:
} - -\subsection{\texttt{PyFortranObject}} -\label{sec:PFO} - -In general, we would like to access from Python the following Fortran -objects: -\begin{itemize} -\item subroutines and functions, -\item F90 module subroutines and functions, -\item items in COMMON blocks, -\item F90 module data. -\end{itemize} -Assuming that the Fortran source is available, we can determine the signatures -of these objects (the full specification of routine arguments, the -layout of Fortran data, etc.). In fact, \fpy gets this information -while scanning the Fortran source. - -In order to access these Fortran objects from C, we need to determine -their references. Note that the direct access of F90 module objects is -extremely compiler dependent and in some cases even impossible. -Therefore, FPIG uses various wrapper functions for obtaining the -references to Fortran objects. These wrapper functions are ordinary -F77 subroutines that can easily access objects from F90 modules and -that pass the references to Fortran objects as C variables. - - -\fpy generated Python C/API extension modules use -\texttt{PyFortranObject} to store the references of Fortran objects. -In addition to the storing functionality, the \texttt{PyFortranObject} -also provides methods for accessing/calling Fortran objects from -Python in a user-friendly manner. For example, the item \texttt{a} in -\texttt{COMMON /bar/ a(2)} can be accessed from Python as -\texttt{foo.bar.a}. - -Detailed examples of \texttt{PyFortranObject} usage can be found in -\cite{PFO}. - -\subsection{Callback Functions} -\label{sec:callback} - -Fortran routines may have arguments specified as \texttt{external}. -These arguments are functions or subroutines names that the receiving Fortran routine -will call from its body. For such arguments FPIG -constructs a call-back mechanism (originally contributed by Travis -Oliphant) that allows Fortran routines to call Python functions. This -is actually realized using a C layer between Python and -Fortran. Currently, the call-back mechanism is compiler independent -unless a call-back function needs to return a composed type -(e.g. \texttt{COMPLEX}). - -The signatures of call-back functions are determined when \fpy scans -the Fortran source code. To illustrate this, consider the following -example: -\special{html:
} -\begin{verbatim} - subroutine foo(bar, fun, boo) - integer i - real r - external bar,fun,boo - call bar(i, 1.2) - r = fun() - call sun(boo) - end -\end{verbatim} -\special{html:
} -\fpy recognizes the signatures of the user routines \texttt{bar} and -\texttt{fun} using the information contained in the lines \texttt{call - bar(i, 1.2)} and \texttt{r = fun()}: -\special{html:
} -\begin{verbatim} -subroutine bar(a,b) - integer a - real b -end -function fun() - real fun -end -\end{verbatim} -\special{html:
} -But \fpy cannot determine the signature of the user routine -\texttt{boo} because the source contains no information at all about -the \texttt{boo} specification. Here user needs to provide the -signature of \texttt{boo} manually. - -\section{Future Work} -\label{sec:future} - -FPIG can be used to wrap almost any Fortran code. However, there are -still issues that need to be resolved. Some of them are listed below: -\begin{enumerate} -\item One of the FPIG's goals is to become as platform and compiler - independent as possible. Currently FPIG can be used on - any UN*X platform that has gcc installed in it. In the future, FPIG - should be also tested on Windows systems. -\item Another goal of FPIG is to become as simple to use as - possible. To achieve that, FPIG should start using the facilities of - \texttt{distutils}, the new Python standard to distribute and build - Python modules. Therefore, a contribution to \texttt{distutils} - that can handle Fortran extensions should be developed. -\item Currently users must be aware of - the fact that multi-dimensional arrays are stored differently in C - and Fortran (they must provide transposed multi-dimensional arrays - to wrapper functions). In the future a solution should be found such - that users do not need to worry about this rather - confusing and technical detail. -\item Finally, a repository of signature files for widely-used Fortran - libraries (e.g. BLAS, LAPACK, MINPACK, ODEPACK, EISPACK, LINPACK) should be - provided. -\end{enumerate} - - -\section{Application to a Large Aero-Structural Analysis Framework} -\label{sec:app} - - -\subsection{The Need for Python and FPIG} -\label{sec:appsub1} - -As a demonstration of the power and usefulness of FPIG, we will -present work that has been done at the Aerospace Computing Laboratory -at Stanford University. The focus of the research is on aircraft -design optimization using high-fidelity analysis tools such as -Computational Fluid Dynamics (CFD) and Computational Structural -Mechanics (CSM)~\cite{reno99}. - -The group's analysis programs are written mainly in Fortran and are the result -of many years of development. Until now, any researcher that needed -to use these tools would have to learn a less than user-friendly -interface and become relatively familiar with the inner workings of -the codes before starting the research itself. The need to -couple analyses of different disciplines revealed the additional -inconvenience of gluing and scripting the different codes with -Fortran. - -It was therefore decided that the existing tools should be wrapped -using an object-oriented language in order to improve their ease of -use and versatility. The use of several different languages such as -C++, Java and Perl was investigated but Python seemed to provide the -best solution. The fact that it combines scripting capability -with a fully-featured object-oriented programming language, and that -it has a clean syntax were factors that determined our choice. The -introduction of tools that greatly facilitate the task of wrapping -Fortran with Python provided the final piece needed to realize our -objective. - -\subsection{Wrapping the Fortran Programs} - -In theory, it would have been possible to wrap our Fortran programs -with C and then with Python by hand. However, this would have been a -labor intensive task that would detract from our research. The use of -tools that automate the task of wrapping has been extremely useful. - -The first such tool that we used was PyFort. 
This tool created the C -wrappers and Python modules automatically, based on signature files -(\texttt{.pyf}) provided by the user. Although it made the task of -wrapping considerably easier, PyFort was limited by the fact that any -Fortran data that was needed at the Python level had to be passed in -the argument list of the Fortran subroutine. Since the bulk of the -data in our programs is shared by using Fortran~77 common blocks and -Fortran~90 modules, this required adding many more arguments to the -subroutine headers. Furthermore, since Fortran does not allow common -block variables or module data to be specified in a subroutine -argument list, a dummy pointer for each desired variable had to be -created and initialized. - -The search for a better solution to this problem led us to \fpy. -Since \fpy provides a solution for accessing common block and module -variables, there was no need to change the Fortran source anymore, -making the wrapping process even easier. With \fpy we also -experienced an increased level of automation since it produces the -signature files automatically, as well as a Makefile for the joint -compilation of the original Fortran and C wrapper codes. This increased -automation did not detract from its flexibility since it was always -possible to edit the signature files to provide different functionality. - -Once Python interfaces were created for each Fortran application -by running \fpy, it was just a matter of using Python to achieve the -final objective of developing an object-oriented framework for our -multidisciplinary solvers. The Python modules that we designed are -discussed in the following section. - - -\subsection{Module Design} -\label{ssec:module} - -The first objective of this effort was to design the classes for each -type of analysis, each representing an independent Python module. In -our case, we are interested in performing aero-structural analysis and -optimization of aircraft wings. We therefore needed an analysis tool -for the flow (CFD), another for analyzing the structure (CSM), as well -as a geometry database. In addition, we needed to interface these two -tools in order to analyze the coupled system. The object design for -each of these modules should be general enough that the underlying -analysis code in Fortran can be changed without changing the Python -interface. Another requirement was that the modules be usable on -their own for single discipline analysis. - -\subsubsection{Geometry} - -The \emph{Geometry} class provides a database for the outer mold -geometry of the aircraft. This database needs to be accessed by both -the flow and structural solvers. It contains a parametric description -of the aircraft's surface as well as methods that extract and update -this information. - - -\subsubsection{Flow} - -The flow solver was wrapped in a class called \emph{Flow}. The class -was designed so that it can wrap any type of CFD solver. It contains -two main objects: the computational mesh and a solver object. A graph -showing the hierarchy of the objects in \emph{Flow} is shown in -Fig.~\ref{fig:flow}. -\tthhide{ -\begin{figure}[h] - \centering - \epsfig{file=./flow.eps, angle=0, width=.7\linewidth} - \caption{The \emph{Flow} container class.} - \label{fig:flow} -\end{figure} -} -\latexhide{ -\begin{figure}[h] - \label{fig:flow} -\special{html: -
-} - \caption{The \emph{Flow} container class.} -\end{figure} -} -Methods in the flow class include those used for the initialization of -all the class components as well as methods that write the current -solution to a file. - - -\subsubsection{Structure} - -The \emph{Structure} class wraps a structural analysis code. The class -stores the information about the structure itself in an object called -\emph{Model} which also provides methods for changing and exporting -its information. A list of the objects contained in this class can be -seen in Fig.~\ref{fig:structure}. -\tthhide{ -\begin{figure}[h] - \centering - \epsfig{file=./structure.eps, angle=0, width=.7\linewidth} - \caption{The \emph{Structure} container class.} - \label{fig:structure} -\end{figure} -} -\latexhide{ -\begin{figure}[h] - \label{fig:structure} -\special{html: -
-} - \caption{The \emph{Structure} container class.} -\end{figure} -} -Since the \emph{Structure} class contains a -dictionary of \emph{LoadCase} objects, it is able to store and solve -multiple load cases, a capability that the original Fortran code -does not have. - - -\subsubsection{Aerostructure} - -The \emph{Aerostructure} class is the main class in the -aero-structural analysis module and contains a \emph{Geometry}, a -\emph{Flow} and a \emph{Structure}. In addition, the class defines -all the functions that are necessary to translate aerodynamic -loads to structural loads and structural displacements to -geometry surface deformations. - -One of the main methods of this class is the one that solves the -aeroelastic system. This method is printed below: -\begin{verbatim} -def Iterate(self, load_case): - """Iterates the aero-structural solution.""" - self.flow.Iterate() - self._UpdateStructuralLoads() - self.structure.CalcDisplacements(load_case) - self.structure.CalcStresses(load_case) - self._UpdateFlowMesh() - return -\end{verbatim} -This is indeed a very readable script, thanks to Python, and any -high-level changes to the solution procedure can be easily -implemented. -The \emph{Aerostructure} class also contains methods that export all -the information on the current solution for visualization, an example -of which is shown in the next section. - - -\subsection{Results} - -In order to visualize results, and because we needed to view results -from multiple disciplines simultaneously, we selected OpenDX. Output -files in DX format are written at the Python level and the result can -be seen in Fig.~\ref{fig:aerostructure} for the case of a transonic -airliner configuration. -\tthhide{ -\begin{figure*}[t] - \centering - \epsfig{file=./aerostructure.eps, angle=-90, width=\linewidth} - \caption{Aero-structural model and results.} - \label{fig:aerostructure} -\end{figure*} -} -\latexhide{ -\begin{figure}[h] - \label{fig:aerostructure} -\special{html: -
-} - \caption{Aero-structural model and results.} -\end{figure} -} - - -The figure illustrates the multidisciplinary nature of the -problem. The grid pictured in the background is the mesh used by the -flow solver and is colored by the pressure values computed at the -cell centers. The wing in the foreground and its outer surface is -clipped to show the internal structural components which are colored -by their stress value. - -In conclusion, \fpy and Python have been extremely useful tools in our -pursuit for increasing the usability and flexibility of existing Fortran -tools. - - -\begin{thebibliography}{99} -\bibitem{netlib} -\newblock Netlib repository at UTK and ORNL. -\newblock \\\wwwsite{http://www.netlib.org/} -\bibitem{python} -Python language. -\newblock \\\wwwsite{http://www.python.org/} -\bibitem{swig} -SWIG --- Simplified Wrapper and Interface Generator. -\newblock \\\wwwsite{http://www.swig.org/} -\bibitem{pyfort} -PyFort --- The Python-Fortran connection tool. -\newblock \\\wwwsite{http://pyfortran.sourceforge.net/} -\bibitem{fpig} -FPIG --- Fortran to Python Interface Generator. -\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/} -\bibitem{numpy} -Numerical Extension to Python. -\newblock \\\wwwsite{http://numpy.sourceforge.net/} -\bibitem{graham-etal} -R. L. Graham, D. E. Knuth, and O. Patashnik. -\newblock {\em {C}oncrete {M}athematics: a foundation for computer science.} -\newblock Addison-Wesley, 1988 -\bibitem{f2py-ug} -P. Peterson. -\newblock {\em {\tt f2py} - Fortran to Python Interface Generator. Second Edition.} -\newblock 2000 -\newblock -\\\wwwsite{http://cens.ioc.ee/projects/f2py2e/usersguide.html} -\bibitem{python-doc:ext} -Python Documentation: Extending and Embedding. -\newblock \\\wwwsite{http://www.python.org/doc/ext/} -\bibitem{PFO} -P. Peterson. {\em {\tt PyFortranObject} example usages.} -\newblock 2001 -\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/pyfobj.html} -\bibitem{reno99} -Reuther, J., J. J. Alonso, J. R. R. A. Martins, and -S. C. Smith. -\newblock ``A Coupled Aero-Structural Optimization Method for - Complete Aircraft Configurations'', -\newblock {\em Proceedings of the 37th Aerospace Sciences Meeting}, -\newblock AIAA Paper 1999-0187. Reno, NV, January, 1999 -\end{thebibliography} - -%\end{multicols} - -%\begin{figure}[htbp] -% \begin{center} -% \epsfig{file=aerostructure2b.ps,width=0.75\textwidth} -% \end{center} -%\end{figure} - - - -\end{document} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: t -%%% End: - - diff --git a/numpy/f2py/doc/signaturefile.tex b/numpy/f2py/doc/signaturefile.tex deleted file mode 100644 index 3cd16d890..000000000 --- a/numpy/f2py/doc/signaturefile.tex +++ /dev/null @@ -1,368 +0,0 @@ - -\section{Signature file} -\label{sec:signaturefile} - -The syntax of a signature file is borrowed from the Fortran~90/95 -language specification. Almost all Fortran~90/95 standard constructs -are understood. Recall that Fortran~77 is a subset of Fortran~90/95. -This tool introduces also some new attributes that are used for -controlling the process of Fortran to Python interface construction. -In the following, a short overview of the constructs -used in signature files will be given. - - -\subsection{Module block} -\label{sec:moduleblock} - -A signature file contains one or more \texttt{pythonmodule} blocks. 
A -\texttt{pythonmodule} block has the following structure: -\begin{verbatim} -python module - interface - - end [interface] - interface - module - - - end [module []] - end [interface] -end [pythonmodule []] -\end{verbatim} -For each \texttt{pythonmodule} block \fpy will generate a C-file -\texttt{module.c} (see step (iii)). (This is not true if -\texttt{} contains substring \texttt{\_\_user\_\_}, see -Sec.~\ref{sec:cbmodule} and \texttt{external} attribute). - -\subsection{Signatures of Fortran routines and Python functions} -\label{sec:routineblock} - - -The signature of a Fortran routine has the following structure: -\begin{verbatim} -[] function|subroutine [([])] \ - [result ()] - [] - [] - [] - [] - [] -end [function|subroutine []] -\end{verbatim} - -Let us introduce also the signature of the corresponding wrapper -function: -\begin{verbatim} -def ([,]): - ... - return -\end{verbatim} - -Before you edit the signature file, you should first decide what is the -desired signature of the corresponding Python function. \fpy offers -many possibilities to control the interface construction process: you -may want to insert/change/remove various attributes in the -declarations of the arguments in order to change the appearance -of the arguments in the Python wrapper function. - -\begin{itemize} -\item -The definition of the \texttt{} is -\begin{verbatim} - [[]::] -\end{verbatim} -where -\begin{verbatim} - := byte | character[] - | complex[] | real[] - | double complex | double precision - | integer[] | logical[] -\end{verbatim} -\begin{verbatim} - := * | ([len=][,[kind]]) - | (kind=[,len=]) - := * | ([kind=]) -\end{verbatim} -(there is no sense to modify \texttt{}s generated by \fpy). -\texttt{} is a comma separated list of attributes (see -Sec.~\ref{sec:attributes}); -\begin{verbatim} - := [[*][()] - | [()]*] - | [// | =] [,] -\end{verbatim} -where \texttt{} is a comma separated list of dimension -bounds; \texttt{} is a C-expression (see -Sec.~\ref{sec:C-expr}). If an argument is not defined with -\texttt{}, its type is determined by -applying \texttt{implicit} rules (if it is not specifyied, then -standard rules are applied). - -\item The definition of the \texttt{} is -a short form of the \texttt{}: -\begin{verbatim} - -\end{verbatim} - -\item \texttt{} is defined as follows -\begin{verbatim} -use [, | ,ONLY:] - := local_name=>use_name [,] -\end{verbatim} - Currently the \texttt{use} statement is used to link call-back - modules (Sec.~\ref{sec:cbmodule}) and the \texttt{external} - arguments (call-back functions). - -\item \texttt{} is defined as follows -\begin{verbatim} -common // -\end{verbatim} -where -\begin{verbatim} - := [()] [,] -\end{verbatim} -One \texttt{module} block should not contain two or more -\texttt{common} blocks with the same name. Otherwise, the later ones -are ignored. The types of variables in \texttt{} can -be defined in \texttt{}. Note that there -you can specify also the array specifications; then you don't need to -do that in \texttt{}. -\end{itemize} - -\subsection{Attributes} -\label{sec:attributes} - -The following attributes are used by \fpy: -\begin{description} -\item[\texttt{optional}] --- the variable is moved to the end of - optional argument list of the wrapper function. Default value of an - optional argument can be specified using \texttt{} in - \texttt{entitydecl}. 
You can use \texttt{optional} attribute also for - \texttt{external} arguments (call-back functions), but it is your - responsibility to ensure that it is given by the user if Fortran - routine wants to call it. -\item[\texttt{required}] --- the variable is considered as a required - argument (that is default). You will need this in order to overwrite - the \texttt{optional} attribute that is automatically set when - \texttt{} is used. However, usage of this attribute - should be rare. -\item[\texttt{dimension()}] --- used when the variable is - an array. For unbounded dimensions symbols `\texttt{*}' or - `\texttt{:}' can be used (then internally the corresponding - dimensions are set to -1; you'll notice this when certain exceptions - are raised). -\item[\texttt{external}] --- the variable is a call-back function. \fpy will - construct a call-back mechanism for this function. Also call-back - functions must be defined by their signatures, and there are several - ways to do that. In most cases, \fpy will be able to determine the signatures - of call-back functions from the Fortran source code; then it - builds an additional \texttt{module} block with a name containing - string `\texttt{\_\_user\_\_}' (see Sec.~\ref{sec:cbmodule}) and - includes \texttt{use} statement to the routines signature. Anyway, - you should check that the generated signature is correct. - - Alternatively, you can specify the signature by inserting to the - routines block a ``model'' how the call-back function would be called - from Fortran. For subroutines you should use\\ - \hspace*{2em}\texttt{call ()}\\ - and for functions\\% - \hspace*{2em}\texttt{ = ()}\\ - The variables in \texttt{} and \texttt{} - must be defined as well. You can use the arguments of the main - routine, for instance. -\item[\texttt{intent()}] --- this specifies the - ``intention'' of the variable. \texttt{} is a comma - separated list of the following specifications: - \begin{description} - \item[\texttt{in}] --- the variable is considered to be an input - variable (default). It means that the Fortran function uses only - the value(s) of the variable and is assumed not to change it. - \item[\texttt{inout}] --- the variable is considered to be an - input/output variable which means that Fortran routine may change - the value(s) of the variable. Note that in Python only array - objects can be changed ``in place''. (\texttt{intent(outin)} is - \texttt{intent(inout)}.) - \item[\texttt{out}] --- the value of the (output) variable is - returned by the wrapper function: it is appended to the list of - \texttt{}. If \texttt{out} is specified alone, - also \texttt{hide} is assumed. - \item[\texttt{hide}] --- use this if the variable \emph{should not} - or \emph{need not} to be in the list of wrapper function arguments - (not even in optional ones). For example, this is assumed if - \texttt{intent(out)} is used. You can ``hide'' an argument if it - has always a constant value specified in \texttt{}, - for instance. - \end{description} - The following rules apply: - \begin{itemize} - \item if no \texttt{intent} attribute is specified, \texttt{intent(in)} is - assumed; - \item \texttt{intent(in,inout)} is \texttt{intent(in)}; - \item \texttt{intent(in,hide)}, \texttt{intent(inout,hide)} are \texttt{intent(hide)}; - \item \texttt{intent(out)} is \texttt{intent(out,hide)}; -\item \texttt{intent(inout)} is NOT \texttt{intent(in,out)}. 
- \end{itemize} - In conclusion, the following combinations are ``minimal'': - \texttt{intent(in)}, \texttt{intent(inout)}, \texttt{intent(out)}, - \texttt{intent(hide)}, \texttt{intent(in,out)}, and - \texttt{intent(inout,out)}. -\item[\texttt{check([])}] --- if - \texttt{} evaluates to zero, an exception is raised - about incorrect value or size or any other incorrectness of the - variable. If \texttt{check()} or \texttt{check} is used then \fpy - will not try to guess the checks automatically. -\item[\texttt{depend([])}] --- the variable depends on other - variables listed in \texttt{}. These dependence relations - determine the order of internal initialization of the variables. If - you need to change these relations then be careful not to break the - dependence relations of other relevant variables. If - \texttt{depend()} or \texttt{depend} is used then \fpy will not try - to guess the dependence relations automatically. -\item[\texttt{note()}] --- with this attribute you can - include human readable documentation strings to the LaTeX document - that \fpy generates. Do not insert here information that \fpy can - establish by itself, such as, types, sizes, lengths of the - variables. Here you can insert almost arbitrary LaTeX text. Note - that \texttt{} is mainly used inside the LaTeX - \texttt{description} environment. Hint: you can use - \texttt{\bs{}texttt\{\}} for typesetting variable \texttt{} - in LaTeX. In order to get a new line to the LaTeX document, use - \texttt{\bs{}n} followed by a space. For longer text, you may want - to use line continuation feature of Fortran 90/95 language: set - \texttt{\&} (ampersand) - to be the last character in a line. -\item[\texttt{parameter}] --- the variable is parameter and it must - have a value. If the parameter is used in dimension specification, - it is replaced by its value. (Are there any other usages of - parameters except in dimension specifications? Let me know and I'll - add support for it). -\end{description} - - -\subsection{C-expressions} -\label{sec:C-expr} - -The signature of a routine may contain C-expressions in -\begin{itemize} -\item \texttt{} for initializing particular variable, or in -\item \texttt{} of the \texttt{check} attribute, or in -\item \texttt{} of the \texttt{dimension} attribute. -\end{itemize} -A C-expression may contain -\begin{itemize} -\item standard C-statement, -\item functions offered in \texttt{math.h}, -\item previously initialized variables (study -the dependence relations) from the argument list, and -\item the following CPP-macros: - \begin{description} - \item[\texttt{len()}] --- the length of an array \texttt{}; - \item[\texttt{shape(,)}] --- the $n$-th dimension of an array - \texttt{}; - \item[\texttt{rank()}] --- the rank of an array \texttt{}; - \item[\texttt{slen()}] --- the length of a string \texttt{}. - \end{description} -\end{itemize} - - -In addition, when initializing arrays, an index vector \texttt{int - \_i[rank()];} -is available: \texttt{\_i[0]} refers to -the index of the first dimension, \texttt{\_i[1]} to the index of -the second dimension, etc. 
For example, the argument type declaration\\ -\hspace*{2em}\texttt{integer a(10) = \_i[0]}\\ -is equivalent with the following Python statement\\ -\hspace*{2em}\texttt{a = array(range(10))} - - -\subsection{Required/optional arguments} -\label{sec:reqoptargs} - -When \texttt{optional} attribute is used (including the usage of -\texttt{} without the \texttt{required} attribute), the -corresponding variable in the argument list of a Fortran routine is -appended to the optional argument list of the wrapper function. - -For optional array argument all dimensions must be bounded (not -\texttt{(*)} or \texttt{(:)}) and defined at the time of -initialization (dependence relations). - -If the \texttt{None} object is passed in in place of a required array -argument, it will be considered as optional: that is, the memory is -allocated (of course, if it has unbounded dimensions, an exception -will be raised), and if \texttt{} is defined, -initialization is carried out. - - -\subsection{Internal checks} -\label{sec:intchecks} - -All array arguments are checked against the correctness of their rank. -If there is a mismatch, \fpy attempts to fix that by constructing an -array with a correct rank from the given array argument (there will be -no performance hit as no data is copied). The freedom to do so is -given only if some dimensions are unbounded or their value is 1. An -exception is raised when the sizes will not match. - -All bounded dimensions of an array are checked to be larger or equal -to the dimensions specified in the signature. - -So, you don't need to give explicit \texttt{check} attributes to check -these internal checks. - - -\subsection{Call-back modules} -\label{sec:cbmodule} - -A Fortran routine may have \texttt{external} arguments (call-back -functions). The signatures of the call-back functions must be defined -in a call-back \texttt{module} block (its name contains -\texttt{\_\_user\_\_}), in general; other possibilities are described -in the \texttt{external} attribute specification (see -Sec.~\ref{sec:attributes}). For the signatures of call-back -functions the following restrictions apply: -\begin{itemize} -\item Attributes \texttt{external}, \texttt{check(...)}, and - initialization statements are ignored. -\item Attribute \texttt{optional} is used only for changing the order - of the arguments. -\item For arrays all dimension bounds must be specified. They may be - C-expressions containing variables from the argument list. - Note that here CPP-macros \texttt{len}, \texttt{shape}, - \texttt{rank}, and \texttt{slen} are not available. -\end{itemize} - - -\subsection{Common blocks} -\label{sec:commonblocks} - -All fields in a common block are mapped to arrays of appropriate sizes -and types. Scalars are mapped to rank-0 arrays. For multi-dimensional -fields the corresponding arrays are transposed. In the type -declarations of the variables representing the common block fields, -only \texttt{dimension()}, \texttt{intent(hide)}, and -\texttt{note()} attributes are used, others are ignored. - -\subsection{Including files} -\label{sec:include} - -You can include files to the signature file using -\begin{verbatim} -include '' -\end{verbatim} -statement. It can be used in any part of the signature file. -If the file \texttt{} does not exists or it is not in the path, -the \texttt{include} line is ignored. - -\subsection{\fpy directives} -\label{sec:directives} - -You can insert signature statements directly to Fortran source codes -as comments. 
Anything that follows \texttt{f2py} is -regarded as normal statement for \fpy. - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: - diff --git a/numpy/f2py/doc/using_F_compiler.txt b/numpy/f2py/doc/using_F_compiler.txt deleted file mode 100644 index 3067f0776..000000000 --- a/numpy/f2py/doc/using_F_compiler.txt +++ /dev/null @@ -1,147 +0,0 @@ - -Title: Wrapping F compiled Fortran 90 modules with F2PY - ================================================ - -Rationale: The F compiler does not support external procedures which - makes it impossible to use it in F2PY in a normal way. - This document describes a workaround to this problem so - that F compiled codes can be still wrapped with F2PY. - -Author: Pearu Peterson -Date: May 8, 2002 - -Acknowledgement: Thanks to Siegfried Gonzi who hammered me to produce - this document. - -Normally wrapping Fortran 90 modules to Python using F2PY is carried -out with the following command - - f2py -c -m fun foo.f90 - -where file foo.f90 contains, for example, - -module foo - public :: bar - contains - subroutine bar (a) - integer,intent(inout) :: a - print *,"Hello from foo.bar" - print *,"a=",a - a = a + 5 - print *,"a=",a - end subroutine bar -end module foo - -Then with a supported F90 compiler (running `f2py -c --help-compiler' -will display the found compilers) f2py will generate an extension -module fun.so into the current directory and the Fortran module foo -subroutine bar can be called from Python as follows - ->>> import fun ->>> print fun.foo.bar.__doc__ -bar - Function signature: - bar(a) -Required arguments: - a : in/output rank-0 array(int,'i') - ->>> from Numeric import array ->>> a = array(3) ->>> fun.foo.bar(a) - Hello from foo.bar - a= 3 - a= 8 ->>> a -8 ->>> - -This works nicely with all supported Fortran compilers. - -However, the F compiler (http://www.fortran.com/F/compilers.html) is -an exception. Namely, the F compiler is designed to recognize only -module procedures (and main programs, of course) but F2PY needs to -compile also the so-called external procedures that it generates to -facilitate accessing Fortran F90 module procedures from C and -subsequently from Python. As a result, wrapping F compiled Fortran -procedures to Python is _not_ possible using the simple procedure as -described above. But, there is a workaround that I'll describe below -in five steps. - -1) Compile foo.f90: - - F -c foo.f90 - -This creates an object file foo.o into the current directory. - -2) Create the signature file: - - f2py foo.f90 -h foo.pyf - -This creates a file foo.pyf containing - -module foo ! in foo.f90 - real public :: bar - subroutine bar(a) ! in foo.f90:foo - integer intent(inout) :: a - end subroutine bar -end module foo - -3) Open the file foo.pyf with your favorite text editor and change the - above signature to - -python module foo - interface - subroutine bar(a) - fortranname foo_MP_bar - intent(c) bar - integer intent(in,out) :: a - end subroutine bar - end interface -end python module foo - -The most important modifications are - - a) adding `python' keyword everywhere before the `module' keyword - - b) including an `interface' block around the all subroutine blocks. - - c) specifying the real symbol name of the subroutine using - `fortranname' statement. F generated symbol names are in the form - _MP_ - - d) specifying that subroutine is `intent(c)'. - -Notice that the `intent(inout)' attribute is changed to -`intent(in,out)' that instructs the wrapper to return the modified -value of `a'. 
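A minimal Python sketch of this naming rule (a hypothetical helper, not
something f2py or the F compiler provides) shows where the `fortranname'
value used in step 3 comes from:

  def f_symbol(module, procedure):
      # The F compiler names a module procedure
      # <modulename>_MP_<procedurename>, as described above.
      return "%s_MP_%s" % (module, procedure)

  print(f_symbol("foo", "bar"))   # prints: foo_MP_bar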
- -4) Build the extension module - - f2py -c foo.pyf foo.o --fcompiler=Gnu /opt/F/lib/quickfit.o \ - /opt/F/lib/libf96.a - -This will create the extension module foo.so into the current -directory. Notice that you must use Gnu compiler (gcc) for linking. -And the paths to F specific object files and libraries may differ for -your F installation. - -5) Finally, we can call the module subroutine `bar' from Python - ->>> import foo ->>> print foo.bar.__doc__ -bar - Function signature: - a = bar(a) -Required arguments: - a : input int -Return objects: - a : int - ->>> foo.bar(3) -8 ->>> - -Notice that the F compiled module procedures are called as ordinary -external procedures. Also I/O seems to be lacking for F compiled -Fortran modules. - -Enjoy, - Pearu diff --git a/numpy/f2py/doc/win32_notes.txt b/numpy/f2py/doc/win32_notes.txt deleted file mode 100644 index 1b7b9029c..000000000 --- a/numpy/f2py/doc/win32_notes.txt +++ /dev/null @@ -1,85 +0,0 @@ -The following notes are from Eric Jones. - -My Setup: - -For Python/Fortran development, I run Windows 2000 and use the mingw32 -(www.mingw.org) set of gcc/g77 compilers and tools (gcc 2.95.2) to build python -extensions. I'll also ocassionally use MSVC for extension development, but -rarely on projects that include Fortran code. This short HOWTO describes how -I use f2py in the Windows environment. Pretty much everything is done from -a CMD (DOS) prompt, so you'll need to be familiar with using shell commands. - -Installing f2py: - -Before installing f2py, you'll need to install python. I use python2.1 (maybe -python2.2 will be out by the time you read this). Any version of Python beyond -version 1.52 should be fine. See www.python.org for info on installing Python. - -You'll also need Numeric which is available at -http://sourceforge.net/projects/numpy/. The latest version is 20.3. - -Since Pearu has moved to a setup.py script, installation is pretty easy. You -can download f2py from http://cens.ioc.ee/projects/f2py2e/. The latest public -release is http://cens.ioc.ee/projects/f2py2e/rel-3.x/f2py-3.latest.tgz. Even -though this is a .tgz file instead of a .zip file, most standard compression -utilities such as WinZip (www.winzip.com) handle unpacking .tgz files -automatically. Here are the download steps: - - 1. Download the latest version of f2py and save it to disk. - - 2. Use WinZip or some other tool to open the "f2py.xxx.tgz" file. - a. When WinZip says archive contains one file, "f2py.xxx.tar" - and ask if it should open it, respond with "yes". - b. Extract (use the extract button at the top) all the files - in the archive into a file. I'll use c:\f2py2e - - 3. Open a cmd prompt by clicking start->run and typing "cmd.exe". - Now type the following commands. - - C:\WINDOWS\SYSTEM32> cd c:\f2py2e - C:\F2PY2E> python setup.py install - - This will install f2py in the c:\python21\f2py2e directory. It - also copies a few scripts into the c:\python21\Scripts directory. - Thats all there is to installing f2py. Now lets set up the environment - so that f2py is easy to use. - - 4. You need to set up a couple of environement variables. The path - "c:\python21\Scripts" needs to be added to your path variables. - To do this, go to the enviroment variables settings page. This is - where it is on windows 2000: - - Desktop->(right click)My Computer->Properties->Advanced-> - Environment Variables - - a. Add "c:\python21\Scripts" to the end of the Path variable. - b. If it isn't already there, add ".py" to the PATHEXT variable. 
- This tells the OS to execute f2py.py even when just "f2py" is - typed at a command prompt. - - 5. Well, there actually isn't anything to be done here. The Python - installation should have taken care of associating .py files with - Python for execution, so you shouldn't have to do anything to - registry settings. - -To test your installation, open a new cmd prompt, and type the following: - - C:\WINDOWS\SYSTEM32> f2py - Usage: - f2py [] [[[only:]||[skip:]] \ - ] \ - [: ...] - ... - -This prints out the usage information for f2py. If it doesn't, there is -something wrong with the installation. - -Testing: -The f2py test scripts are kinda Unix-centric, so they don't work under windows. - -XXX include test script XXX. - -Compiler and setup.py issues: - -XXX - diff --git a/numpy/f2py/docs/FAQ.txt b/numpy/f2py/docs/FAQ.txt deleted file mode 100644 index 416560e92..000000000 --- a/numpy/f2py/docs/FAQ.txt +++ /dev/null @@ -1,615 +0,0 @@ - -====================================================================== - F2PY Frequently Asked Questions -====================================================================== - -.. contents:: - -General information -=================== - -Q: How to get started? ----------------------- - -First, install__ F2PY. Then check that F2PY installation works -properly (see below__). Try out a `simple example`__. - -Read `F2PY Users Guide and Reference Manual`__. It contains lots -of complete examples. - -If you have any questions/problems when using F2PY, don't hesitate to -turn to `F2PY users mailing list`__ or directly to me. - -__ index.html#installation -__ #testing -__ index.html#usage -__ usersguide/index.html -__ index.html#mailing-list - -Q: When to report bugs? ------------------------ - -* If F2PY scanning fails on Fortran sources that otherwise compile - fine. - -* After checking that you have the latest version of F2PY from its - CVS. It is possible that a bug has been fixed already. See also the - log entries in the file `HISTORY.txt`_ (`HISTORY.txt in CVS`_). - -* After checking that your Python and Numerical Python installations - work correctly. - -* After checking that your C and Fortran compilers work correctly. - - -Q: How to report bugs? ----------------------- - -You can send bug reports directly to me. Please, include information -about your platform (operating system, version) and -compilers/linkers, e.g. the output (both stdout/stderr) of -:: - - python -c 'import f2py2e.diagnose;f2py2e.diagnose.run()' - -Feel free to add any other relevant information. However, avoid -sending the output of F2PY generated ``.pyf`` files (unless they are -manually modified) or any binary files like shared libraries or object -codes. - -While reporting bugs, you may find the following notes useful: - -* `How To Ask Questions The Smart Way`__ by E. S. Raymond and R. Moen. - -* `How to Report Bugs Effectively`__ by S. Tatham. - -__ http://www.catb.org/~esr/faqs/smart-questions.html -__ http://www.chiark.greenend.org.uk/~sgtatham/bugs.html - -Installation -============ - -Q: How to use F2PY with different Python versions? --------------------------------------------------- - -Run the installation command using the corresponding Python -executable. For example, -:: - - python2.1 setup.py install - -installs the ``f2py`` script as ``f2py2.1``. - -See `Distutils User Documentation`__ for more information how to -install Python modules to non-standard locations. 
- -__ http://www.python.org/sigs/distutils-sig/doc/inst/inst.html - - -Q: Why F2PY is not working after upgrading? -------------------------------------------- - -If upgrading from F2PY version 2.3.321 or earlier then remove all f2py -specific files from ``/path/to/python/bin`` directory before -running installation command. - -Q: How to get/upgrade numpy_distutils when using F2PY from CVS? ---------------------------------------------------------------- - -To get numpy_distutils from SciPy CVS repository, run -:: - - cd cvs/f2py2e/ - make numpy_distutils - -This will checkout numpy_distutils to the current directory. - -You can upgrade numpy_distutils by executing -:: - - cd cvs/f2py2e/numpy_distutils - cvs update -Pd - -and install it by executing -:: - - cd cvs/f2py2e/numpy_distutils - python setup_numpy_distutils.py install - -In most of the time, f2py2e and numpy_distutils can be upgraded -independently. - -Testing -======= - -Q: How to test if F2PY is installed correctly? ----------------------------------------------- - -Run -:: - - f2py - -without arguments. If F2PY is installed correctly then it should print -the usage information for f2py. - -Q: How to test if F2PY is working correctly? --------------------------------------------- - -For a quick test, try out an example problem from Usage__ -section in `README.txt`_. - -__ index.html#usage - -For running F2PY unit tests, see `TESTING.txt`_. - - -Q: How to run tests and examples in f2py2e/test-suite/ directory? ---------------------------------------------------------------------- - -You shouldn't. These tests are obsolete and I have no intention to -make them work. They will be removed in future. - - -Compiler/Platform-specific issues -================================= - -Q: What are supported platforms and compilers? ----------------------------------------------- - -F2PY is developed on Linux system with a GCC compiler (versions -2.95.x, 3.x). Fortran 90 related hooks are tested against Intel -Fortran Compiler. F2PY should work under any platform where Python and -Numeric are installed and has supported Fortran compiler installed. - -To see a list of supported compilers, execute:: - - f2py -c --help-fcompiler - -Example output:: - - List of available Fortran compilers: - --fcompiler=gnu GNU Fortran Compiler (3.3.4) - --fcompiler=intel Intel Fortran Compiler for 32-bit apps (8.0) - List of unavailable Fortran compilers: - --fcompiler=absoft Absoft Corp Fortran Compiler - --fcompiler=compaq Compaq Fortran Compiler - --fcompiler=compaqv DIGITAL|Compaq Visual Fortran Compiler - --fcompiler=hpux HP Fortran 90 Compiler - --fcompiler=ibm IBM XL Fortran Compiler - --fcompiler=intele Intel Fortran Compiler for Itanium apps - --fcompiler=intelev Intel Visual Fortran Compiler for Itanium apps - --fcompiler=intelv Intel Visual Fortran Compiler for 32-bit apps - --fcompiler=lahey Lahey/Fujitsu Fortran 95 Compiler - --fcompiler=mips MIPSpro Fortran Compiler - --fcompiler=nag NAGWare Fortran 95 Compiler - --fcompiler=pg Portland Group Fortran Compiler - --fcompiler=sun Sun|Forte Fortran 95 Compiler - --fcompiler=vast Pacific-Sierra Research Fortran 90 Compiler - List of unimplemented Fortran compilers: - --fcompiler=f Fortran Company/NAG F Compiler - For compiler details, run 'config_fc --verbose' setup command. - - -Q: How to use the F compiler in F2PY? -------------------------------------- - -Read `f2py2e/doc/using_F_compiler.txt`__. It describes why the F -compiler cannot be used in a normal way (i.e. 
using ``-c`` switch) to -build F2PY generated modules. It also gives a workaround to this -problem. - -__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/doc/using_F_compiler.txt?rev=HEAD&content-type=text/vnd.viewcvs-markup - -Q: How to use F2PY under Windows? ---------------------------------- - -F2PY can be used both within Cygwin__ and MinGW__ environments under -Windows, F2PY can be used also in Windows native terminal. -See the section `Setting up environment`__ for Cygwin and MinGW. - -__ http://cygwin.com/ -__ http://www.mingw.org/ -__ http://cens.ioc.ee/~pearu/numpy/BUILD_WIN32.html#setting-up-environment - -Install numpy_distutils and F2PY. Win32 installers of these packages -are provided in `F2PY Download`__ section. - -__ http://cens.ioc.ee/projects/f2py2e/#download - -Use ``--compiler=`` and ``--fcompiler`` F2PY command line switches to -to specify which C and Fortran compilers F2PY should use, respectively. - -Under MinGW environment, ``mingw32`` is default for a C compiler. - -Supported and Unsupported Features -================================== - -Q: Does F2PY support ``ENTRY`` statements? ------------------------------------------- - -Yes, starting at F2PY version higher than 2.39.235_1706. - -Q: Does F2PY support derived types in F90 code? ------------------------------------------------ - -Not yet. However I do have plans to implement support for F90 TYPE -constructs in future. But note that the task in non-trivial and may -require the next edition of F2PY for which I don't have resources to -work with at the moment. - -Jeffrey Hagelberg from LLNL has made progress on adding -support for derived types to f2py. He writes: - - At this point, I have a version of f2py that supports derived types - for most simple cases. I have multidimensional arrays of derived - types and allocatable arrays of derived types working. I'm just now - starting to work on getting nested derived types to work. I also - haven't tried putting complex number in derived types yet. - -Hopefully he can contribute his changes to f2py soon. - -Q: Does F2PY support pointer data in F90 code? ------------------------------------------------ - -No. I have never needed it and I haven't studied if there are any -obstacles to add pointer data support to F2PY. - -Q: What if Fortran 90 code uses ``(kind=KIND(..))``? ---------------------------------------------------------------- - -Currently, F2PY can handle only ``(kind=)`` -declarations where ```` is a numeric integer (e.g. 1, 2, -4,...) but not a function call ``KIND(..)`` or any other -expression. F2PY needs to know what would be the corresponding C type -and a general solution for that would be too complicated to implement. - -However, F2PY provides a hook to overcome this difficulty, namely, -users can define their own to maps. For -example, if Fortran 90 code contains:: - - REAL(kind=KIND(0.0D0)) ... - -then create a file ``.f2py_f2cmap`` (into the working directory) -containing a Python dictionary:: - - {'real':{'KIND(0.0D0)':'double'}} - -for instance. - -Or more generally, the file ``.f2py_f2cmap`` must contain a dictionary -with items:: - - : {:} - -that defines mapping between Fortran type:: - - ([kind=]) - -and the corresponding ````. ```` can be one of the -following:: - - char - signed_char - short - int - long_long - float - double - long_double - complex_float - complex_double - complex_long_double - string - -For more information, see ``f2py2e/capi_maps.py``. - -Related software -================ - -Q: How F2PY distinguishes from Pyfort? 
--------------------------------------- - -F2PY and Pyfort have very similar aims and ideology of how they are -targeted. Both projects started to evolve in the same year 1999 -independently. When we discovered each others projects, a discussion -started to join the projects but that unfortunately failed for -various reasons, e.g. both projects had evolved too far that merging -the tools would have been impractical and giving up the efforts that -the developers of both projects have made was unacceptable to both -parties. And so, nowadays we have two tools for connecting Fortran -with Python and this fact will hardly change in near future. To decide -which one to choose is a matter of taste, I can only recommend to try -out both to make up your choice. - -At the moment F2PY can handle more wrapping tasks than Pyfort, -e.g. with F2PY one can wrap Fortran 77 common blocks, Fortran 90 -module routines, Fortran 90 module data (including allocatable -arrays), one can call Python from Fortran, etc etc. F2PY scans Fortran -codes to create signature (.pyf) files. F2PY is free from most of the -limitations listed in in `the corresponding section of Pyfort -Reference Manual`__. - -__ http://pyfortran.sourceforge.net/pyfort/pyfort_reference.htm#pgfId-296925 - -There is a conceptual difference on how F2PY and Pyfort handle the -issue of different data ordering in Fortran and C multi-dimensional -arrays. Pyfort generated wrapper functions have optional arguments -TRANSPOSE and MIRROR that can be used to control explicitly how the array -arguments and their dimensions are passed to Fortran routine in order -to deal with the C/Fortran data ordering issue. F2PY generated wrapper -functions hide the whole issue from an end-user so that translation -between Fortran and C/Python loops and array element access codes is -one-to-one. How the F2PY generated wrappers deal with the issue is -determined by a person who creates a signature file via using -attributes like ``intent(c)``, ``intent(copy|overwrite)``, -``intent(inout|in,out|inplace)`` etc. - -For example, let's consider a typical usage of both F2PY and Pyfort -when wrapping the following simple Fortran code: - -.. include:: simple.f - :literal: - -The comment lines starting with ``cf2py`` are read by F2PY (so that we -don't need to generate/handwrite an intermediate signature file in -this simple case) while for a Fortran compiler they are just comment -lines. - -And here is a Python version of the Fortran code: - -.. include:: pytest.py - :literal: - -To generate a wrapper for subroutine ``foo`` using F2PY, execute:: - - $ f2py -m f2pytest simple.f -c - -that will generate an extension module ``f2pytest`` into the current -directory. - -To generate a wrapper using Pyfort, create the following file - -.. include:: pyforttest.pyf - :literal: - -and execute:: - - $ pyfort pyforttest - -In Pyfort GUI add ``simple.f`` to the list of Fortran sources and -check that the signature file is in free format. And then copy -``pyforttest.so`` from the build directory to the current directory. - -Now, in Python - -.. include:: simple_session.dat - :literal: - -Q: Can Pyfort .pyf files used with F2PY and vice versa? -------------------------------------------------------- - -After some simple modifications, yes. You should take into account the -following differences in Pyfort and F2PY .pyf files. - -+ F2PY signature file contains ``python module`` and ``interface`` - blocks that are equivalent to Pyfort ``module`` block usage. 
- -+ F2PY attribute ``intent(inplace)`` is equivalent to Pyfort - ``intent(inout)``. F2PY ``intent(inout)`` is a strict (but safe) - version of ``intent(inplace)``, any mismatch in arguments with - expected type, size, or contiguouness will trigger an exception - while ``intent(inplace)`` (dangerously) modifies arguments - attributes in-place. - -Misc -==== - -Q: How to establish which Fortran compiler F2PY will use? ---------------------------------------------------------- - -This question may be releavant when using F2PY in Makefiles. Here -follows a script demonstrating how to determine which Fortran compiler -and flags F2PY will use:: - - # Using post-0.2.2 numpy_distutils - from numpy_distutils.fcompiler import new_fcompiler - compiler = new_fcompiler() # or new_fcompiler(compiler='intel') - compiler.dump_properties() - - # Using pre-0.2.2 numpy_distutils - import os - from numpy_distutils.command.build_flib import find_fortran_compiler - def main(): - fcompiler = os.environ.get('FC_VENDOR') - fcompiler_exec = os.environ.get('F77') - f90compiler_exec = os.environ.get('F90') - fc = find_fortran_compiler(fcompiler, - fcompiler_exec, - f90compiler_exec, - verbose = 0) - print 'FC=',fc.f77_compiler - print 'FFLAGS=',fc.f77_switches - print 'FOPT=',fc.f77_opt - if __name__ == "__main__": - main() - -Users feedback -============== - -Q: Where to find additional information on using F2PY? ------------------------------------------------------- - -There are several F2PY related tutorials, slides, papers, etc -available: - -+ `Fortran to Python Interface Generator with an Application to - Aerospace Engineering`__ by P. Peterson, J. R. R. A. Martins, and - J. J. Alonso in `In Proceedings of the 9th International Python - Conference`__, Long Beach, California, 2001. - -__ http://www.python9.org/p9-cdrom/07/index.htm -__ http://www.python9.org/ - -+ Section `Adding Fortran90 code`__ in the UG of `The Bolometer Data - Analysis Project`__. - -__ http://www.astro.rub.de/laboca/download/boa_master_doc/7_4Adding_Fortran90_code.html -__ http://www.openboa.de/ - -+ Powerpoint presentation `Python for Scientific Computing`__ by Eric - Jones in `The Ninth International Python Conference`__. - -__ http://www.python9.org/p9-jones.ppt -__ http://www.python9.org/ - -+ Paper `Scripting a Large Fortran Code with Python`__ by Alvaro Caceres - Calleja in `International Workshop on Software Engineering for High - Performance Computing System Applications`__. - -__ http://csdl.ics.hawaii.edu/se-hpcs/pdf/calleja.pdf -__ http://csdl.ics.hawaii.edu/se-hpcs/ - -+ Section `Automatic building of C/Fortran extension for Python`__ by - Simon Lacoste-Julien in `Summer 2002 Report about Hybrid Systems - Modelling`__. - -__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html#tth_sEc3.4 -__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html - -+ `Scripting for Computational Science`__ by Hans Petter Langtangen - (see the `Mixed language programming`__ and `NumPy array programming`__ - sections for examples on using F2PY). - -__ http://www.ifi.uio.no/~inf3330/lecsplit/ -__ http://www.ifi.uio.no/~inf3330/lecsplit/slide662.html -__ http://www.ifi.uio.no/~inf3330/lecsplit/slide718.html - -+ Chapters 5 and 9 of `Python Scripting for Computational Science`__ - by H. P. Langtangen for case studies on using F2PY. 
- -__ http://www.springeronline.com/3-540-43508-5 - -+ Section `Fortran Wrapping`__ in `Continuity`__, a computational tool - for continuum problems in bioengineering and physiology. - -__ http://www.continuity.ucsd.edu/cont6_html/docs_fram.html -__ http://www.continuity.ucsd.edu/ - -+ Presentation `PYFORT and F2PY: 2 ways to bind C and Fortran with Python`__ - by Reiner Vogelsang. - -__ http://www.prism.enes.org/WPs/WP4a/Slides/pyfort/pyfort.html - -+ Lecture slides of `Extending Python: speed it up`__. - -__ http://www.astro.uni-bonn.de/~heith/lecture_pdf/friedrich5.pdf - -+ Wiki topics on `Wrapping Tools`__ and `Wrapping Bemchmarks`__ for Climate - System Center at the University of Chicago. - -__ https://geodoc.uchicago.edu/climatewiki/DiscussWrappingTools -__ https://geodoc.uchicago.edu/climatewiki/WrappingBenchmarks - -+ `Performance Python with Weave`__ by Prabhu Ramachandran. - -__ http://www.numpy.org/documentation/weave/weaveperformance.html - -+ `How To Install py-f2py on Mac OSX`__ - -__ http://py-f2py.darwinports.com/ - -Please, let me know if there are any other sites that document F2PY -usage in one or another way. - -Q: What projects use F2PY? --------------------------- - -+ `SciPy: Scientific tools for Python`__ - -__ http://www.numpy.org/ - -+ `The Bolometer Data Analysis Project`__ - -__ http://www.openboa.de/ - -+ `pywavelet`__ - -__ http://www.met.wau.nl/index.html?http://www.met.wau.nl/medewerkers/moenea/python/pywavelet.html - -+ `PyARTS: an ARTS related Python package`__. - -__ http://www.met.ed.ac.uk/~cory/PyARTS/ - -+ `Python interface to PSPLINE`__, a collection of Spline and - Hermite interpolation tools for 1D, 2D, and 3D datasets on - rectilinear grids. - -__ http://pypspline.sourceforge.net - -+ `Markovian Analysis Package for Python`__. - -__ http://pymc.sourceforge.net - -+ `Modular toolkit for Data Processing (MDP)`__ - -__ http://mdp-toolkit.sourceforge.net/ - - -Please, send me a note if you are using F2PY in your project. - -Q: What people think about F2PY? --------------------------------- - -*F2PY is GOOD*: - -Here are some comments people have posted to f2py mailing list and c.l.py: - -+ Ryan Krauss: I really appreciate f2py. It seems weird to say, but I - am excited about relearning FORTRAN to compliment my python stuff. - -+ Fabien Wahl: f2py is great, and is used extensively over here... - -+ Fernando Perez: Anyway, many many thanks for this amazing tool. - - I haven't used pyfort, but I can definitely vouch for the amazing quality of - f2py. And since f2py is actively used by numpy, it won't go unmaintained. - It's quite impressive, and very easy to use. - -+ Kevin Mueller: First off, thanks to those responsible for F2PY; - its been an integral tool of my research for years now. - -+ David Linke: Best regards and thanks for the great tool! - -+ Perrin Meyer: F2Py is really useful! - -+ Hans Petter Langtangen: First of all, thank you for developing - F2py. This is a very important contribution to the scientific - computing community. We are using F2py a lot and are very happy with - it. - -+ Berthold Höllmann: Thank's alot. It seems it is also working in my - 'real' application :-) - -+ John Hunter: At first I wrapped them with f2py (unbelievably easy!)... - -+ Cameron Laird: Among many other features, Python boasts a mature - f2py, which makes it particularly rewarding to yoke Fortran- and - Python-coded modules into finished applications. - -+ Ryan Gutenkunst: f2py is sweet magic. 
- -*F2PY is BAD*: - -+ `Is it worth using on a large scale python drivers for Fortran - subroutines, interfaced with f2py?`__ - -__ http://sepwww.stanford.edu/internal/computing/python.html - -Additional comments on F2PY, good or bad, are welcome! - -.. References: -.. _README.txt: index.html -.. _HISTORY.txt: HISTORY.html -.. _HISTORY.txt in CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup -.. _TESTING.txt: TESTING.html diff --git a/numpy/f2py/docs/HISTORY.txt b/numpy/f2py/docs/HISTORY.txt deleted file mode 100644 index 72b683eb0..000000000 --- a/numpy/f2py/docs/HISTORY.txt +++ /dev/null @@ -1,1044 +0,0 @@ -.. -*- rest -*- - -========================= - F2PY History -========================= - -:Author: Pearu Peterson -:Web-site: http://cens.ioc.ee/projects/f2py2e/ -:Date: $Date: 2005/09/16 08:36:45 $ -:Revision: $Revision: 1.191 $ - -.. Contents:: - -Release 2.46.243 -===================== - -* common_rules.py - - - Fixed compiler warnings. - -* fortranobject.c - - - Fixed another dims calculation bug. - - Fixed dims calculation bug and added the corresponding check. - - Accept higher dimensional arrays if their effective rank matches. - Effective rank is multiplication of non-unit dimensions. - -* f2py2e.py - - - Added support for numpy.distutils version 0.4.0. - -* Documentation - - - Added example about ``intent(callback,hide)`` usage. Updates. - - Updated FAQ. - -* cb_rules.py - - - Fixed missing need kw error. - - Fixed getting callback non-existing extra arguments. - - External callback functions and extra_args can be set via - ext.module namespace. - - Avoid crash when external callback function is not set. - -* rules.py - - - Enabled ``intent(out)`` for ``intent(aux)`` non-complex scalars. - - Fixed splitting lines in F90 fixed form mode. - - Fixed FORTRANAME typo, relevant when wrapping scalar functions with - ``--no-wrap-functions``. - - Improved failure handling for callback functions. - - Fixed bug in writting F90 wrapper functions when a line length - is exactly 66. - -* cfuncs.py - - - Fixed dependency issue with typedefs. - - Introduced ``-DUNDERSCORE_G77`` that cause extra underscore to be - used for external names that contain an underscore. - -* capi_maps.py - - - Fixed typos. - - Fixed using complex cb functions. - -* crackfortran.py - - - Introduced parent_block key. Get ``use`` statements recursively - from parent blocks. - - Apply parameter values to kindselectors. - - Fixed bug evaluating ``selected_int_kind`` function. - - Ignore Name and Syntax errors when evaluating scalars. - - Treat ``_intType`` as ```` in get_parameters. - - Added support for F90 line continuation in fix format mode. - - Include optional attribute of external to signature file. - - Add ``entry`` arguments to variable lists. - - Treat \xa0 character as space. - - Fixed bug where __user__ callback subroutine was added to its - argument list. - - In strict 77 mode read only the first 72 columns. - - Fixed parsing ``v(i) = func(r)``. - - Fixed parsing ``integer*4::``. - - Fixed parsing ``1.d-8`` when used as a parameter value. - -Release 2.45.241_1926 -===================== - -* diagnose.py - - - Clean up output. - -* cb_rules.py - - - Fixed ``_cpointer`` usage for subroutines. - - Fortran function ``_cpointer`` can be used for callbacks. - -* func2subr.py - - - Use result name when wrapping functions with subroutines. - -* f2py2e.py - - - Fixed ``--help-link`` switch. - - Fixed ``--[no-]lower`` usage with ``-c`` option. 
- - Added support for ``.pyf.src`` template files. - -* __init__.py - - - Using ``exec_command`` in ``compile()``. - -* setup.py - - - Clean up. - - Disabled ``need_numpy_distutils`` function. From now on it is assumed - that proper version of ``numpy_distutils`` is already installed. - -* capi_maps.py - - - Added support for wrapping unsigned integers. In a .pyf file - ``integer(-1)``, ``integer(-2)``, ``integer(-4)`` correspond to - ``unsigned char``, ``unsigned short``, ``unsigned`` C types, - respectively. - -* tests/c/return_real.py - - - Added tests to wrap C functions returning float/double. - -* fortranobject.c - - - Added ``_cpointer`` attribute to wrapped objects. - -* rules.py - - - ``_cpointer`` feature for wrapped module functions is not - functional at the moment. - - Introduced ``intent(aux)`` attribute. Useful to save a value - of a parameter to auxiliary C variable. Note that ``intent(aux)`` - implies ``intent(c)``. - - Added ``usercode`` section. When ``usercode`` is used in ``python - module`` block twise then the contents of the second multi-line - block is inserted after the definition of external routines. - - Call-back function arguments can be CObjects. - -* cfuncs.py - - - Allow call-back function arguments to be fortran objects. - - Allow call-back function arguments to be built-in functions. - -* crackfortran.py - - - Fixed detection of a function signature from usage example. - - Cleaned up -h output for intent(callback) variables. - - Repair malformed argument list (missing argument name). - - Warn on the usage of multiple attributes without type specification. - - Evaluate only scalars ```` (e.g. not of strings). - - Evaluate ```` using parameters name space. - - Fixed resolving `()[result()]` pattern. - - ``usercode`` can be used more than once in the same context. - -Release 2.43.239_1831 -===================== - -* auxfuncs.py - - - Made ``intent(in,inplace)`` to mean ``intent(inplace)``. - -* f2py2e.py - - - Intoduced ``--help-link`` and ``--link-`` - switches to link generated extension module with system - ```` as defined by numpy_distutils/system_info.py. - -* fortranobject.c - - - Patch to make PyArray_CanCastSafely safe on 64-bit machines. - Fixes incorrect results when passing ``array('l')`` to - ``real*8 intent(in,out,overwrite)`` arguments. - -* rules.py - - - Avoid empty continuation lines in Fortran wrappers. - -* cfuncs.py - - - Adding ``\0`` at the end of a space-padded string, fixes tests - on 64-bit Gentoo. - -* crackfortran.py - - - Fixed splitting lines with string parameters. - -Release 2.43.239_1806 -===================== - -* Tests - - - Fixed test site that failed after padding strings with spaces - instead of zeros. - -* Documentation - - - Documented ``intent(inplace)`` attribute. - - Documented ``intent(callback)`` attribute. - - Updated FAQ, added Users Feedback section. - -* cfuncs.py - - - Padding longer (than provided from Python side) strings with spaces - (that is Fortran behavior) instead of nulls (that is C strncpy behavior). - -* f90mod_rules.py - - - Undoing rmbadnames in Python and Fortran layers. - -* common_rules.py - - - Renaming common block items that have names identical to C keywords. - - Fixed wrapping blank common blocks. - -* fortranobject.h - - - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller). - -* fortranobject.c - - - Introduced ``intent(inplace)`` feature. - - Fix numarray reference counts (patch by Todd). - - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller). 
- - Enabled F2PY_REPORT_ON_ARRAY_COPY for Numarray. - -* capi_maps.py - - - Always normalize .f2py_f2cmap keys to lower case. - -* rules.py - - - Disabled ``index`` macro as it conflicts with the one defined - in string.h. - - Moved ``externroutines`` up to make it visible to ``usercode``. - - Fixed bug in f90 code generation: no empty line continuation is - allowed. - - Fixed undefined symbols failure when ``fortranname`` is used - to rename a wrapped function. - - Support for ``entry`` statement. - -* auxfuncs.py - - - Made is* functions more robust with respect to parameters that - have no typespec specified. - - Using ``size_t`` instead of ``int`` as the type of string - length. Fixes issues on 64-bit platforms. - -* setup.py - - - Fixed bug of installing ``f2py`` script as ``.exe`` file. - -* f2py2e.py - - - ``--compiler=`` and ``--fcompiler=`` can be specified at the same time. - -* crackfortran.py - - - Fixed dependency detection for non-intent(in|inout|inplace) arguments. - They must depend on their dimensions, not vice-versa. - - Don't match ``!!f2py`` as a start of f2py directive. - - Only effective intent attributes will be output to ``-h`` target. - - Introduced ``intent(callback)`` to build interface between Python - functions and Fortran external routines. - - Avoid including external arguments to __user__ modules. - - Initial hooks to evaluate ``kind`` and ``selected_int_kind``. - - Evaluating parameters in {char,kind}selectors and applying rmbadname. - - Evaluating parameters using also module parameters. Fixed the order - of parameter evaluation. - - Fixed silly bug: when block name was not lower cased, it was not - recognized correctly. - - Applying mapping '.false.'->'False', '.true.'->'True' to logical - parameters. TODO: Support for logical expressions is needed. - - Added support for multiple statements in one line (separated with semicolon). - - Impl. get_useparameters function for using parameter values from - other f90 modules. - - Applied Bertholds patch to fix bug in evaluating expressions - like ``1.d0/dvar``. - - Fixed bug in reading string parameters. - - Evaluating parameters in charselector. Code cleanup. - - Using F90 module parameters to resolve kindselectors. - - Made the evaluation of module data init-expression more robust. - - Support for ``entry`` statement. - - Fixed ``determineexprtype`` that in the case of parameters - returned non-dictionary objects. - - Use ``-*- fix -*-`` to specify that a file is in fixed format. - -Release 2.39.235_1693 -===================== - -* fortranobject.{h,c} - - - Support for allocatable string arrays. - -* cfuncs.py - - - Call-back arguments can now be also instances that have ``__call__`` method - as well as instance methods. - -* f2py2e.py - - - Introduced ``--include_paths ::..`` command line - option. - - Added ``--compiler=`` support to change the C/C++ compiler from - f2py command line. - -* capi_maps.py - - - Handle ``XDY`` parameter constants. - -* crackfortran.py - - - Handle ``XDY`` parameter constants. - - - Introduced formatpattern to workaround a corner case where reserved - keywords are used in format statement. Other than that, format pattern - has no use. - - - Parameters are now fully evaluated. - -* More splitting of documentation strings. - -* func2subr.py - fixed bug for function names that f77 compiler - would set ``integer`` type. - -Release 2.39.235_1660 -===================== - -* f2py2e.py - - - Fixed bug in using --f90flags=.. 
- -* f90mod_rules.py - - - Splitted generated documentation strings (to avoid MSVC issue when - string length>2k) - - - Ignore ``private`` module data. - -Release 2.39.235_1644 -===================== - -:Date:24 February 2004 - -* Character arrays: - - - Finished complete support for character arrays and arrays of strings. - - ``character*n a(m)`` is treated like ``character a(m,n)`` with ``intent(c)``. - - Character arrays are now considered as ordinary arrays (not as arrays - of strings which actually didn't work). - -* docs - - - Initial f2py manpage file f2py.1. - - Updated usersguide and other docs when using numpy_distutils 0.2.2 - and up. - -* capi_maps.py - - - Try harder to use .f2py_f2cmap mappings when kind is used. - -* crackfortran.py - - - Included files are first search in the current directory and - then from the source file directory. - - Ignoring dimension and character selector changes. - - Fixed bug in Fortran 90 comments of fixed format. - - Warn when .pyf signatures contain undefined symbols. - - Better detection of source code formats. Using ``-*- fortran -*-`` - or ``-*- f90 -*-`` in the first line of a Fortran source file is - recommended to help f2py detect the format, fixed or free, - respectively, correctly. - -* cfuncs.py - - - Fixed intent(inout) scalars when typecode=='l'. - - Fixed intent(inout) scalars when not using numarray. - - Fixed intent(inout) scalars when using numarray. - -* diagnose.py - - - Updated for numpy_distutils 0.2.2 and up. - - Added numarray support to diagnose. - -* fortranobject.c - - - Fixed nasty bug with intent(in,copy) complex slice arrays. - - Applied Todd's patch to support numarray's byteswapped or - misaligned arrays, requires numarray-0.8 or higher. - -* f2py2e.py - - - Applying new hooks for numpy_distutils 0.2.2 and up, keeping - backward compatibility with depreciation messages. - - Using always os.system on non-posix platforms in f2py2e.compile - function. - -* rules.py - - - Changed the order of buildcallback and usercode junks. - -* setup.cfg - - - Added so that docs/ and tests/ directories are included to RPMs. - -* setup.py - - - Installing f2py.py instead of f2py.bat under NT. - - Introduced ``--with-numpy_distutils`` that is useful when making - f2py tar-ball with numpy_distutils included. - -Release 2.37.233-1545 -===================== - -:Date: 11 September 2003 - -* rules.py - - - Introduced ``interface_usercode`` replacement. When ``usercode`` - statement is used inside the first interface block, its contents - will be inserted at the end of initialization function of a F2PY - generated extension module (feature request: Berthold Höllmann). - - Introduced auxiliary function ``as_column_major_storage`` that - converts input array to an array with column major storage order - (feature request: Hans Petter Langtangen). - -* crackfortran.py - - - Introduced ``pymethoddef`` statement. - -* cfuncs.py - - - Fixed "#ifdef in #define TRYPYARRAYTEMPLATE" bug (patch thanks - to Bernhard Gschaider) - -* auxfuncs.py - - - Introduced ``getpymethod`` function. - - Enabled multi-line blocks in ``callprotoargument`` statement. - -* f90mod_rules.py - - - Undone "Fixed Warning 43 emitted by Intel Fortran compiler" that - causes (curios) segfaults. - -* fortranobject.c - - - Fixed segfaults (that were introduced with recent memory leak - fixes) when using allocatable arrays. - - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. 
If defined - then a message is printed to stderr whenever a copy of an array is - made and arrays size is larger than F2PY_REPORT_ON_ARRAY_COPY. - -Release 2.35.229-1505 -===================== - -:Date: 5 August 2003 - -* General - - - Introduced ``usercode`` statement (dropped ``c_code`` hooks). - -* setup.py - - - Updated the CVS location of numpy_distutils. - -* auxfuncs.py - - - Introduced ``isint1array(var)`` for fixing ``integer*1 intent(out)`` - support. - -* tests/f77/callback.py - - Introduced some basic tests. - -* src/fortranobject.{c,h} - - - Fixed memory leaks when getting/setting allocatable arrays. - (Bug report by Bernhard Gschaider) - - - Initial support for numarray (Todd Miller's patch). Use -DNUMARRAY - on the f2py command line to enable numarray support. Note that - there is no character arrays support and these hooks are not - tested with F90 compilers yet. - -* cfuncs.py - - - Fixed reference counting bug that appeared when constructing extra - argument list to callback functions. - - Added ``PyArray_LONG != PyArray_INT`` test. - -* f2py2e.py - - Undocumented ``--f90compiler``. - -* crackfortran.py - - - Introduced ``usercode`` statement. - - Fixed newlines when outputting multi-line blocks. - - Optimized ``getlincoef`` loop and ``analyzevars`` for cases where - len(vars) is large. - - Fixed callback string argument detection. - - Fixed evaluating expressions: only int|float expressions are - evaluated succesfully. - -* docs - - Documented -DF2PY_REPORT_ATEXIT feature. - -* diagnose.py - - Added CPU information and sys.prefix printout. - -* tests/run_all.py - - Added cwd to PYTHONPATH. - -* tests/f??/return_{real,complex}.py - - Pass "infinity" check in SunOS. - -* rules.py - - - Fixed ``integer*1 intent(out)`` support - - Fixed free format continuation of f2py generated F90 files. - -* tests/mixed/ - - Introduced tests for mixing Fortran 77, Fortran 90 fixed and free - format codes in one module. - -* f90mod_rules.py - - - Fixed non-prototype warnings. - - Fixed Warning 43 emitted by Intel Fortran compiler. - - Avoid long lines in Fortran codes to reduce possible problems with - continuations of lines. - -Public Release 2.32.225-1419 -============================ - -:Date: 8 December 2002 - -* docs/usersguide/ - - Complete revision of F2PY Users Guide - -* tests/run_all.py - - - New file. A Python script to run all f2py unit tests. - -* Removed files: buildmakefile.py, buildsetup.py. - -* tests/f77/ - - - Added intent(out) scalar tests. - -* f2py_testing.py - - - Introduced. It contains jiffies, memusage, run, cmdline functions - useful for f2py unit tests site. - -* setup.py - - - Install numpy_distutils only if it is missing or is too old - for f2py. - -* f90modrules.py - - - Fixed wrapping f90 module data. - - Fixed wrapping f90 module subroutines. - - Fixed f90 compiler warnings for wrapped functions by using interface - instead of external stmt for functions. - -* tests/f90/ - - - Introduced return_*.py tests. - -* func2subr.py - - - Added optional signature argument to createfuncwrapper. - - In f2pywrappers routines, declare external, scalar, remaining - arguments in that order. Fixes compiler error 'Invalid declaration' - for:: - - real function foo(a,b) - integer b - real a(b) - end - -* crackfortran.py - - - Removed first-line comment information support. - - Introduced multiline block. Currently usable only for - ``callstatement`` statement. - - Improved array length calculation in getarrlen(..). 
- - "From sky" program group is created only if ``groupcounter<1``. - See TODO.txt. - - Added support for ``dimension(n:*)``, ``dimension(*:n)``. They are - treated as ``dimesnion(*)`` by f2py. - - Fixed parameter substitution (this fixes TODO item by Patrick - LeGresley, 22 Aug 2001). - -* f2py2e.py - - - Disabled all makefile, setup, manifest file generation hooks. - - Disabled --[no]-external-modroutines option. All F90 module - subroutines will have Fortran/C interface hooks. - - --build-dir can be used with -c option. - - only/skip modes can be used with -c option. - - Fixed and documented `-h stdout` feature. - - Documented extra options. - - Introduced --quiet and --verbose flags. - -* cb_rules.py - - - Fixed debugcapi hooks for intent(c) scalar call-back arguments - (bug report: Pierre Schnizer). - - Fixed intent(c) for scalar call-back arguments. - - Improved failure reports. - -* capi_maps.py - - - Fixed complex(kind=..) to C type mapping bug. The following hold - complex==complex(kind=4)==complex*8, complex(kind=8)==complex*16 - - Using signed_char for integer*1 (bug report: Steve M. Robbins). - - Fixed logical*8 function bug: changed its C correspondence to - long_long. - - Fixed memory leak when returning complex scalar. - -* __init__.py - - - Introduced a new function (for f2py test site, but could be useful - in general) ``compile(source[,modulename,extra_args])`` for - compiling fortran source codes directly from Python. - -* src/fortranobject.c - - - Multi-dimensional common block members and allocatable arrays - are returned as Fortran-contiguous arrays. - - Fixed NULL return to Python without exception. - - Fixed memory leak in getattr(,'__doc__'). - - .__doc__ is saved to .__dict__ (previously - it was generated each time when requested). - - Fixed a nasty typo from the previous item that caused data - corruption and occasional SEGFAULTs. - - array_from_pyobj accepts arbitrary rank arrays if the last dimension - is undefined. E.g. dimension(3,*) accepts a(3,4,5) and the result is - array with dimension(3,20). - - Fixed (void*) casts to make g++ happy (bug report: eric). - - Changed the interface of ARR_IS_NULL macro to avoid "``NULL used in - arithmetics``" warnings from g++. - -* src/fortranobject.h - - - Undone previous item. Defining NO_IMPORT_ARRAY for - src/fortranobject.c (bug report: travis) - - Ensured that PY_ARRAY_UNIQUE_SYMBOL is defined only for - src/fortranobject.c (bug report: eric). - -* rules.py - - - Introduced dummy routine feature. - - F77 and F90 wrapper subroutines (if any) as saved to different - files, -f2pywrappers.f and -f2pywrappers2.f90, - respectively. Therefore, wrapping F90 requires numpy_distutils >= - 0.2.0_alpha_2.229. - - Fixed compiler warnings about meaningless ``const void (*f2py_func)(..)``. - - Improved error messages for ``*_from_pyobj``. - - Changed __CPLUSPLUS__ macros to __cplusplus (bug report: eric). - - Changed (void*) casts to (f2py_init_func) (bug report: eric). - - Removed unnecessary (void*) cast for f2py_has_column_major_storage - in f2py_module_methods definition (bug report: eric). - - Changed the interface of f2py_has_column_major_storage function: - removed const from the 1st argument. - -* cfuncs.py - - - Introduced -DPREPEND_FORTRAN. - - Fixed bus error on SGI by using PyFloat_AsDouble when ``__sgi`` is defined. - This seems to be `know bug`__ with Python 2.1 and SGI. - - string_from_pyobj accepts only arrays whos elements size==sizeof(char). - - logical scalars (intent(in),function) are normalized to 0 or 1. 
- - Removed NUMFROMARROBJ macro. - - (char|short)_from_pyobj now use int_from_pyobj. - - (float|long_double)_from_pyobj now use double_from_pyobj. - - complex_(float|long_double)_from_pyobj now use complex_double_from_pyobj. - - Rewrote ``*_from_pyobj`` to be more robust. This fixes segfaults if - getting * from a string. Note that int_from_pyobj differs - from PyNumber_Int in that it accepts also complex arguments - (takes the real part) and sequences (takes the 1st element). - - Removed unnecessary void* casts in NUMFROMARROBJ. - - Fixed casts in ``*_from_pyobj`` functions. - - Replaced CNUMFROMARROBJ with NUMFROMARROBJ. - -.. __: http://sourceforge.net/tracker/index.php?func=detail&aid=435026&group_id=5470&atid=105470 - -* auxfuncs.py - - - Introduced isdummyroutine(). - - Fixed islong_* functions. - - Fixed isintent_in for intent(c) arguments (bug report: Pierre Schnizer). - - Introduced F2PYError and throw_error. Using throw_error, f2py - rejects illegal .pyf file constructs that otherwise would cause - compilation failures or python crashes. - - Fixed islong_long(logical*8)->True. - - Introduced islogical() and islogicalfunction(). - - Fixed prototype string argument (bug report: eric). - -* Updated README.txt and doc strings. Starting to use docutils. - -* Speed up for ``*_from_pyobj`` functions if obj is a sequence. - -* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF`` - in ``GETSCALARFROMPYTUPLE``. - -Older Releases -============== - -:: - - *** Fixed missing includes when wrapping F90 module data. - *** Fixed typos in docs of build_flib options. - *** Implemented prototype calculator if no callstatement or - callprotoargument statements are used. A warning is issued if - callstatement is used without callprotoargument. - *** Fixed transposing issue with array arguments in callback functions. - *** Removed -pyinc command line option. - *** Complete tests for Fortran 77 functions returning scalars. - *** Fixed returning character bug if --no-wrap-functions. - *** Described how to wrap F compiled Fortran F90 module procedures - with F2PY. See doc/using_F_compiler.txt. - *** Fixed the order of build_flib options when using --fcompiler=... - *** Recognize .f95 and .F95 files as Fortran sources with free format. - *** Cleaned up the output of 'f2py -h': removed obsolete items, - added build_flib options section. - *** Added --help-compiler option: it lists available Fortran compilers - as detected by numpy_distutils/command/build_flib.py. This option - is available only with -c option. - - -:Release: 2.13.175-1250 -:Date: 4 April 2002 - -:: - - *** Fixed copying of non-contigious 1-dimensional arrays bug. - (Thanks to Travis O.). - - -:Release: 2.13.175-1242 -:Date: 26 March 2002 - -:: - - *** Fixed ignoring type declarations. - *** Turned F2PY_REPORT_ATEXIT off by default. - *** Made MAX,MIN macros available by default so that they can be - always used in signature files. - *** Disabled F2PY_REPORT_ATEXIT for FreeBSD. - - -:Release: 2.13.175-1233 -:Date: 13 March 2002 - -:: - - *** Fixed Win32 port when using f2py.bat. (Thanks to Erik Wilsher). - *** F2PY_REPORT_ATEXIT is disabled for MACs. - *** Fixed incomplete dependency calculator. - - -:Release: 2.13.175-1222 -:Date: 3 March 2002 - -:: - - *** Plugged a memory leak for intent(out) arrays with overwrite=0. - *** Introduced CDOUBLE_to_CDOUBLE,.. functions for copy_ND_array. - These cast functions probably work incorrectly in Numeric. 
- - -:Release: 2.13.175-1212 -:Date: 23 February 2002 - -:: - - *** Updated f2py for the latest numpy_distutils. - *** A nasty bug with multi-dimensional Fortran arrays is fixed - (intent(out) arrays had wrong shapes). (Thanks to Eric for - pointing out this bug). - *** F2PY_REPORT_ATEXIT is disabled by default for __WIN32__. - - -:Release: 2.11.174-1161 -:Date: 14 February 2002 - -:: - - *** Updated f2py for the latest numpy_distutils. - *** Fixed raise error when f2py missed -m flag. - *** Script name `f2py' now depends on the name of python executable. - For example, `python2.2 setup.py install' will create a f2py - script with a name `f2py2.2'. - *** Introduced 'callprotoargument' statement so that proper prototypes - can be declared. This is crucial when wrapping C functions as it - will fix segmentation faults when these wrappers use non-pointer - arguments (thanks to R. Clint Whaley for explaining this to me). - Note that in f2py generated wrapper, the prototypes have - the following forms: - extern #rtype# #fortranname#(#callprotoargument#); - or - extern #rtype# F_FUNC(#fortranname#,#FORTRANNAME#)(#callprotoargument#); - *** Cosmetic fixes to F2PY_REPORT_ATEXIT feature. - - -:Release: 2.11.174-1146 -:Date: 3 February 2002 - -:: - - *** Reviewed reference counting in call-back mechanism. Fixed few bugs. - *** Enabled callstatement for complex functions. - *** Fixed bug with initializing capi_overwrite_ - *** Introduced intent(overwrite) that is similar to intent(copy) but - has opposite effect. Renamed copy_=1 to overwrite_=0. - intent(overwrite) will make default overwrite_=1. - *** Introduced intent(in|inout,out,out=) attribute that renames - arguments name when returned. This renaming has effect only in - documentation strings. - *** Introduced 'callstatement' statement to pyf file syntax. With this - one can specify explicitly how wrapped function should be called - from the f2py generated module. WARNING: this is a dangerous feature - and should be used with care. It is introduced to provide a hack - to construct wrappers that may have very different signature - pattern from the wrapped function. Currently 'callstatement' can - be used only inside a subroutine or function block (it should be enough - though) and must be only in one continuous line. The syntax of the - statement is: callstatement ; - - -:Release: 2.11.174 -:Date: 18 January 2002 - -:: - - *** Fixed memory-leak for PyFortranObject. - *** Introduced extra keyword argument copy_ for intent(copy) - variables. It defaults to 1 and forces to make a copy for - intent(in) variables when passing on to wrapped functions (in case - they undesirably change the variable in-situ). - *** Introduced has_column_major_storage member function for all f2py - generated extension modules. It is equivalent to Python call - 'transpose(obj).iscontiguous()' but very efficient. - *** Introduced -DF2PY_REPORT_ATEXIT. If this is used when compiling, - a report is printed to stderr as python exits. The report includes - the following timings: - 1) time spent in all wrapped function calls; - 2) time spent in f2py generated interface around the wrapped - functions. This gives a hint whether one should worry - about storing data in proper order (C or Fortran). - 3) time spent in Python functions called by wrapped functions - through call-back interface. - 4) time spent in f2py generated call-back interface. - For now, -DF2PY_REPORT_ATEXIT is enabled by default. 
Use - -DF2PY_REPORT_ATEXIT_DISABLE to disable it (I am not sure if - Windows has needed tools, let me know). - Also, I appreciate if you could send me the output of 'F2PY - performance report' (with CPU and platform information) so that I - could optimize f2py generated interfaces for future releases. - *** Extension modules can be linked with dmalloc library. Use - -DDMALLOC when compiling. - *** Moved array_from_pyobj to fortranobject.c. - *** Usage of intent(inout) arguments is made more strict -- only - with proper type contiguous arrays are accepted. In general, - you should avoid using intent(inout) attribute as it makes - wrappers of C and Fortran functions asymmetric. I recommend using - intent(in,out) instead. - *** intent(..) has new keywords: copy,cache. - intent(copy,in) - forces a copy of an input argument; this - may be useful for cases where the wrapped function changes - the argument in situ and this may not be desired side effect. - Otherwise, it is safe to not use intent(copy) for the sake - of a better performance. - intent(cache,hide|optional) - just creates a junk of memory. - It does not care about proper storage order. Can be also - intent(in) but then the corresponding argument must be a - contiguous array with a proper elsize. - *** intent(c) can be used also for subroutine names so that - -DNO_APPEND_FORTRAN can be avoided for C functions. - - *** IMPORTANT BREAKING GOOD ... NEWS!!!: - - From now on you don't have to worry about the proper storage order - in multi-dimensional arrays that was earlier a real headache when - wrapping Fortran functions. Now f2py generated modules take care - of the proper conversations when needed. I have carefully designed - and optimized this interface to avoid any unnecessary memory usage - or copying of data. However, it is wise to use input arrays that - has proper storage order: for C arguments it is row-major and for - Fortran arguments it is column-major. But you don't need to worry - about that when developing your programs. The optimization of - initializing the program with proper data for possibly better - memory usage can be safely postponed until the program is working. - - This change also affects the signatures in .pyf files. If you have - created wrappers that take multi-dimensional arrays in arguments, - it is better to let f2py re-generate these files. Or you have to - manually do the following changes: reverse the axes indices in all - 'shape' macros. For example, if you have defined an array A(n,m) - and n=shape(A,1), m=shape(A,0) then you must change the last - statements to n=shape(A,0), m=shape(A,1). - - -:Release: 2.8.172 -:Date: 13 January 2002 - -:: - - *** Fixed -c process. Removed pyf_extensions function and pyf_file class. - *** Reorganized setup.py. It generates f2py or f2py.bat scripts - depending on the OS and the location of the python executable. - *** Started to use update_version from numpy_distutils that makes - f2py startup faster. As a side effect, the version number system - changed. - *** Introduced test-site/test_f2py2e.py script that runs all - tests. - *** Fixed global variables initialization problem in crackfortran - when run_main is called several times. - *** Added 'import Numeric' to C/API init function. - *** Fixed f2py.bat in setup.py. - *** Switched over to numpy_distutils and dropped fortran_support. - *** On Windows create f2py.bat file. - *** Introduced -c option: read fortran or pyf files, construct extension - modules, build, and save them to current directory. 
- In one word: do-it-all-in-one-call. - *** Introduced pyf_extensions(sources,f2py_opts) function. It simplifies - the extension building process considerably. Only for internal use. - *** Converted tests to use numpy_distutils in order to improve portability: - a,b,c - *** f2py2e.run_main() returns a pyf_file class instance containing - information about f2py generated files. - *** Introduced `--build-dir ' command line option. - *** Fixed setup.py for bdist_rpm command. - *** Added --numpy-setup command line option. - *** Fixed crackfortran that did not recognized capitalized type - specification with --no-lower flag. - *** `-h stdout' writes signature to stdout. - *** Fixed incorrect message for check() with empty name list. - - -:Release: 2.4.366 -:Date: 17 December 2001 - -:: - - *** Added command line option --[no-]manifest. - *** `make test' should run on Windows, but the results are not truthful. - *** Reorganized f2py2e.py a bit. Introduced run_main(comline_list) function - that can be useful when running f2py from another Python module. - *** Removed command line options -f77,-fix,-f90 as the file format - is determined from the extension of the fortran file - or from its header (first line starting with `!%' and containing keywords - free, fix, or f77). The later overrides the former one. - *** Introduced command line options --[no-]makefile,--[no-]latex-doc. - Users must explicitly use --makefile,--latex-doc if Makefile-, - module.tex is desired. --setup is default. Use --no-setup - to disable setup_.py generation. --overwrite-makefile - will set --makefile. - *** Added `f2py_rout_' to #capiname# in rules.py. - *** intent(...) statement with empty namelist forces intent(...) attribute for - all arguments. - *** Dropped DL_IMPORT and DL_EXPORT in fortranobject.h. - *** Added missing PyFortran_Type.ob_type initialization. - *** Added gcc-3.0 support. - *** Raising non-existing/broken Numeric as a FatalError exception. - *** Fixed Python 2.x specific += construct in fortran_support.py. - *** Fixed copy_ND_array for 1-rank arrays that used to call calloc(0,..) - and caused core dump with a non-gcc compiler (Thanks to Pierre Schnizer - for reporting this bug). - *** Fixed "warning: variable `..' might be clobbered by `longjmp' or `vfork'": - - Reorganized the structure of wrapper functions to get rid of - `goto capi_fail' statements that caused the above warning. - - -:Release: 2.3.343 -:Date: 12 December 2001 - -:: - - *** Issues with the Win32 support (thanks to Eric Jones and Tiffany Kamm): - - Using DL_EXPORT macro for init#modulename#. - - Changed PyObject_HEAD_INIT(&PyType_Type) to PyObject_HEAD_INIT(0). - - Initializing #name#_capi=NULL instead of Py_None in cb hooks. - *** Fixed some 'warning: function declaration isn't a prototype', mainly - in fortranobject.{c,h}. - *** Fixed 'warning: missing braces around initializer'. - *** Fixed reading a line containing only a label. - *** Fixed nonportable 'cp -fv' to shutil.copy in f2py2e.py. - *** Replaced PyEval_CallObject with PyObject_CallObject in cb_rules. - *** Replaced Py_DECREF with Py_XDECREF when freeing hidden arguments. - (Reason: Py_DECREF caused segfault when an error was raised) - *** Impl. support for `include "file"' (in addition to `include 'file'') - *** Fixed bugs (buildsetup.py missing in Makefile, in generated MANIFEST.in) - - -:Release: 2.3.327 -:Date: 4 December 2001 - -:: - - *** Sending out the third public release of f2py. - *** Support for Intel(R) Fortran Compiler (thanks to Patrick LeGresley). 
- *** Introduced `threadsafe' statement to pyf-files (or to be used with - the 'f2py' directive in fortran codes) to force - Py_BEGIN|END_ALLOW_THREADS block around the Fortran subroutine - calling statement in Python C/API. `threadsafe' statement has - an effect only inside a subroutine block. - *** Introduced `fortranname ' statement to be used only within - pyf-files. This is useful when the wrapper (Python C/API) function - has different name from the wrapped (Fortran) function. - *** Introduced `intent(c)' directive and statement. It is useful when - wrapping C functions. Use intent(c) for arguments that are - scalars (not pointers) or arrays (with row-ordering of elements). - - -:Release: 2.3.321 -:Date: 3 December 2001 - -:: - - *** f2py2e can be installed using distutils (run `python setup.py install'). - *** f2py builds setup_.py. Use --[no-]setup to control this - feature. setup_.py uses fortran_support module (from SciPy), - but for your convenience it is included also with f2py as an additional - package. Note that it has not as many compilers supported as with - using Makefile-, but new compilers should be added to - fortran_support module, not to f2py2e package. - *** Fixed some compiler warnings about else statements. - diff --git a/numpy/f2py/docs/OLDNEWS.txt b/numpy/f2py/docs/OLDNEWS.txt deleted file mode 100644 index 401d2dcee..000000000 --- a/numpy/f2py/docs/OLDNEWS.txt +++ /dev/null @@ -1,63 +0,0 @@ - -.. topic:: Old F2PY NEWS - - March 30, 2004 - F2PY bug fix release (version 2.39.235-1693). Two new command line switches: - ``--compiler`` and ``--include_paths``. Support for allocatable string arrays. - Callback arguments may now be arbitrary callable objects. Win32 installers - for F2PY and Scipy_core are provided. - `Differences with the previous release (version 2.37.235-1660)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.98&r2=1.87&f=h - - - March 9, 2004 - F2PY bug fix release (version 2.39.235-1660). - `Differences with the previous release (version 2.37.235-1644)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.87&r2=1.83&f=h - - February 24, 2004 - Latest F2PY release (version 2.39.235-1644). - Support for numpy_distutils 0.2.2 and up (e.g. compiler flags can be - changed via f2py command line options). Implemented support for - character arrays and arrays of strings (e.g. ``character*(*) a(m,..)``). - *Important bug fixes regarding complex arguments, upgrading is - highly recommended*. Documentation updates. - `Differences with the previous release (version 2.37.233-1545)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.83&r2=1.58&f=h - - September 11, 2003 - Latest F2PY release (version 2.37.233-1545). - New statements: ``pymethoddef`` and ``usercode`` in interface blocks. - New function: ``as_column_major_storage``. - New CPP macro: ``F2PY_REPORT_ON_ARRAY_COPY``. - Bug fixes. - `Differences with the previous release (version 2.35.229-1505)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.58&r2=1.49&f=h - - August 2, 2003 - Latest F2PY release (version 2.35.229-1505). - `Differences with the previous release (version 2.32.225-1419)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.49&r2=1.28&f=h - - April 2, 2003 - Initial support for Numarray_ (thanks to Todd Miller). - - December 8, 2002 - Sixth public release of F2PY (version 2.32.225-1419). 
Comes with - revised `F2PY Users Guide`__, `new testing site`__, lots of fixes - and other improvements, see `HISTORY.txt`_ for details. - - __ usersguide/index.html - __ TESTING.txt_ - -.. References - ========== - -.. _HISTORY.txt: HISTORY.html -.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _TESTING.txt: TESTING.html \ No newline at end of file diff --git a/numpy/f2py/docs/README.txt b/numpy/f2py/docs/README.txt deleted file mode 100644 index cec8a6ec0..000000000 --- a/numpy/f2py/docs/README.txt +++ /dev/null @@ -1,461 +0,0 @@ -.. -*- rest -*- - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - F2PY: Fortran to Python interface generator -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Author: Pearu Peterson -:License: NumPy License -:Web-site: http://cens.ioc.ee/projects/f2py2e/ -:Discussions to: `f2py-users mailing list`_ -:Documentation: `User's Guide`__, FAQ__ -:Platforms: All -:Date: $Date: 2005/01/30 18:54:53 $ - -.. _f2py-users mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/ -__ usersguide/index.html -__ FAQ.html - -------------------------------- - -.. topic:: NEWS!!! - - January 5, 2006 - - WARNING -- these notes are out of date! The package structure for NumPy and - SciPy has changed considerably. Much of this information is now incorrect. - - January 30, 2005 - - Latest F2PY release (version 2.45.241_1926). - New features: wrapping unsigned integers, support for ``.pyf.src`` template files, - callback arguments can now be CObjects, fortran objects, built-in functions. - Introduced ``intent(aux)`` attribute. Wrapped objects have ``_cpointer`` - attribute holding C pointer to wrapped functions or variables. - Many bug fixes and improvements, updated documentation. - `Differences with the previous release (version 2.43.239_1831)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.163&r2=1.137&f=h - - October 4, 2004 - F2PY bug fix release (version 2.43.239_1831). - Better support for 64-bit platforms. - Introduced ``--help-link`` and ``--link-`` options. - Bug fixes. - `Differences with the previous release (version 2.43.239_1806)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.137&r2=1.131&f=h - - September 25, 2004 - Latest F2PY release (version 2.43.239_1806). - Support for ``ENTRY`` statement. New attributes: - ``intent(inplace)``, ``intent(callback)``. Supports Numarray 1.1. - Introduced ``-*- fix -*-`` header content. Improved ``PARAMETER`` support. - Documentation updates. `Differences with the previous release - (version 2.39.235-1693)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.131&r2=1.98&f=h - - `History of NEWS`__ - - __ OLDNEWS.html - -------------------------------- - -.. Contents:: - -============== - Introduction -============== - -The purpose of the F2PY --*Fortran to Python interface generator*-- -project is to provide connection between Python_ and Fortran -languages. F2PY is a Python extension tool for creating Python C/API -modules from (handwritten or F2PY generated) signature files (or -directly from Fortran sources). The generated extension modules -facilitate: - -* Calling Fortran 77/90/95, Fortran 90/95 module, and C functions from - Python. - -* Accessing Fortran 77 ``COMMON`` blocks and Fortran 90/95 module - data (including allocatable arrays) from Python. - -* Calling Python functions from Fortran or C (call-backs). 
- -* Automatically handling the difference in the data storage order of - multi-dimensional Fortran and Numerical Python (i.e. C) arrays. - -In addition, F2PY can build the generated extension modules to shared -libraries with one command. F2PY uses the ``numpy_distutils`` module -from SciPy_ that supports number of major Fortran compilers. - -.. - (see `COMPILERS.txt`_ for more information). - -F2PY generated extension modules depend on NumPy_ package that -provides fast multi-dimensional array language facility to Python. - - ---------------- - Main features ---------------- - -Here follows a more detailed list of F2PY features: - -* F2PY scans real Fortran codes to produce the so-called signature - files (.pyf files). The signature files contain all the information - (function names, arguments and their types, etc.) that is needed to - construct Python bindings to Fortran (or C) functions. - - The syntax of signature files is borrowed from the - Fortran 90/95 language specification and has some F2PY specific - extensions. The signature files can be modified to dictate how - Fortran (or C) programs are called from Python: - - + F2PY solves dependencies between arguments (this is relevant for - the order of initializing variables in extension modules). - - + Arguments can be specified to be optional or hidden that - simplifies calling Fortran programs from Python considerably. - - + In principle, one can design any Python signature for a given - Fortran function, e.g. change the order arguments, introduce - auxiliary arguments, hide the arguments, process the arguments - before passing to Fortran, return arguments as output of F2PY - generated functions, etc. - -* F2PY automatically generates __doc__ strings (and optionally LaTeX - documentation) for extension modules. - -* F2PY generated functions accept arbitrary (but sensible) Python - objects as arguments. The F2PY interface automatically takes care of - type-casting and handling of non-contiguous arrays. - -* The following Fortran constructs are recognized by F2PY: - - + All basic Fortran types:: - - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ] - integer*([ -1 | -2 | -4 | -8 ]) - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision - complex[ | *8 | *16 | *32 ] - - Negative ``integer`` kinds are used to wrap unsigned integers. - - + Multi-dimensional arrays of all basic types with the following - dimension specifications:: - - | : | * | : - - + Attributes and statements:: - - intent([ in | inout | out | hide | in,out | inout,out | c | - copy | cache | callback | inplace | aux ]) - dimension() - common, parameter - allocatable - optional, required, external - depend([]) - check([]) - note() - usercode, callstatement, callprotoargument, threadsafe, fortranname - pymethoddef - entry - -* Because there are only little (and easily handleable) differences - between calling C and Fortran functions from F2PY generated - extension modules, then F2PY is also well suited for wrapping C - libraries to Python. - -* Practice has shown that F2PY generated interfaces (to C or Fortran - functions) are less error prone and even more efficient than - handwritten extension modules. The F2PY generated interfaces are - easy to maintain and any future optimization of F2PY generated - interfaces transparently apply to extension modules by just - regenerating them with the latest version of F2PY. 
- -* `F2PY Users Guide and Reference Manual`_ - - -=============== - Prerequisites -=============== - -F2PY requires the following software installed: - -* Python_ (versions 1.5.2 or later; 2.1 and up are recommended). - You must have python-dev package installed. -* NumPy_ (versions 13 or later; 20.x, 21.x, 22.x, 23.x are recommended) -* Numarray_ (version 0.9 and up), optional, partial support. -* Scipy_distutils (version 0.2.2 and up are recommended) from SciPy_ - project. Get it from Scipy CVS or download it below. - -Python 1.x users also need distutils_. - -Of course, to build extension modules, you'll need also working C -and/or Fortran compilers installed. - -========== - Download -========== - -You can download the sources for the latest F2PY and numpy_distutils -releases as: - -* `2.x`__/`F2PY-2-latest.tar.gz`__ -* `2.x`__/`numpy_distutils-latest.tar.gz`__ - -Windows users might be interested in Win32 installer for F2PY and -Scipy_distutils (these installers are built using Python 2.3): - -* `2.x`__/`F2PY-2-latest.win32.exe`__ -* `2.x`__/`numpy_distutils-latest.win32.exe`__ - -Older releases are also available in the directories -`rel-0.x`__, `rel-1.x`__, `rel-2.x`__, `rel-3.x`__, `rel-4.x`__, `rel-5.x`__, -if you need them. - -.. __: 2.x/ -.. __: 2.x/F2PY-2-latest.tar.gz -.. __: 2.x/ -.. __: 2.x/numpy_distutils-latest.tar.gz -.. __: 2.x/ -.. __: 2.x/F2PY-2-latest.win32.exe -.. __: 2.x/ -.. __: 2.x/numpy_distutils-latest.win32.exe -.. __: rel-0.x -.. __: rel-1.x -.. __: rel-2.x -.. __: rel-3.x -.. __: rel-4.x -.. __: rel-5.x - -Development version of F2PY from CVS is available as `f2py2e.tar.gz`__. - -__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/f2py2e.tar.gz?tarball=1 - -Debian Sid users can simply install ``python-f2py`` package. - -============== - Installation -============== - -Unpack the source file, change to directrory ``F2PY-?-???/`` and run -(you may need to become a root):: - - python setup.py install - -The F2PY installation installs a Python package ``f2py2e`` to your -Python ``site-packages`` directory and a script ``f2py`` to your -Python executable path. - -See also Installation__ section in `F2PY FAQ`_. - -.. __: FAQ.html#installation - -Similarly, to install ``numpy_distutils``, unpack its tar-ball and run:: - - python setup.py install - -======= - Usage -======= - -To check if F2PY is installed correctly, run -:: - - f2py - -without any arguments. This should print out the usage information of -the ``f2py`` program. - -Next, try out the following three steps: - -1) Create a Fortran file `hello.f`__ that contains:: - - C File hello.f - subroutine foo (a) - integer a - print*, "Hello from Fortran!" - print*, "a=",a - end - -__ hello.f - -2) Run - - :: - - f2py -c -m hello hello.f - - This will build an extension module ``hello.so`` (or ``hello.sl``, - or ``hello.pyd``, etc. depending on your platform) into the current - directory. - -3) Now in Python try:: - - >>> import hello - >>> print hello.__doc__ - >>> print hello.foo.__doc__ - >>> hello.foo(4) - Hello from Fortran! - a= 4 - >>> - -If the above works, then you can try out more thorough -`F2PY unit tests`__ and read the `F2PY Users Guide and Reference Manual`_. - -__ FAQ.html#q-how-to-test-if-f2py-is-working-correctly - -=============== - Documentation -=============== - -The documentation of the F2PY project is collected in ``f2py2e/docs/`` -directory. It contains the following documents: - -`README.txt`_ (in CVS__) - The first thing to read about F2PY -- this document. 
- -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/README.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`usersguide/index.txt`_, `usersguide/f2py_usersguide.pdf`_ - F2PY Users Guide and Reference Manual. Contains lots of examples. - -`FAQ.txt`_ (in CVS__) - F2PY Frequently Asked Questions. - -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/FAQ.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`TESTING.txt`_ (in CVS__) - About F2PY testing site. What tests are available and how to run them. - -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/TESTING.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`HISTORY.txt`_ (in CVS__) - A list of latest changes in F2PY. This is the most up-to-date - document on F2PY. - -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`THANKS.txt`_ - Acknowledgments. - -.. - `COMPILERS.txt`_ - Compiler and platform specific notes. - -=============== - Mailing list -=============== - -A mailing list f2py-users@cens.ioc.ee is open for F2PY releated -discussion/questions/etc. - -* `Subscribe..`__ -* `Archives..`__ - -__ http://cens.ioc.ee/mailman/listinfo/f2py-users -__ http://cens.ioc.ee/pipermail/f2py-users - - -===== - CVS -===== - -F2PY is being developed under CVS_. The CVS version of F2PY can be -obtained as follows: - -1) First you need to login (the password is ``guest``):: - - cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login - -2) and then do the checkout:: - - cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e - -3) You can update your local F2PY tree ``f2py2e/`` by executing:: - - cvs -z6 update -P -d - -You can browse the `F2PY CVS`_ repository. - -=============== - Contributions -=============== - -* `A short introduction to F2PY`__ by Pierre Schnizer. - -* `F2PY notes`__ by Fernando Perez. - -* `Debian packages of F2PY`__ by José Fonseca. [OBSOLETE, Debian Sid - ships python-f2py package] - -__ http://fubphpc.tu-graz.ac.at/~pierre/f2py_tutorial.tar.gz -__ http://cens.ioc.ee/pipermail/f2py-users/2003-April/000472.html -__ http://jrfonseca.dyndns.org/debian/ - - -=============== - Related sites -=============== - -* `Numerical Python`_ -- adds a fast array facility to the Python language. -* Pyfort_ -- A Python-Fortran connection tool. -* SciPy_ -- An open source library of scientific tools for Python. -* `Scientific Python`_ -- A collection of Python modules that are - useful for scientific computing. -* `The Fortran Company`_ -- A place to find products, services, and general - information related to the Fortran programming language. -* `American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978`__ -* `J3`_ -- The US Fortran standards committee. -* SWIG_ -- A software development tool that connects programs written - in C and C++ with a variety of high-level programming languages. -* `Mathtools.net`_ -- A technical computing portal for all scientific - and engineering needs. - -.. __: http://www.fortran.com/fortran/F77_std/rjcnf.html - -.. References - ========== - - -.. _F2PY Users Guide and Reference Manual: usersguide/index.html -.. _usersguide/index.txt: usersguide/index.html -.. _usersguide/f2py_usersguide.pdf: usersguide/f2py_usersguide.pdf -.. _README.txt: README.html -.. _COMPILERS.txt: COMPILERS.html -.. _F2PY FAQ: -.. _FAQ.txt: FAQ.html -.. _HISTORY.txt: HISTORY.html -.. _HISTORY.txt from CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup -.. 
_THANKS.txt: THANKS.html -.. _TESTING.txt: TESTING.html -.. _F2PY CVS2: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/ -.. _F2PY CVS: http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/ - -.. _CVS: http://www.cvshome.org/ -.. _Python: http://www.python.org/ -.. _SciPy: http://www.numpy.org/ -.. _NumPy: http://www.numpy.org/ -.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _docutils: http://docutils.sourceforge.net/ -.. _distutils: http://www.python.org/sigs/distutils-sig/ -.. _Numerical Python: http://www.numpy.org/ -.. _Pyfort: http://pyfortran.sourceforge.net/ -.. _Scientific Python: - http://starship.python.net/crew/hinsen/scientific.html -.. _The Fortran Company: http://www.fortran.com/fortran/ -.. _J3: http://www.j3-fortran.org/ -.. _Mathtools.net: http://www.mathtools.net/ -.. _SWIG: http://www.swig.org/ - -.. - Local Variables: - mode: indented-text - indent-tabs-mode: nil - sentence-end-double-space: t - fill-column: 70 - End: diff --git a/numpy/f2py/docs/TESTING.txt b/numpy/f2py/docs/TESTING.txt deleted file mode 100644 index d90521175..000000000 --- a/numpy/f2py/docs/TESTING.txt +++ /dev/null @@ -1,108 +0,0 @@ - -======================================================= - F2PY unit testing site -======================================================= - -.. Contents:: - -Tests ------ - -* To run all F2PY unit tests in one command:: - - cd tests - python run_all.py [] - - For example:: - - localhost:~/src_cvs/f2py2e/tests$ python2.2 run_all.py 100 --quiet - ********************************************** - Running '/usr/bin/python2.2 f77/return_integer.py 100 --quiet' - run 1000 tests in 1.87 seconds - initial virtual memory size: 3952640 bytes - current virtual memory size: 3952640 bytes - ok - ********************************************** - Running '/usr/bin/python2.2 f77/return_logical.py 100 --quiet' - run 1000 tests in 1.47 seconds - initial virtual memory size: 3952640 bytes - current virtual memory size: 3952640 bytes - ok - ... - - If some tests fail, try to run the failing tests separately (without - the ``--quiet`` option) as described below to get more information - about the failure. - -* Test intent(in), intent(out) scalar arguments, - scalars returned by F77 functions - and F90 module functions:: - - tests/f77/return_integer.py - tests/f77/return_real.py - tests/f77/return_logical.py - tests/f77/return_complex.py - tests/f77/return_character.py - tests/f90/return_integer.py - tests/f90/return_real.py - tests/f90/return_logical.py - tests/f90/return_complex.py - tests/f90/return_character.py - - Change to tests/ directory and run:: - - python f77/return_.py [] - python f90/return_.py [] - - where ```` is integer, real, logical, complex, or character. - Test scripts options are described below. - - A test is considered succesful if the last printed line is "ok". - - If you get import errors like:: - - ImportError: No module named f77_ext_return_integer - - but ``f77_ext_return_integer.so`` exists in the current directory then - it means that the current directory is not included in to `sys.path` - in your Python installation. As a fix, prepend ``.`` to ``PYTHONPATH`` - environment variable and rerun the tests. For example:: - - PYTHONPATH=. 
python f77/return_integer.py - -* Test mixing Fortran 77, Fortran 90 fixed and free format codes:: - - tests/mixed/run.py - -* Test basic callback hooks:: - - tests/f77/callback.py - -Options -------- - -You may want to use the following options when running the test -scripts: - -```` - Run tests ```` times. Useful for detecting memory leaks. Under - Linux tests scripts output virtual memory size state of the process - before and after calling the wrapped functions. - -``--quiet`` - Suppress all messages. On success only "ok" should be displayed. - -``--fcompiler=`` - Use:: - - f2py -c --help-fcompiler - - to find out what compilers are available (or more precisely, which - ones are recognized by ``numpy_distutils``). - -Reporting failures ------------------- - -XXX: (1) make sure that failures are due to f2py and (2) send full -stdout/stderr messages to me. Also add compiler,python,platform -information. diff --git a/numpy/f2py/docs/THANKS.txt b/numpy/f2py/docs/THANKS.txt deleted file mode 100644 index 0a3f0b9d6..000000000 --- a/numpy/f2py/docs/THANKS.txt +++ /dev/null @@ -1,63 +0,0 @@ - -================= - Acknowledgments -================= - -F2PY__ is an open source Python package and command line tool developed and -maintained by Pearu Peterson (me__). - -.. __: http://cens.ioc.ee/projects/f2py2e/ -.. __: http://cens.ioc.ee/~pearu/ - -Many people have contributed to the F2PY project in terms of interest, -encouragement, suggestions, criticism, bug reports, code -contributions, and keeping me busy with developing F2PY. For all that -I thank - - James Amundson, John Barnard, David Beazley, Frank Bertoldi, Roman - Bertle, James Boyle, Moritz Braun, Rolv Erlend Bredesen, John - Chaffer, Fred Clare, Adam Collard, Ben Cornett, Jose L Gomez Dans, - Jaime D. Perea Duarte, Paul F Dubois, Thilo Ernst, Bonilla Fabian, - Martin Gelfand, Eduardo A. Gonzalez, Siegfried Gonzi, Bernhard - Gschaider, Charles Doutriaux, Jeff Hagelberg, Janko Hauser, Thomas - Hauser, Heiko Henkelmann, William Henney, Yueqiang Huang, Asim - Hussain, Berthold Höllmann, Vladimir Janku, Henk Jansen, Curtis - Jensen, Eric Jones, Tiffany Kamm, Andrey Khavryuchenko, Greg - Kochanski, Jochen Küpper, Simon Lacoste-Julien, Tim Lahey, Hans - Petter Langtangen, Jeff Layton, Matthew Lewis, Patrick LeGresley, - Joaquim R R A Martins, Paul Magwene Lionel Maziere, Craig McNeile, - Todd Miller, David C. Morrill, Dirk Muders, Kevin Mueller, Andrew - Mullhaupt, Vijayendra Munikoti, Travis Oliphant, Kevin O'Mara, Arno - Paehler, Fernando Perez, Didrik Pinte, Todd Alan Pitts, Prabhu - Ramachandran, Brad Reisfeld, Steve M. Robbins, Theresa Robinson, - Pedro Rodrigues, Les Schaffer, Christoph Scheurer, Herb Schilling, - Pierre Schnizer, Kevin Smith, Paulo Teotonio Sobrinho, José Rui - Faustino de Sousa, Andrew Swan, Dustin Tang, Charlie Taylor, Paul le - Texier, Michael Tiller, Semen Trygubenko, Ravi C Venkatesan, Peter - Verveer, Nils Wagner, R. Clint Whaley, Erik Wilsher, Martin - Wiechert, Gilles Zerah, SungPil Yoon. - -(This list may not be complete. Please forgive me if I have left you -out and let me know, I'll add your name.) - -Special thanks are due to ... - -Eric Jones - he and Travis O. are responsible for starting the -numpy_distutils project that allowed to move most of the platform and -compiler specific codes out from F2PY. This simplified maintaining the -F2PY project considerably. - -Joaquim R R A Martins - he made possible for me to test F2PY on IRIX64 -platform. 
He also presented our paper about F2PY in the 9th Python -Conference that I planned to attend but had to cancel in very last -minutes. - -Travis Oliphant - his knowledge and experience on Numerical Python -C/API has been invaluable in early development of the F2PY program. -His major contributions are call-back mechanism and copying N-D arrays -of arbitrary types. - -Todd Miller - he is responsible for Numarray support in F2PY. - -Thanks! - Pearu diff --git a/numpy/f2py/docs/default.css b/numpy/f2py/docs/default.css deleted file mode 100644 index 9289e2826..000000000 --- a/numpy/f2py/docs/default.css +++ /dev/null @@ -1,180 +0,0 @@ -/* -:Author: David Goodger -:Contact: goodger@users.sourceforge.net -:date: $Date: 2002/08/01 20:52:44 $ -:version: $Revision: 1.1 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ - -body { - background: #FFFFFF ; - color: #000000 -} - -a.footnote-reference { - font-size: smaller ; - vertical-align: super } - -a.target { - color: blue } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.first { - margin-top: 0 } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.field-argument { - font-style: italic } - -span.interpreted { - font-family: sans-serif } - -span.option-argument { - font-style: italic } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - 
-table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: baseline } - -td.docinfo-name { - font-weight: bold ; - text-align: right } - -td.field-name { - font-weight: bold } diff --git a/numpy/f2py/docs/docutils.conf b/numpy/f2py/docs/docutils.conf deleted file mode 100644 index 4e5a8425b..000000000 --- a/numpy/f2py/docs/docutils.conf +++ /dev/null @@ -1,16 +0,0 @@ -[general] - -# These entries affect all processing: -#source-link: 1 -datestamp: %Y-%m-%d %H:%M UTC -generator: 1 - -# These entries affect HTML output: -#stylesheet-path: pearu_style.css -output-encoding: latin-1 - -# These entries affect reStructuredText-style PEPs: -#pep-template: pep-html-template -#pep-stylesheet-path: stylesheets/pep.css -#python-home: http://www.python.org -#no-random: 1 diff --git a/numpy/f2py/docs/hello.f b/numpy/f2py/docs/hello.f deleted file mode 100644 index 3e0dc6d21..000000000 --- a/numpy/f2py/docs/hello.f +++ /dev/null @@ -1,7 +0,0 @@ -C File hello.f - subroutine foo (a) - integer a - print*, "Hello from Fortran!" - print*, "a=",a - end - diff --git a/numpy/f2py/docs/pyforttest.pyf b/numpy/f2py/docs/pyforttest.pyf deleted file mode 100644 index 79a9ae205..000000000 --- a/numpy/f2py/docs/pyforttest.pyf +++ /dev/null @@ -1,5 +0,0 @@ -subroutine foo(a,m,n) -integer m = size(a,1) -integer n = size(a,2) -real, intent(inout) :: a(m,n) -end subroutine foo diff --git a/numpy/f2py/docs/pytest.py b/numpy/f2py/docs/pytest.py deleted file mode 100644 index abd3487df..000000000 --- a/numpy/f2py/docs/pytest.py +++ /dev/null @@ -1,10 +0,0 @@ -#File: pytest.py -import Numeric -def foo(a): - a = Numeric.array(a) - m,n = a.shape - for i in range(m): - for j in range(n): - a[i,j] = a[i,j] + 10*(i+1) + (j+1) - return a -#eof diff --git a/numpy/f2py/docs/simple.f b/numpy/f2py/docs/simple.f deleted file mode 100644 index ba468a509..000000000 --- a/numpy/f2py/docs/simple.f +++ /dev/null @@ -1,13 +0,0 @@ -cFile: simple.f - subroutine foo(a,m,n) - integer m,n,i,j - real a(m,n) -cf2py intent(in,out) a -cf2py intent(hide) m,n - do i=1,m - do j=1,n - a(i,j) = a(i,j) + 10*i+j - enddo - enddo - end -cEOF diff --git a/numpy/f2py/docs/simple_session.dat b/numpy/f2py/docs/simple_session.dat deleted file mode 100644 index 10d9dc962..000000000 --- a/numpy/f2py/docs/simple_session.dat +++ /dev/null @@ -1,51 +0,0 @@ ->>> import pytest ->>> import f2pytest ->>> import pyforttest ->>> print f2pytest.foo.__doc__ -foo - Function signature: - a = foo(a) -Required arguments: - a : input rank-2 array('f') with bounds (m,n) -Return objects: - a : rank-2 array('f') with bounds (m,n) - ->>> print pyforttest.foo.__doc__ -foo(a) - ->>> pytest.foo([[1,2],[3,4]]) -array([[12, 14], - [24, 26]]) ->>> f2pytest.foo([[1,2],[3,4]]) # F2PY can handle arbitrary input sequences -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> pyforttest.foo([[1,2],[3,4]]) -Traceback (most recent call last): - File "", line 1, in ? -pyforttest.error: foo, argument A: Argument intent(inout) must be an array. 
- ->>> import Numeric ->>> a=Numeric.array([[1,2],[3,4]],'f') ->>> f2pytest.foo(a) -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> a # F2PY makes a copy when input array is not Fortran contiguous -array([[ 1., 2.], - [ 3., 4.]],'f') ->>> a=Numeric.transpose(Numeric.array([[1,3],[2,4]],'f')) ->>> a -array([[ 1., 2.], - [ 3., 4.]],'f') ->>> f2pytest.foo(a) -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> a # F2PY passes Fortran contiguous input array directly to Fortran -array([[ 12., 14.], - [ 24., 26.]],'f') -# See intent(copy), intent(overwrite), intent(inplace), intent(inout) -# attributes documentation to enhance the above behavior. - ->>> a=Numeric.array([[1,2],[3,4]],'f') ->>> pyforttest.foo(a) ->>> a # Huh? Pyfort 8.5 gives wrong results.. -array([[ 12., 23.], - [ 15., 26.]],'f') diff --git a/numpy/f2py/docs/usersguide/allocarr.f90 b/numpy/f2py/docs/usersguide/allocarr.f90 deleted file mode 100644 index e0d6c2ec8..000000000 --- a/numpy/f2py/docs/usersguide/allocarr.f90 +++ /dev/null @@ -1,16 +0,0 @@ -module mod - real, allocatable, dimension(:,:) :: b -contains - subroutine foo - integer k - if (allocated(b)) then - print*, "b=[" - do k = 1,size(b,1) - print*, b(k,1:size(b,2)) - enddo - print*, "]" - else - print*, "b is not allocated" - endif - end subroutine foo -end module mod diff --git a/numpy/f2py/docs/usersguide/allocarr_session.dat b/numpy/f2py/docs/usersguide/allocarr_session.dat deleted file mode 100644 index fc91959b7..000000000 --- a/numpy/f2py/docs/usersguide/allocarr_session.dat +++ /dev/null @@ -1,27 +0,0 @@ ->>> import allocarr ->>> print allocarr.mod.__doc__ -b - 'f'-array(-1,-1), not allocated -foo - Function signature: - foo() - ->>> allocarr.mod.foo() - b is not allocated ->>> allocarr.mod.b = [[1,2,3],[4,5,6]] # allocate/initialize b ->>> allocarr.mod.foo() - b=[ - 1.000000 2.000000 3.000000 - 4.000000 5.000000 6.000000 - ] ->>> allocarr.mod.b # b is Fortran-contiguous -array([[ 1., 2., 3.], - [ 4., 5., 6.]],'f') ->>> allocarr.mod.b = [[1,2,3],[4,5,6],[7,8,9]] # reallocate/initialize b ->>> allocarr.mod.foo() - b=[ - 1.000000 2.000000 3.000000 - 4.000000 5.000000 6.000000 - 7.000000 8.000000 9.000000 - ] ->>> allocarr.mod.b = None # deallocate array ->>> allocarr.mod.foo() - b is not allocated diff --git a/numpy/f2py/docs/usersguide/array.f b/numpy/f2py/docs/usersguide/array.f deleted file mode 100644 index ef20c9c20..000000000 --- a/numpy/f2py/docs/usersguide/array.f +++ /dev/null @@ -1,17 +0,0 @@ -C FILE: ARRAY.F - SUBROUTINE FOO(A,N,M) -C -C INCREMENT THE FIRST ROW AND DECREMENT THE FIRST COLUMN OF A -C - INTEGER N,M,I,J - REAL*8 A(N,M) -Cf2py intent(in,out,copy) a -Cf2py integer intent(hide),depend(a) :: n=shape(a,0), m=shape(a,1) - DO J=1,M - A(1,J) = A(1,J) + 1D0 - ENDDO - DO I=1,N - A(I,1) = A(I,1) - 1D0 - ENDDO - END -C END OF FILE ARRAY.F diff --git a/numpy/f2py/docs/usersguide/array_session.dat b/numpy/f2py/docs/usersguide/array_session.dat deleted file mode 100644 index f64933482..000000000 --- a/numpy/f2py/docs/usersguide/array_session.dat +++ /dev/null @@ -1,65 +0,0 @@ ->>> import arr ->>> from Numeric import array ->>> print arr.foo.__doc__ -foo - Function signature: - a = foo(a,[overwrite_a]) -Required arguments: - a : input rank-2 array('d') with bounds (n,m) -Optional arguments: - overwrite_a := 0 input int -Return objects: - a : rank-2 array('d') with bounds (n,m) - ->>> a=arr.foo([[1,2,3], -... [4,5,6]]) -copied an array using PyArray_CopyFromObject: size=6, elsize=8 ->>> print a -[[ 1. 3. 4.] - [ 3. 5. 
6.]] ->>> a.iscontiguous(), arr.has_column_major_storage(a) -(0, 1) ->>> b=arr.foo(a) # even if a is proper-contiguous -... # and has proper type, a copy is made -... # forced by intent(copy) attribute -... # to preserve its original contents -... -copied an array using copy_ND_array: size=6, elsize=8 ->>> print a -[[ 1. 3. 4.] - [ 3. 5. 6.]] ->>> print b -[[ 1. 4. 5.] - [ 2. 5. 6.]] ->>> b=arr.foo(a,overwrite_a=1) # a is passed directly to Fortran -... # routine and its contents is discarded -... ->>> print a -[[ 1. 4. 5.] - [ 2. 5. 6.]] ->>> print b -[[ 1. 4. 5.] - [ 2. 5. 6.]] ->>> a is b # a and b are acctually the same objects -1 ->>> print arr.foo([1,2,3]) # different rank arrays are allowed -copied an array using PyArray_CopyFromObject: size=3, elsize=8 -[ 1. 1. 2.] ->>> print arr.foo([[[1],[2],[3]]]) -copied an array using PyArray_CopyFromObject: size=3, elsize=8 -[ [[ 1.] - [ 3.] - [ 4.]]] ->>> ->>> # Creating arrays with column major data storage order: -... ->>> s = arr.as_column_major_storage(array([[1,2,3],[4,5,6]])) -copied an array using copy_ND_array: size=6, elsize=4 ->>> arr.has_column_major_storage(s) -1 ->>> print s -[[1 2 3] - [4 5 6]] ->>> s2 = arr.as_column_major_storage(s) ->>> s2 is s # an array with column major storage order - # is returned immediately -1 \ No newline at end of file diff --git a/numpy/f2py/docs/usersguide/calculate.f b/numpy/f2py/docs/usersguide/calculate.f deleted file mode 100644 index 1cda1c8dd..000000000 --- a/numpy/f2py/docs/usersguide/calculate.f +++ /dev/null @@ -1,14 +0,0 @@ - subroutine calculate(x,n) -cf2py intent(callback) func - external func -c The following lines define the signature of func for F2PY: -cf2py real*8 y -cf2py y = func(y) -c -cf2py intent(in,out,copy) x - integer n,i - real*8 x(n) - do i=1,n - x(i) = func(x(i)) - end do - end diff --git a/numpy/f2py/docs/usersguide/calculate_session.dat b/numpy/f2py/docs/usersguide/calculate_session.dat deleted file mode 100644 index 2fe64f522..000000000 --- a/numpy/f2py/docs/usersguide/calculate_session.dat +++ /dev/null @@ -1,6 +0,0 @@ ->>> import foo ->>> foo.calculate(range(5), lambda x: x*x) -array([ 0., 1., 4., 9., 16.]) ->>> import math ->>> foo.calculate(range(5), math.exp) -array([ 1. , 2.71828175, 7.38905621, 20.08553696, 54.59814835]) diff --git a/numpy/f2py/docs/usersguide/callback.f b/numpy/f2py/docs/usersguide/callback.f deleted file mode 100644 index 6e9bfb920..000000000 --- a/numpy/f2py/docs/usersguide/callback.f +++ /dev/null @@ -1,12 +0,0 @@ -C FILE: CALLBACK.F - SUBROUTINE FOO(FUN,R) - EXTERNAL FUN - INTEGER I - REAL*8 R -Cf2py intent(out) r - R = 0D0 - DO I=-5,5 - R = R + FUN(I) - ENDDO - END -C END OF FILE CALLBACK.F diff --git a/numpy/f2py/docs/usersguide/callback2.pyf b/numpy/f2py/docs/usersguide/callback2.pyf deleted file mode 100644 index 3d77eed24..000000000 --- a/numpy/f2py/docs/usersguide/callback2.pyf +++ /dev/null @@ -1,19 +0,0 @@ -! 
-*- f90 -*- -python module __user__routines - interface - function fun(i) result (r) - integer :: i - real*8 :: r - end function fun - end interface -end python module __user__routines - -python module callback2 - interface - subroutine foo(f,r) - use __user__routines, f=>fun - external f - real*8 intent(out) :: r - end subroutine foo - end interface -end python module callback2 diff --git a/numpy/f2py/docs/usersguide/callback_session.dat b/numpy/f2py/docs/usersguide/callback_session.dat deleted file mode 100644 index cd2f26084..000000000 --- a/numpy/f2py/docs/usersguide/callback_session.dat +++ /dev/null @@ -1,23 +0,0 @@ ->>> import callback ->>> print callback.foo.__doc__ -foo - Function signature: - r = foo(fun,[fun_extra_args]) -Required arguments: - fun : call-back function -Optional arguments: - fun_extra_args := () input tuple -Return objects: - r : float -Call-back functions: - def fun(i): return r - Required arguments: - i : input int - Return objects: - r : float - ->>> def f(i): return i*i -... ->>> print callback.foo(f) -110.0 ->>> print callback.foo(lambda i:1) -11.0 diff --git a/numpy/f2py/docs/usersguide/common.f b/numpy/f2py/docs/usersguide/common.f deleted file mode 100644 index b098ab20c..000000000 --- a/numpy/f2py/docs/usersguide/common.f +++ /dev/null @@ -1,13 +0,0 @@ -C FILE: COMMON.F - SUBROUTINE FOO - INTEGER I,X - REAL A - COMMON /DATA/ I,X(4),A(2,3) - PRINT*, "I=",I - PRINT*, "X=[",X,"]" - PRINT*, "A=[" - PRINT*, "[",A(1,1),",",A(1,2),",",A(1,3),"]" - PRINT*, "[",A(2,1),",",A(2,2),",",A(2,3),"]" - PRINT*, "]" - END -C END OF COMMON.F diff --git a/numpy/f2py/docs/usersguide/common_session.dat b/numpy/f2py/docs/usersguide/common_session.dat deleted file mode 100644 index 846fdaa07..000000000 --- a/numpy/f2py/docs/usersguide/common_session.dat +++ /dev/null @@ -1,27 +0,0 @@ ->>> import common ->>> print common.data.__doc__ -i - 'i'-scalar -x - 'i'-array(4) -a - 'f'-array(2,3) - ->>> common.data.i = 5 ->>> common.data.x[1] = 2 ->>> common.data.a = [[1,2,3],[4,5,6]] ->>> common.foo() - I= 5 - X=[ 0 2 0 0] - A=[ - [ 1., 2., 3.] - [ 4., 5., 6.] - ] ->>> common.data.a[1] = 45 ->>> common.foo() - I= 5 - X=[ 0 2 0 0] - A=[ - [ 1., 2., 3.] - [ 45., 45., 45.] - ] ->>> common.data.a # a is Fortran-contiguous -array([[ 1., 2., 3.], - [ 45., 45., 45.]],'f') diff --git a/numpy/f2py/docs/usersguide/compile_session.dat b/numpy/f2py/docs/usersguide/compile_session.dat deleted file mode 100644 index 0d8408198..000000000 --- a/numpy/f2py/docs/usersguide/compile_session.dat +++ /dev/null @@ -1,11 +0,0 @@ ->>> import f2py2e ->>> fsource = ''' -... subroutine foo -... print*, "Hello world!" -... end -... ''' ->>> f2py2e.compile(fsource,modulename='hello',verbose=0) -0 ->>> import hello ->>> hello.foo() - Hello world! diff --git a/numpy/f2py/docs/usersguide/default.css b/numpy/f2py/docs/usersguide/default.css deleted file mode 100644 index bb7226161..000000000 --- a/numpy/f2py/docs/usersguide/default.css +++ /dev/null @@ -1,180 +0,0 @@ -/* -:Author: David Goodger -:Contact: goodger@users.sourceforge.net -:date: $Date: 2002/12/07 23:59:33 $ -:version: $Revision: 1.2 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. 
-*/ - -body { - background: #FFFFFF ; - color: #000000 -} - -a.footnote-reference { - font-size: smaller ; - vertical-align: super } - -a.target { - color: blue } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.first { - margin-top: 0 } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #ee9e9e } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.field-argument { - font-style: italic } - -span.interpreted { - font-family: sans-serif } - -span.option-argument { - font-style: italic } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: baseline } - -td.docinfo-name { - font-weight: bold ; - text-align: right } - -td.field-name { - font-weight: bold } diff --git a/numpy/f2py/docs/usersguide/docutils.conf b/numpy/f2py/docs/usersguide/docutils.conf deleted file mode 100644 index b772fd137..000000000 --- a/numpy/f2py/docs/usersguide/docutils.conf +++ /dev/null @@ -1,16 +0,0 @@ -[general] - -# These entries affect all processing: -#source-link: 1 -datestamp: %Y-%m-%d %H:%M UTC -generator: 1 - -# These entries affect HTML output: -#stylesheet-path: f2py_style.css -output-encoding: latin-1 - -# These entries affect reStructuredText-style PEPs: -#pep-template: pep-html-template -#pep-stylesheet-path: stylesheets/pep.css -#python-home: 
http://www.python.org -#no-random: 1 diff --git a/numpy/f2py/docs/usersguide/extcallback.f b/numpy/f2py/docs/usersguide/extcallback.f deleted file mode 100644 index 9a800628e..000000000 --- a/numpy/f2py/docs/usersguide/extcallback.f +++ /dev/null @@ -1,14 +0,0 @@ - subroutine f1() - print *, "in f1, calling f2 twice.." - call f2() - call f2() - return - end - - subroutine f2() -cf2py intent(callback, hide) fpy - external fpy - print *, "in f2, calling f2py.." - call fpy() - return - end diff --git a/numpy/f2py/docs/usersguide/extcallback_session.dat b/numpy/f2py/docs/usersguide/extcallback_session.dat deleted file mode 100644 index c22935ea0..000000000 --- a/numpy/f2py/docs/usersguide/extcallback_session.dat +++ /dev/null @@ -1,19 +0,0 @@ ->>> import pfromf ->>> pfromf.f2() -Traceback (most recent call last): - File "", line 1, in ? -pfromf.error: Callback fpy not defined (as an argument or module pfromf attribute). - ->>> def f(): print "python f" -... ->>> pfromf.fpy = f ->>> pfromf.f2() - in f2, calling f2py.. -python f ->>> pfromf.f1() - in f1, calling f2 twice.. - in f2, calling f2py.. -python f - in f2, calling f2py.. -python f ->>> \ No newline at end of file diff --git a/numpy/f2py/docs/usersguide/fib1.f b/numpy/f2py/docs/usersguide/fib1.f deleted file mode 100644 index cfbb1eea0..000000000 --- a/numpy/f2py/docs/usersguide/fib1.f +++ /dev/null @@ -1,18 +0,0 @@ -C FILE: FIB1.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB1.F diff --git a/numpy/f2py/docs/usersguide/fib1.pyf b/numpy/f2py/docs/usersguide/fib1.pyf deleted file mode 100644 index 3d6cc0a54..000000000 --- a/numpy/f2py/docs/usersguide/fib1.pyf +++ /dev/null @@ -1,12 +0,0 @@ -! -*- f90 -*- -python module fib2 ! in - interface ! in :fib2 - subroutine fib(a,n) ! in :fib2:fib1.f - real*8 dimension(n) :: a - integer optional,check(len(a)>=n),depend(a) :: n=len(a) - end subroutine fib - end interface -end python module fib2 - -! This file was auto-generated with f2py (version:2.28.198-1366). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/docs/usersguide/fib2.pyf b/numpy/f2py/docs/usersguide/fib2.pyf deleted file mode 100644 index 4a5ae29f1..000000000 --- a/numpy/f2py/docs/usersguide/fib2.pyf +++ /dev/null @@ -1,9 +0,0 @@ -! 
-*- f90 -*- -python module fib2 - interface - subroutine fib(a,n) - real*8 dimension(n),intent(out),depend(n) :: a - integer intent(in) :: n - end subroutine fib - end interface -end python module fib2 diff --git a/numpy/f2py/docs/usersguide/fib3.f b/numpy/f2py/docs/usersguide/fib3.f deleted file mode 100644 index 08b050cd2..000000000 --- a/numpy/f2py/docs/usersguide/fib3.f +++ /dev/null @@ -1,21 +0,0 @@ -C FILE: FIB3.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) -Cf2py intent(in) n -Cf2py intent(out) a -Cf2py depend(n) a - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB3.F diff --git a/numpy/f2py/docs/usersguide/ftype.f b/numpy/f2py/docs/usersguide/ftype.f deleted file mode 100644 index cabbb9e2d..000000000 --- a/numpy/f2py/docs/usersguide/ftype.f +++ /dev/null @@ -1,9 +0,0 @@ -C FILE: FTYPE.F - SUBROUTINE FOO(N) - INTEGER N -Cf2py integer optional,intent(in) :: n = 13 - REAL A,X - COMMON /DATA/ A,X(3) - PRINT*, "IN FOO: N=",N," A=",A," X=[",X(1),X(2),X(3),"]" - END -C END OF FTYPE.F diff --git a/numpy/f2py/docs/usersguide/ftype_session.dat b/numpy/f2py/docs/usersguide/ftype_session.dat deleted file mode 100644 index 01f9febaf..000000000 --- a/numpy/f2py/docs/usersguide/ftype_session.dat +++ /dev/null @@ -1,21 +0,0 @@ ->>> import ftype ->>> print ftype.__doc__ -This module 'ftype' is auto-generated with f2py (version:2.28.198-1366). -Functions: - foo(n=13) -COMMON blocks: - /data/ a,x(3) -. ->>> type(ftype.foo),type(ftype.data) -(, ) ->>> ftype.foo() - IN FOO: N= 13 A= 0. X=[ 0. 0. 0.] ->>> ftype.data.a = 3 ->>> ftype.data.x = [1,2,3] ->>> ftype.foo() - IN FOO: N= 13 A= 3. X=[ 1. 2. 3.] ->>> ftype.data.x[1] = 45 ->>> ftype.foo(24) - IN FOO: N= 24 A= 3. X=[ 1. 45. 3.] ->>> ftype.data.x -array([ 1., 45., 3.],'f') diff --git a/numpy/f2py/docs/usersguide/index.txt b/numpy/f2py/docs/usersguide/index.txt deleted file mode 100644 index 5a8d12c68..000000000 --- a/numpy/f2py/docs/usersguide/index.txt +++ /dev/null @@ -1,1772 +0,0 @@ -.. -*- rest -*- - -////////////////////////////////////////////////////////////////////// - F2PY Users Guide and Reference Manual -////////////////////////////////////////////////////////////////////// - -:Author: Pearu Peterson -:Contact: pearu@cens.ioc.ee -:Web site: http://cens.ioc.ee/projects/f2py2e/ -:Date: $Date: 2005/04/02 10:03:26 $ -:Revision: $Revision: 1.27 $ - - -.. section-numbering:: - -.. Contents:: - - -================ - Introduction -================ - -The purpose of the F2PY_ --*Fortran to Python interface generator*-- -project is to provide a connection between Python and Fortran -languages. F2PY is a Python_ package (with a command line tool -``f2py`` and a module ``f2py2e``) that facilitates creating/building -Python C/API extension modules that make it possible - -* to call Fortran 77/90/95 external subroutines and Fortran 90/95 - module subroutines as well as C functions; -* to access Fortran 77 ``COMMON`` blocks and Fortran 90/95 module data, - including allocatable arrays - -from Python. See F2PY_ web site for more information and installation -instructions. 
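As a brief taste of what this looks like in practice, the sketch below echoes the
``allocarr.f90`` example that ships with these docs (see the allocatable-array
session data elsewhere in this guide): after building the wrapper, Fortran 90
module data can be read, (re)allocated and deallocated directly from Python.
The module and attribute names are taken from that example and the session is
illustrative only::

    # build the wrapper first, e.g.:  f2py -c allocarr.f90 -m allocarr

    >>> import allocarr
    >>> allocarr.mod.b = [[1, 2, 3], [4, 5, 6]]   # allocate/initialize module array b
    >>> allocarr.mod.foo()                        # Fortran routine sees the new contents of b
    >>> allocarr.mod.b = None                     # deallocate b again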
- -====================================== - Three ways to wrap - getting started -====================================== - -Wrapping Fortran or C functions to Python using F2PY consists of the -following steps: - -* Creating the so-called signature file that contains descriptions of - wrappers to Fortran or C functions, also called as signatures of the - functions. In the case of Fortran routines, F2PY can create initial - signature file by scanning Fortran source codes and - catching all relevant information needed to create wrapper - functions. - -* Optionally, F2PY created signature files can be edited to optimize - wrappers functions, make them "smarter" and more "Pythonic". - -* F2PY reads a signature file and writes a Python C/API module containing - Fortran/C/Python bindings. - -* F2PY compiles all sources and builds an extension module containing - the wrappers. In building extension modules, F2PY uses - ``numpy_distutils`` that supports a number of Fortran 77/90/95 - compilers, including Gnu, Intel, - Sun Fortre, SGI MIPSpro, Absoft, NAG, Compaq etc. compilers. - -Depending on a particular situation, these steps can be carried out -either by just in one command or step-by-step, some steps can be -ommited or combined with others. - -Below I'll describe three typical approaches of using F2PY. -The following `example Fortran 77 code`__ will be used for -illustration: - -.. include:: fib1.f - :literal: - -__ fib1.f - -The quick way -============== - -The quickest way to wrap the Fortran subroutine ``FIB`` to Python is -to run - -:: - - f2py -c fib1.f -m fib1 - -This command builds (see ``-c`` flag, execute ``f2py`` without -arguments to see the explanation of command line options) an extension -module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in -Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: - - >>> import Numeric - >>> import fib1 - >>> print fib1.fib.__doc__ - fib - Function signature: - fib(a,[n]) - Required arguments: - a : input rank-1 array('d') with bounds (n) - Optional arguments: - n := len(a) input int - - >>> a=Numeric.zeros(8,'d') - >>> fib1.fib(a) - >>> print a - [ 0. 1. 1. 2. 3. 5. 8. 13.] - -.. topic:: Comments - - * Note that F2PY found that the second argument ``n`` is the - dimension of the first array argument ``a``. Since by default all - arguments are input-only arguments, F2PY concludes that ``n`` can - be optional with the default value ``len(a)``. - - * One can use different values for optional ``n``:: - - >>> a1=Numeric.zeros(8,'d') - >>> fib1.fib(a1,6) - >>> print a1 - [ 0. 1. 1. 2. 3. 5. 0. 0.] - - but an exception is raised when it is incompatible with the input - array ``a``:: - - >>> fib1.fib(a,10) - fib:n=10 - Traceback (most recent call last): - File "", line 1, in ? - fib.error: (len(a)>=n) failed for 1st keyword n - >>> - - This demonstrates one of the useful features in F2PY, that it, - F2PY implements basic compatibility checks between related - arguments in order to avoid any unexpected crashes. - - * When a Numeric array, that is Fortran contiguous and has a typecode - corresponding to presumed Fortran type, is used as an input array - argument, then its C pointer is directly passed to Fortran. - - Otherwise F2PY makes a contiguous copy (with a proper typecode) of - the input array and passes C pointer of the copy to Fortran - subroutine. 
As a result, any possible changes to the (copy of) - input array have no effect to the original argument, as - demonstrated below:: - - >>> a=Numeric.ones(8,'i') - >>> fib1.fib(a) - >>> print a - [1 1 1 1 1 1 1 1] - - Clearly, this is not an expected behaviour. The fact that the - above example worked with ``typecode='d'`` is considered - accidental. - - F2PY provides ``intent(inplace)`` attribute that would modify - the attributes of an input array so that any changes made by - Fortran routine will be effective also in input argument. For example, - if one specifies ``intent(inplace) a`` (see below, how), then - the example above would read: - - >>> a=Numeric.ones(8,'i') - >>> fib1.fib(a) - >>> print a - [ 0. 1. 1. 2. 3. 5. 8. 13.] - - However, the recommended way to get changes made by Fortran - subroutine back to python is to use ``intent(out)`` attribute. It - is more efficient and a cleaner solution. - - * The usage of ``fib1.fib`` in Python is very similar to using - ``FIB`` in Fortran. However, using *in situ* output arguments in - Python indicates a poor style as there is no safety mechanism - in Python with respect to wrong argument types. When using Fortran - or C, compilers naturally discover any type mismatches during - compile time but in Python the types must be checked in - runtime. So, using *in situ* output arguments in Python may cause - difficult to find bugs, not to mention that the codes will be less - readable when all required type checks are implemented. - - Though the demonstrated way of wrapping Fortran routines to Python - is very straightforward, it has several drawbacks (see the comments - above). These drawbacks are due to the fact that there is no way - that F2PY can determine what is the acctual intention of one or the - other argument, is it input or output argument, or both, or - something else. So, F2PY conservatively assumes that all arguments - are input arguments by default. - - However, there are ways (see below) how to "teach" F2PY about the - true intentions (among other things) of function arguments; and then - F2PY is able to generate more Pythonic (more explicit, easier to - use, and less error prone) wrappers to Fortran functions. - -The smart way -============== - -Let's apply the steps of wrapping Fortran functions to Python one by -one. - -* First, we create a signature file from ``fib1.f`` by running - - :: - - f2py fib1.f -m fib2 -h fib1.pyf - - The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and - its contents is shown below. - - .. include:: fib1.pyf - :literal: - -* Next, we'll teach F2PY that the argument ``n`` is a input argument - (use ``intent(in)`` attribute) and that the result, i.e. the - contents of ``a`` after calling Fortran function ``FIB``, should be - returned to Python (use ``intent(out)`` attribute). In addition, an - array ``a`` should be created dynamically using the size given by - the input argument ``n`` (use ``depend(n)`` attribute to indicate - dependence relation). - - The content of a modified version of ``fib1.pyf`` (saved as - ``fib2.pyf``) is as follows: - - .. include:: fib2.pyf - :literal: - -* And finally, we build the extension module by running - - :: - - f2py -c fib2.pyf fib1.f - -In Python:: - - >>> import fib2 - >>> print fib2.fib.__doc__ - fib - Function signature: - a = fib(n) - Required arguments: - n : input int - Return objects: - a : rank-1 array('d') with bounds (n) - - >>> print fib2.fib(8) - [ 0. 1. 1. 2. 3. 5. 8. 13.] - -.. 
topic:: Comments - - * Clearly, the signature of ``fib2.fib`` now corresponds to the - intention of Fortran subroutine ``FIB`` more closely: given the - number ``n``, ``fib2.fib`` returns the first ``n`` Fibonacci numbers - as a Numeric array. Also, the new Python signature ``fib2.fib`` - rules out any surprises that we experienced with ``fib1.fib``. - - * Note that by default using single ``intent(out)`` also implies - ``intent(hide)``. Argument that has ``intent(hide)`` attribute - specified, will not be listed in the argument list of a wrapper - function. - -The quick and smart way -======================== - -The "smart way" of wrapping Fortran functions, as explained above, is -suitable for wrapping (e.g. third party) Fortran codes for which -modifications to their source codes are not desirable nor even -possible. - -However, if editing Fortran codes is acceptable, then the generation -of an intermediate signature file can be skipped in most -cases. Namely, F2PY specific attributes can be inserted directly to -Fortran source codes using the so-called F2PY directive. A F2PY -directive defines special comment lines (starting with ``Cf2py``, for -example) which are ignored by Fortran compilers but F2PY interprets -them as normal lines. - -Here is shown a `modified version of the example Fortran code`__, saved -as ``fib3.f``: - -.. include:: fib3.f - :literal: - -__ fib3.f - -Building the extension module can be now carried out in one command:: - - f2py -c -m fib3 fib3.f - -Notice that the resulting wrapper to ``FIB`` is as "smart" as in -previous case:: - - >>> import fib3 - >>> print fib3.fib.__doc__ - fib - Function signature: - a = fib(n) - Required arguments: - n : input int - Return objects: - a : rank-1 array('d') with bounds (n) - - >>> print fib3.fib(8) - [ 0. 1. 1. 2. 3. 5. 8. 13.] - - -================== - Signature file -================== - -The syntax specification for signature files (.pyf files) is borrowed -from the Fortran 90/95 language specification. Almost all Fortran -90/95 standard constructs are understood, both in free and fixed -format (recall that Fortran 77 is a subset of Fortran 90/95). F2PY -introduces also some extensions to Fortran 90/95 language -specification that help designing Fortran to Python interface, make it -more "Pythonic". - -Signature files may contain arbitrary Fortran code (so that Fortran -codes can be considered as signature files). F2PY silently ignores -Fortran constructs that are irrelevant for creating the interface. -However, this includes also syntax errors. So, be careful not making -ones;-). - -In general, the contents of signature files is case-sensitive. When -scanning Fortran codes and writing a signature file, F2PY lowers all -cases automatically except in multi-line blocks or when ``--no-lower`` -option is used. - -The syntax of signature files is overvied below. - -Python module block -===================== - -A signature file may contain one (recommended) or more ``python -module`` blocks. ``python module`` block describes the contents of -a Python/C extension module ``module.c`` that F2PY -generates. - -Exception: if ```` contains a substring ``__user__``, then -the corresponding ``python module`` block describes the signatures of -so-called call-back functions (see `Call-back arguments`_). - -A ``python module`` block has the following structure:: - - python module - []... - [ - interface - - - - end [interface] - ]... - [ - interface - module - [] - [] - end [module []] - end [interface] - ]... 
- end [python module []] - -Here brackets ``[]`` indicate a optional part, dots ``...`` indicate -one or more of a previous part. So, ``[]...`` reads zero or more of a -previous part. - - -Fortran/C routine signatures -============================= - -The signature of a Fortran routine has the following structure:: - - [] function | subroutine \ - [ ( [] ) ] [ result ( ) ] - [] - [] - [] - [] - [] - end [ function | subroutine [] ] - -From a Fortran routine signature F2PY generates a Python/C extension -function that has the following signature:: - - def ([,]): - ... - return - -The signature of a Fortran block data has the following structure:: - - block data [ ] - [] - [] - [] - [] - [] - end [ block data [] ] - -Type declarations -------------------- - - The definition of the ```` part - is - - :: - - [ [] :: ] - - where - - :: - - := byte | character [] - | complex [] | real [] - | double complex | double precision - | integer [] | logical [] - - := * - | ( [len=] [ , [kind=] ] ) - | ( kind= [ , len= ] ) - := * | ( [kind=] ) - - := [ [ * ] [ ( ) ] - | [ ( ) ] * ] - | [ / / | = ] \ - [ , ] - - and - - + ```` is a comma separated list of attributes_; - - + ```` is a comma separated list of dimension bounds; - - + ```` is a `C expression`__. - - + ```` may be negative integer for ``integer`` type - specifications. In such cases ``integer*`` represents - unsigned C integers. - -__ `C expressions`_ - - If an argument has no ````, its type is - determined by applying ``implicit`` rules to its name. - - -Statements ------------- - -Attribute statements: - - The ```` is - ```` without ````. - In addition, in an attribute statement one cannot use other - attributes, also ```` can be only a list of names. - -Use statements: - - The definition of the ```` part is - - :: - - use [ , | , ONLY : ] - - where - - :: - - := => [ , ] - - Currently F2PY uses ``use`` statement only for linking call-back - modules and ``external`` arguments (call-back functions), see - `Call-back arguments`_. - -Common block statements: - - The definition of the ```` part is - - :: - - common / / - - where - - :: - - := [ ( ) ] [ , ] - - One ``python module`` block should not contain two or more - ``common`` blocks with the same name. Otherwise, the latter ones are - ignored. The types of variables in ```` are defined - using ````. Note that the corresponding - ```` may contain array specifications; - then you don't need to specify these in ````. - -Other statements: - - The ```` part refers to any other Fortran language - constructs that are not described above. F2PY ignores most of them - except - - + ``call`` statements and function calls of ``external`` arguments - (`more details`__?); - -__ external_ - - + ``include`` statements - - :: - - include '' - include "" - - If a file ```` does not exist, the ``include`` - statement is ignored. Otherwise, the file ```` is - included to a signature file. ``include`` statements can be used - in any part of a signature file, also outside the Fortran/C - routine signature blocks. - - + ``implicit`` statements - - :: - - implicit none - implicit - - where - - :: - - := ( ) - - Implicit rules are used to deterimine the type specification of - a variable (from the first-letter of its name) if the variable - is not defined using ````. Default - implicit rule is given by - - :: - - implicit real (a-h,o-z,$_), integer (i-m) - - + ``entry`` statements - - :: - - entry [([])] - - F2PY generates wrappers to all entry names using the signature - of the routine block. 
- - Tip: ``entry`` statement can be used to describe the signature - of an arbitrary routine allowing F2PY to generate a number of - wrappers from only one routine block signature. There are few - restrictions while doing this: ``fortranname`` cannot be used, - ``callstatement`` and ``callprotoargument`` can be used only if - they are valid for all entry routines, etc. - - In addition, F2PY introduces the following statements: - - + ``threadsafe`` - Use ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block - around the call to Fortran/C function. - - + ``callstatement `` - Replace F2PY generated call statement to Fortran/C function with - ````. The wrapped Fortran/C function - is available as ``(*f2py_func)``. To raise an exception, set - ``f2py_success = 0`` in ````. - - + ``callprotoargument `` - When ``callstatement`` statement is used then F2PY may not - generate proper prototypes for Fortran/C functions (because - ```` may contain any function calls and F2PY has no way - to determine what should be the proper prototype). With this - statement you can explicitely specify the arguments of the - corresponding prototype:: - - extern FUNC_F(,)(); - - + ``fortranname []`` - You can use arbitrary ```` for a given Fortran/C - function. Then you have to specify - ```` with this statement. - - If ``fortranname`` statement is used without - ```` then a dummy wrapper is - generated. - - + ``usercode `` - When used inside ``python module`` block, then given C code - will be inserted to generated C/API source just before - wrapper function definitions. Here you can define arbitrary - C functions to be used in initialization of optional arguments, - for example. If ``usercode`` is used twise inside ``python - module`` block then the second multi-line block is inserted - after the definition of external routines. - - When used inside ````, then given C code will - be inserted to the corresponding wrapper function just after - declaring variables but before any C statements. So, ``usercode`` - follow-up can contain both declarations and C statements. - - When used inside the first ``interface`` block, then given C - code will be inserted at the end of the initialization - function of the extension module. Here you can modify extension - modules dictionary. For example, for defining additional - variables etc. - - + ``pymethoddef `` - Multiline block will be inserted to the definition of - module methods ``PyMethodDef``-array. It must be a - comma-separated list of C arrays (see `Extending and Embedding`__ - Python documentation for details). - ``pymethoddef`` statement can be used only inside - ``python module`` block. - - __ http://www.python.org/doc/current/ext/ext.html - -Attributes ------------- - -The following attributes are used by F2PY: - -``optional`` - The corresponding argument is moved to the end of ```` list. A default value for an optional argument can be - specified ````, see ``entitydecl`` definition. Note that - the default value must be given as a valid C expression. - - Note that whenever ```` is used, ``optional`` attribute - is set automatically by F2PY. - - For an optional array argument, all its dimensions must be bounded. - -``required`` - The corresponding argument is considered as a required one. This is - default. You need to specify ``required`` only if there is a need to - disable automatic ``optional`` setting when ```` is used. - - If Python ``None`` object is used as an required argument, the - argument is treated as optional. 
That is, in the case of array - argument, the memory is allocated. And if ```` is given, - the corresponding initialization is carried out. - -``dimension()`` - The corresponding variable is considered as an array with given - dimensions in ````. - -``intent()`` - This specifies the "intention" of the corresponding - argument. ```` is a comma separated list of the - following keys: - - + ``in`` - The argument is considered as an input-only argument. It means - that the value of the argument is passed to Fortran/C function and - that function is expected not to change the value of an argument. - - + ``inout`` - The argument is considered as an input/output or *in situ* - output argument. ``intent(inout)`` arguments can be only - "contiguous" Numeric arrays with proper type and size. Here - "contiguous" can be either in Fortran or C sense. The latter one - coincides with the contiguous concept used in Numeric and is - effective only if ``intent(c)`` is used. Fortran-contiguousness - is assumed by default. - - Using ``intent(inout)`` is generally not recommended, use - ``intent(in,out)`` instead. See also ``intent(inplace)`` attribute. - - + ``inplace`` - The argument is considered as an input/output or *in situ* - output argument. ``intent(inplace)`` arguments must be - Numeric arrays with proper size. If the type of an array is - not "proper" or the array is non-contiguous then the array - will be changed in-place to fix the type and make it contiguous. - - Using ``intent(inplace)`` is generally not recommended either. - For example, when slices have been taken from an - ``intent(inplace)`` argument then after in-place changes, - slices data pointers may point to unallocated memory area. - - + ``out`` - The argument is considered as an return variable. It is appended - to the ```` list. Using ``intent(out)`` - sets ``intent(hide)`` automatically, unless also - ``intent(in)`` or ``intent(inout)`` were used. - - By default, returned multidimensional arrays are - Fortran-contiguous. If ``intent(c)`` is used, then returned - multi-dimensional arrays are C-contiguous. - - + ``hide`` - The argument is removed from the list of required or optional - arguments. Typically ``intent(hide)`` is used with ``intent(out)`` - or when ```` completely determines the value of the - argument like in the following example:: - - integer intent(hide),depend(a) :: n = len(a) - real intent(in),dimension(n) :: a - - + ``c`` - The argument is treated as a C scalar or C array argument. In - the case of a scalar argument, its value is passed to C function - as a C scalar argument (recall that Fortran scalar arguments are - actually C pointer arguments). In the case of an array - argument, the wrapper function is assumed to treat - multi-dimensional arrays as C-contiguous arrays. - - There is no need to use ``intent(c)`` for one-dimensional - arrays, no matter if the wrapped function is either a Fortran or - a C function. This is because the concepts of Fortran- and - C-contiguousness overlap in one-dimensional cases. - - If ``intent(c)`` is used as an statement but without entity - declaration list, then F2PY adds ``intent(c)`` attibute to all - arguments. - - Also, when wrapping C functions, one must use ``intent(c)`` - attribute for ```` in order to disable Fortran - specific ``F_FUNC(..,..)`` macros. - - + ``cache`` - The argument is treated as a junk of memory. No Fortran nor C - contiguousness checks are carried out. 
Using ``intent(cache)`` - makes sense only for array arguments, also in connection with - ``intent(hide)`` or ``optional`` attributes. - - + ``copy`` - Ensure that the original contents of ``intent(in)`` argument is - preserved. Typically used in connection with ``intent(in,out)`` - attribute. F2PY creates an optional argument - ``overwrite_`` with the default value ``0``. - - + ``overwrite`` - The original contents of the ``intent(in)`` argument may be - altered by the Fortran/C function. F2PY creates an optional - argument ``overwrite_`` with the default value - ``1``. - - + ``out=`` - Replace the return name with ```` in the ``__doc__`` - string of a wrapper function. - - + ``callback`` - Construct an external function suitable for calling Python function - from Fortran. ``intent(callback)`` must be specified before the - corresponding ``external`` statement. If 'argument' is not in - argument list then it will be added to Python wrapper but only - initializing external function. - - Use ``intent(callback)`` in situations where a Fortran/C code - assumes that a user implements a function with given prototype - and links it to an executable. Don't use ``intent(callback)`` - if function appears in the argument list of a Fortran routine. - - With ``intent(hide)`` or ``optional`` attributes specified and - using a wrapper function without specifying the callback argument - in argument list then call-back function is looked in the - namespace of F2PY generated extension module where it can be - set as a module attribute by a user. - - + ``aux`` - Define auxiliary C variable in F2PY generated wrapper function. - Useful to save parameter values so that they can be accessed - in initialization expression of other variables. Note that - ``intent(aux)`` silently implies ``intent(c)``. - - The following rules apply: - - + If no ``intent(in | inout | out | hide)`` is specified, - ``intent(in)`` is assumed. - + ``intent(in,inout)`` is ``intent(in)``. - + ``intent(in,hide)`` or ``intent(inout,hide)`` is - ``intent(hide)``. - + ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or - ``intent(inout)`` is specified. - + If ``intent(copy)`` or ``intent(overwrite)`` is used, then an - additional optional argument is introduced with a name - ``overwrite_`` and a default value 0 or 1, respectively. - + ``intent(inout,inplace)`` is ``intent(inplace)``. - + ``intent(in,inplace)`` is ``intent(inplace)``. - + ``intent(hide)`` disables ``optional`` and ``required``. - -``check([])`` - Perform consistency check of arguments by evaluating - ````; if ```` returns 0, an exception - is raised. - - If ``check(..)`` is not used then F2PY generates few standard checks - (e.g. in a case of an array argument, check for the proper shape - and size) automatically. Use ``check()`` to disable checks generated - by F2PY. - -``depend([])`` - This declares that the corresponding argument depends on the values - of variables in the list ````. For example, ```` - may use the values of other arguments. Using information given by - ``depend(..)`` attributes, F2PY ensures that arguments are - initialized in a proper order. If ``depend(..)`` attribute is not - used then F2PY determines dependence relations automatically. Use - ``depend()`` to disable dependence relations generated by F2PY. - - When you edit dependence relations that were initially generated by - F2PY, be careful not to break the dependence relations of other - relevant variables. Another thing to watch out is cyclic - dependencies. 
F2PY is able to detect cyclic dependencies - when constructing wrappers and it complains if any are found. - -``allocatable`` - The corresponding variable is Fortran 90 allocatable array defined - as Fortran 90 module data. - -.. _external: - -``external`` - The corresponding argument is a function provided by user. The - signature of this so-called call-back function can be defined - - - in ``__user__`` module block, - - or by demonstrative (or real, if the signature file is a real Fortran - code) call in the ```` block. - - For example, F2PY generates from - - :: - - external cb_sub, cb_fun - integer n - real a(n),r - call cb_sub(a,n) - r = cb_fun(4) - - the following call-back signatures:: - - subroutine cb_sub(a,n) - real dimension(n) :: a - integer optional,check(len(a)>=n),depend(a) :: n=len(a) - end subroutine cb_sub - function cb_fun(e_4_e) result (r) - integer :: e_4_e - real :: r - end function cb_fun - - The corresponding user-provided Python function are then:: - - def cb_sub(a,[n]): - ... - return - def cb_fun(e_4_e): - ... - return r - - See also ``intent(callback)`` attribute. - -``parameter`` - The corresponding variable is a parameter and it must have a fixed - value. F2PY replaces all parameter occurrences by their - corresponding values. - -Extensions -============ - -F2PY directives ------------------ - -The so-called F2PY directives allow using F2PY signature file -constructs also in Fortran 77/90 source codes. With this feature you -can skip (almost) completely intermediate signature file generations -and apply F2PY directly to Fortran source codes. - -F2PY directive has the following form:: - - f2py ... - -where allowed comment characters for fixed and free format Fortran -codes are ``cC*!#`` and ``!``, respectively. Everything that follows -``f2py`` is ignored by a compiler but read by F2PY as a -normal Fortran (non-comment) line: - - When F2PY finds a line with F2PY directive, the directive is first - replaced by 5 spaces and then the line is reread. - -For fixed format Fortran codes, ```` must be at the -first column of a file, of course. For free format Fortran codes, -F2PY directives can appear anywhere in a file. - -C expressions --------------- - -C expressions are used in the following parts of signature files: - -* ```` of variable initialization; -* ```` of the ``check`` attribute; -* `` of the ``dimension`` attribute; -* ``callstatement`` statement, here also a C multi-line block can be used. - -A C expression may contain: - -* standard C constructs; -* functions from ``math.h`` and ``Python.h``; -* variables from the argument list, presumably initialized before - according to given dependence relations; -* the following CPP macros: - - ``rank()`` - Returns the rank of an array ````. - ``shape(,)`` - Returns the ````-th dimension of an array ````. - ``len()`` - Returns the lenght of an array ````. - ``size()`` - Returns the size of an array ````. - ``slen()`` - Returns the length of a string ````. - -For initializing an array ````, F2PY generates a loop over -all indices and dimensions that executes the following -pseudo-statement:: - - (_i[0],_i[1],...) = ; - -where ``_i[]`` refers to the ````-th index value and that runs -from ``0`` to ``shape(,)-1``. - -For example, a function ``myrange(n)`` generated from the following -signature - -:: - - subroutine myrange(a,n) - fortranname ! 
myrange is a dummy wrapper - integer intent(in) :: n - real*8 intent(c,out),dimension(n),depend(n) :: a = _i[0] - end subroutine myrange - -is equivalent to ``Numeric.arange(n,typecode='d')``. - -.. topic:: Warning! - - F2PY may lower cases also in C expressions when scanning Fortran codes - (see ``--[no]-lower`` option). - -Multi-line blocks ------------------- - -A multi-line block starts with ``'''`` (triple single-quotes) and ends -with ``'''`` in some *strictly* subsequent line. Multi-line blocks can -be used only within .pyf files. The contents of a multi-line block can -be arbitrary (except that it cannot contain ``'''``) and no -transformations (e.g. lowering cases) are applied to it. - -Currently, multi-line blocks can be used in the following constructs: - -+ as a C expression of the ``callstatement`` statement; - -+ as a C type specification of the ``callprotoargument`` statement; - -+ as a C code block of the ``usercode`` statement; - -+ as a list of C arrays of the ``pymethoddef`` statement; - -+ as documentation string. - -================================== -Using F2PY bindings in Python -================================== - -All wrappers (to Fortran/C routines or to common blocks or to Fortran -90 module data) generated by F2PY are exposed to Python as ``fortran`` -type objects. Routine wrappers are callable ``fortran`` type objects -while wrappers to Fortran data have attributes referring to data -objects. - -All ``fortran`` type object have attribute ``_cpointer`` that contains -CObject referring to the C pointer of the corresponding Fortran/C -function or variable in C level. Such CObjects can be used as an -callback argument of F2PY generated functions to bypass Python C/API -layer of calling Python functions from Fortran or C when the -computational part of such functions is implemented in C or Fortran -and wrapped with F2PY (or any other tool capable of providing CObject -of a function). - -.. topic:: Example - - Consider a `Fortran 77 file`__ ``ftype.f``: - - .. include:: ftype.f - :literal: - - and build a wrapper using:: - - f2py -c ftype.f -m ftype - - __ ftype.f - - In Python: - - .. include:: ftype_session.dat - :literal: - - -Scalar arguments -================= - -In general, a scalar argument of a F2PY generated wrapper function can -be ordinary Python scalar (integer, float, complex number) as well as -an arbitrary sequence object (list, tuple, array, string) of -scalars. In the latter case, the first element of the sequence object -is passed to Fortran routine as a scalar argument. - -Note that when type-casting is required and there is possible loss of -information (e.g. when type-casting float to integer or complex to -float), F2PY does not raise any exception. In complex to real -type-casting only the real part of a complex number is used. - -``intent(inout)`` scalar arguments are assumed to be array objects in -order to *in situ* changes to be effective. It is recommended to use -arrays with proper type but also other types work. - -.. topic:: Example - - Consider the following `Fortran 77 code`__: - - .. include:: scalar.f - :literal: - - and wrap it using ``f2py -c -m scalar scalar.f``. - - __ scalar.f - - In Python: - - .. include:: scalar_session.dat - :literal: - - -String arguments -================= - -F2PY generated wrapper functions accept (almost) any Python object as -a string argument, ``str`` is applied for non-string objects. -Exceptions are Numeric arrays that must have type code ``'c'`` or -``'1'`` when used as string arguments. 
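As a quick illustration of the ``str`` fallback, consider the following minimal
sketch. It assumes a hypothetical wrapper module ``strdemo`` exposing a routine
``foo(s)`` that takes a character argument and merely prints it (the names are
made up for illustration; the real ``string.f`` example is included further
below)::

    >>> import strdemo          # hypothetical module, built e.g. with: f2py -c -m strdemo strdemo.f
    >>> strdemo.foo('hello')    # an ordinary Python string is passed as-is
    >>> strdemo.foo(12345)      # non-string object: str(12345), i.e. '12345', is passed instead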
- -A string can have arbitrary length when used as a string argument -to an F2PY generated wrapper function. If the length is greater than -expected, the string is truncated. If the length is smaller than -expected, additional memory is allocated and filled with ``\0``. - -Because Python strings are immutable, an ``intent(inout)`` argument -expects an array version of a string so that *in situ* changes can -be effective. - -.. topic:: Example - - Consider the following `Fortran 77 code`__: - - .. include:: string.f - :literal: - - and wrap it using ``f2py -c -m mystring string.f``. - - __ string.f - - Python session: - - .. include:: string_session.dat - :literal: - - -Array arguments -================ - -In general, array arguments of F2PY generated wrapper functions accept -arbitrary sequences that can be transformed to Numeric array objects. -An exception is ``intent(inout)`` array arguments, which must always be -proper-contiguous and have the proper type; otherwise an exception is -raised. Another exception is ``intent(inplace)`` array arguments, whose -attributes will be changed *in situ* if the argument has a different type -than expected (see the ``intent(inplace)`` attribute for more -information). - -In general, if a Numeric array is proper-contiguous and has a proper -type then it is passed directly to the wrapped Fortran/C function. -Otherwise, an element-wise copy of the input array is made and the -copy, being proper-contiguous and of the proper type, is used as the -array argument. - -There are two types of proper-contiguous Numeric arrays: - -* Fortran-contiguous arrays, where data is stored column-wise, - i.e. indexing of data as stored in memory starts from the lowest - dimension; -* C-contiguous, or simply contiguous, arrays, where data is stored - row-wise, i.e. indexing of data as stored in memory starts from the - highest dimension. - -For one-dimensional arrays these notions coincide. - -For example, a 2x2 array ``A`` is Fortran-contiguous if its elements -are stored in memory in the following order:: - - A[0,0] A[1,0] A[0,1] A[1,1] - -and C-contiguous if the order is as follows:: - - A[0,0] A[0,1] A[1,0] A[1,1] - -To test whether an array is C-contiguous, use the ``.iscontiguous()`` -method of Numeric arrays. To test for Fortran contiguity, all -F2PY generated extension modules provide a function -``has_column_major_storage(<array>)``. This function is equivalent to -``Numeric.transpose(<array>).iscontiguous()`` but more efficient. - -Usually there is no need to worry about how the arrays are stored in -memory and whether the wrapped functions, being either Fortran or C -functions, assume one or another storage order. F2PY automatically -ensures that wrapped functions get arguments with the proper storage -order; the corresponding algorithm is designed to make copies of -arrays only when absolutely necessary. However, when dealing with very -large multi-dimensional input arrays with sizes close to the size of -the physical memory in your computer, care must be taken to always use -proper-contiguous arguments of the proper type. - -To transform input arrays to column major storage order before passing -them to Fortran routines, use the function -``as_column_major_storage(<array>)`` that is provided by all F2PY -generated extension modules. - -.. topic:: Example - - Consider `Fortran 77 code`__: - - .. include:: array.f - :literal: - - and wrap it using ``f2py -c -m arr array.f -DF2PY_REPORT_ON_ARRAY_COPY=1``. - - __ array.f - - In Python: - - ..
include:: array_session.dat - :literal: - -Call-back arguments -==================== - -F2PY supports calling Python functions from Fortran or C codes. - - -.. topic:: Example - - Consider the following `Fortran 77 code`__ - - .. include:: callback.f - :literal: - - and wrap it using ``f2py -c -m callback callback.f``. - - __ callback.f - - In Python: - - .. include:: callback_session.dat - :literal: - -In the above example F2PY was able to guess accurately the signature -of a call-back function. However, sometimes F2PY cannot establish the -signature as one would wish and then the signature of a call-back -function must be modified in the signature file manually. Namely, -signature files may contain special modules (the names of such modules -contain a substring ``__user__``) that collect various signatures of -call-back functions. Callback arguments in routine signatures have -attribute ``external`` (see also ``intent(callback)`` attribute). To -relate a callback argument and its signature in ``__user__`` module -block, use ``use`` statement as illustrated below. The same signature -of a callback argument can be referred in different routine -signatures. - -.. topic:: Example - - We use the same `Fortran 77 code`__ as in previous example but now - we'll pretend that F2PY was not able to guess the signatures of - call-back arguments correctly. First, we create an initial signature - file ``callback2.pyf`` using F2PY:: - - f2py -m callback2 -h callback2.pyf callback.f - - Then modify it as follows - - .. include:: callback2.pyf - :literal: - - Finally, build the extension module using:: - - f2py -c callback2.pyf callback.f - - An example Python session would be identical to the previous example - except that argument names would differ. - - __ callback.f - -Sometimes a Fortran package may require that users provide routines -that the package will use. F2PY can construct an interface to such -routines so that Python functions could be called from Fortran. - -.. topic:: Example - - Consider the following `Fortran 77 subroutine`__ that takes an array - and applies a function ``func`` to its elements. - - .. include:: calculate.f - :literal: - - __ calculate.f - - It is expected that function ``func`` has been defined - externally. In order to use a Python function as ``func``, it must - have an attribute ``intent(callback)`` (it must be specified before - the ``external`` statement). - - Finally, build an extension module using:: - - f2py -c -m foo calculate.f - - In Python: - - .. include:: calculate_session.dat - :literal: - -The function is included as an argument to the python function call to -the FORTRAN subroutine eventhough it was NOT in the FORTRAN subroutine argument -list. The "external" refers to the C function generated by f2py, not the python -function itself. The python function must be supplied to the C function. - -The callback function may also be explicitly set in the module. -Then it is not necessary to pass the function in the argument list to -the FORTRAN function. This may be desired if the FORTRAN function calling -the python callback function is itself called by another FORTRAN function. - -.. topic:: Example - - Consider the following `Fortran 77 subroutine`__. - - .. include:: extcallback.f - :literal: - - __ extcallback.f - - and wrap it using ``f2py -c -m pfromf extcallback.f``. - - In Python: - - .. 
include:: extcallback_session.dat - :literal: - -Resolving arguments to call-back functions ------------------------------------------- - -The F2PY generated interface is very flexible with respect to call-back -arguments. For each call-back argument an additional optional -argument ``<name>_extra_args`` is introduced by F2PY. This argument -can be used to pass extra arguments to user-provided call-back -functions. - -If an F2PY generated wrapper function expects the following call-back -argument:: - - def fun(a_1,...,a_n): - ... - return x_1,...,x_k - -but the following Python function - -:: - - def gun(b_1,...,b_m): - ... - return y_1,...,y_l - -is provided by the user, and in addition, - -:: - - fun_extra_args = (e_1,...,e_p) - -is used, then the following rules are applied when a Fortran or C -function calls the call-back argument ``gun``: - -* If ``p==0`` then ``gun(a_1,...,a_q)`` is called, where - ``q=min(m,n)``. -* If ``n+p<=m`` then ``gun(a_1,...,a_n,e_1,...,e_p)`` is called. -* If ``p<=m<n+p`` then ``gun(a_1,...,a_{m-p},e_1,...,e_p)`` is called. -* If ``p>m`` then ``gun(e_1,...,e_m)`` is called. -* If ``n+p`` is less than the number of required arguments to ``gun`` - then an exception is raised. - -The function ``gun`` may return any number of objects as a tuple. Then the -following rules are applied: - -* If ``k<l``, then ``y_{k+1},...,y_l`` are ignored. -* If ``k>l``, then only ``x_1,...,x_l`` are set. - - - -Common blocks -============== - -F2PY generates wrappers to ``common`` blocks defined in a routine -signature block. Common blocks are visible to all Fortran codes linked -with the current extension module, but not to other extension modules -(this restriction is due to how Python imports shared libraries). In -Python, the F2PY wrappers to ``common`` blocks are ``fortran`` type -objects that have (dynamic) attributes related to the data members of -the common blocks. When accessed, these attributes are returned as Numeric -array objects (multi-dimensional arrays are Fortran-contiguous) that -directly link to the data members in the common blocks. Data members can be -changed by direct assignment or by in-place changes to the -corresponding array objects. - -.. topic:: Example - - Consider the following `Fortran 77 code`__ - - .. include:: common.f - :literal: - - and wrap it using ``f2py -c -m common common.f``. - - __ common.f - - In Python: - - .. include:: common_session.dat - :literal: - -Fortran 90 module data -======================= - -The F2PY interface to Fortran 90 module data is similar to that for Fortran 77 -common blocks. - -.. topic:: Example - - Consider the following `Fortran 90 code`__ - - .. include:: moddata.f90 - :literal: - - and wrap it using ``f2py -c -m moddata moddata.f90``. - - __ moddata.f90 - - In Python: - - .. include:: moddata_session.dat - :literal: - -Allocatable arrays ------------------- - -F2PY has basic support for Fortran 90 module allocatable arrays. - -.. topic:: Example - - Consider the following `Fortran 90 code`__ - - .. include:: allocarr.f90 - :literal: - - and wrap it using ``f2py -c -m allocarr allocarr.f90``. - - __ allocarr.f90 - - In Python: - - .. include:: allocarr_session.dat - :literal: - - -=========== -Using F2PY -=========== - -F2PY can be used either as a command line tool ``f2py`` or as a Python -module ``f2py2e``. - -Command ``f2py`` -================= - -When used as a command line tool, ``f2py`` has three major modes, -distinguished by the usage of the ``-c`` and ``-h`` switches: - -1. To scan Fortran sources and generate a signature file, use - - :: - - f2py -h <filename.pyf> <options> <fortran files> \ - [[ only: <fortran functions> : ] \ - [ skip: <fortran functions> : ]]... \ - [<fortran files> ...]
- - Note that a Fortran source file can contain many routines, and not - necessarily all routines are needed to be used from Python. So, you - can either specify which routines should be wrapped (in ``only: .. :`` - part) or which routines F2PY should ignored (in ``skip: .. :`` part). - - If ```` is specified as ``stdout`` then signatures - are send to standard output instead of a file. - - Among other options (see below), the following options can be used - in this mode: - - ``--overwrite-signature`` - Overwrite existing signature file. - -2. To construct an extension module, use - - :: - - f2py \ - [[ only: : ] \ - [ skip: : ]]... \ - [ ...] - - The constructed extension module is saved as - ``module.c`` to the current directory. - - Here ```` may also contain signature files. - Among other options (see below), the following options can be used - in this mode: - - ``--debug-capi`` - Add debugging hooks to the extension module. When using this - extension module, various information about the wrapper is printed - to standard output, for example, the values of variables, the - steps taken, etc. - - ``-include''`` - Add a CPP ``#include`` statement to the extension module source. - ```` should be given in one of the following forms:: - - "filename.ext" - - - The include statement is inserted just before the wrapper - functions. This feature enables using arbitrary C functions - (defined in ````) in F2PY generated wrappers. - - This option is deprecated. Use ``usercode`` statement to specify - C codelets directly in signature filess - - - ``--[no-]wrap-functions`` - - Create Fortran subroutine wrappers to Fortran functions. - ``--wrap-functions`` is default because it ensures maximum - portability and compiler independence. - - ``--include-paths ::..`` - Search include files from given directories. - - ``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. - For example, try ``f2py --help-link lapack_opt``. - -3. To build an extension module, use - - :: - - f2py -c \ - [[ only: : ] \ - [ skip: : ]]... \ - [ ] [ <.o, .a, .so files> ] - - If ```` contains a signature file, then a source for - an extension module is constructed, all Fortran and C sources are - compiled, and finally all object and library files are linked to the - extension module ``.so`` which is saved into the current - directory. - - If ```` does not contain a signature file, then an - extension module is constructed by scanning all Fortran source codes - for routine signatures. - - Among other options (see below) and options described in previous - mode, the following options can be used in this mode: - - ``--help-fcompiler`` - List available Fortran compilers. - ``--help-compiler`` [depreciated] - List available Fortran compilers. - ``--fcompiler=`` - Specify Fortran compiler type by vendor. - ``--f77exec=`` - Specify the path to F77 compiler - ``--fcompiler-exec=`` [depreciated] - Specify the path to F77 compiler - ``--f90exec=`` - Specify the path to F90 compiler - ``--f90compiler-exec=`` [depreciated] - Specify the path to F90 compiler - - ``--f77flags=`` - Specify F77 compiler flags - ``--f90flags=`` - Specify F90 compiler flags - ``--opt=`` - Specify optimization flags - ``--arch=`` - Specify architecture specific optimization flags - ``--noopt`` - Compile without optimization - ``--noarch`` - Compile without arch-dependent optimization - ``--debug`` - Compile with debugging information - - ``-l`` - Use the library ```` when linking. - ``-D[=]`` - Define macro ```` as ````. 
- ``-U`` - Define macro ```` - ``-I`` - Append directory ```` to the list of directories searched for - include files. - ``-L`` - Add directory ```` to the list of directories to be searched - for ``-l``. - - ``link-`` - - Link extension module with as defined by - ``numpy_distutils/system_info.py``. E.g. to link with optimized - LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use - ``--link-lapack_opt``. See also ``--help-link`` switch. - - When building an extension module, a combination of the following - macros may be required for non-gcc Fortran compilers:: - - -DPREPEND_FORTRAN - -DNO_APPEND_FORTRAN - -DUPPERCASE_FORTRAN - - To test the performance of F2PY generated interfaces, use - ``-DF2PY_REPORT_ATEXIT``. Then a report of various timings is - printed out at the exit of Python. This feature may not work on - all platforms, currently only Linux platform is supported. - - To see whether F2PY generated interface performs copies of array - arguments, use ``-DF2PY_REPORT_ON_ARRAY_COPY=``. When the size - of an array argument is larger than ````, a message about - the coping is sent to ``stderr``. - -Other options: - -``-m `` - Name of an extension module. Default is ``untitled``. Don't use this option - if a signature file (*.pyf) is used. -``--[no-]lower`` - Do [not] lower the cases in ````. By default, - ``--lower`` is assumed with ``-h`` switch, and ``--no-lower`` - without the ``-h`` switch. -``--build-dir `` - All F2PY generated files are created in ````. Default is - ``tempfile.mktemp()``. -``--quiet`` - Run quietly. -``--verbose`` - Run with extra verbosity. -``-v`` - Print f2py version ID and exit. - -Execute ``f2py`` without any options to get an up-to-date list of -available options. - -Python module ``f2py2e`` -========================= - -.. topic:: Warning - - The current Python interface to ``f2py2e`` module is not mature and - may change in future depending on users needs. - -The following functions are provided by the ``f2py2e`` module: - -``run_main()`` - Equivalent to running:: - - f2py - - where ``=string.join(,' ')``, but in Python. Unless - ``-h`` is used, this function returns a dictionary containing - information on generated modules and their dependencies on source - files. For example, the command ``f2py -m scalar scalar.f`` can be - executed from Python as follows - - .. include:: run_main_session.dat - :literal: - - You cannot build extension modules with this function, that is, - using ``-c`` is not allowed. Use ``compile`` command instead, see - below. - -``compile(source, modulename='untitled', extra_args='', verbose=1, source_fn=None)`` - - Build extension module from Fortran 77 source string ``source``. - Return 0 if successful. - Note that this function actually calls ``f2py -c ..`` from shell to - ensure safety of the current Python process. - For example, - - .. include:: compile_session.dat - :literal: - -========================== -Using ``numpy_distutils`` -========================== - -``numpy_distutils`` is part of the SciPy_ project and aims to extend -standard Python ``distutils`` to deal with Fortran sources and F2PY -signature files, e.g. compile Fortran sources, call F2PY to construct -extension modules, etc. - -.. topic:: Example - - Consider the following `setup file`__: - - .. include:: setup_example.py - :literal: - - Running - - :: - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. 
- - __ setup_example.py - -``numpy_distutils`` extends ``distutils`` with the following features: - -* ``Extension`` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and then the name of an Extension module must - match with the ```` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. - - If ``sources`` does not contain a signature files, then F2PY is used - to scan Fortran source files for routine signatures to construct the - wrappers to Fortran codes. - - Additional options to F2PY process can be given using ``Extension`` - class argument ``f2py_options``. - -``numpy_distutils`` 0.2.2 and up -================================ - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options - - as well as ``build_ext`` and ``build_clib`` commands are enhanced - to support Fortran sources. - - Run - - :: - - python config_fc build_src build_ext --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, then one - can choose different Fortran compilers by using ``build_ext`` - command option ``--fcompiler=``. Here ```` can be one of the - following names:: - - absoft sun mips intel intelv intele intelev nag compaq compaqv gnu vast pg hpux - - See ``numpy_distutils/fcompiler.py`` for up-to-date list of - supported compilers or run - - :: - - f2py -c --help-fcompiler - -``numpy_distutils`` pre 0.2.2 -============================= - -* The following new ``distutils`` commands are defined: - - ``build_flib`` - to build f77/f90 libraries used by Python extensions; - ``run_f2py`` - to construct Fortran wrapper extension modules. - - Run - - :: - - python build_flib run_f2py --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, then one - can choose different Fortran compilers either by using ``build_flib`` - command option ``--fcompiler=`` or by defining environment - variable ``FC_VENDOR=``. Here ```` can be one of the - following names:: - - Absoft Sun SGI Intel Itanium NAG Compaq Digital Gnu VAST PG - - See ``numpy_distutils/command/build_flib.py`` for up-to-date list of - supported compilers. - -====================== - Extended F2PY usages -====================== - -Adding self-written functions to F2PY generated modules -======================================================= - -Self-written Python C/API functions can be defined inside -signature files using ``usercode`` and ``pymethoddef`` statements -(they must be used inside the ``python module`` block). For -example, the following signature file ``spam.pyf`` - -.. include:: spam.pyf - :literal: - -wraps the C library function ``system()``:: - - f2py -c spam.pyf - -In Python: - -.. include:: spam_session.dat - :literal: - -Modifying the dictionary of a F2PY generated module -=================================================== - -The following example illustrates how to add an user-defined -variables to a F2PY generated extension module. Given the following -signature file - -.. include:: var.pyf - :literal: - -compile it as ``f2py -c var.pyf``. 
- -Notice that the second ``usercode`` statement must be defined inside -an ``interface`` block and where the module dictionary is available through -the variable ``d`` (see ``f2py var.pyf``-generated ``varmodule.c`` for -additional details). - -In Python: - -.. include:: var_session.dat - :literal: - -.. References - ========== -.. _F2PY: http://cens.ioc.ee/projects/f2py2e/ -.. _Python: http://www.python.org/ -.. _NumPy: http://www.numpy.org/ -.. _SciPy: http://www.numpy.org/ diff --git a/numpy/f2py/docs/usersguide/moddata.f90 b/numpy/f2py/docs/usersguide/moddata.f90 deleted file mode 100644 index 0e98f0467..000000000 --- a/numpy/f2py/docs/usersguide/moddata.f90 +++ /dev/null @@ -1,18 +0,0 @@ -module mod - integer i - integer :: x(4) - real, dimension(2,3) :: a - real, allocatable, dimension(:,:) :: b -contains - subroutine foo - integer k - print*, "i=",i - print*, "x=[",x,"]" - print*, "a=[" - print*, "[",a(1,1),",",a(1,2),",",a(1,3),"]" - print*, "[",a(2,1),",",a(2,2),",",a(2,3),"]" - print*, "]" - print*, "Setting a(1,2)=a(1,2)+3" - a(1,2) = a(1,2)+3 - end subroutine foo -end module mod diff --git a/numpy/f2py/docs/usersguide/moddata_session.dat b/numpy/f2py/docs/usersguide/moddata_session.dat deleted file mode 100644 index 1ec212f8b..000000000 --- a/numpy/f2py/docs/usersguide/moddata_session.dat +++ /dev/null @@ -1,23 +0,0 @@ ->>> import moddata ->>> print moddata.mod.__doc__ -i - 'i'-scalar -x - 'i'-array(4) -a - 'f'-array(2,3) -foo - Function signature: - foo() - - ->>> moddata.mod.i = 5 ->>> moddata.mod.x[:2] = [1,2] ->>> moddata.mod.a = [[1,2,3],[4,5,6]] ->>> moddata.mod.foo() - i= 5 - x=[ 1 2 0 0 ] - a=[ - [ 1.000000 , 2.000000 , 3.000000 ] - [ 4.000000 , 5.000000 , 6.000000 ] - ] - Setting a(1,2)=a(1,2)+3 ->>> moddata.mod.a # a is Fortran-contiguous -array([[ 1., 5., 3.], - [ 4., 5., 6.]],'f') diff --git a/numpy/f2py/docs/usersguide/run_main_session.dat b/numpy/f2py/docs/usersguide/run_main_session.dat deleted file mode 100644 index 29ecc3dfe..000000000 --- a/numpy/f2py/docs/usersguide/run_main_session.dat +++ /dev/null @@ -1,14 +0,0 @@ ->>> import f2py2e ->>> r=f2py2e.run_main(['-m','scalar','docs/usersguide/scalar.f']) -Reading fortran codes... - Reading file 'docs/usersguide/scalar.f' -Post-processing... - Block: scalar - Block: FOO -Building modules... - Building module "scalar"... - Wrote C/API module "scalar" to file "./scalarmodule.c" ->>> print r -{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py2e/src/fortranobject.h'], - 'csrc': ['./scalarmodule.c', - '/home/users/pearu/src_cvs/f2py2e/src/fortranobject.c']}} diff --git a/numpy/f2py/docs/usersguide/scalar.f b/numpy/f2py/docs/usersguide/scalar.f deleted file mode 100644 index c22f639ed..000000000 --- a/numpy/f2py/docs/usersguide/scalar.f +++ /dev/null @@ -1,12 +0,0 @@ -C FILE: SCALAR.F - SUBROUTINE FOO(A,B) - REAL*8 A, B -Cf2py intent(in) a -Cf2py intent(inout) b - PRINT*, " A=",A," B=",B - PRINT*, "INCREMENT A AND B" - A = A + 1D0 - B = B + 1D0 - PRINT*, "NEW A=",A," B=",B - END -C END OF FILE SCALAR.F diff --git a/numpy/f2py/docs/usersguide/scalar_session.dat b/numpy/f2py/docs/usersguide/scalar_session.dat deleted file mode 100644 index 4fe8c03b1..000000000 --- a/numpy/f2py/docs/usersguide/scalar_session.dat +++ /dev/null @@ -1,21 +0,0 @@ ->>> import scalar ->>> print scalar.foo.__doc__ -foo - Function signature: - foo(a,b) -Required arguments: - a : input float - b : in/output rank-0 array(float,'d') - ->>> scalar.foo(2,3) - A= 2. B= 3. - INCREMENT A AND B - NEW A= 3. B= 4. 
->>> import Numeric ->>> a=Numeric.array(2) # these are integer rank-0 arrays ->>> b=Numeric.array(3) ->>> scalar.foo(a,b) - A= 2. B= 3. - INCREMENT A AND B - NEW A= 3. B= 4. ->>> print a,b # note that only b is changed in situ -2 4 \ No newline at end of file diff --git a/numpy/f2py/docs/usersguide/setup_example.py b/numpy/f2py/docs/usersguide/setup_example.py deleted file mode 100644 index e5f5e8441..000000000 --- a/numpy/f2py/docs/usersguide/setup_example.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# File: setup_example.py - -from numpy_distutils.core import Extension - -ext1 = Extension(name = 'scalar', - sources = ['scalar.f']) -ext2 = Extension(name = 'fib2', - sources = ['fib2.pyf','fib1.f']) - -if __name__ == "__main__": - from numpy_distutils.core import setup - setup(name = 'f2py_example', - description = "F2PY Users Guide examples", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - ext_modules = [ext1,ext2] - ) -# End of setup_example.py diff --git a/numpy/f2py/docs/usersguide/spam.pyf b/numpy/f2py/docs/usersguide/spam.pyf deleted file mode 100644 index 21ea18b77..000000000 --- a/numpy/f2py/docs/usersguide/spam.pyf +++ /dev/null @@ -1,19 +0,0 @@ -! -*- f90 -*- -python module spam - usercode ''' - static char doc_spam_system[] = "Execute a shell command."; - static PyObject *spam_system(PyObject *self, PyObject *args) - { - char *command; - int sts; - - if (!PyArg_ParseTuple(args, "s", &command)) - return NULL; - sts = system(command); - return Py_BuildValue("i", sts); - } - ''' - pymethoddef ''' - {"system", spam_system, METH_VARARGS, doc_spam_system}, - ''' -end python module spam diff --git a/numpy/f2py/docs/usersguide/spam_session.dat b/numpy/f2py/docs/usersguide/spam_session.dat deleted file mode 100644 index 7f99d13f9..000000000 --- a/numpy/f2py/docs/usersguide/spam_session.dat +++ /dev/null @@ -1,5 +0,0 @@ ->>> import spam ->>> status = spam.system('whoami') -pearu ->> status = spam.system('blah') -sh: line 1: blah: command not found \ No newline at end of file diff --git a/numpy/f2py/docs/usersguide/string.f b/numpy/f2py/docs/usersguide/string.f deleted file mode 100644 index 9246f02e7..000000000 --- a/numpy/f2py/docs/usersguide/string.f +++ /dev/null @@ -1,21 +0,0 @@ -C FILE: STRING.F - SUBROUTINE FOO(A,B,C,D) - CHARACTER*5 A, B - CHARACTER*(*) C,D -Cf2py intent(in) a,c -Cf2py intent(inout) b,d - PRINT*, "A=",A - PRINT*, "B=",B - PRINT*, "C=",C - PRINT*, "D=",D - PRINT*, "CHANGE A,B,C,D" - A(1:1) = 'A' - B(1:1) = 'B' - C(1:1) = 'C' - D(1:1) = 'D' - PRINT*, "A=",A - PRINT*, "B=",B - PRINT*, "C=",C - PRINT*, "D=",D - END -C END OF FILE STRING.F diff --git a/numpy/f2py/docs/usersguide/string_session.dat b/numpy/f2py/docs/usersguide/string_session.dat deleted file mode 100644 index 64ebcb3f4..000000000 --- a/numpy/f2py/docs/usersguide/string_session.dat +++ /dev/null @@ -1,27 +0,0 @@ ->>> import mystring ->>> print mystring.foo.__doc__ -foo - Function signature: - foo(a,b,c,d) -Required arguments: - a : input string(len=5) - b : in/output rank-0 array(string(len=5),'c') - c : input string(len=-1) - d : in/output rank-0 array(string(len=-1),'c') - ->>> import Numeric ->>> a=Numeric.array('123') ->>> b=Numeric.array('123') ->>> c=Numeric.array('123') ->>> d=Numeric.array('123') ->>> mystring.foo(a,b,c,d) - A=123 - B=123 - C=123 - D=123 - CHANGE A,B,C,D - A=A23 - B=B23 - C=C23 - D=D23 ->>> a.tostring(),b.tostring(),c.tostring(),d.tostring() -('123', 'B23', '123', 'D23') \ No newline at end of file diff --git a/numpy/f2py/docs/usersguide/var.pyf 
b/numpy/f2py/docs/usersguide/var.pyf deleted file mode 100644 index 8275ff3af..000000000 --- a/numpy/f2py/docs/usersguide/var.pyf +++ /dev/null @@ -1,11 +0,0 @@ -! -*- f90 -*- -python module var - usercode ''' - int BAR = 5; - ''' - interface - usercode ''' - PyDict_SetItemString(d,"BAR",PyInt_FromLong(BAR)); - ''' - end interface -end python module diff --git a/numpy/f2py/docs/usersguide/var_session.dat b/numpy/f2py/docs/usersguide/var_session.dat deleted file mode 100644 index fb0f798bf..000000000 --- a/numpy/f2py/docs/usersguide/var_session.dat +++ /dev/null @@ -1,3 +0,0 @@ ->>> import var ->>> var.BAR -5 \ No newline at end of file diff --git a/numpy/f2py/f2py.1 b/numpy/f2py/f2py.1 deleted file mode 100644 index b8769a0cc..000000000 --- a/numpy/f2py/f2py.1 +++ /dev/null @@ -1,209 +0,0 @@ -.TH "F2PY" 1 -.SH NAME -f2py \- Fortran to Python interface generator -.SH SYNOPSIS -(1) To construct extension module sources: - -.B f2py -[] [[[only:]||[skip:]] ] [: ...] - -(2) To compile fortran files and build extension modules: - -.B f2py --c [, , ] - -(3) To generate signature files: - -.B f2py --h ...< same options as in (1) > -.SH DESCRIPTION -This program generates a Python C/API file (module.c) -that contains wrappers for given Fortran or C functions so that they -can be called from Python. -With the -c option the corresponding -extension modules are built. -.SH OPTIONS -.TP -.B \-h -Write signatures of the fortran routines to file and -exit. You can then edit and use it instead of . If ==stdout then the signatures are printed to -stdout. -.TP -.B -Names of fortran routines for which Python C/API functions will be -generated. Default is all that are found in . -.TP -.B skip: -Ignore fortran functions that follow until `:'. -.TP -.B only: -Use only fortran functions that follow until `:'. -.TP -.B : -Get back to mode. -.TP -.B \-m -Name of the module; f2py generates a Python/C API file -module.c or extension module . Default is -\'untitled\'. -.TP -.B \-\-[no\-]lower -Do [not] lower the cases in . By default, --lower is -assumed with -h key, and --no-lower without -h key. -.TP -.B \-\-build\-dir -All f2py generated files are created in . Default is tempfile.mktemp(). -.TP -.B \-\-overwrite\-signature -Overwrite existing signature file. -.TP -.B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is --no-latex-doc. -.TP -.B \-\-short\-latex -Create 'incomplete' LaTeX document (without commands \\documentclass, -\\tableofcontents, and \\begin{document}, \\end{document}). -.TP -.B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is --no-rest-doc. -.TP -.B \-\-debug\-capi -Create C/API code that reports the state of the wrappers during -runtime. Useful for debugging. -.TP -.B \-include\'\' -Add CPP #include statement to the C/API code. should be -in the format of either `"filename.ext"' or `'. As a -result will be included just before wrapper functions -part in the C/API code. The option is depreciated, use `usercode` -statement in signature files instead. -.TP -.B \-\-[no\-]wrap\-functions -Create Fortran subroutine wrappers to Fortran 77 -functions. --wrap-functions is default because it ensures maximum -portability/compiler independence. -.TP -.B \-\-help\-link [..] -List system resources found by system_info.py. [..] may contain -a list of resources names. See also --link- switch below. -.TP -.B \-\-quiet -Run quietly. -.TP -.B \-\-verbose -Run with extra verbosity. -.TP -.B \-v -Print f2py version ID and exit. -.TP -.B \-\-include_paths path1:path2:... 
-Search include files (that f2py will scan) from the given directories. -.SH "CONFIG_FC OPTIONS" -The following options are effective only when -c switch is used. -.TP -.B \-\-help-compiler -List available Fortran compilers [DEPRECIATED]. -.TP -.B \-\-fcompiler= -Specify Fortran compiler type by vendor. -.TP -.B \-\-compiler= -Specify C compiler type (as defined by distutils) -.TP -.B \-\-fcompiler-exec= -Specify the path to F77 compiler [DEPRECIATED]. -.TP -.B \-\-f90compiler\-exec= -Specify the path to F90 compiler [DEPRECIATED]. -.TP -.B \-\-help\-fcompiler -List available Fortran compilers and exit. -.TP -.B \-\-f77exec= -Specify the path to F77 compiler. -.TP -.B \-\-f90exec= -Specify the path to F90 compiler. -.TP -.B \-\-f77flags="..." -Specify F77 compiler flags. -.TP -.B \-\-f90flags="..." -Specify F90 compiler flags. -.TP -.B \-\-opt="..." -Specify optimization flags. -.TP -.B \-\-arch="..." -Specify architecture specific optimization flags. -.TP -.B \-\-noopt -Compile without optimization. -.TP -.B \-\-noarch -Compile without arch-dependent optimization. -.TP -.B \-\-debug -Compile with debugging information. -.SH "EXTRA OPTIONS" -The following options are effective only when -c switch is used. -.TP -.B \-\-link- -Link extension module with as defined by -numpy_distutils/system_info.py. E.g. to link with optimized LAPACK -libraries (vecLib on MacOSX, ATLAS elsewhere), use ---link-lapack_opt. See also --help-link switch. - -.TP -.B -L/path/to/lib/ -l -.TP -.B -D -U -I/path/to/include/ -.TP -.B .o .so .a - -.TP -.B -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN -DUNDERSCORE_G77 -Macros that might be required with non-gcc Fortran compilers. - -.TP -.B -DF2PY_REPORT_ATEXIT -To print out a performance report of F2PY interface when python -exits. Available for Linux. - -.TP -.B -DF2PY_REPORT_ON_ARRAY_COPY= -To send a message to stderr whenever F2PY interface makes a copy of an -array. Integer sets the threshold for array sizes when a message -should be shown. - -.SH REQUIREMENTS -Python 1.5.2 or higher (2.x is supported). - -Numerical Python 13 or higher (20.x,21.x,22.x,23.x are supported). - -Optional Numarray 0.9 or higher partially supported. - -numpy_distutils from Scipy (can be downloaded from F2PY homepage) -.SH "SEE ALSO" -python(1) -.SH BUGS -For instructions on reporting bugs, see - - http://cens.ioc.ee/projects/f2py2e/FAQ.html -.SH AUTHOR -Pearu Peterson -.SH "INTERNET RESOURCES" -Main website: http://cens.ioc.ee/projects/f2py2e/ - -User's Guide: http://cens.ioc.ee/projects/f2py2e/usersguide/ - -Mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/ - -Scipy website: http://www.numpy.org -.SH COPYRIGHT -Copyright (c) 1999, 2000, 2001, 2002, 2003, 2004, 2005 Pearu Peterson -.SH LICENSE -NumPy License -.SH VERSION -2.45.241 diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py deleted file mode 100755 index 657e6ae32..000000000 --- a/numpy/f2py/f2py2e.py +++ /dev/null @@ -1,568 +0,0 @@ -#!/usr/bin/env python -""" - -f2py2e - Fortran to Python C/API generator. 2nd Edition. - See __usage__ below. - -Copyright 1999--2005 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/05/06 08:31:19 $ -Pearu Peterson -""" -__version__ = "$Revision: 1.90 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import sys -import os -import pprint -import shutil -import types -import re -errmess=sys.stderr.write -#outmess=sys.stdout.write -show=pprint.pprint - -import crackfortran -import rules -import cb_rules -import common_rules -import auxfuncs -import cfuncs -import capi_maps -import func2subr -import f90mod_rules - -outmess = auxfuncs.outmess - -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: - -1) To construct extension module sources: - - f2py [] [[[only:]||[skip:]] \\ - ] \\ - [: ...] - -2) To compile fortran files and build extension modules: - - f2py -c [, , ] - -3) To generate signature files: - - f2py -h ...< same options as in (1) > - -Description: This program generates a Python C/API file (module.c) - that contains wrappers for given fortran functions so that they - can be called from Python. With the -c option the corresponding - extension modules are built. - -Options: - - --g3-numpy Use numpy.f2py.lib tool, the 3rd generation of F2PY, - with NumPy support. - --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] - --2d-numeric Use f2py2e tool with Numeric support. - --2d-numarray Use f2py2e tool with Numarray support. - - -h Write signatures of the fortran routines to file - and exit. You can then edit and use it instead - of . If ==stdout then the - signatures are printed to stdout. - Names of fortran routines for which Python C/API - functions will be generated. Default is all that are found - in . - Paths to fortran/signature files that will be scanned for - in order to determine their signatures. - skip: Ignore fortran functions that follow until `:'. - only: Use only fortran functions that follow until `:'. - : Get back to mode. - - -m Name of the module; f2py generates a Python/C API - file module.c or extension module . - Default is 'untitled'. - - --[no-]lower Do [not] lower the cases in . By default, - --lower is assumed with -h key, and --no-lower without -h key. - - --build-dir All f2py generated files are created in . - Default is tempfile.mktemp(). - - --overwrite-signature Overwrite existing signature file. - - --[no-]latex-doc Create (or not) module.tex. - Default is --no-latex-doc. - --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). - - --[no-]rest-doc Create (or not) module.rst. - Default is --no-rest-doc. - - --debug-capi Create C/API code that reports the state of the wrappers - during runtime. Useful for debugging. - - --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 - functions. --wrap-functions is default because it ensures - maximum portability/compiler independence. - - --include_paths ::... Search include files from the given - directories. - - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - - --quiet Run quietly. - --verbose Run with extra verbosity. - -v Print f2py version ID and exit. 
- - -numpy.distutils options (only effective with -c): - - --fcompiler= Specify Fortran compiler type by vendor - --compiler= Specify C compiler type (as defined by distutils) - - --help-fcompiler List available Fortran compilers and exit - --f77exec= Specify the path to F77 compiler - --f90exec= Specify the path to F90 compiler - --f77flags= Specify F77 compiler flags - --f90flags= Specify F90 compiler flags - --opt= Specify optimization flags - --arch= Specify architecture specific optimization flags - --noopt Compile without optimization - --noarch Compile without arch-dependent optimization - --debug Compile with debugging information - -Extra options (only effective with -c): - - --link- Link extension module with as defined - by numpy.distutils/system_info.py. E.g. to link - with optimized LAPACK libraries (vecLib on MacOSX, - ATLAS elsewhere), use --link-lapack_opt. - See also --help-link switch. - - -L/path/to/lib/ -l - -D -U - -I/path/to/include/ - .o .so .a - - Using the following macros may be required with non-gcc Fortran - compilers: - -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN - -DUNDERSCORE_G77 - - When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY - interface is printed out at exit (platforms: Linux). - - When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is - sent to stderr whenever F2PY interface makes a copy of an - array. Integer sets the threshold for array sizes when - a message should be shown. - -Version: %s -numpy Version: %s -Requires: Python 2.3 or higher. -License: NumPy license (see LICENSE.txt in the NumPy source code) -Copyright 1999 - 2005 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version) - - -def scaninputline(inputline): - files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[] - f,f2,f3,f4,f5,f6,f7=1,0,0,0,0,0,0 - verbose = 1 - dolc=-1 - dolatexdoc = 0 - dorestdoc = 0 - wrapfuncs = 1 - buildpath = '.' - include_paths = [] - signsfile,modulename=None,None - options = {'buildpath':buildpath} - for l in inputline: - if l=='': pass - elif l=='only:': f=0 - elif l=='skip:': f=-1 - elif l==':': f=1;f4=0 - elif l[:8]=='--debug-': debug.append(l[8:]) - elif l=='--lower': dolc=1 - elif l=='--build-dir': f6=1 - elif l=='--no-lower': dolc=0 - elif l=='--quiet': verbose = 0 - elif l=='--verbose': verbose += 1 - elif l=='--latex-doc': dolatexdoc=1 - elif l=='--no-latex-doc': dolatexdoc=0 - elif l=='--rest-doc': dorestdoc=1 - elif l=='--no-rest-doc': dorestdoc=0 - elif l=='--wrap-functions': wrapfuncs=1 - elif l=='--no-wrap-functions': wrapfuncs=0 - elif l=='--short-latex': options['shortlatex']=1 - elif l=='--overwrite-signature': options['h-overwrite']=1 - elif l=='-h': f2=1 - elif l=='-m': f3=1 - elif l[:2]=='-v': - print f2py_version - sys.exit() - elif l=='--show-compilers': - f5=1 - elif l[:8]=='-include': - cfuncs.outneeds['userincludes'].append(l[9:-1]) - cfuncs.userincludes[l[9:-1]]='#include '+l[8:] - elif l[:15]=='--include_paths': - f7=1 - elif l[0]=='-': - errmess('Unknown option %s\n'%`l`) - sys.exit() - elif f2: f2=0;signsfile=l - elif f3: f3=0;modulename=l - elif f6: f6=0;buildpath=l - elif f7: f7=0;include_paths.extend(l.split(os.pathsep)) - elif f==1: - try: - open(l).close() - files.append(l) - except IOError,detail: - errmess('IOError: %s. 
Skipping file "%s".\n'%(str(detail),l)) - elif f==-1: skipfuncs.append(l) - elif f==0: onlyfuncs.append(l) - if not f5 and not files and not modulename: - print __usage__ - sys.exit() - if not os.path.isdir(buildpath): - if not verbose: - outmess('Creating build directory %s'%(buildpath)) - os.mkdir(buildpath) - if signsfile: - signsfile = os.path.join(buildpath,signsfile) - if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: - errmess('Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n'%(signsfile)) - sys.exit() - - options['debug']=debug - options['verbose']=verbose - if dolc==-1 and not signsfile: options['do-lower']=0 - else: options['do-lower']=dolc - if modulename: options['module']=modulename - if signsfile: options['signsfile']=signsfile - if onlyfuncs: options['onlyfuncs']=onlyfuncs - if skipfuncs: options['skipfuncs']=skipfuncs - options['dolatexdoc'] = dolatexdoc - options['dorestdoc'] = dorestdoc - options['wrapfuncs'] = wrapfuncs - options['buildpath']=buildpath - options['include_paths']=include_paths - return files,options - -def callcrackfortran(files,options): - rules.options=options - funcs=[] - crackfortran.debug=options['debug'] - crackfortran.verbose=options['verbose'] - if 'module' in options: - crackfortran.f77modulename=options['module'] - if 'skipfuncs' in options: - crackfortran.skipfuncs=options['skipfuncs'] - if 'onlyfuncs' in options: - crackfortran.onlyfuncs=options['onlyfuncs'] - crackfortran.include_paths[:]=options['include_paths'] - crackfortran.dolowercase=options['do-lower'] - postlist=crackfortran.crackfortran(files) - if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n'%(options['signsfile'])) - pyf=crackfortran.crack2fortran(postlist) - if options['signsfile'][-6:]=='stdout': - sys.stdout.write(pyf) - else: - f=open(options['signsfile'],'w') - f.write(pyf) - f.close() - return postlist - -def buildmodules(list): - cfuncs.buildcfuncs() - outmess('Building modules...\n') - modules,mnames,isusedby=[],[],{} - for i in range(len(list)): - if '__user__' in list[i]['name']: - cb_rules.buildcallbacks(list[i]) - else: - if 'use' in list[i]: - for u in list[i]['use'].keys(): - if u not in isusedby: - isusedby[u]=[] - isusedby[u].append(list[i]['name']) - modules.append(list[i]) - mnames.append(list[i]['name']) - ret = {} - for i in range(len(mnames)): - if mnames[i] in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]])))) - else: - um=[] - if 'use' in modules[i]: - for u in modules[i]['use'].keys(): - if u in isusedby and u in mnames: - um.append(modules[mnames.index(u)]) - else: - outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i],u)) - ret[mnames[i]] = {} - dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um)) - return ret - -def dict_append(d_out,d_in): - for (k,v) in d_in.items(): - if k not in d_out: - d_out[k] = [] - if type(v) is types.ListType: - d_out[k] = d_out[k] + v - else: - d_out[k].append(v) - -def run_main(comline_list): - """Run f2py as if string.join(comline_list,' ') is used as a command line. - In case of using -h flag, return None. 
- """ - reload(crackfortran) - f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__)) - fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h') - fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c') - files,options=scaninputline(comline_list) - auxfuncs.options=options - postlist=callcrackfortran(files,options) - isusedby={} - for i in range(len(postlist)): - if 'use' in postlist[i]: - for u in postlist[i]['use'].keys(): - if u not in isusedby: - isusedby[u]=[] - isusedby[u].append(postlist[i]['name']) - for i in range(len(postlist)): - if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']: - if postlist[i]['name'] in isusedby: - #if not quiet: - outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']])))) - if 'signsfile' in options: - if options['verbose']>1: - outmess('Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile'])) - return - for i in range(len(postlist)): - if postlist[i]['block']!='python module': - if 'python module' not in options: - errmess('Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError,'All blocks must be python module blocks but got %s'%(`postlist[i]['block']`) - auxfuncs.debugoptions=options['debug'] - f90mod_rules.options=options - auxfuncs.wrapfuncs=options['wrapfuncs'] - - ret=buildmodules(postlist) - - for mn in ret.keys(): - dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc}) - return ret - -def filter_files(prefix,suffix,files,remove_prefix=None): - """ - Filter files by prefix and suffix. - """ - filtered,rest = [],[] - match = re.compile(prefix+r'.*'+suffix+r'\Z').match - if remove_prefix: - ind = len(prefix) - else: - ind = 0 - for file in [x.strip() for x in files]: - if match(file): filtered.append(file[ind:]) - else: rest.append(file) - return filtered,rest - -def get_prefix(module): - p = os.path.dirname(os.path.dirname(module.__file__)) - return p - -def run_compile(): - """ - Do it all in one call! 
- """ - import tempfile,os,shutil - - i = sys.argv.index('-c') - del sys.argv[i] - - remove_build_dir = 0 - try: i = sys.argv.index('--build-dir') - except ValueError: i=None - if i is not None: - build_dir = sys.argv[i+1] - del sys.argv[i+1] - del sys.argv[i] - else: - remove_build_dir = 1 - build_dir = os.path.join(tempfile.mktemp()) - - sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv) - if sysinfo_flags: - sysinfo_flags = [f[7:] for f in sysinfo_flags] - - f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv) - f2py_flags2 = [] - fl = 0 - for a in sys.argv[1:]: - if a in ['only:','skip:']: - fl = 1 - elif a==':': - fl = 0 - if fl or a==':': - f2py_flags2.append(a) - if f2py_flags2 and f2py_flags2[-1]!=':': - f2py_flags2.append(':') - f2py_flags.extend(f2py_flags2) - - sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv) - - flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv) - fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv) - - if 1: - del_list = [] - for s in flib_flags: - v = '--fcompiler=' - if s[:len(v)]==v: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = fcompiler.fcompiler_class.keys() - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print 'Unknown vendor: "%s"' % (s[len(v):]) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue - for s in del_list: - i = flib_flags.index(s) - del flib_flags[i] - assert len(flib_flags)<=2,`flib_flags` - setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv) - if '--quiet' in f2py_flags: - setup_flags.append('--quiet') - - modulename = 'untitled' - sources = sys.argv[1:] - if '-m' in sys.argv: - i = sys.argv.index('-m') - modulename = sys.argv[i+1] - del sys.argv[i+1],sys.argv[i] - sources = sys.argv[1:] - else: - from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources) - sources = pyf_files + sources - for f in pyf_files: - modulename = get_f2py_modulename(f) - if modulename: - break - - extra_objects, sources = filter_files('','[.](o|a|so)',sources) - include_dirs, sources = filter_files('-I','',sources,remove_prefix=1) - library_dirs, sources = filter_files('-L','',sources,remove_prefix=1) - libraries, sources = filter_files('-l','',sources,remove_prefix=1) - undef_macros, sources = filter_files('-U','',sources,remove_prefix=1) - define_macros, sources = filter_files('-D','',sources,remove_prefix=1) - using_numarray = 0 - using_numeric = 0 - for i in range(len(define_macros)): - name_value = define_macros[i].split('=',1) - if len(name_value)==1: - name_value.append(None) - if len(name_value)==2: - define_macros[i] = tuple(name_value) - else: - print 'Invalid use of -D:',name_value - - from numpy.distutils.system_info import get_info - - num_include_dir = None - num_info 
= {} - #import numpy - #n = 'numpy' - #p = get_prefix(numpy) - #from numpy.distutils.misc_util import get_numpy_include_dirs - #num_info = {'include_dirs': get_numpy_include_dirs()} - - if num_info: - include_dirs.extend(num_info.get('include_dirs',[])) - - from numpy.distutils.core import setup,Extension - ext_args = {'name':modulename,'sources':sources, - 'include_dirs': include_dirs, - 'library_dirs': library_dirs, - 'libraries': libraries, - 'define_macros': define_macros, - 'undef_macros': undef_macros, - 'extra_objects': extra_objects, - 'f2py_options': f2py_flags, - } - - if sysinfo_flags: - from numpy.distutils.misc_util import dict_append - for n in sysinfo_flags: - i = get_info(n) - if not i: - outmess('No %s resources found in system'\ - ' (try `f2py --help-link`)\n' % (`n`)) - dict_append(ext_args,**i) - - ext = Extension(**ext_args) - sys.argv = [sys.argv[0]] + setup_flags - sys.argv.extend(['build', - '--build-temp',build_dir, - '--build-base',build_dir, - '--build-platlib','.']) - if fc_flags: - sys.argv.extend(['config_fc']+fc_flags) - if flib_flags: - sys.argv.extend(['build_ext']+flib_flags) - - setup(ext_modules = [ext]) - - if remove_build_dir and os.path.exists(build_dir): - outmess('Removing build directory %s\n'%(build_dir)) - shutil.rmtree(build_dir) - -def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - from numpy.distutils.system_info import show_all - show_all() - return - if '-c' in sys.argv[1:]: - run_compile() - else: - run_main(sys.argv[1:]) - -#if __name__ == "__main__": -# main() - - -# EOF diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py deleted file mode 100644 index aabf0c569..000000000 --- a/numpy/f2py/f2py_testing.py +++ /dev/null @@ -1,43 +0,0 @@ -import os,sys,re,time - -from numpy.testing.utils import jiffies, memusage - -def cmdline(): - m=re.compile(r'\A\d+\Z') - args = [] - repeat = 1 - for a in sys.argv[1:]: - if m.match(a): - repeat = eval(a) - else: - args.append(a) - f2py_opts = ' '.join(args) - return repeat,f2py_opts - -def run(runtest,test_functions,repeat=1): - l = [(t,repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] - #l = [(t,'') for t in test_functions] - start_memusage = memusage() - diff_memusage = None - start_jiffies = jiffies() - i = 0 - while i -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/02/03 19:30:23 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.27 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import sys -import time -import types -import copy -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import numpy as N -import capi_maps -import cfuncs -import rules -import func2subr -from crackfortran import undo_rmbadname, undo_rmbadname1 - -options={} - -def findf90modules(m): - if ismodule(m): return [m] - if not hasbody(m): return [] - ret = [] - for b in m['body']: - if ismodule(b): ret.append(b) - else: ret=ret+findf90modules(b) - return ret - -fgetdims1 = """\ - external f2pysetdata - logical ns - integer r,i,j - integer(%d) s(*) - ns = .FALSE. - if (allocated(d)) then - do i=1,r - if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. 
- end if - end do - if (ns) then - deallocate(d) - end if - end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % N.intp().itemsize - -fgetdims2="""\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - end if - flag = 1 - call f2pysetdata(d,allocated(d))""" - -fgetdims2_sa="""\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - !s(r) must be equal to len(d(1)) - end if - flag = 2 - call f2pysetdata(d,allocated(d))""" - - -def buildhooks(pymod): - global fgetdims1,fgetdims2 - ret = {'f90modhooks':[],'initf90modhooks':[],'body':[], - 'need':['F_FUNC','arrayobject.h'], - 'separatorsfor':{'includes0':'\n','includes':'\n'}, - 'docs':['"Fortran 90/95 modules:\\n"'], - 'latexdoc':[]} - fhooks=[''] - def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0],line) - doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line) - for m in findf90modules(pymod): - sargs,fargs,efargs,modobjs,notvars,onlyvars=[],[],[],[],[m['name']],[] - sargsp = [] - ifargs = [] - mfargs = [] - if hasbody(m): - for b in m['body']: notvars.append(b['name']) - for n in m['vars'].keys(): - var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide,isprivate)(var)): - onlyvars.append(n) - mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n'%(m['name'])) - if onlyvars: - outmess('\t\t Variables: %s\n'%(' '.join(onlyvars))) - chooks=[''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line) - ihooks=[''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line) - - vrd=capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {'%(m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n'%(m['name'])) - if hasnote(m): - note = m['note'] - if type(note) is type([]): note='\n'.join(note) - dadd(note) - if onlyvars: - dadd('\\begin{description}') - for n in onlyvars: - var = m['vars'][n] - modobjs.append(n) - ct = capi_maps.getctype(var) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n,var) - dms = dm['dims'].replace('*','-1').strip() - dms = dms.replace(':','-1').strip() - if not dms: dms='-1' - use_fgetdims2 = fgetdims2 - if isstringarray(var): - if 'charselector' in var and 'len' in var['charselector']: - cadd('\t{"%s",%s,{{%s,%s}},%s},'\ - %(undo_rmbadname1(n),dm['rank'],dms,var['charselector']['len'],at)) - use_fgetdims2 = fgetdims2_sa - else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at)) - else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at)) - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,var))) - if hasnote(var): - note = var['note'] - if type(note) is type([]): note='\n'.join(note) - dadd('--- %s'%(note)) - if isallocatable(var): - fargs.append('f2py_%s_getdims_%s'%(m['name'],n)) - efargs.append(fargs[-1]) - sargs.append('void (*%s)(int*,int*,void(*)(char*,int*),int*)'%(n)) - sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'],n)) - fadd('subroutine %s(r,s,f2pysetdata,flag)'%(fargs[-1])) - fadd('use %s, only: d => %s\n'%(m['name'],undo_rmbadname1(n))) - fadd('integer flag\n') - fhooks[0]=fhooks[0]+fgetdims1 - dms = eval('range(1,%s+1)'%(dm['rank'])) - fadd(' allocate(d(%s))\n'%(','.join(map(lambda i:'s(%s)'%i,dms)))) - fhooks[0]=fhooks[0]+use_fgetdims2 - fadd('end subroutine %s'%(fargs[-1])) - else: - fargs.append(n) - sargs.append('char *%s'%(n)) - sargsp.append('char*') - iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],n)) - if onlyvars: - dadd('\\end{description}') - if 
hasbody(m): - for b in m['body']: - if not isroutine(b): - print 'Skipping',b['block'],b['name'] - continue - modobjs.append('%s()'%(b['name'])) - b['modulename'] = m['name'] - api,wrap=rules.buildapi(b) - if isfunction(b): - fhooks[0]=fhooks[0]+wrap - fargs.append('f2pywrap_%s_%s'%(m['name'],b['name'])) - #efargs.append(fargs[-1]) - ifargs.append(func2subr.createfuncwrapper(b,signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) - #if '--external-modroutines' in options and options['--external-modroutines']: - # outmess('\t\t\tapplying --external-modroutines for %s\n'%(b['name'])) - # efargs.append(fargs[-1]) - api['externroutines']=[] - ar=applyrules(api,vrd) - ar['docs']=[] - ar['docshort']=[] - ret=dictappend(ret,ar) - cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'],m['name'],b['name'],m['name'],b['name'])) - sargs.append('char *%s'%(b['name'])) - sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],b['name'])) - cadd('\t{NULL}\n};\n') - iadd('}') - ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'],','.join(sargs),ihooks[0]) - if '_' in m['name']: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'\ - %(F_FUNC,m['name'],m['name'].upper(),','.join(sargsp))) - iadd('static void f2py_init_%s(void) {'%(m['name'])) - iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC,m['name'],m['name'].upper(),m['name'])) - iadd('}\n') - ret['f90modhooks']=ret['f90modhooks']+chooks+ihooks - ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'],m['name'],m['name'])]+ret['initf90modhooks'] - fadd('') - fadd('subroutine f2pyinit%s(f2pysetupfunc)'%(m['name'])) - #fadd('use %s'%(m['name'])) - if mfargs: - for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s'%(m['name'],a)) - if ifargs: - fadd(' '.join(['interface']+ifargs)) - fadd('end interface') - fadd('external f2pysetupfunc') - if efargs: - for a in undo_rmbadname(efargs): - fadd('external %s'%(a)) - fadd('call f2pysetupfunc(%s)'%(','.join(undo_rmbadname(fargs)))) - fadd('end subroutine f2pyinit%s\n'%(m['name'])) - - dadd('\n'.join(ret['latexdoc']).replace(r'\subsection{',r'\subsubsection{')) - - ret['latexdoc']=[] - ret['docs'].append('"\t%s --- %s"'%(m['name'], - ','.join(undo_rmbadname(modobjs)))) - - ret['routine_defs']='' - ret['doc']=[] - ret['docshort']=[] - ret['latexdoc']=doc[0] - if len(ret['docs'])<=1: ret['docs']='' - return ret,fhooks[0] diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py deleted file mode 100644 index b0421bb55..000000000 --- a/numpy/f2py/func2subr.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2004/11/26 11:13:06 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.16 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import copy -import sys -import time -import types -import copy -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -def var2fixfortran(vars,a,fa=None,f90mode=None): - if fa is None: - fa = a - if a not in vars: - show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n'%a) - return '' - if 'typespec' not in vars[a]: - show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n'%a) - return '' - vardef=vars[a]['typespec'] - if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef,vars[a]['typename']) - selector={} - lk = '' - if 'kindselector' in vars[a]: - selector=vars[a]['kindselector'] - lk = 'kind' - elif 'charselector' in vars[a]: - selector=vars[a]['charselector'] - lk = 'len' - if '*' in selector: - if f90mode: - if selector['*'] in ['*',':','(*)']: - vardef='%s(len=*)'%(vardef) - else: - vardef='%s(%s=%s)'%(vardef,lk,selector['*']) - else: - if selector['*'] in ['*',':']: - vardef='%s*(%s)'%(vardef,selector['*']) - else: - vardef='%s*%s'%(vardef,selector['*']) - else: - if 'len' in selector: - vardef='%s(len=%s'%(vardef,selector['len']) - if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef,selector['kind']) - else: - vardef='%s)'%(vardef) - elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef,selector['kind']) - - vardef='%s %s'%(vardef,fa) - if 'dimension' in vars[a]: - vardef='%s(%s)'%(vardef,','.join(vars[a]['dimension'])) - return vardef - -def createfuncwrapper(rout,signature=0): - assert isfunction(rout) - ret = [''] - def add(line,ret=ret): - ret[0] = '%s\n %s'%(ret[0],line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap'%(name) - vars = rout['vars'] - if newname not in vars: - vars[newname] = vars[name] - args = [newname]+rout['args'][1:] - else: - args = [newname]+rout['args'] - - l = var2fixfortran(vars,name,newname,f90mode) - return_char_star = 0 - if l[:13]=='character*(*)': - return_char_star = 1 - if f90mode: l = 'character(len=10)'+l[13:] - else: l = 'character*10'+l[13:] - charselect = vars[name]['charselector'] - if charselect.get('*','')=='(*)': - charselect['*'] = '10' - if f90mode: - sargs = ', '.join(args) - add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs)) - if not signature: - add('use %s, only : %s'%(rout['modulename'],fortranname)) - else: - add('subroutine f2pywrap%s (%s)'%(name,', '.join(args))) - add('external %s'%(fortranname)) - #if not return_char_star: - l = l + ', '+fortranname - args = args[1:] - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s'%(a)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - if isscalar(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - add(var2fixfortran(vars,a,f90mode=f90mode)) - - add(l) - - if not signature: - if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))'%(newname,fortranname,', '.join(args))) - else: - add('%s = %s(%s)'%(newname,fortranname,', '.join(args))) - if f90mode: - add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name)) - else: - add('end') - #print '**'*10 - #print ret[0] - #print '**'*10 - return ret[0] - -def assubr(rout): - if not isfunction_wrap(rout): return rout,'' - fortranname = getfortranname(rout) - name = rout['name'] - 
outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name,fortranname)) - rout = copy.copy(rout) - fname = name - rname = fname - if 'result' in rout: - rname = rout['result'] - rout['vars'][fname]=rout['vars'][rname] - fvar = rout['vars'][fname] - if not isintent_out(fvar): - if 'intent' not in fvar: - fvar['intent']=[] - fvar['intent'].append('out') - flag=1 - for i in fvar['intent']: - if i.startswith('out='): - flag = 0 - break - if flag: - fvar['intent'].append('out=%s' % (rname)) - - rout['args'] = [fname] + rout['args'] - return rout,createfuncwrapper(rout) diff --git a/numpy/f2py/info.py b/numpy/f2py/info.py deleted file mode 100644 index 8beaba228..000000000 --- a/numpy/f2py/info.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Fortran to Python Interface Generator. - -""" - -postpone_import = True diff --git a/numpy/f2py/lib/__init__.py b/numpy/f2py/lib/__init__.py deleted file mode 100644 index c3b40cb76..000000000 --- a/numpy/f2py/lib/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -F2PY G3 --- The third generation of Fortran to Python Interface Generator. - -Use api module for importing public symbols. - ------ -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. See http://scipy.org. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -Author: Pearu Peterson -Created: Oct 2006 ------ -""" diff --git a/numpy/f2py/lib/api.py b/numpy/f2py/lib/api.py deleted file mode 100644 index 0d21da28c..000000000 --- a/numpy/f2py/lib/api.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Public API for F2PY G3. - ------ -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. See http://scipy.org. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -Author: Pearu Peterson -Created: Oct 2006 ------ -""" - -from main import main diff --git a/numpy/f2py/lib/doc.txt b/numpy/f2py/lib/doc.txt deleted file mode 100644 index 79ccb0768..000000000 --- a/numpy/f2py/lib/doc.txt +++ /dev/null @@ -1,239 +0,0 @@ -.. -*- rest -*- - -======================= -G3 F2PY library package -======================= - -:Author: - Pearu Peterson -:Created: July 2007 - -.. contents:: Table of Contents - -Overview -======== - -The G3 F2PY library package contains tools to parse Fortran codes and -construct Python C/API extension modules for wrapping Fortran -programs. These tools are also suitable for implementing Fortran -program translators or wrappers to any other programming language. In -fact, wrapping Fortran programs to Python would be just one example of -using these tools. - -Wrapping Fortran with Python - UI -================================= - -There are two user interfaces to wrap Fortran programs with Python: - - - the command line program `f2py` that scans Fortran files - given as command line arguments and builds extension modules - that can be used to call Fortran programs from Python. - The `f2py` program has four different ways of building - extension modules as specified with the following command - line flags: - - - `--g3-numpy` --- create extension modules with NumPy array - support using the new tools from `the 3rd generation of F2PY`__. - This is a work-in-progress feature. - - - `--2d-numpy` --- create extension modules with NumPy array - support using `the stable version of F2PY`__. This is default. - - - `--2d-numeric` --- create extension modules with Numeric - array support using the old version of f2py2e. 
The f2py2e - package must be downloaded and installed separately from - the `f2py2e homepage`__. - - - `--2d-numarray` --- create extension modules with Numarray - array support using the old version of f2py2e. - - Example:: - - $ cat hello.f90 - subroutine hello - print*, "Hello!" - end subroutine hello - $ f2py --g3-numpy -m foo -c hello.f90 --fcompiler=gnu95 - $ python - >>> import foo - >>> foo.hello() - Hello! - >>> - - See the output of `f2py` for more information. - -__ http://projects.scipy.org/scipy/numpy/wiki/G3F2PY/ -__ http://www.scipy.org/F2py/ -__ http://cens.ioc.ee/projects/f2py2e/ - - - the function `compile()` that scans its string input containing - Fortran code and returns a list of imported extension modules - that can be used to call Fortran programs from Python. - - Example:: - - $ python - >>> from numpy.f2py.lib.api import compile - >>> code = 'subroutine hello\n print*, "Hello!"\nend' - >>> print code - subroutine hello - print*, "Hello!" - end - >>> foo, = compile(code, extra_args = ['--fcompiler=gnu95']) - >>> foo.hello() - Hello! - >>> - -Wrapping Fortran with Python - how it works? -============================================ - -The two users interfaces discussed above are implemented by functions -`main(sys_argv=None)` and `compile(source, jobname='untitled', -extra_args=[], source_ext=None, modulenames=None)` in file -`numpy/f2py/lib/main.py`. Both these functions call -`build_extension(sys_argv, sources_only=False)` function that reads -`sys_argv` list for files and options, constructs wrapper functions, -creates a `numpy.distutils` style `setup.py` file, and finally runs -it. - -`build_extension` options -------------------------- - -The following options are defined that can be used as command line -arguments to `f2py` or in `extra_args` list argument of the `compiler` -function: - - - `-m []` --- specify the name of a wrapper extension - module. Default is `untitled` or `unspecified` (if `` part - is not specified). - - - `--target-dir=` --- specify the directory path where - extension modules are saved. Default is the current working - directory. - - - `--build-dir=` --- specify the directory path where - temporary files are created during the build process. Default is - `_g2f2py` or temporary directory (when `` part is not - specified)). - - - `-I` --- specifies include directory path that is used for - finding Fortran 90 modules and when compiling sources. - - - `-D[=]`, `-U` --- defines or undefines CPP - macro used in compiling sources. See below for F2PY specific - macros. - - - `-L` --- specifies library directory path that is used in - linking extension modules. - - - `-l` --- specifies library that is used in linking extension - modules. - - - `.(o|a|so|dll|dylib|sl)` --- specifies extra object - files used in linking extension modules. - - - ``.pyf --- specifies signature file used for scanning - Fortran program signatures. - - - `.(f|f90|F90|F)` --- specifies Fortran source files used - for scanning Fortran program signatures (in case no signature - files are specified). These sources will be compiled and linked - with extension modules. - - - `.(c|cpp|C|CPP|c++)` --- specifies C/C++ source files - that will be compiled and linked with extension modules. Note that - if no signature file is specified then also C/C++ files are - scanned for Fortran program signatures. The Fortran program - signatures are assumed to be inside the C comment `/* f2py ... */` - block. 
- - - `--fcompiler=` --- specifies Fortran compiler to be - used in compiling/linking the sources. See also `--help-fcompiler` - option. - - - `--help-fcompiler` --- list Fortran compilers that are found in - the system, that are available or unavailable for the current - system. Then return without processing any other options. - - - `--compiler=` --- specifies C/C++ compiler to be - used in compiling/linking the sources. See also `--help-compiler` - option. - - - `--help-compiler` --- list C/C++ compilers that are available - for the current system. Then return without processing any other - options. - -Additional `f2py` command line options --------------------------------------- - -Command line tool `f2py` uses the following additional command line -options: - - - `-c` --- specifies that F2PY should build extension - modules. Without this option F2PY just scans source files for - signatures and constructs extension module sources --- useful when - one needs to build extension modules manually (in Makefile, for - instance). See also `-h` option below. - - - `-h ` --- specifies the signature file name - where the results of scanning sources will be saved. With this - option F2PY just scans the sources but does not construct extension - modules sources --- useful when one needs to edit the signatures - of Fortran programs. If `` is `stdout` or `stderr` is - specified then the scanned signatures will be dumped to `stdout` - or `stderr` streams, respectively. - - - `--help-link [ ]...` --- list system resources as - defined in `numpy/distutils/system_info.py` and return. - - - `--parse` --- specifies that F2PY should just parce the - source files and dump the results to `stdout` stream. - Useful for debugging F2PY parser. - - -F2PY specific CPP macros ------------------------- - -F2PY may use the following CPP macros: - - - `-DF2PY_DEBUG_PYOBJ_TOFROM` --- show debug messages from - `pyobj_(to|from)_` functions. - - - `-DPREPEND_FORTRAN`, `-DNO_APPEND_FORTRAN`, `-DUPPERCASE_FORTRAN`, - `-DUNDERSCORE_G77` --- specify how Fortran compiler mangles Fortran - symbol names that need to be accessed from C extension modules. - Usually one never needs to specify these macros as supported - Fortran compilers should always mangle the names to be lower-cased - and with exactly one underscore after the name. - -Options for `compile` ---------------------- - -The `compile` function has the following options: - - - `source` --- a string containing Fortran code. To specify the - Fortran format of the `source` use the following header lines - in the `source` string: - - - `C -*- f77 -*-` --- the string contains Fortran 77 code - - `! -*- f90 -*-` --- the string contains Fortran 90 code in - free format. - - `! -*- fix -*-` --- the string contains Fortran 90 code in - fixed format. - - `! -*- pyf -*-` --- the string contains Fortran signatures. - - - `jobname='untitled'` --- a string specifing the name of an - extension module - - - `extra_args=[]` --- a list of `build_extension(..)` arguments, - see above. - - - `source_ext=None` --- a string specifying the extension (`.f` or - `.f90`) of the file where `source` will be saved for further - processing. Default extension will be determined from the `source` - string. - - - `modulenames=None` --- a list of module names that the build - process should create. `compile` function will try to import - these modules and return the corresponding module objects - as a list. 
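A minimal sketch of how the `compile` options listed above combine, assuming the
G3 `numpy.f2py.lib` package described in this document is importable; the
subroutine name `twice`, the module name `demo` and the `gnu95` compiler choice
are purely illustrative::

    from numpy.f2py.lib.api import compile

    # Free-format Fortran 90 source; the '! -*- f90 -*-' header line tells
    # `compile` which source format to assume (see the `source` option above).
    source = '''! -*- f90 -*-
    subroutine twice(x)
      real, intent(inout) :: x
      x = 2*x
    end subroutine twice
    '''

    # jobname names the extension module, extra_args is forwarded to
    # build_extension() like f2py command line options, and source_ext
    # overrides the extension auto-detected from the source string.
    modules = compile(source, jobname='demo',
                      extra_args=['--fcompiler=gnu95'],
                      source_ext='.f90')
    demo = modules[0]   # compile() returns the imported extension modules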
diff --git a/numpy/f2py/lib/extgen/__init__.py b/numpy/f2py/lib/extgen/__init__.py deleted file mode 100644 index 58c965745..000000000 --- a/numpy/f2py/lib/extgen/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -Python Extensions Generator -""" - -__all__ = ['Component'] - -from base import Component - -for _m in ['utils', 'c_support', 'py_support', 'setup_py']: - exec 'from %s import *' % (_m) - exec 'import %s as _m' % (_m) - __all__.extend(_m.__all__) - -#from pyc_function import PyCFunction -#from pyc_argument import PyCArgument -#from c_code import CCode - -#import c_type -#from c_type import * -#__all__ += c_type.__all__ -#import c_struct -#from c_struct import * -#__all__ += c_struct.__all__# - -#import predefined_components -#import converters -#c_type.register() diff --git a/numpy/f2py/lib/extgen/base.py b/numpy/f2py/lib/extgen/base.py deleted file mode 100644 index d6f1ee7fd..000000000 --- a/numpy/f2py/lib/extgen/base.py +++ /dev/null @@ -1,543 +0,0 @@ -""" -ExtGen --- Python Extension module Generator. - -Defines Component and Container classes. -""" - -import os -import re -import sys -import time - -class ComponentMetaClass(type): - - classnamespace = {} - - def __new__(mcls, *args, **kws): - cls = type.__new__(mcls, *args, **kws) - n = cls.__name__ - c = ComponentMetaClass.classnamespace.get(n) - if c is None: - ComponentMetaClass.classnamespace[n] = cls - else: - if not c.__module__=='__main__': - sys.stderr.write('ComponentMetaClass: returning %s as %s\n'\ - % (cls, c)) - ComponentMetaClass.classnamespace[n] = c - cls = c - return cls - - def __getattr__(cls, name): - try: return ComponentMetaClass.classnamespace[name] - except KeyError: pass - raise AttributeError("'%s' object has no attribute '%s'"% - (cls.__name__, name)) - -class Component(object): - - __metaclass__ = ComponentMetaClass - - container_options = dict() - component_container_map = dict() - default_container_label = None - default_component_class_name = 'Code' - template = '' - - def __new__(cls, *args, **kws): - obj = object.__new__(cls) - obj._provides = kws.get('provides', None) - obj.parent = None - obj.containers = {} # holds containers for named string lists - obj._components = [] # holds pairs (, ) - obj._generate_components = {} # temporary copy of components used for finalize and generate methods. - obj = obj.initialize(*args, **kws) # initialize from constructor arguments - return obj - - def components(self): - if Component._running_generate: - try: - return self._generate_components[Component._running_generate_id] - except KeyError: - pass - while self._generate_components: # clean up old cache - self._generate_components.popitem() - self._generate_components[Component._running_generate_id] = l = list(self._components) - return l - return self._components - components = property(components) - - def initialize(self, *components, **options): - """ - Set additional attributes, add components to instance, etc. - """ - # self.myattr = .. - # map(self.add, components) - return self - - def finalize(self): - """ - Set components after all components are added. - """ - return - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(c) for (c,l) in self.components])) - - def provides(self): - """ - Return a code idiom name that the current class defines. - - Used in avoiding redefinitions of functions and variables. 
- """ - if self._provides is None: - return '%s_%s' % (self.__class__.__name__, id(self)) - return self._provides - provides = property(provides) - - def warning(message): - #raise RuntimeError('extgen:' + message) - print >> sys.stderr, 'extgen:',message - warning = staticmethod(warning) - - def info(message): - print >> sys.stderr, message - info = staticmethod(info) - - def __getattr__(self, attr): - if attr.startswith('container_'): # convenience feature - return self.get_container(attr[10:]) - if attr.startswith('component_'): # convenience feature - return self.get_component(attr[10:]) - raise AttributeError('%s instance has no attribute %r' % (self.__class__.__name__, attr)) - - def __add__(self, other): # convenience method - self.add(other) - return self - __iadd__ = __add__ - - def _get_class_names(cls): - if not issubclass(cls, Component): - return [cls] - r = [cls] - for b in cls.__bases__: - r += Component._get_class_names(b) - return r - _get_class_names = staticmethod(_get_class_names) - - def add(self, component, container_label=None): - """ - Append component and its target container label to components list. - """ - if isinstance(component, tuple) and len(component)==2 and isinstance(component[0], Component): - assert container_label is None, `container_label` - component, container_label = component - if not isinstance(component, Component) and self.default_component_class_name!=component.__class__.__name__: - clsname = self.default_component_class_name - if clsname is not None: - component = getattr(Component, clsname)(component) - else: - raise ValueError('%s.add requires Component instance but got %r' \ - % (self.__class__.__name__, component.__class__.__name__)) - if container_label is None: - container_label = self.default_container_label - for n in self._get_class_names(component.__class__): - try: - container_label = self.component_container_map[n.__name__] - break - except KeyError: - pass - if container_label is None: - container_label = component.__class__.__name__ - self.components.append((component, container_label)) - component.update_parent(self) - return - - def update_parent(self, parent): - pass - - def get_path(self, *paths): - if not hasattr(self, 'path'): - if paths: - return os.path.join(*paths) - return '' - if not self.parent: - return os.path.join(*((self.path,) + paths)) - return os.path.join(*((self.parent.get_path(), self.path)+paths)) - - def get_component(self, cls): - if isinstance(cls, str): - cls = getattr(Component, cls) - if isinstance(self, cls): - return self - if self.parent: - return self.parent.get_component(cls) - self.warning('could not find %r parent component %s, returning self'\ - % (self.__class__.__name__, cls.__name__)) - return self - - _running_generate = False - _running_generate_id = 0 - _generate_dry_run = True - - def generate(self, dry_run=True): - old_dry_run = Component._generate_dry_run - Component._generate_dry_run = dry_run - Component._running_generate_id += 1 - Component._running_generate = True - self._finalize() - result = self._generate() - Component._running_generate = False - Component._generate_dry_run = old_dry_run - return result - - def _finalize(self): - # recursively finalize all components. - for component, container_key in self.components: - old_parent = component.parent - component.parent = self - component._finalize() - component.parent = old_parent - self.finalize() - - def _generate(self): - """ - Generate code idioms (saved in containers) and - return evaluated template strings. 
- """ - #self.finalize() - - # clean up containers - self.containers = {} - for n in dir(self): - if n.startswith('container_') and isinstance(getattr(self, n), Container): - delattr(self, n) - - # create containers - for k,kwargs in self.container_options.items(): - self.containers[k] = Container(**kwargs) - - # initialize code idioms - self.init_containers() - - # generate component code idioms - for component, container_key in self.components: - if not isinstance(component, Component): - result = str(component) - if container_key == '': - pass - elif container_key is not None: - self.get_container(container_key).add(result) - else: - self.warning('%s: no container label specified for component %r'\ - % (self.__class__.__name__,component)) - continue - old_parent = component.parent - component.parent = self - result = component._generate() - if container_key == '': - pass - elif container_key is not None: - if isinstance(container_key, tuple): - assert len(result)==len(container_key),`len(result),container_key` - results = result - keys = container_key - else: - assert isinstance(result, str) and isinstance(container_key, str), `result, container_key` - results = result, - keys = container_key, - for r,k in zip(results, keys): - container = component.get_container(k) - container.add(r, component.provides) - else: - - self.warning('%s: no container label specified for component providing %r'\ - % (self.__class__.__name__,component.provides)) - component.parent = old_parent - - # update code idioms - self.update_containers() - - # fill templates with code idioms - templates = self.get_templates() - if isinstance(templates, str): - result = self.evaluate(templates) - else: - assert isinstance(templates, (tuple, list)),`type(templates)` - result = tuple(map(self.evaluate, templates)) - return result - - def init_containers(self): - """ - Update containers before processing components. - """ - # container = self.get_container() - # container.add(, label=None) - return - - def update_containers(self): - """ - Update containers after processing components. - """ - # container = self.get_container() - # container.add(, label=None) - return - - def get_container(self, name): - """ Return named container. - - Rules for returning containers: - (1) return local container if exists - (2) return parent container if exists - (3) create local container and return it with warning - """ - # local container - try: - return self.containers[name] - except KeyError: - pass - - # parent container - parent = self.parent - while parent is not None: - try: - return parent.containers[name] - except KeyError: - parent = parent.parent - continue - - # create local container - self.warning('Created container for %r with name %r, define it in'\ - ' parent .container_options mapping to get rid of this warning' \ - % (self.__class__.__name__, name)) - c = self.containers[name] = Container() - return c - - def get_templates(self): - """ - Return instance templates. - """ - return self.template - - def evaluate(self, template, **attrs): - """ - Evaluate template using instance attributes and code - idioms from containers. 
- """ - d = self.containers.copy() - for n in dir(self): - if n in ['show', 'build'] or n.startswith('_'): - continue - v = getattr(self, n) - if isinstance(v, str): - d[n] = v - d.update(attrs) - for label, container in self.containers.items(): - if not container.use_indent: - continue - replace_list = set(re.findall(r'[ ]*%\('+label+r'\)s', template)) - for s in replace_list: - old_indent = container.indent_offset - container.indent_offset = old_indent + len(s) - len(s.lstrip()) - i = template.index(s) - template = template[:i] + str(container) + template[i+len(s):] - container.indent_offset = old_indent - try: - template = template % d - except KeyError, msg: - raise KeyError('%s.container_options needs %s item' % (self.__class__.__name__, msg)) - return re.sub(r'.*[<]KILLLINE[>].*(\n|$)','', template) - - - _registered_components_map = {} - - def register(*components): - """ - Register components so that component classes can use - predefined components via `.get()` method. - """ - d = Component._registered_components_map - for component in components: - provides = component.provides - if provides in d: - Component.warning('component that provides %r is already registered, ignoring.' % (provides)) - else: - d[provides] = component - return - register = staticmethod(register) - - def get(provides): - """ - Return predefined component with given provides property.. - """ - try: - return Component._registered_components_map[provides] - except KeyError: - pass - raise KeyError('no registered component provides %r' % (provides)) - get = staticmethod(get) - - def numpy_version(self): - import numpy - return numpy.__version__ - numpy_version = property(numpy_version) - -class Container(object): - """ - Container of a list of named strings. - - >>> c = Container(separator=', ', prefix='"', suffix='"') - >>> c.add('hey',1) - >>> c.add('hoo',2) - >>> print c - "hey, hoo" - >>> c.add('hey',1) - >>> c.add('hey2',1) - Traceback (most recent call last): - ... 
- ValueError: Container item 1 exists with different value - - >>> c2 = Container() - >>> c2.add('bar') - >>> c += c2 - >>> print c - "hey, hoo, bar" - - """ - __metaclass__ = ComponentMetaClass - - def __init__(self, - separator='\n', prefix='', suffix='', - skip_prefix_when_empty=False, - skip_suffix_when_empty=False, - default = '', reverse=False, - user_defined_str = None, - use_indent = False, - indent_offset = 0, - use_firstline_indent = False, # implies use_indent - replace_map = {}, - ignore_empty_content = False, - skip_prefix_suffix_when_single = False - ): - self.list = [] - self.label_map = {} - - self.separator = separator - self.prefix = prefix - self.suffix = suffix - self.skip_prefix = skip_prefix_when_empty - self.skip_suffix = skip_suffix_when_empty - self.default = default - self.reverse = reverse - self.user_str = user_defined_str - self.use_indent = use_indent or use_firstline_indent - self.indent_offset = indent_offset - self.use_firstline_indent = use_firstline_indent - self.replace_map = replace_map - self.ignore_empty_content = ignore_empty_content - self.skip_prefix_suffix_when_single = skip_prefix_suffix_when_single - - def __nonzero__(self): - return bool(self.list) - - def has(self, label): - return label in self.label_map - - def get(self, label): - return self.list[self.label_map[label]] - - def __add__(self, other): - if isinstance(other, Container): - lst = [(i,l) for (l,i) in other.label_map.items()] - lst.sort() - for i,l in lst: - self.add(other.list[i], l) - else: - self.add(other) - return self - __iadd__ = __add__ - - def add(self, content, label=None): - """ Add content to container using label. - If label is None, an unique label will be generated using time.time(). - """ - if content is None: - return - if content=='' and self.ignore_empty_content: - return - assert isinstance(content, str),`type(content)` - if label is None: - label = time.time() - if self.has(label): - d = self.get(label) - if d!=content: - raise ValueError("Container item %r exists with different value" % (label)) - return - for old, new in self.replace_map.items(): - content = content.replace(old, new) - self.list.append(content) - self.label_map[label] = len(self.list)-1 - return - - def __str__(self): - if self.user_str is not None: - return self.user_str(self) - if self.list: - l = self.list - if self.reverse: - l = l[:] - l.reverse() - if self.use_firstline_indent: - new_l = [] - for l1 in l: - lines = l1.split('\\n') - i = len(lines[0]) - len(lines[0].lstrip()) - indent = i * ' ' - new_l.append(lines[0]) - new_l.extend([indent + l2 for l2 in lines[1:]]) - l = new_l - r = self.separator.join(l) - if not (len(self.list)==1 and self.skip_prefix_suffix_when_single): - r = self.prefix + r - r = r + self.suffix - else: - r = self.default - if not self.skip_prefix: - r = self.prefix + r - if not self.skip_suffix: - r = r + self.suffix - if r and self.use_indent: - lines = r.splitlines(True) - indent = self.indent_offset * ' ' - r = ''.join([indent + line for line in lines]) - return r - - def copy(self, mapping=None, **extra_options): - options = dict(separator=self.separator, prefix=self.prefix, suffix=self.suffix, - skip_prefix_when_empty=self.skip_prefix, - skip_suffix_when_empty=self.skip_suffix, - default = self.default, reverse=self.reverse, - user_defined_str = self.user_str, - use_indent = self.use_indent, - indent_offset = self.indent_offset, - use_firstline_indent = self.use_firstline_indent, - replace_map = self.replace_map, - ignore_empty_content = 
self.ignore_empty_content, - skip_prefix_suffix_when_single = self.skip_prefix_suffix_when_single - ) - options.update(extra_options) - cpy = Container(**options) - if mapping is None: - cpy += self - else: - lst = [(i,l) for (l,i) in self.label_map.items()] - lst.sort() - for i,l in lst: - cpy.add(mapping(other.list[i]), l) - return cpy - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/numpy/f2py/lib/extgen/c_support.py b/numpy/f2py/lib/extgen/c_support.py deleted file mode 100644 index 95ef1ac55..000000000 --- a/numpy/f2py/lib/extgen/c_support.py +++ /dev/null @@ -1,293 +0,0 @@ - -__all__ = ['CLine', 'Keyword', 'CTypeSpec', 'CDeclarator', 'CDeclaration', - 'CArgument', 'CCode', 'CFunction', 'CSource', 'CHeader', 'CStdHeader'] - -from base import Component -from utils import Line, Code, FileSource - -class CLine(Line): - pass - -class Keyword(CLine): - pass - -class CInitExpr(CLine): - pass - -class CTypeSpec(CLine): - - """ - >>> i = CTypeSpec('int') - >>> print i.generate() - int - >>> print i.as_ptr().generate() - int* - """ - def as_ptr(self): return self.__class__(self.generate()+'*') - - -class CDeclarator(Component): - - """ - - >>> CDeclarator('name').generate() - 'name' - >>> CDeclarator('name','0').generate() - 'name = 0' - """ - container_options = dict( - Initializer = dict(default='',prefix=' = ', skip_prefix_when_empty=True, - ignore_empty_content = True - ), - ScalarInitializer = dict(default='',prefix=' = ', skip_prefix_when_empty=True, - ignore_empty_content = True - ), - SequenceInitializer = dict(default='',prefix=' = {\n', skip_prefix_when_empty=True, - suffix='}', skip_suffix_when_empty=True, - ignore_empty_content = True, - separator = ',\n', use_indent=True, - ), - StringInitializer = dict(default='',prefix=' = "', skip_prefix_when_empty=True, - suffix='"', skip_suffix_when_empty=True, - ignore_empty_content = True, - separator='\\n"\n"', replace_map = {'\n':'\\n'}, - use_firstline_indent = True, - ), - ) - - default_component_class_name = 'CInitExpr' - - component_container_map = dict( - CInitExpr = 'Initializer' - ) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.name]+[c for (c,l) in self.components]))) - - def initialize(self, name, *initvalues, **options): - self.name = name - self.is_string = options.get('is_string', None) - if self.is_string: - assert not options.get('is_scalar', None) - self.is_scalar = False - else: - if name.endswith(']'): - self.is_scalar = False - else: - self.is_scalar = options.get('is_scalar', True) - - map(self.add, initvalues) - return self - - def update_containers(self): - if self.is_scalar: - self.container_ScalarInitializer += self.container_Initializer - self.template = '%(name)s%(ScalarInitializer)s' - elif self.is_string: - self.container_StringInitializer += self.container_Initializer - self.template = '%(name)s%(StringInitializer)s' - elif len(self.containers)>1 or not self.is_scalar: - self.container_SequenceInitializer += self.container_Initializer - self.template = '%(name)s%(SequenceInitializer)s' - else: - self.container_ScalarInitializer += self.container_Initializer - self.template = '%(name)s%(ScalarInitializer)s' - -class CDeclaration(Component): - - """ - >>> d = CDeclaration('int', 'a') - >>> print d.generate() - int a - >>> d += 'b' - >>> print d.generate() - int a, b - >>> d += CDeclarator('c',1) - >>> print d.generate() - int a, b, c = 1 - """ - - template = '%(CTypeSpec)s %(CDeclarator)s' - - 
container_options = dict( - CTypeSpec = dict(default='int', separator=' '), - CDeclarator = dict(default='', separator=', '), - ) - - component_container_map = dict( - CTypeSpec = 'CTypeSpec', - CDeclarator = 'CDeclarator', - ) - - default_component_class_name = 'CDeclarator' - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[c for (c,l) in self.components]))) - - def initialize(self, ctype, *declarators, **options): - ctype = CTypeSpec(ctype) - self.ctype = ctype - self.add(ctype) - map(self.add, declarators) - return self - -class CArgument(CDeclaration): - - def initialize(self, name, ctype, **options): - return CDeclaration.initialize(self, ctype, name, **options) - - -class CCode(Code): - parent_container_options = dict(default='', use_indent=True, ignore_empty_content=True) - -class CFunction(Component): - - """ - >>> f = CFunction('foo') - >>> print f.generate() - int - foo(void) { - } - >>> f += Keyword('static') - >>> f += CArgument('a', 'int') - >>> f += 'a = 2;' - >>> print f.generate() - static - int - foo(int a) { - a = 2; - } - >>> f += CArgument('b', 'float') - >>> f += CDeclaration('float', 'c') - >>> f += CDeclaration('float', CDeclarator('d','3.0')) - >>> print f.generate() - static - int - foo(int a, float b) { - float c; - float d = 3.0; - a = 2; - } - """ - - template = '''\ -%(CSpecifier)s -%(CTypeSpec)s -%(name)s(%(CArgument)s) { - %(CDeclaration)s - %(CBody)s -}''' - - container_options = dict( - CArgument = dict(separator=', ', default='void'), - CDeclaration = dict(default='', use_indent=True, ignore_empty_content=True, - separator = ';\n', suffix=';', skip_suffix_when_empty=True), - CBody = dict(default='', use_indent=True, ignore_empty_content=True), - CTypeSpec = dict(default='int', separator = ' ', ignore_empty_content=True), - CSpecifier = dict(default='', separator = ' ', ignore_empty_content = True) - ) - - component_container_map = dict( - CArgument = 'CArgument', - CDeclaration = 'CDeclaration', - CCode = 'CBody', - CTypeSpec = 'CTypeSpec', - Keyword = 'CSpecifier', - ) - - default_component_class_name = 'CCode' - - def initialize(self, name, rctype='int', *components, **options): - self.name = name - rctype = CTypeSpec(rctype) - self.rctype = rctype - self.add(rctype) - map(self.add, components) - if options: self.warning('%s unused options: %s\n' % (self.__class__.__name__, options)) - return self - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.name, self.rctype]+[c for (c,l) in self.components]))) - -class CHeader(CLine): - - """ - >>> h = CHeader('noddy.h') - >>> print h.generate() - #include "noddy.h" - - """ - template = '#include "%(line)s"' - -class CStdHeader(CHeader): - template = '#include <%(line)s>' - -class CSource(FileSource): - - """ - >>> s = CSource('foo.c') - >>> print s.generate() #doctest: +ELLIPSIS - /* -*- c -*- */ - /* This file 'foo.c' is generated using ExtGen tool - from NumPy version ... - ExtGen is developed by Pearu Peterson . - For more information see http://www.scipy.org/ExtGen/ . 
- */ - #ifdef __cplusplus - extern "C" { - #endif - #ifdef __cplusplus - } - #endif - - """ - - container_options = dict( - CHeader = dict(default='', prefix='\n/* CHeader */\n', skip_prefix_when_empty=True), - CTypeDef = dict(default='', prefix='\n/* CTypeDef */\n', skip_prefix_when_empty=True), - CProto = dict(default='', prefix='\n/* CProto */\n', skip_prefix_when_empty=True), - CDefinition = dict(default='', prefix='\n/* CDefinition */\n', skip_prefix_when_empty=True), - CDeclaration = dict(default='', separator=';\n', suffix=';', - prefix='\n/* CDeclaration */\n', skip_prefix_when_empty=True), - CMainProgram = dict(default='', prefix='\n/* CMainProgram */\n', skip_prefix_when_empty=True), - ) - - template_c_header = '''\ -/* -*- c -*- */ -/* This file %(path)r is generated using ExtGen tool - from NumPy version %(numpy_version)s. - ExtGen is developed by Pearu Peterson . - For more information see http://www.scipy.org/ExtGen/ . -*/''' - - - template = template_c_header + ''' -#ifdef __cplusplus -extern \"C\" { -#endif -%(CHeader)s -%(CTypeDef)s -%(CProto)s -%(CDefinition)s -%(CDeclaration)s -%(CMainProgram)s -#ifdef __cplusplus -} -#endif -''' - - component_container_map = dict( - CHeader = 'CHeader', - CFunction = 'CDefinition', - CDeclaration = 'CDeclaration', - ) - - - - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/numpy/f2py/lib/extgen/doc.txt b/numpy/f2py/lib/extgen/doc.txt deleted file mode 100644 index c86af8d96..000000000 --- a/numpy/f2py/lib/extgen/doc.txt +++ /dev/null @@ -1,449 +0,0 @@ -.. -*- rest -*- - -============================================ -ExtGen --- Python extension module generator -============================================ - -:Author: - Pearu Peterson -:Created: August 2007 - -.. contents:: Table of Contents - -Introduction -============ - -ExtGen is a pure Python package that provides a high-level -tool for constructing and building Python extension modules. -Even an inexperienced user with no background on writing extension -modules can build extension modules on fly when using ExtGen tool! - -Hello example follows - - >>> from numpy.f2py.lib.extgen import * - >>> m = PyCModule('foo') # define extension module component - >>> f = PyCFunction('hello') # define function component - >>> f += 'printf("Hello!\\n");' # put a C statement into function body - >>> m += f # add function to module - >>> print m.generate() # shows a string containing C source to extension module - # useful for debugging - >>> foo = m.build() # compile, build, and return extension module object - >>> foo.hello() # call function - Hello! - - -Users reference manual -====================== - -Writing a python extension module requires a knowledge of Pyhton C/API -details and may take lots of effort to get your first extension module -compile and working, even for a simple problem. See the `Simple Example`__ -in Python reference manual. ExtGen provides a high level tool for -constructing extension modules by automatically taking care of the -Pyhton C/API details while providing full control how an extension -module is created. 
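A small sketch restating the hello example above (the output file name
`foo_generated.c` is arbitrary): the string returned by `.generate()` is plain
C source, so it can be written out and reviewed before `.build()` compiles
anything::

    from numpy.f2py.lib.extgen import PyCModule, PyCFunction

    m = PyCModule('foo')
    f = PyCFunction('hello')
    f += 'printf("Hello!\\n");'   # C statement placed in the function body
    m += f

    # Save the generated C source for inspection; m.build() would compile,
    # import and return the extension module object as shown above.
    fh = open('foo_generated.c', 'w')
    fh.write(m.generate())
    fh.close()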
- -__ http://docs.python.org/ext/ - -Getting started ---------------- - -Creating the `Simple Example`__ with the help of ExtGen tool is really simple - - >>> system = PyCFunction('system', - PyCArgument('command', 'c_const_char_ptr'), - PyCReturn('sts','c_int')) - >>> system += 'sts = system(command);' - >>> module = PyCModule('spam', system) - >>> spam = module.build() - >>> spam.system('pwd') - /home/pearu/svn/numpy/numpy/f2py/lib - 0 - -__ http://docs.python.org/ext/ - -ExtGen generated modules have automatically generated documentation -strings that accept also user input - - >>> a = PyCArgument('command', 'c_const_char_ptr', - input_description='a shell command string') - >>> r = PyCReturn('sts', 'c_int', - output_description='status value returned by shell command') - >>> system = PyCFunction('system', title='Execute a shell command.') - >>> system += a # add argument component to function - >>> system += r # add return value component to function - >>> system += 'sts = system(command);' # add C code to functon body - >>> module = PyCModule('spam', system) # create module instance with function component - >>> spam = module.build() - >>> print spam.__doc__ - This module 'spam' is generated with ExtGen from NumPy version 1.0.4.dev3744. - :Functions: - system(command) -> sts - >>> print spam.system.__doc__ - system(command) -> sts - Execute a shell command. - :Parameters: - command : a to C const char ptr convertable object - a shell command string - :Returns: - sts : a to C int convertable object - status value returned by shell command - >>> - -To see the source code that ExtGen generates, use `.generate()` method for any component instance - - >>> print system.generate() - static - char pyc_function_system_doc[] = - " system(command) -> sts" - "\n\nExecute a shell command." - "\n\n:Parameters:\n" - " command : a to C const char ptr convertable object\n" - " a shell command string" - "\n\n:Returns:\n" - " sts : a to C int convertable object\n" - " status value returned by shell command" - ; - static - PyObject* - pyc_function_system(PyObject *pyc_self, PyObject *pyc_args, PyObject *pyc_keywds) { - PyObject * volatile pyc_buildvalue = NULL; - volatile int capi_success = 1; - const char * command = NULL; - int sts = 0; - static char *capi_kwlist[] = {"command", NULL}; - if (PyArg_ParseTupleAndKeywords(pyc_args, pyc_keywds,"z", - capi_kwlist, &command)) { - sts = system(command); - capi_success = !PyErr_Occurred(); - if (capi_success) { - pyc_buildvalue = Py_BuildValue("i", sts); - } - } - return pyc_buildvalue; - } - >>> print module.generate() # prints full extension module source - ... - -Components ----------- - -All components are subclassed of `Component` base class that provides -the following methods: - -- `.generate(self)` --- return a generated component string -- `.add(self, component, container_label=None)` --- add subcomponent - to component instance. The `container_label` argument can be used to tell - `ExtGen` where this component should be used, see `Developers reference - manual` for more details, otherwise `EgtGen` figures that out by - looking at subcomponent class properties. -- `.__add__(self, other)`, `.__iadd__(self, other)` --- shortcuts - for `self.add(other)` call. - -ExtGen provides the following Python C/API related components: - -- `SetupPy(, *components)` --- generates a `setup.py` file - that is used to build extension modules to the given build directory. 
- It provides the following methods: - - - `.execute(self, *args)` --- runs `python setup.py` command with given - arguments. - - One can add `PyCModule` and `PySource` components to `SetupPy`. - -- `PyCModule(, *components, title=..., description=...)` --- - represents python extension module source. It provides the following - methods: - - - `.build(self, build_dir=None, clean_at_exit=None)` --- compilers, - builds, and returns extension module object. - - One can add `PyCFunction` components to `PyCModule`. - -- `PyCFunction(, *components, title=..., description=...)` --- - represents python extension module function source. - - One can add `PyCArgument`, `PyCReturn`, `CCode` components to `PyCfunction`. - String components are converted `CCode` components by default. - -- `PyCArgument(, ctype=, **components, - input_intent='required', input_title=..., input_description=..., - output_intent='hide', output_title=..., output_description=..., - title=..., description=..., - depends=)` --- represents argument - to python extension module function. - - `ctype` is `PyCTypeSpec` component instance or string. In the latter case - it is converted to `PyCTypeSpec` component. - -- `PyCReturn(, ctype=, **components)` --- - same as `PyCArgument` but with `input_intent='hide'` and `output_intent='return'` - options set. - -- `PyCTypeSpec()` --- represents variable type in a - python extension module. Over 70 types are supported: - - >>> typenames = PyCTypeSpec.typeinfo_map.keys() - >>> typenames.sort() - >>> print ', '.join(typenames) - buffer, c_Py_UNICODE, c_Py_complex, c_Py_ssize_t, c_char, c_char1, - c_const_char_ptr, c_double, c_float, c_int, c_long, c_long_long, - c_short, c_unsigned_char, c_unsigned_int, c_unsigned_long, - c_unsigned_long_long, c_unsigned_short, cell, cobject, complex, dict, - file, float, frozenset, function, generator, instance, int, iter, - list, long, method, module, numeric_array, numpy_complex128, - numpy_complex160, numpy_complex192, numpy_complex256, numpy_complex32, - numpy_complex64, numpy_descr, numpy_float128, numpy_float16, - numpy_float32, numpy_float64, numpy_float80, numpy_float96, - numpy_int128, numpy_int16, numpy_int32, numpy_int64, numpy_int8, - numpy_iter, numpy_multiiter, numpy_ndarray, numpy_ufunc, - numpy_uint128, numpy_uint16, numpy_uint32, numpy_uint64, numpy_uint8, - object, property, set, slice, str, tuple, type, unicode - - `typeobj` can be python type instance (e.g. `int`) or one of the string values - from the list of typenames above. - -- `PySource(, *components)` --- represents pure python module file. - - One can add `Code` components to `PySource`. - -ExtGen provides the following C related components: - -- `CSource` --- represents C file. Derived from `FileSource`. - - One can add `CCode` components to `CSource`. - String input is converted to `CCode` component. - -- `CHeader(
)`, `CStdHeader()`. Derived from `Line`. - -- `CCode(*components)` --- represents any C code block. Derived from `Code`. - -- `CFunction(, rctype=CTypeSpec('int'), *components)` --- represents - a C function. - - One can add `CArgument`, `CDeclaration`, `CCode` components to `CFunction`. - -- `CDeclaration(, *declarators)` --- represenets ` ` - code idiom. `` is `CTypeSpec` instance. - - One can add `CDeclarator` components to `CDeclaration`. - -- `CArgument(, )` --- C function argument. Derived from `CDeclaration`. - -- `CTypeSpec()` --- C type specifier. Derived from `Line`. - -- `CDeclarator(, *initvalues, is_string=..., is_scalar=...)` --- represents - ` [= ]` code idiom. - - String input is converted to `CInitExpr` component. - -ExtGen provides the following general purpose components: - -- `Word()` - - Nothing can be added to `Word`. - -- `Line(*strings)` - - One can add `Line` component to `Line`. - String input is converted to `Line` component. - -- `Code(*lines)` - - One can add `Line` and `Code` components to `Code`. - String input is converted to `Line` component. - -- `FileSource(, *components)`. - - One can add `Line` and `Code` components to `FileSource`. - String input is converted to `Code` component. - -Developers reference manual -=========================== - -To extend ExtGen, one needs to understand the infrastructure of -generating extension modules. - -There are two important concepts in ExtGen model: components and -containers. Components (ref. class `Component`) define code blocks or -code idioms used in building up a code sources. Containers (ref. class -`Container`) are named string lists that are joined together with -specified rules resulting actual code sources. ExtGen uses two steps -for constructing code sources: - -- creating code components and adding them together to a parent - component. For example, the `PyCModule` instance in the - hello example becomes a parent component to a `PyCFunction` instance - after executing `m += f`. - -- generating code source by calling `.generate()` method of the - parent component. - -One can iterate the above process as one wishes. - -The method `PyCModule.build()` is defined for convenience. -It compiles the generated sources, builds an extension module, -imports the resulting module to Python, and returns the module object. - -All component classes must be derived from the base class `Component` -defined in `extgen/base.py` file. `Component` class defines the -following methods and attributes: - -- `.initialize(self, *args, **kws)` is used to initialize the attributes - and subcomponents of the `Component` instance. Derived classes - usually redefine it to define the signature of the component - constructor. - -- `.add(self, component, container_label=None)` is used to add - subcomponents to the `Component`. Derived classes can affect - the behavior of the `.add()` method by redefining the following - class attributes: - - - `.default_component_class_name` is used when the `component` - argument is not a `Component` instance. - - - `.default_container_label` is used when component - `container_label` is undefined. - - - `.component_containe_map` is used to find `container_label` - corresponding to `component` argument class. - -- `.update_parent(self, parent)` is called after `parent.add(self,..)`. - -- `.finalize(self)` is called after finishing adding new components - and before `.generate()` method call. - -- `.generate(self)` returns a source code string. 
It recursively - processes all subcomponents, creates code containers, and - evaluates code templates. - -- `.provides(self)` property method returns an unique string - labeling the current component. The label is used to name - the result of `.generate()` method when storing it to a container. - The result is saved to container only if container does not - contain the given provides label. With this feature one avoids - redefining the same functions, variables, types etc that are needed - by different components. - -- `.init_containers(self)` is called before processing subcomponents. - Derived classes may redefine it. - -- `.update_containers(self)` is called after processing subcomponents. - Derived classes usually define it to fill up any containers. - -- `.get_templates(self)` is used by `.generate()` method to evaluate - the templates and return results. By default, `.get_templates()` - returns `.template` attribute. Derived classes may redefine it - to return a tuple of templates, then also `.generate()` will - return a tuple of source code strings. - -- `.get_container(self, name)` or `.container_` can be used - to retrive a container with a given name. If the current component - does not have requested container then the method tries to find - the container from parent classes. If it still does not find it, - then a new container with the given name will be created for - the current component. One should acctually avoid the last - solution and always define the containers in `.container_options` - class attribute. This attribute is a mapping between container - names and keyword options to the `Container` constructor. - See `Container` options below for more detail. - -- `.evaluate(self, template)` will evaluate `template` using - the attributes (with string values) and the code from containers. - -- `.info(message)`, `.warning(message)` are utility methods and - will write messages to `sys.stderr`. - -- `.register(*components)` will register predefined components - that can be retrived via `.get(provides)` method. - -Deriving a new `Component` class involves the following -tasks: - -- A component class must have a base class `Component`. - -- A component class may redefine `.initialize()`, - `.init_containers()`, `.add()`, `update_parent()`, - `.update_containers()`, `.get_templates()` - methods, `.provides()` property method and `.container_options`, - `.component_container_map`, `.default_container_label`, - `.default_component_class_name`, `.template` attributes. - -- In `.initialize()` method one can process constructor options, - set new attributes and add predefined components. It must - return a `Component` instance. - -- In `.init_containers()` and `.update_containers()` methods - one may retrive containers from parents via `.get_container()` - method or `.container_` attribute and fill them using - `.add()` method of the container. - -- The attribute `.template` is a string containing formatting mapping keys - that correspond to containers names or instance attribute names. - -- The attribute `.container_options` is a mapping of container - names and keyword argument dictionaries used as options - to a `Container` constructor. - -- The attribute `.component_container_map` is a mapping between - subcomponent class names and the names of containers that should - be used to save the code generation results. - -- All classes derived from `Component` are available as - `Component.`. 
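Tying the checklist above together, a minimal sketch of a derived component
(the class name `CComment` and its one-line template are illustrative, not part
of ExtGen)::

    from numpy.f2py.lib.extgen.base import Component

    class CComment(Component):
        """Render a single C comment line."""
        template = '/* %(text)s */'

        def initialize(self, text):
            # plain string attributes are exposed to the template through
            # the mapping that Component.evaluate() builds
            self.text = text
            return self   # initialize() must return a Component instance

A parent component decides where the generated text ends up by mapping the
class name in its `component_container_map`, e.g.
`component_container_map = dict(CComment='CBody')`.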
- -See `extgen/*.py` files for more examples how to redefine `Component` -class methods and attributes. - - -Using `Container` class ------------------------ - -`Container` class has the following optional arguments: - - - `separator='\n'` - - `prefix=''` - - `suffix=''` - - `skip_prefix_when_empty=False` - - `skip_suffix_when_empty=False` - - `default=''` - - `reverse=False` - - `use_indent=False` - - `use_firstline_indent=False` - - `indent_offset=0` - - `user_defined_str=None` - - `replace_map={}` - - `ignore_empty_content=False` - - `skip_prefix_suffix_when_single=False` - -that are used to enhance the behaviour of `Container.__str__()` -method. By default, `Container.__str__()` returns -`prefix+separator.join(.list)+suffix`. - -One can add items to `Container` instance using `.add(, -label=None)` method. The items are saved in `.list` and `.label_map` -attributes. - -`Container` instances can be combined using `+` operator and -copied with `.copy()` method. The `.copy()` method has the -same arguments as `Container` constructor and can be used -to change certain container properties. - -The `label` argument should contain an unique value that represents -the content of ``. If `label` is `None` then `label = -time.time()` will be set. - -If one tries to add items with the same label to the container then -the equality of the corresponding string values will be checked. If -they are not equal then `ValueError` is raised, otherwise adding an -item is ignored. - -If `reverse` is `True` then the `.list` is reversed before joining -its items. If `use_indent` is `True` then each item in `.list` will -be prefixed with `indent_offset` spaces. If `use_firstline_indent` is -`True` then additional indention of the number of starting spaces -in `.line[0]` is used. The `replace_map` is used to apply -`.replace(key, value)` method to the result of `__str__()`. -Full control over the `__str__()` method is obtained via -defining `user_defined_str` that should be a callable object taking -list as input and return a string. diff --git a/numpy/f2py/lib/extgen/py_support.py b/numpy/f2py/lib/extgen/py_support.py deleted file mode 100644 index 9f0057133..000000000 --- a/numpy/f2py/lib/extgen/py_support.py +++ /dev/null @@ -1,1104 +0,0 @@ - -__all__ = ['PySource', 'PyCFunction', 'PyCModule', 'PyCTypeSpec', 'PyCArgument', 'PyCReturn'] - -import os -import sys -from base import Component -from utils import * -from c_support import * - -class PySource(FileSource): - - template_py_header = '''\ -#!/usr/bin/env python -# This file %(path)r is generated using ExtGen tool -# from NumPy version %(numpy_version)s. -# ExtGen is developed by Pearu Peterson . -# For more information see http://www.scipy.org/ExtGen/ .''' - - container_options = dict( - Content = dict(default='', - prefix = template_py_header + '\n', - suffix = '\n', - use_indent=True) - ) - - pass - -class PyCModule(CSource): - - """ - >>> m = PyCModule('PyCModule_test', title='This is first line.\\nSecond line.', description='This is a module.\\nYes, it is.') - >>> mod = m.build() - >>> print mod.__doc__ #doctest: +ELLIPSIS - This module 'PyCModule_test' is generated with ExtGen from NumPy version ... - - This is first line. - Second line. - - This is a module. - Yes, it is. 
- """ - - template = CSource.template_c_header + ''' -#ifdef __cplusplus -extern \"C\" { -#endif -#include "Python.h" -%(CHeader)s -%(CTypeDef)s -%(CProto)s -%(CDefinition)s -%(CAPIDefinition)s -%(CDeclaration)s -%(PyCModuleCDeclaration)s -%(CMainProgram)s -#ifdef __cplusplus -} -#endif -''' - - container_options = CSource.container_options.copy() - container_options.update(CAPIDefinition=container_options['CDefinition'], - PyCModuleCDeclaration=dict(default='', - ignore_empty_content=True), - ) - - component_container_map = dict( - PyCModuleInitFunction = 'CMainProgram', - PyCModuleCDeclaration = 'PyCModuleCDeclaration', - PyCFunction = 'CAPIDefinition', - ) - - def initialize(self, pyname, *components, **options): - self.pyname = pyname - self.title = options.pop('title', None) - self.description = options.pop('description', None) - - self = CSource.initialize(self, '%smodule.c' % (pyname), **options) - self.need_numpy_support = False - - self.cdecl = PyCModuleCDeclaration(pyname) - self += self.cdecl - - self.main = PyCModuleInitFunction(pyname) - self += self.main - map(self.add, components) - return self - - def update_parent(self, parent): - if isinstance(parent, Component.SetupPy): - self.update_SetupPy(parent) - - def update_SetupPy(self, parent): - parent.setup_py += self.evaluate(' config.add_extension(%(pyname)r, sources = ["%(extmodulesrc)s"])', - extmodulesrc = self.path) - parent.init_py += 'import %s' % (self.pyname) - - def finalize(self): - if self.need_numpy_support: - self.add(CCode(''' -#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" -'''), 'CHeader') - self.main.add(CCode(''' -import_array(); -if (PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "failed to load NumPy array module."); - goto capi_error; -} -'''),'CBody') - CSource.finalize(self) - - def build(self, build_dir=None, clean_at_exit=None): - """ build(build_dir=None, clean_at_exit=None) - - A convenience function to build, import, an return - an extension module object. - """ - if build_dir is None: - import tempfile - import time - packagename = 'extgen_' + str(hex(int(time.time()*10000000)))[2:] - build_dir = os.path.join(tempfile.gettempdir(), packagename) - clean_at_exit = True - - setup = Component.SetupPy(build_dir) - setup += self - s,o = setup.execute('build_ext','--inplace') - if s: - self.info('return status=%s' % (s)) - self.info(o) - raise RuntimeError('failed to build extension module %r,'\ - ' the build is located in %r directory'\ - % (self.pyname, build_dir)) - - if clean_at_exit: - import atexit - import shutil - atexit.register(lambda d=build_dir: shutil.rmtree(d)) - self.info('directory %r will be removed at exit from python.' % (build_dir)) - - sys.path.insert(0, os.path.dirname(build_dir)) - packagename = os.path.basename(build_dir) - try: - p = __import__(packagename) - m = getattr(p, self.pyname) - except: - del sys.path[0] - raise - else: - del sys.path[0] - return m - -class PyCModuleCDeclaration(Component): - - template = '''\ -static PyObject* extgen_module; -static -PyMethodDef extgen_module_methods[] = { - %(PyMethodDef)s - {NULL,NULL,0,NULL} -}; -static -char extgen_module_doc[] = -"This module %(pyname)r is generated with ExtGen from NumPy version %(numpy_version)s." 
-%(Title)s -%(Description)s -%(FunctionSignature)s -;''' - container_options = dict( - PyMethodDef = dict(suffix=',', skip_suffix_when_empty=True,separator=',\n', - default='', use_indent=True, ignore_empty_content=True), - FunctionSignature = dict(prefix='"\\n\\n:Functions:\\n"\n" ', skip_prefix_when_empty=True, use_indent=True, - ignore_empty_content=True, default='', - separator = '"\n" ', suffix='"', skip_suffix_when_empty=True, - ), - Title = dict(default='',prefix='"\\n\\n',suffix='"',separator='\\n"\n"', - skip_prefix_when_empty=True, skip_suffix_when_empty=True, - use_firstline_indent=True, replace_map={'\n':'\\n'}), - Description = dict(default='',prefix='"\\n\\n"\n"', - suffix='"',separator='\\n"\n"', - skip_prefix_when_empty=True, skip_suffix_when_empty=True, - use_firstline_indent=True, replace_map={'\n':'\\n'}), - ) - - default_component_class_name = 'Line' - - def initialize(self, pyname): - self.pyname = pyname - return self - - def update_parent(self, parent): - if isinstance(parent, PyCModule): - self.update_PyCModule(parent) - - def update_PyCModule(self, parent): - if parent.title: - self.add(parent.title, 'Title') - if parent.description: - self.add(parent.description, 'Description') - - -class PyCModuleInitFunction(CFunction): - - """ - >>> f = PyCModuleInitFunction('test_PyCModuleInitFunction') - >>> print f.generate() - PyMODINIT_FUNC - inittest_PyCModuleInitFunction(void) { - PyObject* extgen_module_dict = NULL; - PyObject* extgen_str_obj = NULL; - extgen_module = Py_InitModule(\"test_PyCModuleInitFunction\", extgen_module_methods); - if ((extgen_module_dict = PyModule_GetDict(extgen_module))==NULL) goto capi_error; - if ((extgen_str_obj = PyString_FromString(extgen_module_doc))==NULL) goto capi_error; - PyDict_SetItemString(extgen_module_dict, \"__doc__\", extgen_str_obj); - Py_DECREF(extgen_str_obj); - if ((extgen_str_obj = PyString_FromString(\"restructuredtext\"))==NULL) goto capi_error; - PyDict_SetItemString(extgen_module_dict, \"__docformat__\", extgen_str_obj); - Py_DECREF(extgen_str_obj); - return; - capi_error: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, \"failed to initialize 'test_PyCModuleInitFunction' module.\"); - } - return; - } - """ - - template = '''\ -%(CSpecifier)s -%(CTypeSpec)s -%(name)s(void) { - PyObject* extgen_module_dict = NULL; - PyObject* extgen_str_obj = NULL; - %(CDeclaration)s - extgen_module = Py_InitModule("%(pyname)s", extgen_module_methods); - if ((extgen_module_dict = PyModule_GetDict(extgen_module))==NULL) goto capi_error; - if ((extgen_str_obj = PyString_FromString(extgen_module_doc))==NULL) goto capi_error; - PyDict_SetItemString(extgen_module_dict, "__doc__", extgen_str_obj); - Py_DECREF(extgen_str_obj); - if ((extgen_str_obj = PyString_FromString("restructuredtext"))==NULL) goto capi_error; - PyDict_SetItemString(extgen_module_dict, "__docformat__", extgen_str_obj); - Py_DECREF(extgen_str_obj); - %(CBody)s - return; -capi_error: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, "failed to initialize %(pyname)r module."); - } - return; -}''' - - def initialize(self, pyname, *components, **options): - self.pyname = pyname - self.title = options.pop('title', None) - self.description = options.pop('description', None) - self = CFunction.initialize(self, 'init'+pyname, 'PyMODINIT_FUNC', *components, **options) - return self - -#helper classes for PyCFunction -class KWListBase(Word): parent_container_options = dict(separator=', ', suffix=', ', skip_suffix_when_empty=True) -class 
ReqKWList(KWListBase): pass -class OptKWList(KWListBase): pass -class ExtKWList(KWListBase): pass -class ArgBase(Word): parent_container_options = dict(separator=', ') -class ReqArg(ArgBase): pass -class OptArg(ArgBase): pass -class ExtArg(ArgBase): pass -class RetArg(ArgBase): - parent_container_options = dict(separator=', ', prefix='(', suffix=')', default = 'None', - skip_prefix_when_empty=True, skip_suffix_when_empty=True, - skip_prefix_suffix_when_single=True) -class OptExtArg(ArgBase): - parent_container_options = dict(separator=', ', prefix=' [, ', skip_prefix_when_empty=True, - suffix=']', skip_suffix_when_empty=True) -class ArgDocBase(Word): - parent_container_options = dict(default='', prefix='"\\n\\nArguments:\\n"\n" ', - separator='\\n"\n" ', suffix='"', - skip_prefix_when_empty=True, skip_suffix_when_empty=True, - use_firstline_indent=True, replace_map={'\n':'\\n'}) -class ReqArgDoc(ArgDocBase): - parent_container_options = ArgDocBase.parent_container_options.copy() - parent_container_options.update(prefix='"\\n\\n:Parameters:\\n"\n" ') -class OptArgDoc(ArgDocBase): - parent_container_options = ArgDocBase.parent_container_options.copy() - parent_container_options.update(prefix='"\\n\\n:Optional parameters:\\n"\n" ') -class ExtArgDoc(ArgDocBase): - parent_container_options = ArgDocBase.parent_container_options.copy() - parent_container_options.update(prefix='"\\n\\n:Extra parameters:\\n"\n" ') -class RetArgDoc(ArgDocBase): - parent_container_options = ArgDocBase.parent_container_options.copy() - parent_container_options.update(prefix='"\\n\\n:Returns:\\n"\n" ', - default='"\\n\\n:Returns:\\n None"') -class ArgFmtBase(Word): parent_container_options = dict(separator='') -class ReqArgFmt(ArgFmtBase): pass -class OptArgFmt(ArgFmtBase): pass -class ExtArgFmt(ArgFmtBase): pass -class RetArgFmt(ArgFmtBase): pass -class OptExtArgFmt(ArgFmtBase): - parent_container_options = dict(separator='', prefix='|', skip_prefix_when_empty=True) -class ArgObjBase(Word): parent_container_options = dict(separator=', ', prefix=', ', skip_prefix_when_empty=True) -class ReqArgObj(ArgObjBase): pass -class OptArgObj(ArgObjBase): pass -class ExtArgObj(ArgObjBase): pass -class RetArgObj(ArgObjBase): pass - -class FunctionSignature(Component): - template = '%(name)s(%(ReqArg)s%(OptExtArg)s) -> %(RetArg)s' - parent_container_options = dict() - container_options = dict( - ReqArg = ReqArg.parent_container_options, - OptArg = OptArg.parent_container_options, - ExtArg = ExtArg.parent_container_options, - RetArg = RetArg.parent_container_options, - OptExtArg = OptExtArg.parent_container_options, - ) - def initialize(self, name, *components, **options): - self.name = name - map(self.add, components) - return self - def update_containers(self): - self.container_OptExtArg += self.container_OptArg + self.container_ExtArg - -class PyCFunction(CFunction): - - """ - >>> from __init__ import * - >>> f = PyCFunction('foo') - >>> print f.generate() - static - char pyc_function_foo_doc[] = - \" foo() -> None\" - \"\\n\\n:Returns:\\n None\" - ; - static - PyObject* - pyc_function_foo(PyObject *pyc_self, PyObject *pyc_args, PyObject *pyc_keywds) { - PyObject * volatile pyc_buildvalue = NULL; - volatile int capi_success = 1; - static char *capi_kwlist[] = {NULL}; - if (PyArg_ParseTupleAndKeywords(pyc_args, pyc_keywds,"", - capi_kwlist)) { - capi_success = !PyErr_Occurred(); - if (capi_success) { - pyc_buildvalue = Py_BuildValue(""); - } - } - return pyc_buildvalue; - } - >>> f = PyCFunction('foo', title=' Function 
title.\\nSecond line.', description=' This is a function.\\n2nd line.') - >>> e = PyCModule('PyCFunction_test', f) - >>> mod = e.build() - >>> print mod.foo.__doc__ - foo() -> None - - Function title. - Second line. - - This is a function. - 2nd line. - - :Returns: - None - """ - - template = '''\ -static -char %(name)s_doc[] = -" %(FunctionSignature)s" -%(Title)s -%(Description)s -%(ReqArgDoc)s -%(RetArgDoc)s -%(OptArgDoc)s -%(ExtArgDoc)s -; -static -PyObject* -%(name)s(PyObject *pyc_self, PyObject *pyc_args, PyObject *pyc_keywds) { - PyObject * volatile pyc_buildvalue = NULL; - volatile int capi_success = 1; - %(CDeclaration)s - static char *capi_kwlist[] = {%(ReqKWList)s%(OptKWList)s%(ExtKWList)sNULL}; - if (PyArg_ParseTupleAndKeywords(pyc_args, pyc_keywds,"%(ReqArgFmt)s%(OptExtArgFmt)s", - capi_kwlist%(ReqArgObj)s%(OptArgObj)s%(ExtArgObj)s)) { - %(FromPyObj)s - %(CBody)s - capi_success = !PyErr_Occurred(); - if (capi_success) { - %(PyObjFrom)s - pyc_buildvalue = Py_BuildValue("%(RetArgFmt)s"%(RetArgObj)s); - %(CleanPyObjFrom)s - } - %(CleanCBody)s - %(CleanFromPyObj)s - } - return pyc_buildvalue; -}''' - - container_options = CFunction.container_options.copy() - - container_options.update(\ - - TMP = dict(), - - ReqArg = ReqArg.parent_container_options, - OptArg = OptArg.parent_container_options, - ExtArg = ExtArg.parent_container_options, - RetArg = RetArg.parent_container_options, - - FunctionSignature = FunctionSignature.parent_container_options, - - OptExtArg = OptExtArg.parent_container_options, - - Title = dict(default='',prefix='"\\n\\n',suffix='"',separator='\\n"\n"', - skip_prefix_when_empty=True, skip_suffix_when_empty=True, - use_firstline_indent=True, replace_map={'\n':'\\n'}), - Description = dict(default='',prefix='"\\n\\n"\n"', - suffix='"',separator='\\n"\n"', - skip_prefix_when_empty=True, skip_suffix_when_empty=True, - use_firstline_indent=True, replace_map={'\n':'\\n'}), - - ReqArgDoc = ReqArgDoc.parent_container_options, - OptArgDoc = OptArgDoc.parent_container_options, - ExtArgDoc = ExtArgDoc.parent_container_options, - RetArgDoc = RetArgDoc.parent_container_options, - - ReqKWList = ReqKWList.parent_container_options, - OptKWList = OptKWList.parent_container_options, - ExtKWList = ExtKWList.parent_container_options, - - ReqArgFmt = ReqArgFmt.parent_container_options, - OptArgFmt = OptArgFmt.parent_container_options, - ExtArgFmt = ExtArgFmt.parent_container_options, - OptExtArgFmt = OptExtArgFmt.ExtArgFmt.parent_container_options, - RetArgFmt = ExtArgFmt.parent_container_options, - - ReqArgObj = ReqArgObj.parent_container_options, - OptArgObj = OptArgObj.parent_container_options, - ExtArgObj = ExtArgObj.parent_container_options, - RetArgObj = RetArgObj.parent_container_options, - - FromPyObj = CCode.parent_container_options, - PyObjFrom = CCode.parent_container_options, - - CleanPyObjFrom = dict(default='', reverse=True, use_indent=True, ignore_empty_content=True), - CleanCBody = dict(default='', reverse=True, use_indent=True, ignore_empty_content=True), - CleanFromPyObj = dict(default='', reverse=True, use_indent=True, ignore_empty_content=True), - - ) - - default_component_class_name = 'CCode' - - component_container_map = CFunction.component_container_map.copy() - component_container_map.update( - PyCArgument = 'TMP', - CCode = 'CBody', - ) - - def initialize(self, pyname, *components, **options): - self.pyname = pyname - self.title = options.pop('title', None) - self.description = options.pop('description', None) - self = CFunction.initialize(self, 
'pyc_function_'+pyname, 'PyObject*', **options) - self.signature = FunctionSignature(pyname) - self += self.signature - if self.title: - self.add(self.title, 'Title') - if self.description: - self.add(self.description, 'Description') - map(self.add, components) - return self - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.pyname]+[c for (c,l) in self.components]))) - - def update_parent(self, parent): - if isinstance(parent, PyCModule): - self.update_PyCModule(parent) - - def update_PyCModule(self, parent): - t = ' {"%(pyname)s", (PyCFunction)%(name)s, METH_VARARGS | METH_KEYWORDS, %(name)s_doc}' - parent.cdecl.add(self.evaluate(t),'PyMethodDef') - parent.cdecl.add(self.signature,'FunctionSignature') - - def update_containers(self): - self.container_OptExtArg += self.container_OptArg + self.container_ExtArg - self.container_OptExtArgFmt += self.container_OptArgFmt + self.container_ExtArgFmt - - # resolve dependencies - sorted_arguments = [] - sorted_names = [] - comp_map = {} - dep_map = {} - for (c,l) in self.components: - if not isinstance(c, Component.PyCArgument): - continue - d = [n for n in c.depends if n not in sorted_names] - if not d: - sorted_arguments.append((c,l)) - sorted_names.append(c.name) - else: - comp_map[c.name] = (c,l) - dep_map[c.name] = d - - while dep_map: - dep_map_copy = dep_map.copy() - for name, deps in dep_map.items(): - d = [n for n in deps if n in dep_map] - if not d: - sorted_arguments.append(comp_map[name]) - del dep_map[name] - else: - dep_map[name] = d - if dep_map_copy==dep_map: - self.warnign('%s: detected cyclic dependencies in %r, incorrect behavior is expected.\n'\ - % (self.provides, dep_map)) - sorted_arguments += dep_map.values() - break - - for c, l in sorted_arguments: - old_parent = c.parent - c.parent = self - c.ctype.set_converters(c) - c.parent = old_parent - - -class PyCArgument(Component): - - """ - >>> from __init__ import * - >>> a = PyCArgument('a') - >>> print a - PyCArgument('a', PyCTypeSpec('object')) - >>> print a.generate() - a - >>> f = PyCFunction('foo') - >>> f += a - >>> f += PyCArgument('b') - >>> m = PyCModule('PyCArgument_test') - >>> m += f - >>> #print m.generate() - >>> mod = m.build() - >>> print mod.__doc__ #doctest: +ELLIPSIS - This module 'PyCArgument_test' is generated with ExtGen from NumPy version ... 
- - :Functions: - foo(a, b) -> None - - """ - - container_options = dict( - TMP = dict() - ) - - component_container_map = dict( - PyCTypeSpec = 'TMP' - ) - - template = '%(name)s' - - def initialize(self, name, ctype = object, *components, **options): - self.input_intent = options.pop('input_intent','required') # 'optional', 'extra', 'hide' - self.output_intent = options.pop('output_intent','hide') # 'return' - self.input_title = options.pop('input_title', None) - self.output_title = options.pop('output_title', None) - self.input_description = options.pop('input_description', None) - self.output_description = options.pop('output_description', None) - self.depends = options.pop('depends', []) - title = options.pop('title', None) - description = options.pop('description', None) - if title is not None: - if self.input_intent!='hide': - if self.input_title is None: - self.input_title = title - elif self.output_intent!='hide': - if self.output_title is None: - self.output_title = title - if description is not None: - if self.input_intent!='hide': - if self.input_description is None: - self.input_description = description - elif self.output_intent!='hide': - if self.output_description is None: - self.output_description = description - if options: self.warning('%s unused options: %s\n' % (self.__class__.__name__, options)) - - self.name = name - self.ctype = ctype = PyCTypeSpec(ctype) - self += ctype - - self.cvar = name - self.pycvar = None - self.retpycvar = None - - retfmt = ctype.get_pyret_fmt(self) - if isinstance(ctype, PyCTypeSpec): - if retfmt and retfmt in 'SON': - if self.output_intent == 'return': - if self.input_intent=='hide': - self.retpycvar = name - else: - self.pycvar = name - self.retpycvar = name + '_return' - elif self.input_intent!='hide': - self.pycvar = name - else: - self.pycvar = name - self.retpycvar = name - else: - self.pycvar = name + '_pyc' - self.retpycvar = name + '_pyc_r' - - ctype.set_titles(self) - - map(self.add, components) - return self - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.name]+[c for (c,l) in self.components]))) - - def update_parent(self, parent): - if isinstance(parent, PyCFunction): - self.update_PyCFunction(parent) - - def update_PyCFunction(self, parent): - ctype = self.ctype - - input_doc_title = '%s : %s' % (self.name, self.input_title) - output_doc_title = '%s : %s' % (self.name, self.output_title) - if self.input_description is not None: - input_doc_descr = ' %s' % (self.input_description) - else: - input_doc_descr = None - if self.output_description is not None: - output_doc_descr = ' %s' % (self.output_description) - else: - output_doc_descr = None - - # add components to parent: - parent += ctype.get_decl(self, parent) - if self.input_intent=='required': - parent += ReqArg(self.name) - parent.signature += ReqArg(self.name) - parent += ReqKWList('"' + self.name + '"') - parent += ReqArgFmt(ctype.get_pyarg_fmt(self)) - parent += ReqArgObj(ctype.get_pyarg_obj(self)) - parent += ReqArgDoc(input_doc_title) - parent += ReqArgDoc(input_doc_descr) - elif self.input_intent=='optional': - parent += OptArg(self.name) - parent.signature += OptArg(self.name) - parent += OptKWList('"' + self.name + '"') - parent += OptArgFmt(ctype.get_pyarg_fmt(self)) - parent += OptArgObj(ctype.get_pyarg_obj(self)) - parent += OptArgDoc(input_doc_title) - parent += OptArgDoc(input_doc_descr) - elif self.input_intent=='extra': - parent += ExtArg(self.name) - parent.signature += ExtArg(self.name) - parent += 
ExtKWList('"' + self.name + '"') - parent += ExtArgFmt(ctype.get_pyarg_fmt(self)) - parent += ExtArgObj(ctype.get_pyarg_obj(self)) - parent += ExtArgDoc(input_doc_title) - parent += ExtArgDoc(input_doc_descr) - elif self.input_intent=='hide': - pass - else: - raise NotImplementedError('input_intent=%r' % (self.input_intent)) - - if self.output_intent=='return': - parent += RetArg(self.name) - parent.signature += RetArg(self.name) - parent += RetArgFmt(ctype.get_pyret_fmt(self)) - parent += RetArgObj(ctype.get_pyret_obj(self)) - parent += RetArgDoc(output_doc_title) - parent += RetArgDoc(output_doc_descr) - elif self.output_intent=='hide': - pass - else: - raise NotImplementedError('output_intent=%r' % (self.output_intent)) - -class PyCReturn(PyCArgument): - - def initialize(self, name, ctype = object, *components, **options): - return PyCArgument(name, ctype, input_intent='hide', output_intent='return', *components, **options) - -class PyCTypeSpec(CTypeSpec): - - """ - >>> s = PyCTypeSpec(object) - >>> print s - PyCTypeSpec('object') - >>> print s.generate() - PyObject* - - >>> from __init__ import * - >>> m = PyCModule('test_PyCTypeSpec') - >>> f = PyCFunction('func') - >>> f += PyCArgument('i', int, output_intent='return') - >>> f += PyCArgument('l', long, output_intent='return') - >>> f += PyCArgument('f', float, output_intent='return') - >>> f += PyCArgument('c', complex, output_intent='return') - >>> f += PyCArgument('s', str, output_intent='return') - >>> f += PyCArgument('u', unicode, output_intent='return') - >>> f += PyCArgument('t', tuple, output_intent='return') - >>> f += PyCArgument('lst', list, output_intent='return') - >>> f += PyCArgument('d', dict, output_intent='return') - >>> f += PyCArgument('set', set, output_intent='return') - >>> f += PyCArgument('o1', object, output_intent='return') - >>> f += PyCArgument('o2', object, output_intent='return') - >>> m += f - >>> b = m.build() #doctest: +ELLIPSIS - >>> b.func(23, 23l, 1.2, 1+2j, 'hello', u'hei', (2,'a'), [-2], {3:4}, set([1,2]), 2, '15') - (23, 23L, 1.2, (1+2j), 'hello', u'hei', (2, 'a'), [-2], {3: 4}, set([1, 2]), 2, '15') - >>> print b.func.__doc__ - func(i, l, f, c, s, u, t, lst, d, set, o1, o2) -> (i, l, f, c, s, u, t, lst, d, set, o1, o2) - - :Parameters: - i : a python int object - l : a python long object - f : a python float object - c : a python complex object - s : a python str object - u : a python unicode object - t : a python tuple object - lst : a python list object - d : a python dict object - set : a python set object - o1 : a python object - o2 : a python object - - :Returns: - i : a python int object - l : a python long object - f : a python float object - c : a python complex object - s : a python str object - u : a python unicode object - t : a python tuple object - lst : a python list object - d : a python dict object - set : a python set object - o1 : a python object - o2 : a python object - - >>> m = PyCModule('test_PyCTypeSpec_c') - >>> f = PyCFunction('func_c_int') - >>> f += PyCArgument('i1', 'c_char', output_intent='return') - >>> f += PyCArgument('i2', 'c_short', output_intent='return') - >>> f += PyCArgument('i3', 'c_int', output_intent='return') - >>> f += PyCArgument('i4', 'c_long', output_intent='return') - >>> f += PyCArgument('i5', 'c_long_long', output_intent='return') - >>> m += f - >>> f = PyCFunction('func_c_unsigned_int') - >>> f += PyCArgument('i1', 'c_unsigned_char', output_intent='return') - >>> f += PyCArgument('i2', 'c_unsigned_short', output_intent='return') - >>> f += 
PyCArgument('i3', 'c_unsigned_int', output_intent='return') - >>> f += PyCArgument('i4', 'c_unsigned_long', output_intent='return') - >>> f += PyCArgument('i5', 'c_unsigned_long_long', output_intent='return') - >>> m += f - >>> f = PyCFunction('func_c_float') - >>> f += PyCArgument('f1', 'c_float', output_intent='return') - >>> f += PyCArgument('f2', 'c_double', output_intent='return') - >>> m += f - >>> f = PyCFunction('func_c_complex') - >>> f += PyCArgument('c1', 'c_Py_complex', output_intent='return') - >>> m += f - >>> f = PyCFunction('func_c_string') - >>> f += PyCArgument('s1', 'c_const_char_ptr', output_intent='return') - >>> f += PyCArgument('s2', 'c_const_char_ptr', output_intent='return') - >>> f += PyCArgument('s3', 'c_Py_UNICODE', output_intent='return') - >>> f += PyCArgument('s4', 'c_char1', output_intent='return') - >>> m += f - >>> b = m.build() - >>> b.func_c_int(2,3,4,5,6) - (2, 3, 4, 5, 6L) - >>> b.func_c_unsigned_int(-1,-1,-1,-1,-1) - (255, 65535, 4294967295, 18446744073709551615L, 18446744073709551615L) - >>> b.func_c_float(1.2,1.2) - (1.2000000476837158, 1.2) - >>> b.func_c_complex(1+2j) - (1+2j) - >>> b.func_c_string('hei', None, u'tere', 'b') - ('hei', None, u'tere', 'b') - - >>> import numpy - >>> m = PyCModule('test_PyCTypeSpec_numpy') - >>> f = PyCFunction('func_int') - >>> f += PyCArgument('i1', numpy.int8, output_intent='return') - >>> f += PyCArgument('i2', numpy.int16, output_intent='return') - >>> f += PyCArgument('i3', numpy.int32, output_intent='return') - >>> f += PyCArgument('i4', numpy.int64, output_intent='return') - >>> m += f - >>> f = PyCFunction('func_uint') - >>> f += PyCArgument('i1', numpy.uint8, output_intent='return') - >>> f += PyCArgument('i2', numpy.uint16, output_intent='return') - >>> f += PyCArgument('i3', numpy.uint32, output_intent='return') - >>> f += PyCArgument('i4', numpy.uint64, output_intent='return') - >>> m += f - >>> f = PyCFunction('func_float') - >>> f += PyCArgument('f1', numpy.float32, output_intent='return') - >>> f += PyCArgument('f2', numpy.float64, output_intent='return') - >>> f += PyCArgument('f3', numpy.float128, output_intent='return') - >>> m += f - >>> f = PyCFunction('func_complex') - >>> f += PyCArgument('c1', numpy.complex64, output_intent='return') - >>> f += PyCArgument('c2', numpy.complex128, output_intent='return') - >>> f += PyCArgument('c3', numpy.complex256, output_intent='return') - >>> m += f - >>> f = PyCFunction('func_array') - >>> f += PyCArgument('a1', numpy.ndarray, output_intent='return') - >>> m += f - >>> b = m.build() - >>> b.func_int(numpy.int8(-2), numpy.int16(-3), numpy.int32(-4), numpy.int64(-5)) - (-2, -3, -4, -5) - >>> b.func_uint(numpy.uint8(-1), numpy.uint16(-1), numpy.uint32(-1), numpy.uint64(-1)) - (255, 65535, 4294967295, 18446744073709551615) - >>> b.func_float(numpy.float32(1.2),numpy.float64(1.2),numpy.float128(1.2)) - (1.20000004768, 1.2, 1.19999999999999995559) - >>> b.func_complex(numpy.complex64(1+2j),numpy.complex128(1+2j),numpy.complex256(1+2j)) - ((1+2j), (1+2j), (1.0+2.0j)) - >>> b.func_array(numpy.array([1,2])) - array([1, 2]) - >>> b.func_array(numpy.array(2)) - array(2) - >>> b.func_array(2) - Traceback (most recent call last): - ... - TypeError: argument 1 must be numpy.ndarray, not int - >>> b.func_array(numpy.int8(2)) - Traceback (most recent call last): - ... 
- TypeError: argument 1 must be numpy.ndarray, not numpy.int8 - """ - - typeinfo_map = dict( - int = ('PyInt_Type', 'PyIntObject*', 'O!', 'N', 'NULL'), - long = ('PyLong_Type', 'PyLongObject*', 'O!', 'N', 'NULL'), - float = ('PyFloat_Type', 'PyFloatObject*', 'O!', 'N', 'NULL'), - complex = ('PyComplex_Type', 'PyComplexObject*', 'O!', 'N', 'NULL'), - str = ('PyString_Type', 'PyStringObject*', 'S', 'N', 'NULL'), - unicode = ('PyUnicode_Type', 'PyUnicodeObject*', 'U', 'N', 'NULL'), - buffer = ('PyBuffer_Type', 'PyBufferObject*', 'O!', 'N', 'NULL'), - tuple = ('PyTuple_Type', 'PyTupleObject*', 'O!', 'N', 'NULL'), - list = ('PyList_Type', 'PyListObject*', 'O!', 'N', 'NULL'), - dict = ('PyDict_Type', 'PyDictObject*', 'O!', 'N', 'NULL'), - file = ('PyFile_Type', 'PyFileObject*', 'O!', 'N', 'NULL'), - instance = ('PyInstance_Type', 'PyObject*', 'O!', 'N', 'NULL'), - function = ('PyFunction_Type', 'PyFunctionObject*', 'O!', 'N', 'NULL'), - method = ('PyMethod_Type', 'PyObject*', 'O!', 'N', 'NULL'), - module = ('PyModule_Type', 'PyObject*', 'O!', 'N', 'NULL'), - iter = ('PySeqIter_Type', 'PyObject*', 'O!', 'N', 'NULL'), - property = ('PyProperty_Type', 'PyObject*', 'O!', 'N', 'NULL'), - slice = ('PySlice_Type', 'PyObject*', 'O!', 'N', 'NULL'), - cell = ('PyCell_Type', 'PyCellObject*', 'O!', 'N', 'NULL'), - generator = ('PyGen_Type', 'PyGenObject*', 'O!', 'N', 'NULL'), - set = ('PySet_Type', 'PySetObject*', 'O!', 'N', 'NULL'), - frozenset = ('PyFrozenSet_Type', 'PySetObject*', 'O!', 'N', 'NULL'), - cobject = (None, 'PyCObject*', 'O', 'N', 'NULL'), - type = ('PyType_Type', 'PyTypeObject*', 'O!', 'N', 'NULL'), - object = (None, 'PyObject*', 'O', 'N', 'NULL'), - numpy_ndarray = ('PyArray_Type', 'PyArrayObject*', 'O!', 'N', 'NULL'), - numpy_descr = ('PyArrayDescr_Type','PyArray_Descr', 'O!', 'N', 'NULL'), - numpy_ufunc = ('PyUFunc_Type', 'PyUFuncObject*', 'O!', 'N', 'NULL'), - numpy_iter = ('PyArrayIter_Type', 'PyArrayIterObject*', 'O!', 'N', 'NULL'), - numpy_multiiter = ('PyArrayMultiIter_Type', 'PyArrayMultiIterObject*', 'O!', 'N', 'NULL'), - numpy_int8 = ('PyInt8ArrType_Type', 'PyInt8ScalarObject*', 'O!', 'N', 'NULL'), - numpy_int16 = ('PyInt16ArrType_Type', 'PyInt16ScalarObject*', 'O!', 'N', 'NULL'), - numpy_int32 = ('PyInt32ArrType_Type', 'PyInt32ScalarObject*', 'O!', 'N', 'NULL'), - numpy_int64 = ('PyInt64ArrType_Type', 'PyInt64ScalarObject*', 'O!', 'N', 'NULL'), - numpy_int128 = ('PyInt128ArrType_Type', 'PyInt128ScalarObject*', 'O!', 'N', 'NULL'), - numpy_uint8 = ('PyUInt8ArrType_Type', 'PyUInt8ScalarObject*', 'O!', 'N', 'NULL'), - numpy_uint16 = ('PyUInt16ArrType_Type', 'PyUInt16ScalarObject*', 'O!', 'N', 'NULL'), - numpy_uint32 = ('PyUInt32ArrType_Type', 'PyUInt32ScalarObject*', 'O!', 'N', 'NULL'), - numpy_uint64 = ('PyUInt64ArrType_Type', 'PyUInt64ScalarObject*', 'O!', 'N', 'NULL'), - numpy_uint128 = ('PyUInt128ArrType_Type', 'PyUInt128ScalarObject*', 'O!', 'N', 'NULL'), - numpy_float16 = ('PyFloat16ArrType_Type', 'PyFloat16ScalarObject*', 'O!', 'N', 'NULL'), - numpy_float32 = ('PyFloat32ArrType_Type', 'PyFloat32ScalarObject*', 'O!', 'N', 'NULL'), - numpy_float64 = ('PyFloat64ArrType_Type', 'PyFloat64ScalarObject*', 'O!', 'N', 'NULL'), - numpy_float80 = ('PyFloat80ArrType_Type', 'PyFloat80ScalarObject*', 'O!', 'N', 'NULL'), - numpy_float96 = ('PyFloat96ArrType_Type', 'PyFloat96ScalarObject*', 'O!', 'N', 'NULL'), - numpy_float128 = ('PyFloat128ArrType_Type', 'PyFloat128ScalarObject*', 'O!', 'N', 'NULL'), - numpy_complex32 = ('PyComplex32ArrType_Type', 'PyComplex32ScalarObject*', 'O!', 'N', 
'NULL'), - numpy_complex64 = ('PyComplex64ArrType_Type', 'PyComplex64ScalarObject*', 'O!', 'N', 'NULL'), - numpy_complex128 = ('PyComplex128ArrType_Type', 'PyComplex128ScalarObject*', 'O!', 'N', 'NULL'), - numpy_complex160 = ('PyComplex160ArrType_Type', 'PyComplex160ScalarObject*', 'O!', 'N', 'NULL'), - numpy_complex192 = ('PyComplex192ArrType_Type', 'PyComplex192ScalarObject*', 'O!', 'N', 'NULL'), - numpy_complex256 = ('PyComplex256ArrType_Type', 'PyComplex256ScalarObject*', 'O!', 'N', 'NULL'), - numeric_array = ('PyArray_Type', 'PyArrayObject*', 'O!', 'N', 'NULL'), - c_char = (None, 'char', 'b', 'b', '0'), - c_unsigned_char = (None, 'unsigned char', 'B', 'B', '0'), - c_short = (None, 'short int', 'h', 'h', '0'), - c_unsigned_short = (None, 'unsigned short int', 'H', 'H', '0'), - c_int = (None,'int', 'i', 'i', '0'), - c_unsigned_int = (None,'unsigned int', 'I', 'I', '0'), - c_long = (None,'long', 'l', 'l', '0'), - c_unsigned_long = (None,'unsigned long', 'k', 'k', '0'), - c_long_long = (None,'PY_LONG_LONG', 'L', 'L', '0'), - c_unsigned_long_long = (None,'unsigned PY_LONG_LONG', 'K', 'K', '0'), - c_Py_ssize_t = (None,'Py_ssize_t', 'n', 'n', '0'), - c_char1 = (None,'char', 'c', 'c', '"\\0"'), - c_float = (None,'float', 'f', 'f', '0.0'), - c_double = (None,'double', 'd', 'd', '0.0'), - c_Py_complex = (None,'Py_complex', 'D', 'D', '{0.0, 0.0}'), - c_const_char_ptr = (None,'const char *', 'z', 'z', 'NULL'), - c_Py_UNICODE = (None,'Py_UNICODE*','u','u', 'NULL'), - ) - - def initialize(self, typeobj): - if isinstance(typeobj, self.__class__): - return typeobj - - m = self.typeinfo_map - - key = None - if isinstance(typeobj, type): - if typeobj.__module__=='__builtin__': - key = typeobj.__name__ - if key=='array': - key = 'numeric_array' - elif typeobj.__module__=='numpy': - key = 'numpy_' + typeobj.__name__ - elif isinstance(typeobj, str): - key = typeobj - if key.startswith('numpy_'): - k = key[6:] - named_scalars = ['byte','short','int','long','longlong', - 'ubyte','ushort','uint','ulong','ulonglong', - 'intp','uintp', - 'float_','double', - 'longfloat','longdouble', - 'complex_', - ] - if k in named_scalars: - import numpy - key = 'numpy_' + getattr(numpy, k).__name__ - - try: item = m[key] - except KeyError: - raise NotImplementedError('%s: need %s support' % (self.__class__.__name__, typeobj)) - - self.typeobj_name = key - self.ctypeobj = item[0] - self.line = item[1] - self.arg_fmt = item[2] - self.ret_fmt = item[3] - self.cinit_value = item[4] - - self.need_numpy_support = False - if key.startswith('numpy_'): - self.need_numpy_support = True - #self.add(Component.get('arrayobject.h'), 'CHeader') - #self.add(Component.get('import_array'), 'ModuleInit') - if key.startswith('numeric_'): - raise NotImplementedError(self.__class__.__name__ + ': Numeric support') - - return self - - def finalize(self): - if self.need_numpy_support: - self.component_PyCModule.need_numpy_support = True - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(self.typeobj_name)]+[repr(c) for (c,l) in self.components])) - - def get_pyarg_fmt(self, arg): - if arg.input_intent=='hide': return None - return self.arg_fmt - - def get_pyarg_obj(self, arg): - if arg.input_intent=='hide': return None - if self.arg_fmt=='O!': - return '&%s, &%s' % (self.ctypeobj, arg.pycvar) - return '&' + arg.pycvar - - def get_pyret_fmt(self, arg): - if arg.output_intent=='hide': return None - return self.ret_fmt - - def get_pyret_obj(self, arg): - if arg.output_intent=='return': - if 
self.get_pyret_fmt(arg)=='D': - return '&' + arg.retpycvar - return arg.retpycvar - return - - def get_init_value(self, arg): - return self.cinit_value - - def set_titles(self, arg): - if self.typeobj_name == 'object': - tn = 'a python ' + self.typeobj_name - else: - if self.typeobj_name.startswith('numpy_'): - tn = 'a numpy.' + self.typeobj_name[6:] + ' object' - elif self.typeobj_name.startswith('c_'): - n = self.typeobj_name[2:] - if not n.startswith('Py_'): - n = ' '.join(n.split('_')) - tn = 'a to C ' + n + ' convertable object' - else: - tn = 'a python ' + self.typeobj_name + ' object' - if arg.input_intent!='hide': - r = '' - if arg.input_title: r = ', ' + arg.input_title - arg.input_title = tn + r - if arg.output_intent!='hide': - r = '' - if arg.output_title: r = ', ' + arg.output_title - arg.output_title = tn + r - - def get_decl(self, arg, func): - init_value = self.get_init_value(arg) - if init_value: - init = ' = %s' % (init_value) - else: - init = '' - if arg.pycvar and arg.pycvar==arg.retpycvar: - func += CDeclaration(self, '%s%s' % (arg.pycvar, init)) - else: - if self.get_pyret_obj(arg) is None: - if self.get_pyret_obj(arg) is not None: - func += CDeclaration(self, '%s%s' % (arg.pycvar, init)) - elif self.get_pyarg_obj(arg) is not None: - func += CDeclaration(self, '%s%s' % (arg.pycvar, init)) - func += CDeclaration(self,'%s%s' % (arg.retpycvar, init)) - else: - func += CDeclaration(self, '%s%s' % (arg.retpycvar, init)) - return - - def set_converters(self, arg): - """ - Notes for user: - if arg is intent(optional, in, out) and not specified - as function argument then function may created but - it must then have *new reference* (ie use Py_INCREF - unless it is a new reference already). - """ - # this method is called from PyCFunction.update_containers(), - # note that self.parent is None put arg.parent is PyCFunction - # instance. - eval_a = arg.evaluate - FromPyObj = arg.container_FromPyObj - PyObjFrom = arg.container_PyObjFrom - - argfmt = self.get_pyarg_fmt(arg) - retfmt = self.get_pyret_fmt(arg) - if arg.output_intent=='return': - if arg.input_intent in ['optional', 'extra']: - if retfmt in 'SON': - FromPyObj += eval_a('''\ -if (!(%(pycvar)s==NULL)) { - /* make %(pycvar)r a new reference */ - %(retpycvar)s = %(pycvar)s; - Py_INCREF((PyObject*)%(retpycvar)s); -} -''') - PyObjFrom += eval_a('''\ -if (%(retpycvar)s==NULL) { - /* %(pycvar)r was not specified */ - if (%(pycvar)s==NULL) { - %(retpycvar)s = Py_None; - Py_INCREF((PyObject*)%(retpycvar)s); - } else { - %(retpycvar)s = %(pycvar)s; - /* %(pycvar)r must be a new reference or expect a core dump. */ - } -} elif (!(%(retpycvar)s == %(pycvar)s)) { - /* a new %(retpycvar)r was created, undoing %(pycvar)s new reference */ - Py_DECREF((PyObject*)%(pycvar)s); -} -''') - elif arg.input_intent=='hide': - if retfmt in 'SON': - PyObjFrom += eval_a('''\ -if (%(retpycvar)s==NULL) { - %(retpycvar)s = Py_None; - Py_INCREF((PyObject*)%(retpycvar)s); -} /* else %(retpycvar)r must be a new reference or expect a core dump. */ -''') - elif arg.input_intent=='required': - if retfmt in 'SON': - FromPyObj += eval_a('''\ -/* make %(pycvar)r a new reference */ -%(retpycvar)s = %(pycvar)s; -Py_INCREF((PyObject*)%(retpycvar)s); -''') - PyObjFrom += eval_a('''\ -if (!(%(retpycvar)s==%(pycvar)s)) { - /* a new %(retpycvar)r was created, undoing %(pycvar)r new reference */ - /* %(retpycvar)r must be a new reference or expect a core dump. 
*/ - Py_DECREF((PyObject*)%(pycvar)s); -} -''') - - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/numpy/f2py/lib/extgen/setup_py.py b/numpy/f2py/lib/extgen/setup_py.py deleted file mode 100644 index da1d84943..000000000 --- a/numpy/f2py/lib/extgen/setup_py.py +++ /dev/null @@ -1,124 +0,0 @@ - -__all__ = ['SetupPy'] - -import os -import sys -from numpy.distutils.exec_command import exec_command -from base import Component -from utils import FileSource - -def write_files(container): - s = ['creating files and directories:'] - for filename, i in container.label_map.items(): - content = container.list[i] - d,f = os.path.split(filename) - if d and not os.path.isdir(d): - s.append(' %s/' % (d)) - if not Component._generate_dry_run: - os.makedirs(d) - s.append(' %s' % (filename)) - if not Component._generate_dry_run: - overwrite = True - if os.path.isfile(filename): - overwrite = False - f = file(filename, 'r') - i = 0 - for line in f: - if 'is generated using ExtGen tool' in line: - overwrite = True - break - i += 1 - if i>5: break - if not overwrite: - s[-1] += ' - unknown file exists, skipping' - else: - s[-1] += ' - extgen generated file exists, overwriting' - if overwrite: - f = file(filename,'w') - f.write(content) - f.close() - return '\n'.join(s) - - -class SetupPy(Component): - - """ - >>> from __init__ import * - >>> s = SetupPy('SetupPy_doctest') - >>> s += PyCModule('foo') - >>> s,o = s.execute('build_ext', '--inplace') - >>> assert s==0,`s` - >>> import SetupPy_doctest as mypackage - >>> print mypackage.foo.__doc__ #doctest: +ELLIPSIS - This module 'foo' is generated with ExtGen from NumPy version... - - """ - template_setup_py_start = '''\ -def configuration(parent_package='', top_path = ''): - from numpy.distutils.misc_util import Configuration - config = Configuration('',parent_package,top_path)''' - template_setup_py_end = '''\ - return config -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) -''' - template = '%(SourceWriter)s' - - container_options = dict( - SourceWriter = dict(user_defined_str = write_files), - TMP = dict() - ) - - component_container_map = dict( - FileSource = 'SourceWriter', - ExtensionModule = 'TMP', - ) - - def initialize(self, build_dir, *components, **options): - self.name = self.path = build_dir - if not self.path: - self.setup_py = setup_py = Component.PySource('extgen_setup.py') - self.init_py = init_py = Component.PySource('extgen__init__.py') - else: - self.setup_py = setup_py = Component.PySource('setup.py') - self.init_py = init_py = Component.PySource('__init__.py') - - setup_py += self.template_setup_py_start - - self += init_py - self += setup_py - - map(self.add, components) - - return self - - def finalize(self): - self.setup_py += self.template_setup_py_end - - def execute(self, *args): - """ - Run generated setup.py file with given arguments. 
- """ - if not args: - raise ValueError('need setup.py arguments') - self.info(self.generate(dry_run=False)) - cmd = [sys.executable,'setup.py'] + list(args) - self.info('entering %r directory' % (self.path)) - self.info('executing command %r' % (' '.join(cmd))) - try: - r = exec_command(cmd, execute_in=self.path, use_tee=False) - except: - self.info('leaving %r directory' % (self.path)) - raise - else: - self.info('leaving %r directory' % (self.path)) - return r - - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/numpy/f2py/lib/extgen/utils.py b/numpy/f2py/lib/extgen/utils.py deleted file mode 100644 index aa156469f..000000000 --- a/numpy/f2py/lib/extgen/utils.py +++ /dev/null @@ -1,126 +0,0 @@ - -__all__ = ['Word', 'Line', 'Code', 'FileSource'] - -from base import Component - -class Word(Component): - template = '%(word)s' - - def initialize(self, word): - if not word: return None - self.word = word - return self - - def add(self, component, container_label=None): - raise ValueError('%s does not take components' % (self.__class__.__name__)) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.word]+[c for (c,l) in self.components]))) - - -class Line(Component): - - """ - >>> l = Line('hey') - >>> l += ' you ' - >>> l += 2 - >>> print l - Line('hey you 2') - >>> print l.generate() - hey you 2 - >>> l += l - >>> print l.generate() - hey you 2hey you 2 - """ - - template = '%(line)s' - - def initialize(self, *strings): - self.line = '' - map(self.add, strings) - return self - - def add(self, component, container_label=None): - if isinstance(component, Line): - self.line += component.line - elif isinstance(component, str): - self.line += component - elif component is None: - pass - else: - self.line += str(component) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.line]+[c for (c,l) in self.components]))) - - -class Code(Component): - - """ - >>> c = Code('start') - >>> c += 2 - >>> c += 'end' - >>> c - Code(Line('start'), Line('2'), Line('end')) - >>> print c.generate() - start - 2 - end - """ - - template = '%(Line)s' - - container_options = dict( - Line = dict(default = '', ignore_empty_content=True) - ) - component_container_map = dict( - Line = 'Line' - ) - default_component_class_name = 'Line' - - def initialize(self, *lines): - map(self.add, lines) - return self - - def add(self, component, label=None): - if isinstance(component, Code): - assert label is None,`label` - self.components += component.components - else: - Component.add(self, component, label) - - -class FileSource(Component): - - container_options = dict( - Content = dict(default='') - ) - - template = '%(Content)s' - - default_component_class_name = 'Code' - - component_container_map = dict( - Line = 'Content', - Code = 'Content', - ) - - def initialize(self, path, *components, **options): - self.path = path - map(self.add, components) - self._provides = options.pop('provides', path) - if options: self.warning('%s unused options: %s\n' % (self.__class__.__name__, options)) - return self - - def finalize(self): - self._provides = self.get_path() or self._provides - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,[self.path]+[c for (c,l) in self.components]))) - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/numpy/f2py/lib/main.py b/numpy/f2py/lib/main.py deleted file mode 100644 index 
de34895e5..000000000 --- a/numpy/f2py/lib/main.py +++ /dev/null @@ -1,534 +0,0 @@ -""" -Tools for building F2PY generated extension modules. - ------ -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. See http://scipy.org. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -Author: Pearu Peterson -Created: Oct 2006 ------ -""" - -import os -import re -import sys -import tempfile - -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__all__ = ['main', 'compile'] - -__usage__ = """ -F2PY G3 --- The third generation of Fortran to Python Interface Generator -========================================================================= - -Description ------------ - -f2py program generates a Python C/API file (module.c) that -contains wrappers for given Fortran functions and data so that they -can be accessed from Python. With the -c option the corresponding -extension modules are built. - -Options -------- - - --g3-numpy Use numpy.f2py.lib tool, the 3rd generation of F2PY, - with NumPy support. - --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] - --2d-numeric Use f2py2e tool with Numeric support. - --2d-numarray Use f2py2e tool with Numarray support. - - -m Name of the module; f2py generates a Python/C API - file module.c or extension module . - For wrapping Fortran 90 modules, f2py will use Fortran - module names. - --parse Parse Fortran files and print result to stdout. - - -Options effective only with -h ------------------------------- - - -h Write signatures of the fortran routines to file - and exit. You can then edit and use it instead - of for generating extension module source. - If is stdout or stderr then the signatures are - printed to the corresponding stream. - - --overwrite-signature Overwrite existing signature file. - -Options effective only with -c ------------------------------- - - -c Compile fortran sources and build extension module. - - --build-dir All f2py generated files are created in . - Default is tempfile.mktemp() and it will be removed after - f2py stops unless is specified via --build-dir - option. - -numpy.distutils options effective only with -c ----------------------------------------------- - - --fcompiler= Specify Fortran compiler type by vendor - - - -Extra options effective only with -c ------------------------------------- - - -L/path/to/lib/ -l - -D -U - -I/path/to/include/ - .o .(so|dynlib|dll) .a - - Using the following macros may be required with non-gcc Fortran - compilers: - -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN - -DUNDERSCORE_G77 - - -DF2PY_DEBUG_PYOBJ_TOFROM --- pyobj_(to|from)_ functions will - print debugging messages to stderr. - -""" - -import re -import shutil -import parser.api -from parser.api import parse, PythonModule, EndStatement, Module, Subroutine, Function,\ - get_reader - -def get_values(sys_argv, prefix='', suffix='', strip_prefix=False, strip_suffix=False): - """ - Return a list of values with pattern - . - The corresponding items will be removed from sys_argv. - """ - match = re.compile(prefix + r'.*' + suffix + '\Z').match - ret = [item for item in sys_argv if match(item)] - [sys_argv.remove(item) for item in ret] - if strip_prefix and prefix: - i = len(prefix) - ret = [item[i:] for item in ret] - if strip_suffix and suffix: - i = len(suffix) - ret = [item[:-i] for item in ret] - return ret - -def get_option(sys_argv, option, default_return = None): - """ - Return True if sys_argv has