summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.py335
-rw-r--r--numpy/_import_tools.py148
-rw-r--r--numpy/base/__init__.py37
-rw-r--r--numpy/base/_internal.py327
-rw-r--r--numpy/base/arrayprint.py359
-rw-r--r--numpy/base/blasdot/_dotblas.c761
-rw-r--r--numpy/base/blasdot/cblas.h578
-rw-r--r--numpy/base/chararray.py341
-rw-r--r--numpy/base/code_generators/array_api_order.txt62
-rw-r--r--numpy/base/code_generators/genapi.py224
-rw-r--r--numpy/base/code_generators/generate_array_api.py136
-rw-r--r--numpy/base/code_generators/generate_ufunc_api.py91
-rw-r--r--numpy/base/code_generators/generate_umath.py500
-rw-r--r--numpy/base/code_generators/multiarray_api_order.txt66
-rw-r--r--numpy/base/code_generators/ufunc_api_order.txt26
-rw-r--r--numpy/base/convertcode.py147
-rw-r--r--numpy/base/function_base.py815
-rw-r--r--numpy/base/getlimits.py118
-rw-r--r--numpy/base/include/scipy/arrayobject.h1484
-rw-r--r--numpy/base/include/scipy/ufuncobject.h319
-rw-r--r--numpy/base/index_tricks.py291
-rw-r--r--numpy/base/info.py208
-rw-r--r--numpy/base/ma.py2062
-rw-r--r--numpy/base/machar.py268
-rw-r--r--numpy/base/matrix.py273
-rw-r--r--numpy/base/memmap.py88
-rw-r--r--numpy/base/mlab.py14
-rw-r--r--numpy/base/numeric.py428
-rw-r--r--numpy/base/numerictypes.py382
-rw-r--r--numpy/base/oldnumeric.py432
-rw-r--r--numpy/base/polynomial.py554
-rw-r--r--numpy/base/records.py402
-rw-r--r--numpy/base/scimath.py77
-rw-r--r--numpy/base/setup.py284
-rw-r--r--numpy/base/shape_base.py539
-rw-r--r--numpy/base/src/_compiled_base.c453
-rw-r--r--numpy/base/src/_isnan.c47
-rw-r--r--numpy/base/src/_signbit.c32
-rw-r--r--numpy/base/src/_sortmodule.c.src482
-rw-r--r--numpy/base/src/arraymethods.c1647
-rw-r--r--numpy/base/src/arrayobject.c8472
-rw-r--r--numpy/base/src/arraytypes.inc.src1913
-rw-r--r--numpy/base/src/multiarraymodule.c5507
-rw-r--r--numpy/base/src/scalarmathmodule.c.src103
-rw-r--r--numpy/base/src/scalartypes.inc.src2165
-rw-r--r--numpy/base/src/ufuncobject.c3145
-rw-r--r--numpy/base/src/umathmodule.c.src1847
-rw-r--r--numpy/base/tests/test_function_base.py338
-rw-r--r--numpy/base/tests/test_getlimits.py38
-rw-r--r--numpy/base/tests/test_index_tricks.py53
-rw-r--r--numpy/base/tests/test_ma.py637
-rw-r--r--numpy/base/tests/test_matrix.py117
-rw-r--r--numpy/base/tests/test_polynomial.py83
-rw-r--r--numpy/base/tests/test_records.py44
-rw-r--r--numpy/base/tests/test_shape_base.py364
-rw-r--r--numpy/base/tests/test_twodim_base.py134
-rw-r--r--numpy/base/tests/test_type_check.py238
-rw-r--r--numpy/base/tests/test_ufunclike.py63
-rw-r--r--numpy/base/tests/test_umath.py18
-rw-r--r--numpy/base/tests/testdata.fitsbin0 -> 8640 bytes
-rw-r--r--numpy/base/twodim_base.py123
-rw-r--r--numpy/base/type_check.py180
-rw-r--r--numpy/base/ufunclike.py77
-rw-r--r--numpy/base/utils.py28
-rw-r--r--numpy/core_version.py12
-rw-r--r--numpy/corefft/__init__.py22
-rw-r--r--numpy/corefft/fftpack.c1501
-rw-r--r--numpy/corefft/fftpack.h28
-rw-r--r--numpy/corefft/fftpack.py335
-rw-r--r--numpy/corefft/fftpack_litemodule.c266
-rw-r--r--numpy/corefft/helper.py67
-rw-r--r--numpy/corefft/info.py30
-rw-r--r--numpy/corefft/setup.py20
-rw-r--r--numpy/corefft/tests/test_helper.py45
-rw-r--r--numpy/corelinalg/__init__.py25
-rw-r--r--numpy/corelinalg/blas_lite.c10659
-rw-r--r--numpy/corelinalg/dlamch.c951
-rw-r--r--numpy/corelinalg/dlapack_lite.c36005
-rw-r--r--numpy/corelinalg/f2c.h217
-rw-r--r--numpy/corelinalg/f2c_lite.c492
-rw-r--r--numpy/corelinalg/info.py25
-rw-r--r--numpy/corelinalg/lapack_litemodule.c692
-rw-r--r--numpy/corelinalg/linalg.py556
-rw-r--r--numpy/corelinalg/setup.py31
-rw-r--r--numpy/corelinalg/zlapack_lite.c26018
-rw-r--r--numpy/distutils/__init__.py16
-rw-r--r--numpy/distutils/__version__.py4
-rw-r--r--numpy/distutils/ccompiler.py359
-rw-r--r--numpy/distutils/command/__init__.py31
-rw-r--r--numpy/distutils/command/bdist_rpm.py17
-rw-r--r--numpy/distutils/command/build.py8
-rw-r--r--numpy/distutils/command/build_clib.py185
-rw-r--r--numpy/distutils/command/build_ext.py349
-rw-r--r--numpy/distutils/command/build_py.py13
-rw-r--r--numpy/distutils/command/build_scripts.py44
-rw-r--r--numpy/distutils/command/build_src.py542
-rw-r--r--numpy/distutils/command/config.py63
-rw-r--r--numpy/distutils/command/config_compiler.py56
-rw-r--r--numpy/distutils/command/install.py9
-rw-r--r--numpy/distutils/command/install_data.py14
-rw-r--r--numpy/distutils/command/install_headers.py26
-rw-r--r--numpy/distutils/command/sdist.py26
-rw-r--r--numpy/distutils/conv_template.py200
-rw-r--r--numpy/distutils/core.py138
-rw-r--r--numpy/distutils/cpuinfo.py687
-rw-r--r--numpy/distutils/exec_command.py645
-rw-r--r--numpy/distutils/extension.py74
-rw-r--r--numpy/distutils/fcompiler/__init__.py755
-rw-r--r--numpy/distutils/fcompiler/absoft.py137
-rw-r--r--numpy/distutils/fcompiler/compaq.py94
-rw-r--r--numpy/distutils/fcompiler/g95.py41
-rw-r--r--numpy/distutils/fcompiler/gnu.py244
-rw-r--r--numpy/distutils/fcompiler/hpux.py41
-rw-r--r--numpy/distutils/fcompiler/ibm.py80
-rw-r--r--numpy/distutils/fcompiler/intel.py174
-rw-r--r--numpy/distutils/fcompiler/lahey.py46
-rw-r--r--numpy/distutils/fcompiler/mips.py56
-rw-r--r--numpy/distutils/fcompiler/nag.py39
-rw-r--r--numpy/distutils/fcompiler/none.py24
-rw-r--r--numpy/distutils/fcompiler/pg.py42
-rw-r--r--numpy/distutils/fcompiler/sun.py47
-rw-r--r--numpy/distutils/fcompiler/vast.py50
-rw-r--r--numpy/distutils/from_template.py262
-rw-r--r--numpy/distutils/intelccompiler.py30
-rw-r--r--numpy/distutils/lib2def.py116
-rw-r--r--numpy/distutils/line_endings.py75
-rw-r--r--numpy/distutils/log.py47
-rw-r--r--numpy/distutils/mingw32ccompiler.py219
-rw-r--r--numpy/distutils/misc_util.py988
-rw-r--r--numpy/distutils/setup.py14
-rw-r--r--numpy/distutils/system_info.py1644
-rw-r--r--numpy/distutils/tests/f2py_ext/__init__.py0
-rw-r--r--numpy/distutils/tests/f2py_ext/setup.py12
-rw-r--r--numpy/distutils/tests/f2py_ext/src/fib1.f18
-rw-r--r--numpy/distutils/tests/f2py_ext/src/fib2.pyf9
-rw-r--r--numpy/distutils/tests/f2py_ext/tests/test_fib2.py13
-rw-r--r--numpy/distutils/tests/f2py_f90_ext/__init__.py0
-rw-r--r--numpy/distutils/tests/f2py_f90_ext/include/body.f905
-rw-r--r--numpy/distutils/tests/f2py_f90_ext/setup.py16
-rw-r--r--numpy/distutils/tests/f2py_f90_ext/src/foo_free.f906
-rw-r--r--numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py13
-rw-r--r--numpy/distutils/tests/gen_ext/__init__.py0
-rw-r--r--numpy/distutils/tests/gen_ext/setup.py47
-rw-r--r--numpy/distutils/tests/gen_ext/tests/test_fib3.py13
-rw-r--r--numpy/distutils/tests/swig_ext/__init__.py0
-rw-r--r--numpy/distutils/tests/swig_ext/setup.py14
-rw-r--r--numpy/distutils/tests/swig_ext/src/example.c14
-rw-r--r--numpy/distutils/tests/swig_ext/src/example.i11
-rw-r--r--numpy/distutils/tests/swig_ext/src/zoo.cc23
-rw-r--r--numpy/distutils/tests/swig_ext/src/zoo.h9
-rw-r--r--numpy/distutils/tests/swig_ext/src/zoo.i10
-rw-r--r--numpy/distutils/tests/swig_ext/tests/test_example.py18
-rw-r--r--numpy/distutils/tests/swig_ext/tests/test_example2.py17
-rw-r--r--numpy/distutils/tests/test_misc_util.py33
-rw-r--r--numpy/distutils/unixccompiler.py65
-rw-r--r--numpy/doc/CAPI.txt317
-rw-r--r--numpy/doc/DISTUTILS.txt509
-rw-r--r--numpy/doc/README.txt15
-rw-r--r--numpy/doc/records.txt86
-rw-r--r--numpy/doc/ufuncs.txt98
-rw-r--r--numpy/f2py/BUGS.txt55
-rw-r--r--numpy/f2py/Makefile173
-rw-r--r--numpy/f2py/NEWS.txt2
-rw-r--r--numpy/f2py/README.txt5
-rw-r--r--numpy/f2py/TODO.txt67
-rw-r--r--numpy/f2py/__init__.py40
-rw-r--r--numpy/f2py/__version__.py9
-rw-r--r--numpy/f2py/auxfuncs.py489
-rw-r--r--numpy/f2py/capi_maps.py723
-rw-r--r--numpy/f2py/cb_rules.py534
-rw-r--r--numpy/f2py/cfuncs.py1134
-rw-r--r--numpy/f2py/common_rules.py132
-rwxr-xr-xnumpy/f2py/crackfortran.py2659
-rw-r--r--numpy/f2py/diagnose.py166
-rw-r--r--numpy/f2py/doc/Makefile76
-rw-r--r--numpy/f2py/doc/Release-1.x.txt27
-rw-r--r--numpy/f2py/doc/Release-2.x.txt77
-rw-r--r--numpy/f2py/doc/Release-3.x.txt87
-rw-r--r--numpy/f2py/doc/Release-4.x.txt91
-rw-r--r--numpy/f2py/doc/apps.tex71
-rw-r--r--numpy/f2py/doc/bugs.tex109
-rwxr-xr-xnumpy/f2py/doc/collectinput.py77
-rw-r--r--numpy/f2py/doc/commands.tex20
-rw-r--r--numpy/f2py/doc/ex1/arr.f4
-rw-r--r--numpy/f2py/doc/ex1/bar.f4
-rw-r--r--numpy/f2py/doc/ex1/foo.f5
-rw-r--r--numpy/f2py/doc/ex1/foobar-smart.f9024
-rw-r--r--numpy/f2py/doc/ex1/foobar.f9016
-rw-r--r--numpy/f2py/doc/ex1/foobarmodule.tex36
-rwxr-xr-xnumpy/f2py/doc/ex1/runme18
-rw-r--r--numpy/f2py/doc/f2py2e.tex50
-rw-r--r--numpy/f2py/doc/f2python9-final/README.txt38
-rw-r--r--numpy/f2py/doc/f2python9-final/aerostructure.jpgbin0 -> 72247 bytes
-rw-r--r--numpy/f2py/doc/f2python9-final/flow.jpgbin0 -> 13266 bytes
-rwxr-xr-xnumpy/f2py/doc/f2python9-final/mk_html.sh13
-rwxr-xr-xnumpy/f2py/doc/f2python9-final/mk_pdf.sh13
-rwxr-xr-xnumpy/f2py/doc/f2python9-final/mk_ps.sh14
-rw-r--r--numpy/f2py/doc/f2python9-final/src/examples/exp1.f26
-rw-r--r--numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt17
-rw-r--r--numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt20
-rw-r--r--numpy/f2py/doc/f2python9-final/src/examples/foo.pyf13
-rw-r--r--numpy/f2py/doc/f2python9-final/src/examples/foom.pyf14
-rw-r--r--numpy/f2py/doc/f2python9-final/structure.jpgbin0 -> 17860 bytes
-rw-r--r--numpy/f2py/doc/fortranobject.tex574
-rw-r--r--numpy/f2py/doc/index.html265
-rw-r--r--numpy/f2py/doc/intro.tex158
-rw-r--r--numpy/f2py/doc/multiarray/array_from_pyobj.c323
-rw-r--r--numpy/f2py/doc/multiarray/bar.c15
-rw-r--r--numpy/f2py/doc/multiarray/foo.f13
-rw-r--r--numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt284
-rw-r--r--numpy/f2py/doc/multiarray/fun.pyf89
-rw-r--r--numpy/f2py/doc/multiarray/run.pyf91
-rw-r--r--numpy/f2py/doc/multiarray/transpose.txt1127
-rw-r--r--numpy/f2py/doc/multiarrays.txt120
-rw-r--r--numpy/f2py/doc/notes.tex310
-rw-r--r--numpy/f2py/doc/oldnews.html121
-rw-r--r--numpy/f2py/doc/options.tex63
-rw-r--r--numpy/f2py/doc/python9.tex1046
-rw-r--r--numpy/f2py/doc/signaturefile.tex368
-rw-r--r--numpy/f2py/doc/using_F_compiler.txt147
-rw-r--r--numpy/f2py/doc/win32_notes.txt85
-rw-r--r--numpy/f2py/docs/FAQ.txt615
-rw-r--r--numpy/f2py/docs/HISTORY.txt1044
-rw-r--r--numpy/f2py/docs/OLDNEWS.txt63
-rw-r--r--numpy/f2py/docs/README.txt457
-rw-r--r--numpy/f2py/docs/TESTING.txt108
-rw-r--r--numpy/f2py/docs/THANKS.txt63
-rw-r--r--numpy/f2py/docs/default.css180
-rw-r--r--numpy/f2py/docs/docutils.conf16
-rw-r--r--numpy/f2py/docs/hello.f7
-rw-r--r--numpy/f2py/docs/pyforttest.pyf5
-rw-r--r--numpy/f2py/docs/pytest.py10
-rw-r--r--numpy/f2py/docs/simple.f13
-rw-r--r--numpy/f2py/docs/simple_session.dat51
-rw-r--r--numpy/f2py/docs/usersguide/allocarr.f9016
-rw-r--r--numpy/f2py/docs/usersguide/allocarr_session.dat27
-rw-r--r--numpy/f2py/docs/usersguide/array.f17
-rw-r--r--numpy/f2py/docs/usersguide/array_session.dat65
-rw-r--r--numpy/f2py/docs/usersguide/calculate.f14
-rw-r--r--numpy/f2py/docs/usersguide/calculate_session.dat6
-rw-r--r--numpy/f2py/docs/usersguide/callback.f12
-rw-r--r--numpy/f2py/docs/usersguide/callback2.pyf19
-rw-r--r--numpy/f2py/docs/usersguide/callback_session.dat23
-rw-r--r--numpy/f2py/docs/usersguide/common.f13
-rw-r--r--numpy/f2py/docs/usersguide/common_session.dat27
-rw-r--r--numpy/f2py/docs/usersguide/compile_session.dat11
-rw-r--r--numpy/f2py/docs/usersguide/default.css180
-rw-r--r--numpy/f2py/docs/usersguide/docutils.conf16
-rw-r--r--numpy/f2py/docs/usersguide/extcallback.f14
-rw-r--r--numpy/f2py/docs/usersguide/extcallback_session.dat19
-rw-r--r--numpy/f2py/docs/usersguide/fib1.f18
-rw-r--r--numpy/f2py/docs/usersguide/fib1.pyf12
-rw-r--r--numpy/f2py/docs/usersguide/fib2.pyf9
-rw-r--r--numpy/f2py/docs/usersguide/fib3.f21
-rw-r--r--numpy/f2py/docs/usersguide/ftype.f9
-rw-r--r--numpy/f2py/docs/usersguide/ftype_session.dat21
-rw-r--r--numpy/f2py/docs/usersguide/index.txt1772
-rw-r--r--numpy/f2py/docs/usersguide/moddata.f9018
-rw-r--r--numpy/f2py/docs/usersguide/moddata_session.dat23
-rw-r--r--numpy/f2py/docs/usersguide/run_main_session.dat14
-rw-r--r--numpy/f2py/docs/usersguide/scalar.f12
-rw-r--r--numpy/f2py/docs/usersguide/scalar_session.dat21
-rw-r--r--numpy/f2py/docs/usersguide/setup_example.py19
-rw-r--r--numpy/f2py/docs/usersguide/spam.pyf19
-rw-r--r--numpy/f2py/docs/usersguide/spam_session.dat5
-rw-r--r--numpy/f2py/docs/usersguide/string.f21
-rw-r--r--numpy/f2py/docs/usersguide/string_session.dat27
-rw-r--r--numpy/f2py/docs/usersguide/var.pyf11
-rw-r--r--numpy/f2py/docs/usersguide/var_session.dat3
-rw-r--r--numpy/f2py/f2py.1209
-rwxr-xr-xnumpy/f2py/f2py2e.py555
-rw-r--r--numpy/f2py/f2py_testing.py74
-rw-r--r--numpy/f2py/f90mod_rules.py240
-rw-r--r--numpy/f2py/func2subr.py165
-rw-r--r--numpy/f2py/rules.py1345
-rw-r--r--numpy/f2py/setup.cfg3
-rwxr-xr-xnumpy/f2py/setup.py107
-rw-r--r--numpy/f2py/src/fortranobject.c756
-rw-r--r--numpy/f2py/src/fortranobject.h123
-rw-r--r--numpy/f2py/src/test/Makefile96
-rw-r--r--numpy/f2py/src/test/bar.f11
-rw-r--r--numpy/f2py/src/test/foo.f11
-rw-r--r--numpy/f2py/src/test/foo90.f9013
-rw-r--r--numpy/f2py/src/test/foomodule.c143
-rw-r--r--numpy/f2py/src/test/wrap.f70
-rw-r--r--numpy/f2py/tests/array_from_pyobj/__init__.py0
-rw-r--r--numpy/f2py/tests/array_from_pyobj/setup.py26
-rw-r--r--numpy/f2py/tests/array_from_pyobj/tests/test_array_from_pyobj.py512
-rw-r--r--numpy/f2py/tests/array_from_pyobj/wrapmodule.c194
-rw-r--r--numpy/f2py/tests/c/return_real.py108
-rw-r--r--numpy/f2py/tests/f77/callback.py99
-rw-r--r--numpy/f2py/tests/f77/return_character.py100
-rw-r--r--numpy/f2py/tests/f77/return_complex.py125
-rw-r--r--numpy/f2py/tests/f77/return_integer.py148
-rw-r--r--numpy/f2py/tests/f77/return_logical.py135
-rw-r--r--numpy/f2py/tests/f77/return_real.py127
-rw-r--r--numpy/f2py/tests/f90/return_character.py100
-rw-r--r--numpy/f2py/tests/f90/return_complex.py127
-rw-r--r--numpy/f2py/tests/f90/return_integer.py152
-rw-r--r--numpy/f2py/tests/f90/return_logical.py138
-rw-r--r--numpy/f2py/tests/f90/return_real.py129
-rw-r--r--numpy/f2py/tests/mixed/foo.f5
-rw-r--r--numpy/f2py/tests/mixed/foo_fixed.f908
-rw-r--r--numpy/f2py/tests/mixed/foo_free.f908
-rw-r--r--numpy/f2py/tests/mixed/run.py50
-rwxr-xr-xnumpy/f2py/tests/run_all.py55
-rw-r--r--numpy/f2py/use_rules.py115
-rw-r--r--numpy/random/__init__.py17
-rw-r--r--numpy/random/info.py56
-rw-r--r--numpy/random/mtrand/Python.pxi24
-rw-r--r--numpy/random/mtrand/distributions.c845
-rw-r--r--numpy/random/mtrand/distributions.h185
-rw-r--r--numpy/random/mtrand/generate_mtrand_c.py38
-rw-r--r--numpy/random/mtrand/initarray.c134
-rw-r--r--numpy/random/mtrand/initarray.h6
-rw-r--r--numpy/random/mtrand/mtrand.c5990
-rw-r--r--numpy/random/mtrand/mtrand.pyx972
-rw-r--r--numpy/random/mtrand/randomkit.c355
-rw-r--r--numpy/random/mtrand/randomkit.h189
-rw-r--r--numpy/random/mtrand/scipy.pxi52
-rw-r--r--numpy/random/setup.py24
-rw-r--r--numpy/setup.py26
-rw-r--r--numpy/testing/__init__.py4
-rw-r--r--numpy/testing/info.py30
-rw-r--r--numpy/testing/scipytest.py385
-rwxr-xr-xnumpy/testing/setup.py16
-rw-r--r--numpy/testing/utils.py210
327 files changed, 163136 insertions, 0 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
new file mode 100644
index 000000000..2fced64fc
--- /dev/null
+++ b/numpy/__init__.py
@@ -0,0 +1,335 @@
+"""\
+SciPy Core
+==========
+
+You can support the development of SciPy by purchasing documentation
+at
+
+ http://www.trelgol.com
+
+It is being distributed for a fee for a limited time to try and raise
+money for development.
+
+Documentation is also available in the docstrings.
+
+Available subpackages
+---------------------
+"""
+
+import os, sys
+NO_SCIPY_IMPORT = os.environ.get('NO_SCIPY_IMPORT',None)
+SCIPY_IMPORT_VERBOSE = int(os.environ.get('SCIPY_IMPORT_VERBOSE','0'))
+
+try:
+ from __core_config__ import show as show_core_config
+except ImportError:
+ show_core_config = None
+
+class PackageLoader:
+ def __init__(self):
+ """ Manages loading SciPy packages.
+ """
+
+ self.parent_frame = frame = sys._getframe(1)
+ self.parent_name = eval('__name__',frame.f_globals,frame.f_locals)
+ self.parent_path = eval('__path__',frame.f_globals,frame.f_locals)
+ if not frame.f_locals.has_key('__all__'):
+ exec('__all__ = []',frame.f_globals,frame.f_locals)
+ self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals)
+
+ self.info_modules = None
+ self.imported_packages = []
+ self.verbose = None
+
+ def _get_info_files(self, package_dir, parent_path, parent_package=None):
+ """ Return list of (package name,info.py file) from parent_path subdirectories.
+ """
+ from glob import glob
+ files = glob(os.path.join(parent_path,package_dir,'info.py'))
+ for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')):
+ if info_file[:-1] not in files:
+ files.append(info_file)
+ info_files = []
+ for info_file in files:
+ package_name = os.path.dirname(info_file[len(parent_path)+1:])\
+ .replace(os.sep,'.')
+ if parent_package:
+ package_name = parent_package + '.' + package_name
+ info_files.append((package_name,info_file))
+ info_files.extend(self._get_info_files('*',
+ os.path.dirname(info_file),
+ package_name))
+ return info_files
+
+ def _init_info_modules(self, packages=None):
+ """Initialize info_modules = {<package_name>: <package info.py module>}.
+ """
+ import imp
+ info_files = []
+ if packages is None:
+ for path in self.parent_path:
+ info_files.extend(self._get_info_files('*',path))
+ else:
+ for package_name in packages:
+ package_dir = os.path.join(*package_name.split('.'))
+ for path in self.parent_path:
+ names_files = self._get_info_files(package_dir, path)
+ if names_files:
+ info_files.extend(names_files)
+ break
+ else:
+ self.warn('Package %r does not have info.py file. Ignoring.'\
+ % package_name)
+
+ info_modules = self.info_modules
+ for package_name,info_file in info_files:
+ if info_modules.has_key(package_name):
+ continue
+ fullname = self.parent_name +'.'+ package_name
+ if info_file[-1]=='c':
+ filedescriptor = ('.pyc','rb',2)
+ else:
+ filedescriptor = ('.py','U',1)
+
+ try:
+ info_module = imp.load_module(fullname+'.info',
+ open(info_file,filedescriptor[1]),
+ info_file,
+ filedescriptor)
+ except Exception,msg:
+ self.error(msg)
+ info_module = None
+
+ if info_module is None or getattr(info_module,'ignore',False):
+ info_modules.pop(package_name,None)
+ else:
+ self._init_info_modules(getattr(info_module,'depends',[]))
+ info_modules[package_name] = info_module
+
+ return
+
+ def _get_sorted_names(self):
+ """ Return package names sorted in the order as they should be
+ imported due to dependence relations between packages.
+ """
+
+ depend_dict = {}
+ for name,info_module in self.info_modules.items():
+ depend_dict[name] = getattr(info_module,'depends',[])
+ package_names = []
+
+ for name in depend_dict.keys():
+ if not depend_dict[name]:
+ package_names.append(name)
+ del depend_dict[name]
+
+ while depend_dict:
+ for name, lst in depend_dict.items():
+ new_lst = [n for n in lst if depend_dict.has_key(n)]
+ if not new_lst:
+ package_names.append(name)
+ del depend_dict[name]
+ else:
+ depend_dict[name] = new_lst
+
+ return package_names
+
+ def __call__(self,*packages, **options):
+ """Load one or more packages into scipy's top-level namespace.
+
+ Usage:
+
+ This function is intended to shorten the need to import many of scipy's
+ submodules constantly with statements such as
+
+ import scipy.linalg, scipy.fft, scipy.etc...
+
+ Instead, you can say:
+
+ import scipy
+ scipy.pkgload('linalg','fft',...)
+
+ or
+
+ scipy.pkgload()
+
+ to load all of them in one call.
+
+ If a name which doesn't exist in scipy's namespace is
+      given, an ImportError is raised.
+ [NotImplemented]
+
+ Inputs:
+
+ - the names (one or more strings) of all the scipy modules one wishes to
+ load into the top-level namespace.
+
+ Optional keyword inputs:
+
+ - verbose - integer specifying verbosity level [default: 0].
+ - force - when True, force reloading loaded packages [default: False].
+ - postpone - when True, don't load packages [default: False]
+
+ If no input arguments are given, then all of scipy's subpackages are
+ imported.
+
+
+ Outputs:
+
+ The function returns a tuple with all the names of the modules which
+ were actually imported. [NotImplemented]
+
+ """
+ frame = self.parent_frame
+ self.info_modules = {}
+ if options.get('force',False):
+ self.imported_packages = []
+ self.verbose = verbose = options.get('verbose',False)
+ postpone = options.get('postpone',False)
+
+ self._init_info_modules(packages or None)
+
+ self.log('Imports to %r namespace\n----------------------------'\
+ % self.parent_name)
+
+ for package_name in self._get_sorted_names():
+ if package_name in self.imported_packages:
+ continue
+ info_module = self.info_modules[package_name]
+ global_symbols = getattr(info_module,'global_symbols',[])
+ if postpone and not global_symbols:
+ self.log('__all__.append(%r)' % (package_name))
+ if '.' not in package_name:
+ self.parent_export_names.append(package_name)
+ continue
+
+ old_object = frame.f_locals.get(package_name,None)
+
+ cmdstr = 'import '+package_name
+ if self._execcmd(cmdstr):
+ continue
+ self.imported_packages.append(package_name)
+
+ if verbose!=-1:
+ new_object = frame.f_locals.get(package_name)
+ if old_object is not None and old_object is not new_object:
+ self.warn('Overwriting %s=%s (was %s)' \
+ % (package_name,self._obj2str(new_object),
+ self._obj2str(old_object)))
+
+ if '.' not in package_name:
+ self.parent_export_names.append(package_name)
+
+ for symbol in global_symbols:
+ if symbol=='*':
+ symbols = eval('getattr(%s,"__all__",None)'\
+ % (package_name),
+ frame.f_globals,frame.f_locals)
+ if symbols is None:
+ symbols = eval('dir(%s)' % (package_name),
+ frame.f_globals,frame.f_locals)
+ symbols = filter(lambda s:not s.startswith('_'),symbols)
+ else:
+ symbols = [symbol]
+
+ if verbose!=-1:
+ old_objects = {}
+ for s in symbols:
+ if frame.f_locals.has_key(s):
+ old_objects[s] = frame.f_locals[s]
+
+ cmdstr = 'from '+package_name+' import '+symbol
+ if self._execcmd(cmdstr):
+ continue
+
+ if verbose!=-1:
+ for s,old_object in old_objects.items():
+ new_object = frame.f_locals[s]
+ if new_object is not old_object:
+ self.warn('Overwriting %s=%s (was %s)' \
+ % (s,self._obj2repr(new_object),
+ self._obj2repr(old_object)))
+
+ if symbol=='*':
+ self.parent_export_names.extend(symbols)
+ else:
+ self.parent_export_names.append(symbol)
+
+ return
+
+ def _execcmd(self,cmdstr):
+ """ Execute command in parent_frame."""
+ frame = self.parent_frame
+ try:
+ exec (cmdstr, frame.f_globals,frame.f_locals)
+ except Exception,msg:
+ self.error('%s -> failed: %s' % (cmdstr,msg))
+ return True
+ else:
+ self.log('%s -> success' % (cmdstr))
+ return
+
+ def _obj2repr(self,obj):
+ """ Return repr(obj) with"""
+ module = getattr(obj,'__module__',None)
+ file = getattr(obj,'__file__',None)
+ if module is not None:
+ return repr(obj) + ' from ' + module
+ if file is not None:
+ return repr(obj) + ' from ' + file
+ return repr(obj)
+
+ def log(self,mess):
+ if self.verbose>1:
+ print >> sys.stderr, str(mess)
+ def warn(self,mess):
+ if self.verbose>=0:
+ print >> sys.stderr, str(mess)
+ def error(self,mess):
+ if self.verbose!=-1:
+ print >> sys.stderr, str(mess)
+
+try:
+ import pkg_resources # activate namespace packages (manipulates __path__)
+except ImportError:
+ pass
+
+pkgload = PackageLoader()
+
+if show_core_config is None:
+ print >> sys.stderr, 'Running from scipy core source directory.'
+else:
+ from core_version import version as __core_version__
+
+ pkgload('testing','base','corefft','corelinalg','random',
+ verbose=SCIPY_IMPORT_VERBOSE)
+
+
+ test = ScipyTest('scipy').test
+ __all__.append('test')
+
+__scipy_doc__ = """
+
+SciPy: A scientific computing package for Python
+================================================
+
+Available subpackages
+---------------------
+"""
+
+if NO_SCIPY_IMPORT is not None:
+ print >> sys.stderr, 'Skip importing scipy packages (NO_SCIPY_IMPORT=%s)' % (NO_SCIPY_IMPORT)
+ show_scipy_config = None
+elif show_core_config is None:
+ show_scipy_config = None
+else:
+ try:
+ from __scipy_config__ import show as show_scipy_config
+ except ImportError:
+ show_scipy_config = None
+
+
+if show_scipy_config is not None:
+ from scipy_version import scipy_version as __scipy_version__
+ __doc__ += __scipy_doc__
+ pkgload(verbose=SCIPY_IMPORT_VERBOSE,postpone=True)
diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py
new file mode 100644
index 000000000..77cd87f79
--- /dev/null
+++ b/numpy/_import_tools.py
@@ -0,0 +1,148 @@
+
+import os
+import sys
+import imp
+from glob import glob
+
+class PackageImport:
+ """ Import packages from the current directory that implement
+ info.py. See scipy/doc/DISTUTILS.txt for more info.
+ """
+
+ imported_packages = []
+
+ def __init__(self):
+ self.frame = frame = sys._getframe(1)
+ self.parent_name = eval('__name__',frame.f_globals,frame.f_locals)
+ self.parent_path = eval('__path__[0]',frame.f_globals,frame.f_locals)
+
+ def get_info_modules(self,packages=None):
+ """
+ Return info modules of packages or all packages in parent path.
+ """
+ if packages is None:
+ info_files = glob(os.path.join(self.parent_path,'*','info.py'))
+ else:
+ info_files = [os.path.join(self.parent_path,package,'info.py') \
+ for package in packages]
+ info_modules = {}
+ for info_file in info_files:
+ package_name = os.path.basename(os.path.dirname(info_file))
+ fullname = self.parent_name +'.'+ package_name
+ try:
+ info_module = imp.load_module(fullname+'.info',
+ open(info_file,'U'),
+ info_file,
+ ('.py','U',1))
+ except Exception,msg:
+ print >> sys.stderr, msg
+ info_module = None
+
+ if info_module is None:
+ continue
+ if getattr(info_module,'ignore',False):
+ continue
+
+ info_modules[fullname] = info_module
+
+ return info_modules
+
+ def _sort_info_modules(self, info_modules):
+ """
+ Return package names sorted in the order as they should be
+ imported due to dependence relations between packages.
+ """
+ depend_dict = {}
+ for fullname,info_module in info_modules.items():
+ depend_dict[fullname] = getattr(info_module,'depends',[])
+ package_names = []
+
+ for name in depend_dict.keys():
+ if not depend_dict[name]:
+ package_names.append(name)
+ del depend_dict[name]
+
+ while depend_dict:
+ for name, lst in depend_dict.items():
+ new_lst = [n for n in lst if depend_dict.has_key(n)]
+ if not new_lst:
+ package_names.append(name)
+ del depend_dict[name]
+ else:
+ depend_dict[name] = new_lst
+
+ return package_names
+
+ def _get_doc_title(self, info_module):
+ """ Get the title from a package info.py file.
+ """
+ title = getattr(info_module,'__doc_title__',None)
+ if title is not None:
+ return title
+ title = getattr(info_module,'__doc__',None)
+ if title is not None:
+ title = title.lstrip().split('\n',1)[0]
+ return title
+ return '* Not Available *'
+
+ def _format_titles(self,titles):
+ lengths = [len(name)-name.find('.')-1 for (name,title) in titles]
+ max_length = max(lengths)
+ lines = []
+ for (name,title) in titles:
+ name = name[name.find('.')+1:]
+ w = max_length - len(name)
+ lines.append('%s%s --- %s' % (name, w*' ', title))
+ return '\n'.join(lines)
+
+ def import_packages(self, packages=None):
+ """
+ Import packages that implement info.py.
+        Return a list of documentation strings info.__doc__ of successfully
+ imported packages.
+ """
+ info_modules = self.get_info_modules(packages)
+ package_names = self._sort_info_modules(info_modules)
+ frame = self.frame
+
+ titles = []
+
+ for fullname in package_names:
+ if fullname in self.imported_packages:
+ continue
+ package_name = fullname.split('.')[-1]
+ info_module = info_modules[fullname]
+ global_symbols = getattr(info_module,'global_symbols',[])
+ postpone_import = getattr(info_module,'postpone_import',True)
+
+ try:
+ #print 'Importing',package_name,'to',self.parent_name
+ exec ('import '+package_name, frame.f_globals,frame.f_locals)
+ except Exception,msg:
+ print >> sys.stderr, 'Failed to import',package_name
+ print >> sys.stderr, msg
+ raise
+ continue
+
+ self.imported_packages.append(fullname)
+
+ for symbol in global_symbols:
+ try:
+ exec ('from '+package_name+' import '+symbol,
+ frame.f_globals,frame.f_locals)
+ except Exception,msg:
+ print >> sys.stderr, 'Failed to import',symbol,'from',package_name
+ print >> sys.stderr, msg
+ continue
+
+ titles.append((fullname,self._get_doc_title(info_module)))
+
+ try:
+ exec ('\n%s.test = ScipyTest(%s).test' \
+ % (package_name,package_name),
+ frame.f_globals,frame.f_locals)
+ except Exception,msg:
+ print >> sys.stderr, 'Failed to set test function for',package_name
+ print >> sys.stderr, msg
+
+ return self._format_titles(titles)
diff --git a/numpy/base/__init__.py b/numpy/base/__init__.py
new file mode 100644
index 000000000..0c0c158df
--- /dev/null
+++ b/numpy/base/__init__.py
@@ -0,0 +1,37 @@
+
+from info import __doc__
+from scipy.core_version import version as __version__
+
+import multiarray
+import umath
+import numerictypes as nt
+multiarray.set_typeDict(nt.typeDict)
+import _sort
+from numeric import *
+from oldnumeric import *
+from matrix import *
+from type_check import *
+from index_tricks import *
+from function_base import *
+from shape_base import *
+from twodim_base import *
+from ufunclike import *
+
+import scimath as math
+from polynomial import *
+from machar import *
+from getlimits import *
+import ma
+import chararray as char
+import records as rec
+from records import *
+from memmap import *
+import convertcode
+del nt
+
+from utils import *
+
+__all__ = filter(lambda s:not s.startswith('_'),dir())
+
+from scipy.testing import ScipyTest
+test = ScipyTest().test
diff --git a/numpy/base/_internal.py b/numpy/base/_internal.py
new file mode 100644
index 000000000..260381e85
--- /dev/null
+++ b/numpy/base/_internal.py
@@ -0,0 +1,327 @@
+
+#A place for code to be called from C-code
+# that implements more complicated stuff.
+
+import re
+from multiarray import _flagdict, dtypedescr, ndarray
+
# All flag names known to the C extension (WRITEABLE, ALIGNED, ...).
_defflags = _flagdict.keys()

# Flags the user may assign, as full names followed by their one-letter
# abbreviations.
_setable = ['WRITEABLE','UPDATEIFCOPY', 'ALIGNED',
            'W','U','A']
# ndarray.setflags() keyword for each entry of _setable; the 3-element
# list is doubled so the abbreviations map to the same keywords.
_setable2 = ['write','uic','align']*2
# First letter of a flag name -> full flag name.
_firstltr = {'W':'WRITEABLE',
             'A':'ALIGNED',
             'C':'CONTIGUOUS',
             'F':'FORTRAN',
             'O':'OWNDATA',
             'U':'UPDATEIFCOPY'}

# Cached bit values of the individual flags, for the compound tests below.
_anum = _flagdict['ALIGNED']
_wnum = _flagdict['WRITEABLE']
_cnum = _flagdict['CONTIGUOUS']
_fnum = _flagdict['FORTRAN']
_unum = _flagdict['UPDATEIFCOPY']
_onum = _flagdict['OWNDATA']
+
class flagsobj(dict):
    """Dict-backed view of an ndarray's integer flags word.

    Lookup works by full flag name, by first letter ('W', 'A', ...), and
    by the compound pseudo-flags ('B'/'BEHAVED', 'FNC', 'FORC',
    'CARRAY'/'CA', 'FARRAY'/'FA').  Only WRITEABLE, UPDATEIFCOPY and
    ALIGNED (or their one-letter forms) may be assigned, and only for
    true arrays, never for array scalars.
    """

    def __init__(self, arr, flags, scalar):
        # arr    : the array whose flags are mirrored here
        # flags  : the raw integer flags word
        # scalar : true for array scalars, whose flags are read-only
        self._arr = arr
        self._flagnum = flags
        for k in _defflags:
            num = _flagdict[k]
            dict.__setitem__(self, k, flags & num == num)
        self.scalar = scalar

    def __getitem__(self, key):
        if not isinstance(key, str):
            raise KeyError("Unknown flag %s" % key)
        if len(key) == 1:
            try:
                return dict.__getitem__(self, _firstltr[key])
            except KeyError:
                if key == 'B':   # BEHAVED: ALIGNED and WRITEABLE
                    num = _anum + _wnum
                    return self._flagnum & num == num
                # Fixed: previously fell through and silently returned
                # None for any other unknown single letter.
                raise KeyError("Unknown flag: %s" % key)
        else:
            try:
                return dict.__getitem__(self, key)
            except KeyError:  # compound pseudo-flags
                if key == 'FNC':    # FORTRAN and not CONTIGUOUS
                    return (self._flagnum & _fnum == _fnum) and not \
                           (self._flagnum & _cnum == _cnum)
                if key == 'FORC':   # FORTRAN or CONTIGUOUS
                    return (self._flagnum & _fnum == _fnum) or \
                           (self._flagnum & _cnum == _cnum)
                if key == 'BEHAVED':
                    num = _anum + _wnum
                    return self._flagnum & num == num
                if key in ['CARRAY', 'CA']:
                    num = _anum + _wnum + _cnum
                    return self._flagnum & num == num
                if key in ['FARRAY', 'FA']:
                    num = _anum + _wnum + _fnum
                    return (self._flagnum & num == num) and not \
                           (self._flagnum & _cnum == _cnum)
                raise KeyError("Unknown flag: %s" % key)

    def __setitem__(self, item, val):
        """Set one of the user-settable flags and propagate the change
        to the underlying array via ndarray.setflags()."""
        if self.scalar:
            raise ValueError("Cannot set flags on array scalars.")
        val = not not val   # coerce to a plain boolean
        if item not in _setable:
            # Fixed: the old form ``raise KeyError, "Cannot set flag", item``
            # passed item as the *traceback* argument and itself raised
            # a TypeError instead of the intended KeyError.
            raise KeyError("Cannot set flag %s" % item)
        dict.__setitem__(self, item, val)   # keep the cached view in sync

        # item is guaranteed to be in _setable here; _setable2 is aligned
        # with _setable and holds the matching setflags() keyword.
        kwds = {_setable2[_setable.index(item)]: val}

        # now actually update array flags
        self._arr.setflags(**kwds)

    # --- boolean accessors backing the compound properties ----------

    def get_fnc(self):
        fl = self._flagnum
        return (fl & _fnum == _fnum) and \
               not (fl & _cnum == _cnum)

    def get_forc(self):
        fl = self._flagnum
        return (fl & _cnum == _cnum) or \
               (fl & _fnum == _fnum)

    def get_behaved(self):
        fl = self._flagnum
        return (fl & _anum == _anum) and \
               (fl & _wnum == _wnum)

    def get_carray(self):
        fl = self._flagnum
        return (fl & _anum == _anum) and \
               (fl & _wnum == _wnum) and \
               (fl & _cnum == _cnum)

    def get_farray(self):
        fl = self._flagnum
        return (fl & _anum == _anum) and \
               (fl & _wnum == _wnum) and \
               (fl & _fnum == _fnum) and \
               not (fl & _cnum == _cnum)

    # --- single-flag accessors ---------------------------------------

    def get_contiguous(self):
        return (self._flagnum & _cnum == _cnum)

    def get_fortran(self):
        return (self._flagnum & _fnum == _fnum)

    def get_updateifcopy(self):
        return (self._flagnum & _unum == _unum)

    def get_owndata(self):
        return (self._flagnum & _onum == _onum)

    def get_aligned(self):
        return (self._flagnum & _anum == _anum)

    def get_writeable(self):
        return (self._flagnum & _wnum == _wnum)

    def set_writeable(self, val):
        val = not not val
        self._arr.setflags(write=val)

    def set_aligned(self, val):
        val = not not val
        self._arr.setflags(align=val)

    def set_updateifcopy(self, val):
        val = not not val
        self._arr.setflags(uic=val)

    contiguous = property(get_contiguous, None, "")
    fortran = property(get_fortran, None, "")
    updateifcopy = property(get_updateifcopy, set_updateifcopy, "")
    owndata = property(get_owndata, None, "")
    aligned = property(get_aligned, set_aligned, "")
    writeable = property(get_writeable, set_writeable, "")

    fnc = property(get_fnc, None, "")
    forc = property(get_forc, None, "")
    behaved = property(get_behaved, None, "")
    carray = property(get_carray, None, "")
    farray = property(get_farray, None, "")
+
+
+
+# make sure the tuple entries are PyArray_Descr
+# or convert them
+#
+# make sure offsets are all interpretable
+# as positive integers and
+# convert them to positive integers if so
+#
+#
+# return totalsize from last offset and size
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
def _usefields(adict, align):
    """Build a dtypedescr from a dictionary-style data-type description.

    adict maps field names to (format, offset[, title]) tuples; the
    optional key -1, when present, holds an explicit ordering of the
    field names.  align is forwarded to the dtypedescr constructor.
    Called from C (PyArray_DescrConverter) when a dictionary without
    "names" and "formats" fields is used as a data-type descriptor.
    """
    try:
        # Key -1 gives the field order explicitly.
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        # No explicit order: validate every entry, then order the
        # fields by their byte offsets.
        allfields = []
        fnames = adict.keys()
        for fname in fnames:
            obj = adict[fname]
            n = len(obj)
            if not isinstance(obj, tuple) or n not in [2,3]:
                raise ValueError, "entry not a 2- or 3- tuple"
            # A field titled with its own key is a duplicate alias entry.
            if (n > 2) and (obj[2] == fname):
                continue
            num = int(obj[1])
            if (num < 0):
                raise ValueError, "invalid offset."
            format = dtypedescr(obj[0])
            if (format.itemsize == 0):
                raise ValueError, "all itemsizes must be fixed."
            if (n > 2):
                title = obj[2]
            else:
                title = None
            allfields.append((fname, format, num, title))
        # sort by offsets
        allfields.sort(lambda x,y: cmp(x[2],y[2]))
        names = [x[0] for x in allfields]
        formats = [x[1] for x in allfields]
        offsets = [x[2] for x in allfields]
        titles = [x[3] for x in allfields]
    else:
        # Explicit order given: entries are taken as-is without the
        # validation applied above.
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if (len(res) > 2):
                titles.append(res[2])
            else:
                titles.append(None)

    return dtypedescr({"names" : names,
                       "formats" : formats,
                       "offsets" : offsets,
                       "titles" : titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
def _array_descr(descriptor):
    """Return the array-protocol descriptor list for *descriptor*.

    Recurses through descriptor.fields; a descriptor with no fields is
    rendered as its plain typestring.  Gaps between fields are filled
    with void padding entries ('', '|V<n>').
    """
    fields = descriptor.fields
    if fields is None:
        return descriptor.dtypestr

    #get ordered list of fields with names
    ordered_fields = fields.items()
    # remove duplicates: the fields dict also contains title-keyed and
    # non-string entries pointing at the same field tuple
    new = {}
    for item in ordered_fields:
        # We don't want to include redundant or non-string
        # entries
        if not isinstance(item[0],str) or (len(item[1]) > 2 \
                                           and item[0] == item[1][2]):
            continue
        new[item[1]] = item[0]
    ordered_fields = [x[0] + (x[1],) for x in new.items()]
    #sort the list on the offset (field[1] is the byte offset)
    ordered_fields.sort(lambda x,y : cmp(x[1],y[1]))

    result = []
    offset = 0
    for field in ordered_fields:
        # Insert '|Vn' padding for any gap before this field's offset.
        # NOTE(review): offset is advanced only by itemsize, never past
        # inserted padding, so a second gap would be over-padded — verify.
        if field[1] > offset:
            result.append(('','|V%d' % (field[1]-offset)))
        if len(field) > 3:
            # field carries a title: name becomes a (name, title) pair
            name = (field[2],field[3])
        else:
            name = field[2]
        if field[0].subdescr:
            tup = (name, _array_descr(field[0].subdescr[0]),
                   field[0].subdescr[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    return result
+
def _reconstruct(subtype, shape, dtype):
    # Pickle support: rebuild an ndarray (or subclass) instance without
    # running __init__; the unpickler fills in the data afterwards.
    return ndarray.__new__(subtype, shape, dtype)
+
+
# format_re and _split were taken from numarray by J. Todd Miller
# Matches one field of a comma-separated format string: an optional
# repeat-count prefix (a number or a parenthesized shape tuple, possibly
# padded with spaces) followed by a dtype string, e.g. "(2,3)f8" or "3i4".
format_re = re.compile(r'(?P<repeat> *[(]?[ ,0-9]*[)]? *)(?P<dtype>[><|A-Za-z0-9.]*)')
+
+def _split(input):
+ """Split the input formats string into field formats without splitting
+ the tuple used to specify multi-dimensional arrays."""
+
+ newlist = []
+ hold = ''
+
+ for element in input.split(','):
+ if hold != '':
+ item = hold + ',' + element
+ else:
+ item = element
+ left = item.count('(')
+ right = item.count(')')
+
+ # if the parenthesis is not balanced, hold the string
+ if left > right :
+ hold = item
+
+ # when balanced, append to the output list and reset the hold
+ elif left == right:
+ newlist.append(item.strip())
+ hold = ''
+
+ # too many close parenthesis is unacceptable
+ else:
+ raise SyntaxError, item
+
+ # if there is string left over in hold
+ if hold != '':
+ raise SyntaxError, hold
+
+ return newlist
+
+# str is a string (perhaps comma separated)
def _commastring(astr):
    """Parse a comma-separated format string into a list of items.

    Each field of *astr* is matched as an optional repeat prefix plus a
    dtype string (see format_re).  A field without a repeat yields the
    bare dtype string, otherwise a (dtype, repeats) tuple.

    Raises ValueError when the string contains no commas or a field
    cannot be parsed.
    """
    res = _split(astr)
    if len(res) == 1:
        raise ValueError("no commas present")
    result = []
    for item in res:
        try:
            (repeats, dtype) = format_re.match(item).groups()
        except (TypeError, AttributeError):
            raise ValueError('format %s is not recognized' % item)

        if repeats == '':
            newitem = dtype
        else:
            # NOTE(review): eval() of the repeat spec executes arbitrary
            # expressions; unsafe if astr ever comes from untrusted input.
            newitem = (dtype, eval(repeats))
        result.append(newitem)

    return result
diff --git a/numpy/base/arrayprint.py b/numpy/base/arrayprint.py
new file mode 100644
index 000000000..6a5e4b23b
--- /dev/null
+++ b/numpy/base/arrayprint.py
@@ -0,0 +1,359 @@
+"""Array printing function
+
+$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
+"""
+__all__ = ["set_summary", "summary_off", "set_precision", "set_line_width",
+ "array2string"]
+
+#
+# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
+# last revision: 1996-3-13
+# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
+# and by Perry Greenfield 2000-4-1 for numarray
+# and by Travis Oliphant 2005-8-22 for scipy.base
+
+import sys
+import numeric as _gen
+import numerictypes as _nt
+import umath as _uf
+_nc = _gen
+
+# The following functions are emergency substitutes for numeric functions
+# which sometimes get broken during development.
+
def product(x, y):
    """Binary multiply; used with reduce() to fold a shape tuple."""
    return x * y
+
+def _maximum_reduce(arr):
+ maximum = arr[0]
+ for i in xrange(1, arr.nelements()):
+ if arr[i] > maximum: maximum = arr[i]
+ return maximum
+
+def _minimum_reduce(arr):
+ minimum = arr[0]
+ for i in xrange(1, arr.nelements()):
+ if arr[i] < minimum: minimum = arr[i]
+ return minimum
+
def _numeric_compress(arr):
    """Emergency substitute for the ufunc path: return a new array holding
    the absolute values of the nonzero elements of *arr*."""
    # First pass: count the nonzero entries to size the output.
    count = 0
    for idx in range(arr.nelements()):
        if arr[idx] != 0:
            count += 1
    out = _nc.zeros((count,))
    # Second pass: copy the magnitudes across.
    pos = 0
    for idx in range(arr.nelements()):
        value = arr[idx]
        if value != 0:
            out[pos] = abs(value)
            pos += 1
    return out
+
# When _failsafe is nonzero the pure-Python reducers above are used instead
# of the umath ufunc reductions (handy while umath itself is broken during
# development).
_failsafe = 0
if _failsafe:
    max_reduce = _maximum_reduce
    min_reduce = _minimum_reduce
else:
    max_reduce = _uf.maximum.reduce
    min_reduce = _uf.minimum.reduce

# Defaults adjusted via set_printoptions() and read throughout this module.
_summaryEdgeItems = 3     # repr N leading and trailing items of each dimension
_summaryThreshhold = 1000 # total items > triggers array summarization

_float_output_precision = 8
_float_output_suppress_small = False
_line_width = 75
+
+
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None):
    """Set options associated with printing.

    precision  the default number of digits of precision for floating
               point output (default 8)
    threshold  total number of array elements which trigger summarization
               rather than full repr (default 1000)
    edgeitems  number of array items in summary at beginning and end of
               each dimension (default 3)
    linewidth  the number of characters per line for the purpose of
               inserting line breaks (default 75)
    suppress   Boolean value indicating whether or not to suppress
               printing of small floating point values using scientific
               notation (default False)

    Only the options actually passed (non-None) are updated.
    """

    global _summaryThreshhold, _summaryEdgeItems, _float_output_precision, \
           _line_width, _float_output_suppress_small
    if (linewidth is not None):
        _line_width = linewidth
    if (threshold is not None):
        _summaryThreshhold = threshold
    if (edgeitems is not None):
        _summaryEdgeItems = edgeitems
    if (precision is not None):
        _float_output_precision = precision
    if (suppress is not None):
        # Fixed: this previously assigned a misspelled name
        # (_float_output_supress_small) which, not being in the global
        # statement, only created a dead local — the option never stuck.
        _float_output_suppress_small = not not suppress
    return
+
def get_printoptions():
    """Return the current print options as a tuple:
    (precision, threshold, edgeitems, linewidth, suppress)."""
    current = (_float_output_precision,
               _summaryThreshhold,
               _summaryEdgeItems,
               _line_width,
               _float_output_suppress_small)
    return current
+
+
def _leading_trailing(a):
    """Return an array containing only the edge items of *a* that a
    summarized repr will show: the first and last _summaryEdgeItems
    entries along every dimension, recursively."""
    if a.ndim == 1:
        if len(a) > 2*_summaryEdgeItems:
            b = _gen.concatenate((a[:_summaryEdgeItems],
                                  a[-_summaryEdgeItems:]))
        else:
            b = a
    else:
        if len(a) > 2*_summaryEdgeItems:
            # Recurse into only the leading and trailing sub-arrays.
            l = [_leading_trailing(a[i]) for i in range(
                min(len(a), _summaryEdgeItems))]
            l.extend([_leading_trailing(a[-i]) for i in range(
                min(len(a), _summaryEdgeItems),0,-1)])
        else:
            # Small outer dimension: keep every sub-array.
            l = [_leading_trailing(a[i]) for i in range(0, len(a))]
        b = _gen.concatenate(tuple(l))
    return b
+
def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
                  prefix=""):
    """Core worker for array2string: pick a per-element formatter based on
    the array's dtype and hand off to _formatArray.

    None arguments fall back to the module-level print options.
    """
    if max_line_width is None:
        max_line_width = _line_width

    if precision is None:
        precision = _float_output_precision

    if suppress_small is None:
        suppress_small = _float_output_suppress_small

    if a.size > _summaryThreshhold:
        # Too many elements: summarize and only format the edge items.
        summary_insert = "..., "
        data = _leading_trailing(a)
    else:
        summary_insert = ""
        data = a.ravel()

    # NOTE(review): computed but never used below — leftover?
    items_per_line = a.shape[-1]

    try:
        # An array subclass may supply its own element formatter.
        format_function = a._format
    except AttributeError:
        dtype = a.dtype
        # NOTE(review): the bool branch uses 'if', not 'elif', so the
        # integer test that follows can overwrite it — confirm intended.
        if issubclass(dtype, _nt.bool):
            format = "%s"
            format_function = lambda x, f = format: format % x
        if issubclass(dtype, _nt.integer):
            # Width chosen from the extreme values actually printed.
            max_str_len = max(len(str(max_reduce(data))),
                              len(str(min_reduce(data))))
            format = '%' + str(max_str_len) + 'd'
            format_function = lambda x, f = format: _formatInteger(x, f)
        elif issubclass(dtype, _nt.floating):
            format = _floatFormat(data, precision, suppress_small)
            format_function = lambda x, f = format: _formatFloat(x, f)
        elif issubclass(dtype, _nt.complexfloating):
            # Separate formats so the imaginary part always carries a sign.
            real_format = _floatFormat(
                data.real, precision, suppress_small, sign=0)
            imag_format = _floatFormat(
                data.imag, precision, suppress_small, sign=1)
            format_function = lambda x, f1 = real_format, f2 = imag_format: \
                              _formatComplex(x, f1, f2)
        else:
            format = '%s'
            format_function = lambda x, f = format: format % str(x)

    next_line_prefix = " " # skip over "["
    next_line_prefix += " "*len(prefix) # skip over array(


    # Trailing newline from _formatArray is dropped.
    lst = _formatArray(a, format_function, len(a.shape), max_line_width,
                       next_line_prefix, separator,
                       _summaryEdgeItems, summary_insert)[:-1]

    return lst
+
def array2string(a, max_line_width = None, precision = None,
                 suppress_small = None, separator=' ', prefix="",
                 style=repr):
    """Return a string representation of array *a*.

    A zero-dimensional array is rendered through *style* (default repr)
    unless it supplies its own _format hook; an array with any
    zero-length dimension renders as "[]"; everything else is delegated
    to _array2string.
    """
    if a.shape == ():
        x = a.item()
        try:
            lst = a._format(x)
        except AttributeError:
            lst = style(x)
    elif 0 in a.shape:
        # Any zero in the shape means zero elements: treat as a null
        # array.  (Clearer and cheaper than reduce(product, a.shape).)
        lst = "[]"
    else:
        lst = _array2string(a, max_line_width, precision, suppress_small,
                            separator, prefix)
    return lst
+
+def _extendLine(s, line, word, max_line_len, next_line_prefix):
+ if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
+ s += line.rstrip() + "\n"
+ line = next_line_prefix
+ line += word
+ return s, line
+
def _formatArray(a, format_function, rank, max_line_len,
                 next_line_prefix, separator, edge_items, summary_insert):
    """formatArray is designed for two modes of operation:

    1. Full output

    2. Summarized output (summary_insert non-empty and the dimension is
       longer than 2*edge_items: only the leading/trailing edge_items
       entries are emitted with summary_insert between them)

    Recurses one dimension at a time; rank is the number of dimensions
    left to format.  Returns the bracketed text including a trailing
    newline (stripped by the caller).
    """
    if rank == 0:
        return str(a.item())

    if summary_insert and 2*edge_items < len(a):
        leading_items, trailing_items, summary_insert1 = \
            edge_items, edge_items, summary_insert
    else:
        # Full output: no leading block, everything is "trailing".
        leading_items, trailing_items, summary_insert1 = 0, len(a), ""

    if rank == 1:
        # Innermost dimension: lay the formatted elements out into lines
        # of at most max_line_len characters.
        s = ""
        line = next_line_prefix
        for i in xrange(leading_items):
            word = format_function(a[i]) + separator
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)

        if summary_insert1:
            s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)

        # All but the last element carry the separator.
        for i in xrange(trailing_items, 1, -1):
            word = format_function(a[-i]) + separator
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)

        word = format_function(a[-1])
        s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        s += line + "]\n"
        s = '[' + s[len(next_line_prefix):]
    else:
        # Outer dimension: recurse per sub-array, separating the blocks
        # with rank-1 blank lines.
        s = '['
        sep = separator.rstrip()
        for i in xrange(leading_items):
            if i > 0:
                s += next_line_prefix
            s += _formatArray(a[i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip()+ sep.rstrip() + '\n'*max(rank-1,1)

        if summary_insert1:
            s += next_line_prefix + summary_insert1 + "\n"

        for i in xrange(trailing_items, 1, -1):
            if leading_items or i != trailing_items:
                s += next_line_prefix
            s += _formatArray(a[-i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1)
        if leading_items or trailing_items > 1:
            s += next_line_prefix
        s += _formatArray(a[-1], format_function, rank-1, max_line_len,
                          " " + next_line_prefix, separator, edge_items,
                          summary_insert).rstrip()+']\n'
    return s
+
def _floatFormat(data, precision, suppress_small, sign = 0):
    """Choose a %-format string for the float elements of *data*.

    Uses exponential notation when the magnitudes are very large or
    (unless suppress_small) very small / widely spread; otherwise a
    fixed-point format whose width and precision are fitted to the data.
    sign=1 forces an explicit '+' (used for imaginary parts).
    """
    exp_format = 0
    # Only nonzero magnitudes matter for choosing the format.
    non_zero = _uf.absolute(data.compress(_uf.not_equal(data, 0)))
    ##non_zero = _numeric_compress(data) ##
    if len(non_zero) == 0:
        max_val = 0.
        min_val = 0.
    else:
        max_val = max_reduce(non_zero)
        min_val = min_reduce(non_zero)
    if max_val >= 1.e8:
        exp_format = 1
    if not suppress_small and (min_val < 0.0001
                               or max_val/min_val > 1000.):
        exp_format = 1
    if exp_format:
        # A trailing '3' marks a platform 3-digit exponent for
        # _formatFloat to post-process.
        large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
        max_str_len = 8 + precision + large_exponent
        if sign: format = '%+'
        else: format = '%'
        format = format + str(max_str_len) + '.' + str(precision) + 'e'
        if large_exponent: format = format + '3'
    else:
        # Fixed point: shrink precision to the digits actually needed.
        format = '%.' + str(precision) + 'f'
        precision = min(precision, max(tuple(map(lambda x, p=precision,
                                                 f=format: _digits(x,p,f),
                                                 data))))
        max_str_len = len(str(int(max_val))) + precision + 2
        # '#' keeps the decimal point even at precision 0.
        if sign: format = '%#+'
        else: format = '%#'
        format = format + str(max_str_len) + '.' + str(precision) + 'f'
    return format
+
+def _digits(x, precision, format):
+ s = format % x
+ zeros = len(s)
+ while s[zeros-1] == '0': zeros = zeros-1
+ return precision-len(s)+zeros
+
+
# Bounds of a machine long (Python 2 sys.maxint); integers outside this
# range fall back to plain str() formatting in _formatInteger.
_MAXINT = sys.maxint
_MININT = -sys.maxint-1
def _formatInteger(x, format):
    """Render integer *x* with *format*; values outside the machine-long
    range fall back to plain str() so the width spec cannot mangle them."""
    if _MININT < x < _MAXINT:
        return format % x
    return "%s" % x
+
+def _formatFloat(x, format, strip_zeros = 1):
+ if format[-1] == '3':
+ # 3-digit exponent
+ format = format[:-1]
+ s = format % x
+ third = s[-3]
+ if third == '+' or third == '-':
+ s = s[1:-2] + '0' + s[-2:]
+ elif format[-1] == 'e':
+ # 2-digit exponent
+ s = format % x
+ if s[-3] == '0':
+ s = ' ' + s[:-3] + s[-2:]
+ elif format[-1] == 'f':
+ s = format % x
+ if strip_zeros:
+ zeros = len(s)
+ while s[zeros-1] == '0': zeros = zeros-1
+ s = s[:zeros] + (len(s)-zeros)*' '
+ else:
+ s = format % x
+ return s
+
def _formatComplex(x, real_format, imag_format):
    """Format complex *x* as '<real><imag>j', formatting each part with
    _formatFloat.  For fixed-point imaginary parts the 'j' is inserted
    before the blanked-out trailing zeros so columns stay aligned."""
    real_str = _formatFloat(x.real, real_format)
    imag_str = _formatFloat(x.imag, imag_format, 0)
    if imag_format.endswith('f'):
        end = len(imag_str)
        while end > 2 and imag_str[end - 1] == '0':
            end -= 1
        imag_str = imag_str[:end] + 'j' + ' ' * (len(imag_str) - end)
    else:
        imag_str = imag_str + 'j'
    return real_str + imag_str
+
+def _formatGeneral(x):
+ return str(x) + ' '
+
# Ad-hoc manual check (Python 2 print statements): exercises array2string
# on a small arange and on an empty 2-D array.
if __name__ == '__main__':
    a = _nc.arange(10)
    print array2string(a)
    print array2string(_nc.array([[],[]]))
diff --git a/numpy/base/blasdot/_dotblas.c b/numpy/base/blasdot/_dotblas.c
new file mode 100644
index 000000000..648ea397f
--- /dev/null
+++ b/numpy/base/blasdot/_dotblas.c
@@ -0,0 +1,761 @@
+static char module_doc[] =
+"This module provides a BLAS optimized\nmatrix multiply, inner product and dot for scipy arrays";
+
+#include "Python.h"
+#include "scipy/arrayobject.h"
+#ifndef CBLAS_HEADER
+#define CBLAS_HEADER "cblas.h"
+#endif
+#include CBLAS_HEADER
+
+#include <stdio.h>
+
/*
 * PyArray_DotFunc backed by CBLAS sdot for single-precision data.
 * stridea/strideb are byte strides -- assumed to be exact multiples of
 * sizeof(float); TODO confirm callers guarantee this.  tmp is unused
 * (signature dictated by the PyArray_DotFunc slot).
 */
static void
FLOAT_dot(void *a, intp stridea, void *b, intp strideb, void *res,
          intp n, void *tmp)
{
    register int na = stridea / sizeof(float);
    register int nb = strideb / sizeof(float);

    *((float *)res) = cblas_sdot((int)n, (float *)a, na, (float *)b, nb);
}
+
/*
 * PyArray_DotFunc backed by CBLAS ddot for double-precision data.
 * Byte strides are converted to element strides; tmp is unused.
 */
static void
DOUBLE_dot(void *a, intp stridea, void *b, intp strideb, void *res,
           intp n, void *tmp)
{
    register int na = stridea / sizeof(double);
    register int nb = strideb / sizeof(double);

    *((double *)res) = cblas_ddot((int)n, (double *)a, na, (double *)b, nb);
}
+
/*
 * PyArray_DotFunc backed by CBLAS cdotu for single-precision complex
 * data (unconjugated dot product); tmp is unused.
 */
static void
CFLOAT_dot(void *a, intp stridea, void *b, intp strideb, void *res,
           intp n, void *tmp)
{

    register int na = stridea / sizeof(cfloat);
    register int nb = strideb / sizeof(cfloat);

    cblas_cdotu_sub((int)n, (float *)a, na, (float *)b, nb, (float *)res);
}
+
/*
 * PyArray_DotFunc backed by CBLAS zdotu for double-precision complex
 * data (unconjugated dot product); tmp is unused.
 */
static void
CDOUBLE_dot(void *a, intp stridea, void *b, intp strideb, void *res,
            intp n, void *tmp)
{
    register int na = stridea / sizeof(cdouble);
    register int nb = strideb / sizeof(cdouble);

    cblas_zdotu_sub((int)n, (double *)a, na, (double *)b, nb, (double *)res);
}
+
+
/* Saved pointers to the original (non-BLAS) dot functions, indexed by
   type number, so restoredot() can reinstall them. */
static PyArray_DotFunc *oldFunctions[PyArray_NTYPES];
/* TRUE while the BLAS-backed dot functions are installed. */
static Bool altered=FALSE;
+
+static char doc_alterdot[] = "alterdot() changes all dot functions to use blas.";
+
+static PyObject *
+dotblas_alterdot(PyObject *dummy, PyObject *args)
+{
+ PyArray_Descr *descr;
+
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+
+ /* Replace the dot functions to the ones using blas */
+
+ if (!altered) {
+ descr = PyArray_DescrFromType(PyArray_FLOAT);
+ oldFunctions[PyArray_FLOAT] = descr->f->dotfunc;
+ descr->f->dotfunc = (PyArray_DotFunc *)FLOAT_dot;
+
+ descr = PyArray_DescrFromType(PyArray_DOUBLE);
+ oldFunctions[PyArray_DOUBLE] = descr->f->dotfunc;
+ descr->f->dotfunc = (PyArray_DotFunc *)DOUBLE_dot;
+
+ descr = PyArray_DescrFromType(PyArray_CFLOAT);
+ oldFunctions[PyArray_CFLOAT] = descr->f->dotfunc;
+ descr->f->dotfunc = (PyArray_DotFunc *)CFLOAT_dot;
+
+ descr = PyArray_DescrFromType(PyArray_CDOUBLE);
+ oldFunctions[PyArray_CDOUBLE] = descr->f->dotfunc;
+ descr->f->dotfunc = (PyArray_DotFunc *)CDOUBLE_dot;
+
+ altered = TRUE;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static char doc_restoredot[] = "restoredot() restores dots to defaults.";
+
/*
 * restoredot(): reinstall the dot functions saved by alterdot() and
 * clear the saved pointers.  No-op while altered is FALSE.
 */
static PyObject *
dotblas_restoredot(PyObject *dummy, PyObject *args)
{
    PyArray_Descr *descr;

    if (!PyArg_ParseTuple(args, "")) return NULL;

    if (altered) {
        descr = PyArray_DescrFromType(PyArray_FLOAT);
        descr->f->dotfunc = oldFunctions[PyArray_FLOAT];
        oldFunctions[PyArray_FLOAT] = NULL;
        Py_XDECREF(descr);   /* release the DescrFromType reference */

        descr = PyArray_DescrFromType(PyArray_DOUBLE);
        descr->f->dotfunc = oldFunctions[PyArray_DOUBLE];
        oldFunctions[PyArray_DOUBLE] = NULL;
        Py_XDECREF(descr);

        descr = PyArray_DescrFromType(PyArray_CFLOAT);
        descr->f->dotfunc = oldFunctions[PyArray_CFLOAT];
        oldFunctions[PyArray_CFLOAT] = NULL;
        Py_XDECREF(descr);

        descr = PyArray_DescrFromType(PyArray_CDOUBLE);
        descr->f->dotfunc = oldFunctions[PyArray_CDOUBLE];
        oldFunctions[PyArray_CDOUBLE] = NULL;
        Py_XDECREF(descr);

        altered = FALSE;
    }

    Py_INCREF(Py_None);
    return Py_None;
}
+
+
+static char doc_matrixproduct[] = "matrixproduct(a,b)\nReturns the dot product of a and b for arrays of floating point types.\nLike the generic scipy equivalent the product sum is over\nthe last dimension of a and the second-to-last dimension of b.\nNB: The first argument is not conjugated.";
+
+static PyObject *
+dotblas_matrixproduct(PyObject *dummy, PyObject *args)
+{
+ PyObject *op1, *op2;
+ PyArrayObject *ap1, *ap2, *ret;
+ int j, l, lda, ldb, ldc;
+ int typenum, nd;
+ intp dimensions[MAX_DIMS];
+ static const float oneF[2] = {1.0, 0.0};
+ static const float zeroF[2] = {0.0, 0.0};
+ static const double oneD[2] = {1.0, 0.0};
+ static const double zeroD[2] = {0.0, 0.0};
+ double prior1, prior2;
+ PyTypeObject *subtype;
+ PyArray_Descr *dtype;
+
+
+ if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+
+ /*
+ * "Matrix product" using the BLAS.
+ * Only works for float double and complex types.
+ */
+
+
+ typenum = PyArray_ObjectType(op1, 0);
+ typenum = PyArray_ObjectType(op2, typenum);
+
+ /* This function doesn't handle other types */
+ if ((typenum != PyArray_DOUBLE && typenum != PyArray_CDOUBLE &&
+ typenum != PyArray_FLOAT && typenum != PyArray_CFLOAT)) {
+ return PyArray_Return((PyArrayObject *)PyArray_MatrixProduct(op1, op2));
+ }
+
+ ret = NULL;
+ dtype = PyArray_DescrFromType(typenum);
+ ap1 = (PyArrayObject *)PyArray_FromAny(op1, dtype, 0, 0, CARRAY_FLAGS);
+ if (ap1 == NULL) return NULL;
+ ap2 = (PyArrayObject *)PyArray_FromAny(op2, dtype, 0, 0, CARRAY_FLAGS);
+ if (ap2 == NULL) goto fail;
+
+ if ((ap1->nd > 2) || (ap2->nd > 2)) {
+ /* This function doesn't handle dimensions greater than 2 -- other
+ than to ensure the dot function is altered
+ */
+ if (!altered) {
+ /* need to alter dot product */
+ PyObject *tmp1, *tmp2;
+ tmp1 = PyTuple_New(0);
+ tmp2 = dotblas_alterdot(NULL, tmp1);
+ Py_DECREF(tmp1);
+ Py_DECREF(tmp2);
+ }
+ ret = (PyArrayObject *)PyArray_MatrixProduct((PyObject *)ap1,
+ (PyObject *)ap2);
+ Py_DECREF(ap1);
+ Py_DECREF(ap2);
+ return PyArray_Return(ret);
+ }
+
+ if (ap1->nd == 0 || ap2->nd == 0) {
+ /* One of ap1 or ap2 is a scalar */
+ if (ap1->nd == 0) { /* Make ap2 the scalar */
+ PyArrayObject *t = ap1;
+ ap1 = ap2;
+ ap2 = t;
+ }
+ for (l = 1, j = 0; j < ap1->nd; j++) {
+ dimensions[j] = ap1->dimensions[j];
+ l *= dimensions[j];
+ }
+ nd = ap1->nd;
+ }
+ else { /* (ap1->nd <= 2 && ap2->nd <= 2) */
+ /* Both ap1 and ap2 are vectors or matrices */
+ l = ap1->dimensions[ap1->nd-1];
+
+ if (ap2->dimensions[0] != l) {
+ PyErr_SetString(PyExc_ValueError, "matrices are not aligned");
+ goto fail;
+ }
+ nd = ap1->nd+ap2->nd-2;
+
+ if (nd == 1)
+ dimensions[0] = (ap1->nd == 2) ? ap1->dimensions[0] : ap2->dimensions[1];
+ else if (nd == 2) {
+ dimensions[0] = ap1->dimensions[0];
+ dimensions[1] = ap2->dimensions[1];
+ }
+ }
+
+ /* Choose which subtype to return */
+ prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
+ prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
+ subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type);
+
+ ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
+ typenum, NULL, NULL, 0, 0,
+ (PyObject *)\
+ (prior2 > prior1 ? ap2 : ap1));
+
+ if (ret == NULL) goto fail;
+ memset(ret->data, 0, PyArray_NBYTES(ret));
+
+ if (ap2->nd == 0) {
+ /* Multiplication by a scalar -- Level 1 BLAS */
+ if (typenum == PyArray_DOUBLE) {
+ cblas_daxpy(l, *((double *)ap2->data), (double *)ap1->data, 1,
+ (double *)ret->data, 1);
+ }
+ else if (typenum == PyArray_CDOUBLE) {
+ cblas_zaxpy(l, (double *)ap2->data, (double *)ap1->data, 1,
+ (double *)ret->data, 1);
+ }
+ else if (typenum == PyArray_FLOAT) {
+ cblas_saxpy(l, *((float *)ap2->data), (float *)ap1->data, 1,
+ (float *)ret->data, 1);
+ }
+ else if (typenum == PyArray_CFLOAT) {
+ cblas_caxpy(l, (float *)ap2->data, (float *)ap1->data, 1,
+ (float *)ret->data, 1);
+ }
+ }
+ else if (ap1->nd == 1 && ap2->nd == 1) {
+ /* Dot product between two vectors -- Level 1 BLAS */
+ if (typenum == PyArray_DOUBLE) {
+ double result = cblas_ddot(l, (double *)ap1->data, 1,
+ (double *)ap2->data, 1);
+ *((double *)ret->data) = result;
+ }
+ else if (typenum == PyArray_FLOAT) {
+ float result = cblas_sdot(l, (float *)ap1->data, 1,
+ (float *)ap2->data, 1);
+ *((float *)ret->data) = result;
+ }
+ else if (typenum == PyArray_CDOUBLE) {
+ cblas_zdotu_sub(l, (double *)ap1->data, 1,
+ (double *)ap2->data, 1, (double *)ret->data);
+ }
+ else if (typenum == PyArray_CFLOAT) {
+ cblas_cdotu_sub(l, (float *)ap1->data, 1,
+ (float *)ap2->data, 1, (float *)ret->data);
+ fprintf(stderr, "Here...\n");
+ }
+ }
+ else if (ap1->nd == 2 && ap2->nd == 1) {
+ /* Matrix vector multiplication -- Level 2 BLAS */
+ /* lda must be MAX(M,1) */
+ lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1);
+ if (typenum == PyArray_DOUBLE) {
+ cblas_dgemv(CblasRowMajor,
+ CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+ 1.0, (double *)ap1->data, lda,
+ (double *)ap2->data, 1, 0.0, (double *)ret->data, 1);
+ }
+ else if (typenum == PyArray_FLOAT) {
+ cblas_sgemv(CblasRowMajor,
+ CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+ 1.0, (float *)ap1->data, lda,
+ (float *)ap2->data, 1, 0.0, (float *)ret->data, 1);
+ }
+ else if (typenum == PyArray_CDOUBLE) {
+ cblas_zgemv(CblasRowMajor,
+ CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+ oneD, (double *)ap1->data, lda,
+ (double *)ap2->data, 1, zeroD, (double *)ret->data, 1);
+ }
+ else if (typenum == PyArray_CFLOAT) {
+ cblas_cgemv(CblasRowMajor,
+ CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+ oneF, (float *)ap1->data, lda,
+ (float *)ap2->data, 1, zeroF, (float *)ret->data, 1);
+ }
+ }
+ else if (ap1->nd == 1 && ap2->nd == 2) {
+ /* Vector matrix multiplication -- Level 2 BLAS */
+ lda = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1);
+ if (typenum == PyArray_DOUBLE) {
+ cblas_dgemv(CblasRowMajor,
+ CblasTrans, ap2->dimensions[0], ap2->dimensions[1],
+ 1.0, (double *)ap2->data, lda,
+ (double *)ap1->data, 1, 0.0, (double *)ret->data, 1);
+ }
+ else if (typenum == PyArray_FLOAT) {
+ cblas_sgemv(CblasRowMajor,
+ CblasTrans, ap2->dimensions[0], ap2->dimensions[1],
+ 1.0, (float *)ap2->data, lda,
+ (float *)ap1->data, 1, 0.0, (float *)ret->data, 1);
+ }
+ else if (typenum == PyArray_CDOUBLE) {
+ cblas_zgemv(CblasRowMajor,
+ CblasTrans, ap2->dimensions[0], ap2->dimensions[1],
+ oneD, (double *)ap2->data, lda,
+ (double *)ap1->data, 1, zeroD, (double *)ret->data, 1);
+ }
+ else if (typenum == PyArray_CFLOAT) {
+ cblas_cgemv(CblasRowMajor,
+ CblasTrans, ap2->dimensions[0], ap2->dimensions[1],
+ oneF, (float *)ap2->data, lda,
+ (float *)ap1->data, 1, zeroF, (float *)ret->data, 1);
+ }
+ }
+ else { /* (ap1->nd == 2 && ap2->nd == 2) */
+ /* Matrix matrix multiplication -- Level 3 BLAS */
+ lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1);
+ ldb = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1);
+ ldc = (ret->dimensions[1] > 1 ? ret->dimensions[1] : 1);
+ if (typenum == PyArray_DOUBLE) {
+ cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
+ ap1->dimensions[0], ap2->dimensions[1], ap2->dimensions[0],
+ 1.0, (double *)ap1->data, lda,
+ (double *)ap2->data, ldb,
+ 0.0, (double *)ret->data, ldc);
+ }
+ else if (typenum == PyArray_FLOAT) {
+ cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
+ ap1->dimensions[0], ap2->dimensions[1], ap2->dimensions[0],
+ 1.0, (float *)ap1->data, lda,
+ (float *)ap2->data, ldb,
+ 0.0, (float *)ret->data, ldc);
+ }
+ else if (typenum == PyArray_CDOUBLE) {
+ cblas_zgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
+ ap1->dimensions[0], ap2->dimensions[1], ap2->dimensions[0],
+ oneD, (double *)ap1->data, lda,
+ (double *)ap2->data, ldb,
+ zeroD, (double *)ret->data, ldc);
+ }
+ else if (typenum == PyArray_CFLOAT) {
+ cblas_cgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
+ ap1->dimensions[0], ap2->dimensions[1], ap2->dimensions[0],
+ oneF, (float *)ap1->data, lda,
+ (float *)ap2->data, ldb,
+ zeroF, (float *)ret->data, ldc);
+ }
+ }
+
+ Py_DECREF(ap1);
+ Py_DECREF(ap2);
+ return PyArray_Return(ret);
+
+ fail:
+ Py_XDECREF(ap1);
+ Py_XDECREF(ap2);
+ Py_XDECREF(ret);
+ return NULL;
+}
+
+
+static char doc_innerproduct[] = "innerproduct(a,b)\nReturns the inner product of a and b for arrays of floating point types.\nLike the generic Numeric equivalent the product sum is over\nthe last dimension of a and b.\nNB: The first argument is not conjugated.";
+
+/*
+ * Python-level inner(a, b).  The product sum is taken over the last axis
+ * of each operand.  Only the four BLAS-supported type numbers (FLOAT,
+ * DOUBLE, CFLOAT, CDOUBLE) are dispatched to CBLAS; any other dtype, and
+ * any operand with more than 2 dimensions, is delegated to
+ * PyArray_InnerProduct.  Neither argument is conjugated (see doc string).
+ */
+static PyObject *
+dotblas_innerproduct(PyObject *dummy, PyObject *args)
+{
+    PyObject *op1, *op2;
+    PyArrayObject *ap1, *ap2, *ret;
+    int j, l, lda, ldb, ldc;
+    int typenum, nd;
+    intp dimensions[MAX_DIMS];
+    /* Complex one/zero constants used as alpha/beta for the C/Z routines. */
+    static const float oneF[2] = {1.0, 0.0};
+    static const float zeroF[2] = {0.0, 0.0};
+    static const double oneD[2] = {1.0, 0.0};
+    static const double zeroD[2] = {0.0, 0.0};
+    PyTypeObject *subtype;
+    double prior1, prior2;
+
+    if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+
+    /*
+     * Inner product using the BLAS. The product sum is taken along the last
+     * dimensions of the two arrays.
+     * Only speeds things up for float double and complex types.
+     */
+
+    typenum = PyArray_ObjectType(op1, 0);
+    typenum = PyArray_ObjectType(op2, typenum);
+
+    /* This function doesn't handle other types */
+    if ((typenum != PyArray_DOUBLE && typenum != PyArray_CDOUBLE &&
+         typenum != PyArray_FLOAT && typenum != PyArray_CFLOAT)) {
+        return PyArray_Return((PyArrayObject *)PyArray_InnerProduct(op1, op2));
+    }
+
+    ret = NULL;
+    ap1 = (PyArrayObject *)PyArray_ContiguousFromObject(op1, typenum, 0, 0);
+    if (ap1 == NULL) return NULL;
+    ap2 = (PyArrayObject *)PyArray_ContiguousFromObject(op2, typenum, 0, 0);
+    if (ap2 == NULL) goto fail;
+
+    if ((ap1->nd > 2) || (ap2->nd > 2)) {
+        /* This function doesn't handle dimensions greater than 2 -- other
+           than to ensure the dot function is altered
+        */
+        if (!altered) {
+            /* need to alter dot product */
+            PyObject *tmp1, *tmp2;
+            tmp1 = PyTuple_New(0);
+            tmp2 = dotblas_alterdot(NULL, tmp1);
+            Py_DECREF(tmp1);
+            Py_DECREF(tmp2);
+        }
+        ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1,
+                                                    (PyObject *)ap2);
+        Py_DECREF(ap1);
+        Py_DECREF(ap2);
+        return PyArray_Return(ret);
+    }
+
+    if (ap1->nd == 0 || ap2->nd == 0) {
+        /* One of ap1 or ap2 is a scalar */
+        if (ap1->nd == 0) { /* Make ap2 the scalar */
+            PyArrayObject *t = ap1;
+            ap1 = ap2;
+            ap2 = t;
+        }
+        /* The result takes the shape (and element count l) of the
+           non-scalar operand. */
+        for (l = 1, j = 0; j < ap1->nd; j++) {
+            dimensions[j] = ap1->dimensions[j];
+            l *= dimensions[j];
+        }
+        nd = ap1->nd;
+    }
+    else { /* (ap1->nd <= 2 && ap2->nd <= 2) */
+        /* Both ap1 and ap2 are vectors or matrices */
+        l = ap1->dimensions[ap1->nd-1];
+
+        if (ap2->dimensions[ap2->nd-1] != l) {
+            PyErr_SetString(PyExc_ValueError, "matrices are not aligned");
+            goto fail;
+        }
+        nd = ap1->nd+ap2->nd-2;
+
+        if (nd == 1)
+            dimensions[0] = (ap1->nd == 2) ? ap1->dimensions[0] : ap2->dimensions[0];
+        else if (nd == 2) {
+            dimensions[0] = ap1->dimensions[0];
+            dimensions[1] = ap2->dimensions[0];
+        }
+    }
+
+    /* Choose which subtype to return */
+    prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
+    prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
+    subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type);
+
+    ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
+                                       typenum, NULL, NULL, 0, 0,
+                                       (PyObject *)\
+                                       (prior2 > prior1 ? ap2 : ap1));
+
+    if (ret == NULL) goto fail;
+    /* The BLAS calls below accumulate into ret (beta/axpy semantics), so
+       it must start zeroed. */
+    memset(ret->data, 0, PyArray_NBYTES(ret));
+
+    if (ap2->nd == 0) {
+        /* Multiplication by a scalar -- Level 1 BLAS */
+        if (typenum == PyArray_DOUBLE) {
+            cblas_daxpy(l, *((double *)ap2->data), (double *)ap1->data, 1,
+                        (double *)ret->data, 1);
+        }
+        else if (typenum == PyArray_CDOUBLE) {
+            cblas_zaxpy(l, (double *)ap2->data, (double *)ap1->data, 1,
+                        (double *)ret->data, 1);
+        }
+        else if (typenum == PyArray_FLOAT) {
+            cblas_saxpy(l, *((float *)ap2->data), (float *)ap1->data, 1,
+                        (float *)ret->data, 1);
+        }
+        else if (typenum == PyArray_CFLOAT) {
+            cblas_caxpy(l, (float *)ap2->data, (float *)ap1->data, 1,
+                        (float *)ret->data, 1);
+        }
+    }
+    else if (ap1->nd == 1 && ap2->nd == 1) {
+        /* Dot product between two vectors -- Level 1 BLAS */
+        if (typenum == PyArray_DOUBLE) {
+            double result = cblas_ddot(l, (double *)ap1->data, 1,
+                                       (double *)ap2->data, 1);
+            *((double *)ret->data) = result;
+        }
+        else if (typenum == PyArray_CDOUBLE) {
+            /* *dotu_sub: unconjugated complex dot, per the doc string */
+            cblas_zdotu_sub(l, (double *)ap1->data, 1,
+                            (double *)ap2->data, 1, (double *)ret->data);
+        }
+        else if (typenum == PyArray_FLOAT) {
+            float result = cblas_sdot(l, (float *)ap1->data, 1,
+                                      (float *)ap2->data, 1);
+            *((float *)ret->data) = result;
+        }
+        else if (typenum == PyArray_CFLOAT) {
+            cblas_cdotu_sub(l, (float *)ap1->data, 1,
+                            (float *)ap2->data, 1, (float *)ret->data);
+        }
+    }
+    else if (ap1->nd == 2 && ap2->nd == 1) {
+        /* Matrix-vector multiplication -- Level 2 BLAS */
+        lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1);
+        if (typenum == PyArray_DOUBLE) {
+            cblas_dgemv(CblasRowMajor,
+                        CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+                        1.0, (double *)ap1->data, lda,
+                        (double *)ap2->data, 1, 0.0, (double *)ret->data, 1);
+        }
+        else if (typenum == PyArray_CDOUBLE) {
+            cblas_zgemv(CblasRowMajor,
+                        CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+                        oneD, (double *)ap1->data, lda,
+                        (double *)ap2->data, 1, zeroD, (double *)ret->data, 1);
+        }
+        else if (typenum == PyArray_FLOAT) {
+            cblas_sgemv(CblasRowMajor,
+                        CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+                        1.0, (float *)ap1->data, lda,
+                        (float *)ap2->data, 1, 0.0, (float *)ret->data, 1);
+        }
+        else if (typenum == PyArray_CFLOAT) {
+            cblas_cgemv(CblasRowMajor,
+                        CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1],
+                        oneF, (float *)ap1->data, lda,
+                        (float *)ap2->data, 1, zeroF, (float *)ret->data, 1);
+        }
+    }
+    else if (ap1->nd == 1 && ap2->nd == 2) {
+        /* Vector matrix multiplication -- Level 2 BLAS */
+        lda = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1);
+        if (typenum == PyArray_DOUBLE) {
+            cblas_dgemv(CblasRowMajor,
+                        CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1],
+                        1.0, (double *)ap2->data, lda,
+                        (double *)ap1->data, 1, 0.0, (double *)ret->data, 1);
+        }
+        else if (typenum == PyArray_CDOUBLE) {
+            cblas_zgemv(CblasRowMajor,
+                        CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1],
+                        oneD, (double *)ap2->data, lda,
+                        (double *)ap1->data, 1, zeroD, (double *)ret->data, 1);
+        }
+        else if (typenum == PyArray_FLOAT) {
+            cblas_sgemv(CblasRowMajor,
+                        CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1],
+                        1.0, (float *)ap2->data, lda,
+                        (float *)ap1->data, 1, 0.0, (float *)ret->data, 1);
+        }
+        else if (typenum == PyArray_CFLOAT) {
+            cblas_cgemv(CblasRowMajor,
+                        CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1],
+                        oneF, (float *)ap2->data, lda,
+                        (float *)ap1->data, 1, zeroF, (float *)ret->data, 1);
+        }
+    }
+    else { /* (ap1->nd == 2 && ap2->nd == 2) */
+        /* Matrix matrix multiplication -- Level 3 BLAS */
+        /* ap2 is passed with CblasTrans so that its LAST axis (not its
+           first) is the contraction axis, as inner product requires. */
+        lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1);
+        ldb = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1);
+        ldc = (ret->dimensions[1] > 1 ? ret->dimensions[1] : 1);
+        if (typenum == PyArray_DOUBLE) {
+            cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+                        ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1],
+                        1.0, (double *)ap1->data, lda,
+                        (double *)ap2->data, ldb,
+                        0.0, (double *)ret->data, ldc);
+        }
+        else if (typenum == PyArray_FLOAT) {
+            cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+                        ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1],
+                        1.0, (float *)ap1->data, lda,
+                        (float *)ap2->data, ldb,
+                        0.0, (float *)ret->data, ldc);
+        }
+        else if (typenum == PyArray_CDOUBLE) {
+            cblas_zgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+                        ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1],
+                        oneD, (double *)ap1->data, lda,
+                        (double *)ap2->data, ldb,
+                        zeroD, (double *)ret->data, ldc);
+        }
+        else if (typenum == PyArray_CFLOAT) {
+            cblas_cgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+                        ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1],
+                        oneF, (float *)ap1->data, lda,
+                        (float *)ap2->data, ldb,
+                        zeroF, (float *)ret->data, ldc);
+        }
+    }
+    Py_DECREF(ap1);
+    Py_DECREF(ap2);
+    return PyArray_Return(ret);
+
+ fail:
+    Py_XDECREF(ap1);
+    Py_XDECREF(ap2);
+    Py_XDECREF(ret);
+    return NULL;
+}
+
+
+static char doc_vdot[] = "vdot(a,b)\nReturns the dot product of a and b for scalars and vectors\nof floating point and complex types. The first argument, a, is conjugated.";
+
+
+/*
+ * Python-level vdot(a, b).  Both arguments are flattened to 1-d; for the
+ * complex types the first argument is conjugated (cblas_[cz]dotc_sub).
+ * Type numbers the BLAS cannot handle fall back to PyArray_InnerProduct
+ * after an explicit conjugation of the first argument.
+ *
+ * FIX: PyArray_FromAny steals a reference to the descriptor it is given,
+ * and 'type' is handed to it twice below, so an extra reference must be
+ * taken up front -- and released again on the early-error paths where the
+ * second PyArray_FromAny call is never reached.  Previously the single
+ * reference from PyArray_DescrFromType was consumed twice.
+ */
+static PyObject *dotblas_vdot(PyObject *dummy, PyObject *args) {
+    PyObject *op1, *op2;
+    PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL;
+    int l;
+    int typenum;
+    intp dimensions[MAX_DIMS];
+    PyArray_Descr *type;
+
+    if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+
+    /*
+     * Conjugating dot product using the BLAS for vectors.
+     * Multiplies op1 and op2, each of which must be vector.
+     */
+
+    typenum = PyArray_ObjectType(op1, 0);
+    typenum = PyArray_ObjectType(op2, typenum);
+
+    type = PyArray_DescrFromType(typenum);
+    Py_INCREF(type);   /* consumed twice by PyArray_FromAny below */
+
+    ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0);
+    if (ap1==NULL) {
+        Py_DECREF(type);   /* second reference will never be consumed */
+        goto fail;
+    }
+    op1 = PyArray_Flatten(ap1, 0);
+    if (op1==NULL) {
+        Py_DECREF(type);
+        goto fail;
+    }
+    Py_DECREF(ap1);
+    ap1 = (PyArrayObject *)op1;
+
+    /* This call consumes the extra reference taken above. */
+    ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0);
+    if (ap2==NULL) goto fail;
+    op2 = PyArray_Flatten(ap2, 0);
+    if (op2 == NULL) goto fail;
+    Py_DECREF(ap2);
+    ap2 = (PyArrayObject *)op2;
+
+    if (typenum != PyArray_FLOAT && typenum != PyArray_DOUBLE &&
+        typenum != PyArray_CFLOAT && typenum != PyArray_CDOUBLE) {
+        if (!altered) {
+            /* need to alter dot product */
+            PyObject *tmp1, *tmp2;
+            tmp1 = PyTuple_New(0);
+            tmp2 = dotblas_alterdot(NULL, tmp1);
+            Py_DECREF(tmp1);
+            Py_DECREF(tmp2);
+        }
+        if (PyTypeNum_ISCOMPLEX(typenum)) {
+            /* conjugate explicitly: the generic inner product does not
+               conjugate its first argument */
+            op1 = PyArray_Conjugate(ap1);
+            if (op1==NULL) goto fail;
+            Py_DECREF(ap1);
+            ap1 = (PyArrayObject *)op1;
+        }
+        ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1,
+                                                    (PyObject *)ap2);
+        Py_DECREF(ap1);
+        Py_DECREF(ap2);
+        return PyArray_Return(ret);
+    }
+
+    if (ap2->dimensions[0] != ap1->dimensions[ap1->nd-1]) {
+        PyErr_SetString(PyExc_ValueError, "vectors have different lengths");
+        goto fail;
+    }
+    l = ap1->dimensions[ap1->nd-1];
+
+    /* 0-d result; 'dimensions' is not read but the API requires a pointer */
+    ret = (PyArrayObject *)PyArray_SimpleNew(0, dimensions, typenum);
+    if (ret == NULL) goto fail;
+
+    /* Dot product between two vectors -- Level 1 BLAS */
+    if (typenum == PyArray_DOUBLE) {
+        *((double *)ret->data) = cblas_ddot(l, (double *)ap1->data, 1,
+                                            (double *)ap2->data, 1);
+    }
+    else if (typenum == PyArray_FLOAT) {
+        *((float *)ret->data) = cblas_sdot(l, (float *)ap1->data, 1,
+                                           (float *)ap2->data, 1);
+    }
+    else if (typenum == PyArray_CDOUBLE) {
+        /* *dotc_sub conjugates the first argument, per the doc string */
+        cblas_zdotc_sub(l, (double *)ap1->data, 1,
+                        (double *)ap2->data, 1, (double *)ret->data);
+    }
+    else if (typenum == PyArray_CFLOAT) {
+        cblas_cdotc_sub(l, (float *)ap1->data, 1,
+                        (float *)ap2->data, 1, (float *)ret->data);
+    }
+
+    Py_DECREF(ap1);
+    Py_DECREF(ap2);
+    return PyArray_Return(ret);
+
+ fail:
+    Py_XDECREF(ap1);
+    Py_XDECREF(ap2);
+    Py_XDECREF(ret);
+    return NULL;
+}
+
+/* Method table for the _dotblas extension module.  The flag value 1 is
+   METH_VARARGS: each function receives (self, args) with args a tuple. */
+static struct PyMethodDef dotblas_module_methods[] = {
+    {"dot",  (PyCFunction)dotblas_matrixproduct, 1, doc_matrixproduct},
+    {"inner",   (PyCFunction)dotblas_innerproduct, 1, doc_innerproduct},
+    {"vdot", (PyCFunction)dotblas_vdot, 1, doc_vdot},
+    {"alterdot", (PyCFunction)dotblas_alterdot, 1, doc_alterdot},
+    {"restoredot", (PyCFunction)dotblas_restoredot, 1, doc_restoredot},
+    {NULL, NULL, 0}                /* sentinel */
+};
+
+/* Initialization function for the module.
+
+   Creates the _dotblas module object, imports the array API, clears the
+   saved-dot-function table, and immediately installs the BLAS-backed dot
+   functions by invoking alterdot().
+
+   FIX: the results of PyTuple_New and dotblas_alterdot were passed to
+   Py_DECREF without NULL checks; either call can fail, and
+   Py_DECREF(NULL) crashes.  Failures now fall through to the existing
+   PyErr_Occurred() check, which aborts with Py_FatalError as before. */
+DL_EXPORT(void) init_dotblas(void) {
+    int i;
+    PyObject *m, *d, *s;
+
+    /* Create the module and add the functions */
+    m = Py_InitModule3("_dotblas", dotblas_module_methods, module_doc);
+
+    /* Import the array object */
+    import_array();
+
+    /* Initialise the array of dot functions */
+    for (i = 0; i < PyArray_NTYPES; i++)
+        oldFunctions[i] = NULL;
+
+    /* alterdot at load */
+    d = PyTuple_New(0);
+    if (d != NULL) {
+        s = dotblas_alterdot(NULL, d);
+        Py_DECREF(d);
+        Py_XDECREF(s);   /* s may be NULL if alterdot failed */
+    }
+
+    /* Check for errors */
+    if (PyErr_Occurred())
+        Py_FatalError("can't initialize module _dotblas");
+}
diff --git a/numpy/base/blasdot/cblas.h b/numpy/base/blasdot/cblas.h
new file mode 100644
index 000000000..3e0faebbe
--- /dev/null
+++ b/numpy/base/blasdot/cblas.h
@@ -0,0 +1,578 @@
+#ifndef CBLAS_H
+#define CBLAS_H
+#include <stddef.h>
+
+/* Allow the use in C++ code. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*
+ * Enumerated and derived types
+ */
+#define CBLAS_INDEX size_t /* this may vary between platforms */
+
+enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};
+enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};
+enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};
+enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};
+enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS functions (complex are recast as routines)
+ * ===========================================================================
+ */
+float cblas_sdsdot(const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY);
+double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,
+ const int incY);
+float cblas_sdot(const int N, const float *X, const int incX,
+ const float *Y, const int incY);
+double cblas_ddot(const int N, const double *X, const int incX,
+ const double *Y, const int incY);
+
+/*
+ * Functions having prefixes Z and C only
+ */
+void cblas_cdotu_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotu);
+void cblas_cdotc_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotc);
+
+void cblas_zdotu_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotu);
+void cblas_zdotc_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotc);
+
+
+/*
+ * Functions having prefixes S D SC DZ
+ */
+float cblas_snrm2(const int N, const float *X, const int incX);
+float cblas_sasum(const int N, const float *X, const int incX);
+
+double cblas_dnrm2(const int N, const double *X, const int incX);
+double cblas_dasum(const int N, const double *X, const int incX);
+
+float cblas_scnrm2(const int N, const void *X, const int incX);
+float cblas_scasum(const int N, const void *X, const int incX);
+
+double cblas_dznrm2(const int N, const void *X, const int incX);
+double cblas_dzasum(const int N, const void *X, const int incX);
+
+
+/*
+ * Functions having standard 4 prefixes (S D C Z)
+ */
+CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);
+CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);
+CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);
+CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS routines
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (s, d, c, z)
+ */
+void cblas_sswap(const int N, float *X, const int incX,
+ float *Y, const int incY);
+void cblas_scopy(const int N, const float *X, const int incX,
+ float *Y, const int incY);
+void cblas_saxpy(const int N, const float alpha, const float *X,
+ const int incX, float *Y, const int incY);
+
+void cblas_dswap(const int N, double *X, const int incX,
+ double *Y, const int incY);
+void cblas_dcopy(const int N, const double *X, const int incX,
+ double *Y, const int incY);
+void cblas_daxpy(const int N, const double alpha, const double *X,
+ const int incX, double *Y, const int incY);
+
+void cblas_cswap(const int N, void *X, const int incX,
+ void *Y, const int incY);
+void cblas_ccopy(const int N, const void *X, const int incX,
+ void *Y, const int incY);
+void cblas_caxpy(const int N, const void *alpha, const void *X,
+ const int incX, void *Y, const int incY);
+
+void cblas_zswap(const int N, void *X, const int incX,
+ void *Y, const int incY);
+void cblas_zcopy(const int N, const void *X, const int incX,
+ void *Y, const int incY);
+void cblas_zaxpy(const int N, const void *alpha, const void *X,
+ const int incX, void *Y, const int incY);
+
+
+/*
+ * Routines with S and D prefix only
+ */
+void cblas_srotg(float *a, float *b, float *c, float *s);
+void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);
+void cblas_srot(const int N, float *X, const int incX,
+ float *Y, const int incY, const float c, const float s);
+void cblas_srotm(const int N, float *X, const int incX,
+ float *Y, const int incY, const float *P);
+
+void cblas_drotg(double *a, double *b, double *c, double *s);
+void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);
+void cblas_drot(const int N, double *X, const int incX,
+ double *Y, const int incY, const double c, const double s);
+void cblas_drotm(const int N, double *X, const int incX,
+ double *Y, const int incY, const double *P);
+
+
+/*
+ * Routines with S D C Z CS and ZD prefixes
+ */
+void cblas_sscal(const int N, const float alpha, float *X, const int incX);
+void cblas_dscal(const int N, const double alpha, double *X, const int incX);
+void cblas_cscal(const int N, const void *alpha, void *X, const int incX);
+void cblas_zscal(const int N, const void *alpha, void *X, const int incX);
+void cblas_csscal(const int N, const float alpha, void *X, const int incX);
+void cblas_zdscal(const int N, const double alpha, void *X, const int incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 2 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void cblas_sgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ const float *X, const int incX, const float beta,
+ float *Y, const int incY);
+void cblas_sgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const float alpha,
+ const float *A, const int lda, const float *X,
+ const int incX, const float beta, float *Y, const int incY);
+void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *A, const int lda,
+ float *X, const int incX);
+void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const float *A, const int lda,
+ float *X, const int incX);
+void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *Ap, float *X, const int incX);
+void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *A, const int lda, float *X,
+ const int incX);
+void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const float *A, const int lda,
+ float *X, const int incX);
+void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *Ap, float *X, const int incX);
+
+void cblas_dgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY);
+void cblas_dgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const double alpha,
+ const double *A, const int lda, const double *X,
+ const int incX, const double beta, double *Y, const int incY);
+void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *A, const int lda,
+ double *X, const int incX);
+void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const double *A, const int lda,
+ double *X, const int incX);
+void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *Ap, double *X, const int incX);
+void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *A, const int lda, double *X,
+ const int incX);
+void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const double *A, const int lda,
+ double *X, const int incX);
+void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *Ap, double *X, const int incX);
+
+void cblas_cgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY);
+void cblas_cgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const void *alpha,
+ const void *A, const int lda, const void *X,
+ const int incX, const void *beta, void *Y, const int incY);
+void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda, void *X,
+ const int incX);
+void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+
+void cblas_zgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY);
+void cblas_zgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const void *alpha,
+ const void *A, const int lda, const void *X,
+ const int incX, const void *beta, void *Y, const int incY);
+void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda, void *X,
+ const int incX);
+void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+
+
+/*
+ * Routines with S and D prefixes only
+ */
+void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *A,
+ const int lda, const float *X, const int incX,
+ const float beta, float *Y, const int incY);
+void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const float alpha, const float *A,
+ const int lda, const float *X, const int incX,
+ const float beta, float *Y, const int incY);
+void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *Ap,
+ const float *X, const int incX,
+ const float beta, float *Y, const int incY);
+void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,
+ const float alpha, const float *X, const int incX,
+ const float *Y, const int incY, float *A, const int lda);
+void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, float *A, const int lda);
+void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, float *Ap);
+void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY, float *A,
+ const int lda);
+void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY, float *A);
+
+void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *A,
+ const int lda, const double *X, const int incX,
+ const double beta, double *Y, const int incY);
+void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const double alpha, const double *A,
+ const int lda, const double *X, const int incX,
+ const double beta, double *Y, const int incY);
+void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *Ap,
+ const double *X, const int incX,
+ const double beta, double *Y, const int incY);
+void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,
+ const double alpha, const double *X, const int incX,
+ const double *Y, const int incY, double *A, const int lda);
+void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, double *A, const int lda);
+void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, double *Ap);
+void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, const double *Y, const int incY, double *A,
+ const int lda);
+void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, const double *Y, const int incY, double *A);
+
+
+/*
+ * Routines with C and Z prefixes only
+ */
+void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *Ap,
+ const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const void *X, const int incX,
+ void *A, const int lda);
+void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const void *X,
+ const int incX, void *A);
+void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *Ap);
+
+void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *Ap,
+ const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const void *X, const int incX,
+ void *A, const int lda);
+void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const void *X,
+ const int incX, void *A);
+void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *Ap);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 3 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const float alpha, const float *A,
+ const int lda, const float *B, const int ldb,
+ const float beta, float *C, const int ldc);
+void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ const float *B, const int ldb, const float beta,
+ float *C, const int ldc);
+void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const float *A, const int lda,
+ const float beta, float *C, const int ldc);
+void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const float *A, const int lda,
+ const float *B, const int ldb, const float beta,
+ float *C, const int ldc);
+void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ float *B, const int ldb);
+void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ float *B, const int ldb);
+
+void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const double alpha, const double *A,
+ const int lda, const double *B, const int ldb,
+ const double beta, double *C, const int ldc);
+void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *B, const int ldb, const double beta,
+ double *C, const int ldc);
+void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double beta, double *C, const int ldc);
+void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double *B, const int ldb, const double beta,
+ double *C, const int ldc);
+void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ double *B, const int ldb);
+void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ double *B, const int ldb);
+
+void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const void *alpha, const void *A,
+ const int lda, const void *B, const int ldb,
+ const void *beta, void *C, const int ldc);
+void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *beta, void *C, const int ldc);
+void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+
+void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const void *alpha, const void *A,
+ const int lda, const void *B, const int ldb,
+ const void *beta, void *C, const int ldc);
+void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *beta, void *C, const int ldc);
+void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+
+
+/*
+ * Routines with prefixes C and Z only
+ */
+void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const void *A, const int lda,
+ const float beta, void *C, const int ldc);
+void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const float beta,
+ void *C, const int ldc);
+
+void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const void *A, const int lda,
+ const double beta, void *C, const int ldc);
+void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const double beta,
+ void *C, const int ldc);
+
+void cblas_xerbla(int p, const char *rout, const char *form, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/numpy/base/chararray.py b/numpy/base/chararray.py
new file mode 100644
index 000000000..130a40cc4
--- /dev/null
+++ b/numpy/base/chararray.py
@@ -0,0 +1,341 @@
+from numerictypes import character, string, unicode_, \
+ obj2dtype, integer, object_
+from numeric import ndarray, broadcast, empty
+from numeric import array as narray
+import sys
+
+__all__ = ['chararray']
+
+# special sub-class for character arrays (string and unicode_)
+# This adds equality testing and methods of str and unicode types
+# which operate on an element-by-element basis
+
+
+class chararray(ndarray):
+ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+ offset=0, strides=None, fortran=0):
+
+ if unicode:
+ dtype = unicode_
+ else:
+ dtype = string
+
+ if buffer is None:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ fortran=fortran)
+ else:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ buffer=buffer,
+ offset=offset, strides=strides,
+ fortran=fortran)
+ return self
+
+ def _richcmpfunc(self, other, op):
+ b = broadcast(self, other)
+ result = empty(b.shape, dtype=bool)
+ res = result.flat
+ for k, val in enumerate(b):
+ r1 = val[0].rstrip('\x00')
+ r2 = val[1]
+ res[k] = eval("r1 %s r2" % op, {'r1':r1,'r2':r2})
+ return result
+
+ # these should probably be moved to C
+ def __eq__(self, other):
+ return self._richcmpfunc(other, '==')
+
+ def __ne__(self, other):
+ return self._richcmpfunc(other, '!=')
+
+ def __ge__(self, other):
+ return self._richcmpfunc(other, '>=')
+
+ def __le__(self, other):
+ return self._richcmpfunc(other, '<=')
+
+ def __gt__(self, other):
+ return self._richcmpfunc(other, '>')
+
+ def __lt__(self, other):
+ return self._richcmpfunc(other, '<')
+
+ def __add__(self, other):
+ b = broadcast(self, other)
+ arr = b.iters[1].base
+ outitem = self.itemsize + arr.itemsize
+ result = chararray(b.shape, outitem, self.dtype is unicode_)
+ res = result.flat
+ for k, val in enumerate(b):
+ res[k] = (val[0] + val[1])
+ return result
+
+ def __radd__(self, other):
+ b = broadcast(other, self)
+ outitem = b.iters[0].base.itemsize + \
+ b.iters[1].base.itemsize
+ result = chararray(b.shape, outitem, self.dtype is unicode_)
+ res = result.flat
+ for k, val in enumerate(b):
+ res[k] = (val[0] + val[1])
+ return result
+
+ def __mul__(self, other):
+ b = broadcast(self, other)
+ arr = b.iters[1].base
+ if not issubclass(arr.dtype, integer):
+ raise ValueError, "Can only multiply by integers"
+ outitem = b.iters[0].base.itemsize * arr.max()
+ result = chararray(b.shape, outitem, self.dtype is unicode_)
+ res = result.flat
+ for k, val in enumerate(b):
+ res[k] = val[0]*val[1]
+ return result
+
+ def __rmul__(self, other):
+ b = broadcast(self, other)
+ arr = b.iters[1].base
+ if not issubclass(arr.dtype, integer):
+ raise ValueError, "Can only multiply by integers"
+ outitem = b.iters[0].base.itemsize * arr.max()
+ result = chararray(b.shape, outitem, self.dtype is unicode_)
+ res = result.flat
+ for k, val in enumerate(b):
+ res[k] = val[0]*val[1]
+ return result
+
+ def __mod__(self, other):
+ b = broadcast(self, other)
+ res = [None]*b.size
+ maxsize = -1
+ for k,val in enumerate(b):
+ newval = val[0] % val[1]
+ maxsize = max(len(newval), maxsize)
+ res[k] = newval
+ newarr = chararray(b.shape, maxsize, self.dtype is unicode_)
+ newarr[:] = res
+ return newarr
+
+ def __rmod__(self, other):
+ return NotImplemented
+
+ def _generalmethod(self, name, myiter):
+ res = [None]*myiter.size
+ maxsize = -1
+ for k, val in enumerate(myiter):
+ newval = []
+ for chk in val[1:]:
+ if chk.dtype is object_ and chk.item() is None:
+ break
+ newval.append(chk)
+ newitem = getattr(val[0],name)(*newval)
+ maxsize = max(len(newitem), maxsize)
+ res[k] = newitem
+ newarr = chararray(myiter.shape, maxsize, self.dtype is unicode_)
+
+ newarr[:] = res
+ return newarr
+
+ def _typedmethod(self, name, myiter, dtype):
+ result = empty(myiter.shape, dtype=dtype)
+ res = result.flat
+ for k, val in enumerate(myiter):
+ newval = []
+ for chk in val[1:]:
+ if chk.dtype is object_ and chk.item() is None:
+ break
+ newval.append(chk)
+ this_str = val[0].rstrip('\x00')
+ newitem = getattr(this_str,name)(*newval)
+ res[k] = newitem
+ return result
+
+ def _samemethod(self, name):
+ result = self.copy()
+ res = result.flat
+ for k, val in enumerate(self.flat):
+ res[k] = getattr(val, name)()
+ return result
+
+ def capitalize(self):
+ return self._samemethod('capitalize')
+
+ if sys.version[:3] >= '2.4':
+ def center(self, width, fillchar=' '):
+ return self._generalmethod('center',
+ broadcast(self, width, fillchar))
+ def ljust(self, width, fillchar=' '):
+ return self._generalmethod('ljust',
+ broadcast(self, width, fillchar))
+ def rjust(self, width, fillchar=' '):
+ return self._generalmethod('rjust',
+ broadcast(self, width, fillchar))
+ def rsplit(self, sep=None, maxsplit=None):
+ return self._typedmethod('rsplit', broadcast(self, sep, maxsplit),
+ object)
+ else:
+ def ljust(self, width):
+ return self._generalmethod('ljust', broadcast(self, width))
+ def rjust(self, width):
+ return self._generalmethod('rjust', broadcast(self, width))
+ def center(self, width):
+ return self._generalmethod('center', broadcast(self, width))
+
+ def count(self, sub, start=None, end=None):
+ return self._typedmethod('count', broadcast(self, sub, start, end), int)
+
+ def decode(self,encoding=None,errors=None):
+ return self._generalmethod('decode', broadcast(self, encoding, errors))
+
+ def encode(self,encoding=None,errors=None):
+ return self._generalmethod('encode', broadcast(self, encoding, errors))
+
+ def endswith(self, suffix, start=None, end=None):
+ return self._typedmethod('endswith', broadcast(self, suffix, start, end), bool)
+
+ def expandtabs(self, tabsize=None):
+        return self._generalmethod('expandtabs', broadcast(self, tabsize))
+
+ def find(self, sub, start=None, end=None):
+ return self._typedmethod('find', broadcast(self, sub, start, end), int)
+
+ def index(self, sub, start=None, end=None):
+ return self._typedmethod('index', broadcast(self, sub, start, end), int)
+
+ def _ismethod(self, name):
+ result = empty(self.shape, dtype=bool)
+ res = result.flat
+ for k, val in enumerate(self.flat):
+ item = val.rstrip('\x00')
+ res[k] = getattr(item, name)()
+ return result
+
+ def isalnum(self):
+ return self._ismethod('isalnum')
+
+ def isalpha(self):
+ return self._ismethod('isalpha')
+
+ def isdigit(self):
+ return self._ismethod('isdigit')
+
+ def islower(self):
+ return self._ismethod('islower')
+
+ def isspace(self):
+ return self._ismethod('isspace')
+
+ def istitle(self):
+ return self._ismethod('istitle')
+
+ def isupper(self):
+ return self._ismethod('isupper')
+
+ def join(self, seq):
+ return self._generalmethod('join', broadcast(self, seq))
+
+ def lower(self):
+ return self._samemethod('lower')
+
+ def lstrip(self, chars):
+ return self._generalmethod('lstrip', broadcast(self, chars))
+
+ def replace(self, old, new, count=None):
+ return self._generalmethod('replace', broadcast(self, old, new, count))
+
+ def rfind(self, sub, start=None, end=None):
+ return self._typedmethod('rfind', broadcast(self, sub, start, end), int)
+
+ def rindex(self, sub, start=None, end=None):
+ return self._typedmethod('rindex', broadcast(self, sub, start, end), int)
+
+ def rstrip(self, chars=None):
+ return self._generalmethod('rstrip', broadcast(self, chars))
+
+ def split(self, sep=None, maxsplit=None):
+ return self._typedmethod('split', broadcast(self, sep, maxsplit), object)
+
+ def splitlines(self, keepends=None):
+ return self._typedmethod('splitlines', broadcast(self, keepends), object)
+
+ def startswith(self, prefix, start=None, end=None):
+ return self._typedmethod('startswith', broadcast(self, prefix, start, end), bool)
+
+ def strip(self, chars=None):
+ return self._generalmethod('strip', broadcast(self, chars))
+
+ def swapcase(self):
+ return self._samemethod('swapcase')
+
+ def title(self):
+ return self._samemethod('title')
+
+ def translate(self, table, deletechars=None):
+ if self.dtype is unicode_:
+ return self._generalmethod('translate', broadcast(self, table))
+ else:
+ return self._generalmethod('translate', broadcast(self, table, deletechars))
+
+ def upper(self):
+ return self._samemethod('upper')
+
+ def zfill(self, width):
+ return self._generalmethod('zfill', broadcast(self, width))
+
+
+def array(obj, itemsize=None, copy=True, unicode=False, fortran=False):
+
+ if isinstance(obj, chararray):
+ if itemsize is None:
+ itemsize = obj.itemsize
+ if copy or (itemsize != obj.itemsize) \
+ or (not unicode and obj.dtype == unicode_) \
+ or (unicode and obj.dtype == string):
+ return obj.astype("%s%d" % (obj.dtypechar, itemsize))
+ else:
+ return obj
+
+ if isinstance(obj, ndarray) and (obj.dtype in [unicode_, string]):
+ if itemsize is None:
+ itemsize = obj.itemsize
+ copied = 0
+ if unicode:
+ dtype = (unicode_, obj.itemsize)
+ if obj.dtype == string:
+ obj = obj.astype(dtype)
+ copied = 1
+ else:
+ dtype = (string, obj.itemsize)
+ if obj.dtype == unicode_:
+ obj = obj.astype(dtype)
+ copied = 1
+
+ if copy and not copied:
+ obj = obj.copy()
+
+ return chararray(obj.shape, itemsize=itemsize, unicode=unicode,
+ buffer=obj, offset=0,
+ fortran=obj.flags['FNC'])
+
+ if unicode: dtype = "U"
+ else: dtype = "S"
+
+ if itemsize is not None:
+ dtype += str(itemsize)
+
+ if isinstance(obj, str) or isinstance(obj, unicode):
+ if itemsize is None:
+ itemsize = len(obj)
+ shape = len(obj) / itemsize
+ return chararray(shape, itemsize=itemsize, unicode=unicode,
+ buffer=obj)
+
+ # default
+ val = narray(obj, dtype=dtype, fortran=fortran, subok=1)
+
+ return chararray(val.shape, itemsize, unicode, buffer=val,
+ strides=val.strides,
+ fortran=fortran)
+
+def asarray(obj, itemsize=None, unicode=False, fortran=False):
+ return array(obj, itemsize, copy=False,
+ unicode=unicode, fortran=fortran)
diff --git a/numpy/base/code_generators/array_api_order.txt b/numpy/base/code_generators/array_api_order.txt
new file mode 100644
index 000000000..154373592
--- /dev/null
+++ b/numpy/base/code_generators/array_api_order.txt
@@ -0,0 +1,62 @@
+# The functions in the scipy_core C API
+# They are defined here so that the order is set.
+PyArray_SetNumericOps
+PyArray_GetNumericOps
+PyArray_INCREF
+PyArray_XDECREF
+PyArray_SetStringFunction
+PyArray_DescrFromType
+PyArray_TypeObjectFromType
+PyArray_Zero
+PyArray_One
+PyArray_CastToType
+PyArray_CastTo
+PyArray_CanCastSafely
+PyArray_CanCastTo
+PyArray_ObjectType
+PyArray_DescrFromObject
+PyArray_ConvertToCommonType
+PyArray_DescrFromScalar
+PyArray_Size
+PyArray_Scalar
+PyArray_ToScalar
+PyArray_FromScalar
+PyArray_ScalarAsCtype
+PyArray_CastScalarToCtype
+PyArray_RegisterDataType
+PyArray_RegisterDescrForType
+PyArray_FromDims
+PyArray_FromDimsAndDataAndDescr
+PyArray_FromAny
+PyArray_EnsureArray
+PyArray_FromFile
+PyArray_FromString
+PyArray_FromBuffer
+PyArray_Return
+PyArray_GetField
+PyArray_SetField
+PyArray_Byteswap
+PyArray_Resize
+PyArray_NewCopy
+PyArray_CopyInto
+PyArray_ToList
+PyArray_ToFile
+PyArray_Dump
+PyArray_Dumps
+PyArray_ValidType
+PyArray_UpdateFlags
+PyArray_New
+PyArray_NewFromDescr
+PyArray_DescrNew
+PyArray_DescrNewFromType
+PyArray_GetPriority
+PyArray_IterNew
+PyArray_MultiIterNew
+PyArray_PyIntAsInt
+PyArray_PyIntAsIntp
+PyArray_Broadcast
+PyArray_FillObjectArray
+PyArray_FillWithScalar
+PyArray_CheckStrides
+PyArray_DescrNewByteorder
+PyArray_IterAllButAxis
diff --git a/numpy/base/code_generators/genapi.py b/numpy/base/code_generators/genapi.py
new file mode 100644
index 000000000..b70aa7a14
--- /dev/null
+++ b/numpy/base/code_generators/genapi.py
@@ -0,0 +1,224 @@
+import sys, os, re
+import md5
+
+API_FILES = ['arraymethods.c',
+ 'arrayobject.c',
+ 'arraytypes.inc.src',
+ 'multiarraymodule.c',
+ 'scalartypes.inc.src',
+ 'ufuncobject.c',
+ ]
+THIS_DIR = os.path.dirname(__file__)
+API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
+
+def remove_whitespace(s):
+ return ''.join(s.split())
+
+class Function(object):
+ def __init__(self, name, return_type, args, doc=''):
+ self.name = name
+ self.return_type = return_type
+ self.args = args
+ self.doc = doc
+
+ def _format_arg(self, (typename, name)):
+ if typename.endswith('*'):
+ return typename + name
+ else:
+ return typename + ' ' + name
+
+ def argtypes_string(self):
+ if not self.args:
+ return 'void'
+ argstr = ', '.join([a[0] for a in self.args])
+ return argstr
+
+ def __str__(self):
+ argstr = ', '.join([self._format_arg(a) for a in self.args])
+ if self.doc:
+ doccomment = '/* %s */\n' % self.doc
+ else:
+ doccomment = ''
+ return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)
+
+ def api_hash(self):
+ m = md5.new()
+ m.update(remove_whitespace(self.return_type))
+ m.update('\000')
+ m.update(self.name)
+ m.update('\000')
+ for typename, name in self.args:
+ m.update(remove_whitespace(typename))
+ m.update('\000')
+ return m.hexdigest()[:8]
+
+class ParseError(Exception):
+ def __init__(self, filename, lineno, msg):
+ self.filename = filename
+ self.lineno = lineno
+ self.msg = msg
+
+ def __str__(self):
+ return '%s:%s:%s' % (self.filename, self.lineno, self.msg)
+
+def skip_brackets(s, lbrac, rbrac):
+ count = 0
+ for i, c in enumerate(s):
+ if c == lbrac:
+ count += 1
+ elif c == rbrac:
+ count -= 1
+ if count == 0:
+ return i
+ raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
+
+def split_arguments(argstr):
+ arguments = []
+ bracket_counts = {'(': 0, '[': 0}
+ current_argument = []
+ state = 0
+ i = 0
+ def finish_arg():
+ if current_argument:
+ argstr = ''.join(current_argument).strip()
+ m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
+ if m:
+ typename = m.group(1).strip()
+ name = m.group(3)
+ else:
+ typename = argstr
+ name = ''
+ arguments.append((typename, name))
+ del current_argument[:]
+ while i < len(argstr):
+ c = argstr[i]
+ if c == ',':
+ finish_arg()
+ elif c == '(':
+ p = skip_brackets(argstr[i:], '(', ')')
+ current_argument += argstr[i:i+p]
+ i += p-1
+ else:
+ current_argument += c
+ i += 1
+ finish_arg()
+ return arguments
+
+
+def find_functions(filename, tag='API'):
+ fo = open(filename, 'r')
+ functions = []
+ return_type = None
+ function_name = None
+ function_args = []
+ doclist = []
+ SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
+ state = SCANNING
+ tagcomment = '/*' + tag
+ for lineno, line in enumerate(fo):
+ try:
+ line = line.strip()
+ if state == SCANNING:
+ if line.startswith(tagcomment):
+ if line.endswith('*/'):
+ state = STATE_RETTYPE
+ else:
+ state = STATE_DOC
+ elif state == STATE_DOC:
+ if line.startswith('*/'):
+ state = STATE_RETTYPE
+ else:
+ line = line.lstrip(' *')
+ doclist.append(line)
+ elif state == STATE_RETTYPE: #first line of declaration with return type
+ m = re.match(r'static\s+(.*)$', line)
+ if m:
+ line = m.group(1)
+ return_type = line
+ state = STATE_NAME
+ elif state == STATE_NAME: # second line, with function name
+ m = re.match(r'(\w+)\s*\(', line)
+ if m:
+ function_name = m.group(1)
+ else:
+ raise ParseError(filename, lineno+1, 'could not find function name')
+ function_args.append(line[m.end():])
+ state = STATE_ARGS
+ elif state == STATE_ARGS:
+ if line.startswith('{'): # finished
+ fargs_str = ' '.join(function_args).rstrip(' )')
+ fargs = split_arguments(fargs_str)
+ f = Function(function_name, return_type, fargs,
+ ' '.join(doclist))
+ functions.append(f)
+ return_type = None
+ function_name = None
+ function_args = []
+ doclist = []
+                    state = SCANNING
+ else:
+ function_args.append(line)
+ except:
+ print filename, lineno+1
+ raise
+ fo.close()
+ return functions
+
+def read_order(order_file):
+ fo = open(order_file, 'r')
+ order = {}
+ i = 0
+ for line in fo:
+ line = line.strip()
+ if not line.startswith('#'):
+ order[line] = i
+ i += 1
+ fo.close()
+ return order
+
+def get_api_functions(tagname, order_file):
+ if not os.path.exists(order_file):
+ order_file = os.path.join(THIS_DIR, order_file)
+ order = read_order(order_file)
+ functions = []
+ for f in API_FILES:
+ functions.extend(find_functions(f, tagname))
+ dfunctions = []
+ for func in functions:
+ o = order[func.name]
+ dfunctions.append( (o, func) )
+ dfunctions.sort()
+ return [a[1] for a in dfunctions]
+
+def add_api_list(offset, APIname, api_list,
+ module_list, extension_list, init_list):
+    """Add the API function declarations to the appropriate lists for use in
+ the headers.
+ """
+ for k, func in enumerate(api_list):
+ num = offset + k
+ astr = "static %s %s \\\n (%s);" % \
+ (func.return_type, func.name, func.argtypes_string())
+ module_list.append(astr)
+ astr = "#define %s \\\n (*(%s (*)(%s)) \\\n"\
+ " %s[%d])" % (func.name,func.return_type,
+ func.argtypes_string(), APIname, num)
+ extension_list.append(astr)
+ astr = " (void *) %s," % func.name
+ init_list.append(astr)
+
+
+def main():
+ tagname = sys.argv[1]
+ order_file = sys.argv[2]
+ functions = get_api_functions(tagname, order_file)
+ m = md5.new(tagname)
+ for func in functions:
+ print func
+ ah = func.api_hash()
+ m.update(ah)
+ print hex(int(ah,16))
+ print hex(int(m.hexdigest()[:8],16))
+
+if __name__ == '__main__':
+ main()
diff --git a/numpy/base/code_generators/generate_array_api.py b/numpy/base/code_generators/generate_array_api.py
new file mode 100644
index 000000000..e4ec8b2e1
--- /dev/null
+++ b/numpy/base/code_generators/generate_array_api.py
@@ -0,0 +1,136 @@
+import os
+import genapi
+
+types = ['Generic','Numeric','Integer','SignedInteger','UnsignedInteger',
+ 'Inexact',
+ 'Floating', 'ComplexFloating', 'Flexible', 'Character',
+ 'Bool','Byte','Short','Int', 'Long', 'LongLong', 'UByte', 'UShort',
+ 'UInt', 'ULong', 'ULongLong', 'Float', 'Double', 'LongDouble',
+ 'CFloat', 'CDouble', 'CLongDouble', 'Object', 'String', 'Unicode',
+ 'Void']
+
+h_template = r"""
+#ifdef _MULTIARRAYMODULE
+
+static PyTypeObject PyBigArray_Type;
+static PyTypeObject PyArray_Type;
+static PyTypeObject PyArrayDescr_Type;
+static PyTypeObject PyArrayIter_Type;
+static PyTypeObject PyArrayMapIter_Type;
+static PyTypeObject PyArrayMultiIter_Type;
+static int PyArray_NUMUSERTYPES=0;
+
+%s
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
+
+#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[0])
+#define PyArray_Type (*(PyTypeObject *)PyArray_API[1])
+#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[2])
+#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[3])
+#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[4])
+#define PyArray_NUMUSERTYPES (*(int *)PyArray_API[5])
+
+%s
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+import_array(void)
+{
+ PyObject *numpy = PyImport_ImportModule("scipy.base.multiarray");
+ PyObject *c_api = NULL;
+ if (numpy == NULL) return -1;
+ c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+ if (c_api == NULL) {Py_DECREF(numpy); return -1;}
+ if (PyCObject_Check(c_api)) {
+ PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
+ }
+ Py_DECREF(c_api);
+ Py_DECREF(numpy);
+ if (PyArray_API == NULL) return -1;
+ return 0;
+}
+#endif
+
+#endif
+"""
+
+
+c_template = r"""
+/* These pointers will be stored in the C-object for use in other
+ extension modules
+*/
+
+void *PyArray_API[] = {
+ (void *) &PyBigArray_Type,
+ (void *) &PyArray_Type,
+ (void *) &PyArrayDescr_Type,
+ (void *) &PyArrayIter_Type,
+ (void *) &PyArrayMultiIter_Type,
+ (int *) &PyArray_NUMUSERTYPES,
+%s
+};
+"""
+
+def generate_api(output_dir):
+ objectapi_list = genapi.get_api_functions('OBJECT_API',
+ 'array_api_order.txt')
+ multiapi_list = genapi.get_api_functions('MULTIARRAY_API',
+ 'multiarray_api_order.txt')
+ # API fixes for __arrayobject_api.h
+
+ fixed = 6
+ numtypes = len(types) + fixed
+ numobject = len(objectapi_list) + numtypes
+ nummulti = len(multiapi_list)
+ numtotal = numobject + nummulti
+
+ module_list = []
+ extension_list = []
+ init_list = []
+
+ # setup types
+ for k, atype in enumerate(types):
+ num = fixed + k
+ astr = " (void *) &Py%sArrType_Type," % types[k]
+ init_list.append(astr)
+ astr = "static PyTypeObject Py%sArrType_Type;" % types[k]
+ module_list.append(astr)
+ astr = "#define Py%sArrType_Type (*(PyTypeObject *)PyArray_API[%d])" % \
+ (types[k], num)
+ extension_list.append(astr)
+
+ #setup object API
+ genapi.add_api_list(numtypes, 'PyArray_API', objectapi_list,
+ module_list, extension_list, init_list)
+
+ # setup multiarray module API
+ genapi.add_api_list(numobject, 'PyArray_API', multiapi_list,
+ module_list, extension_list, init_list)
+
+
+ # Write to header
+ fid = open(os.path.join(output_dir, '__multiarray_api.h'),'w')
+ s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
+ fid.write(s)
+ fid.close()
+
+ # Write to c-code
+ fid = open(os.path.join(output_dir,'__multiarray_api.c'),'w')
+ s = c_template % '\n'.join(init_list)
+ fid.write(s)
+ fid.close()
diff --git a/numpy/base/code_generators/generate_ufunc_api.py b/numpy/base/code_generators/generate_ufunc_api.py
new file mode 100644
index 000000000..59c808e36
--- /dev/null
+++ b/numpy/base/code_generators/generate_ufunc_api.py
@@ -0,0 +1,91 @@
+import os
+import genapi
+
+h_template = r"""
+#ifdef _UMATHMODULE
+
+static PyTypeObject PyUFunc_Type;
+
+%s
+
+#else
+
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
+extern void **PyUFunc_API;
+#else
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+void **PyUFunc_API;
+#else
+static void **PyUFunc_API=NULL;
+#endif
+#endif
+
+#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
+
+%s
+
+static int
+import_ufunc(void)
+{
+ PyObject *numpy = PyImport_ImportModule("scipy.base.umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) return -1;
+ c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
+ if (c_api == NULL) {Py_DECREF(numpy); return -1;}
+ if (PyCObject_Check(c_api)) {
+ PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api);
+ }
+ Py_DECREF(c_api);
+ Py_DECREF(numpy);
+ if (PyUFunc_API == NULL) return -1;
+ return 0;
+}
+
+#endif
+"""
+
+c_template = r"""
+/* These pointers will be stored in the C-object for use in other
+ extension modules
+*/
+
+void *PyUFunc_API[] = {
+ (void *) &PyUFunc_Type,
+%s
+};
+"""
+
+def generate_api(output_dir):
+ ufunc_api_list = genapi.get_api_functions('UFUNC_API',
+ 'ufunc_api_order.txt')
+
+ # API fixes for __arrayobject_api.h
+
+ fixed = 1
+ nummulti = len(ufunc_api_list)
+ numtotal = fixed + nummulti
+
+ module_list = []
+ extension_list = []
+ init_list = []
+
+ #setup object API
+ genapi.add_api_list(fixed, 'PyUFunc_API', ufunc_api_list,
+ module_list, extension_list, init_list)
+
+ # Write to header
+ fid = open(os.path.join(output_dir, '__ufunc_api.h'),'w')
+ s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
+ fid.write(s)
+ fid.close()
+
+ # Write to c-code
+ fid = open(os.path.join(output_dir, '__ufunc_api.c'),'w')
+ s = c_template % '\n'.join(init_list)
+ fid.write(s)
+ fid.close()
diff --git a/numpy/base/code_generators/generate_umath.py b/numpy/base/code_generators/generate_umath.py
new file mode 100644
index 000000000..16945d256
--- /dev/null
+++ b/numpy/base/code_generators/generate_umath.py
@@ -0,0 +1,500 @@
+
+import string
+import re
+
# Identity markers passed as the `identity` argument of
# PyUFunc_FromFuncAndData (see make_ufuncs below).
Zero = "PyUFunc_Zero"
One = "PyUFunc_One"
None_ = "PyUFunc_None"
#each entry in defdict is

#name: [string of chars for which it is defined,
#       string of characters using func interface,
#       tuple of strings giving funcs for data,
#       (in, out), or (instr, outstr) giving the signature as character codes,
#       identity,
#       docstring,
#       output specification (optional)
#       ]

# Groups of single-character typecodes used to declare over which array
# types each ufunc is defined.  NOTE(review): `all` shadows the builtin;
# kept for byte-compatibility with defdict below.
all = '?bBhHiIlLqQfdgFDGO'
ints = 'bBhHiIlLqQ'
intsO = ints + 'O'
bintsO = '?'+ints+'O'
flts = 'fdg'
fltsO = flts+'O'
fltsM = flts+'M'          # 'M' selects the method-based object loop
cmplx = 'FDG'
cmplxO = cmplx+'O'
cmplxM = cmplx+'M'
noint = flts+cmplx+'O'
nointM = flts+cmplx+'M'
allM = '?'+ints+flts+cmplxM
nobool = all[1:]
nobool_or_obj = all[1:-1]
intflt = ints+flts
nocmplx = '?'+ints+flts
nocmplxO = nocmplx+'O'
nocmplxM = nocmplx+'M'
noobj = all[:-1]
+
+defdict = {
+'add': [all,'O',("PyNumber_Add",),
+ (2,1), Zero,
+ "adds the arguments elementwise."
+ ],
+'subtract' : [all,'O',("PyNumber_Subtract",),
+ (2,1), Zero,
+ "subtracts the arguments elementwise."
+ ],
+'multiply' : [all,cmplxO,
+ ("prod,"*3,"PyNumber_Multiply",),
+ (2,1), One,
+ "multiplies the arguments elementwise."
+ ],
+'divide' : [nobool,cmplxO,
+ ("quot,"*3,"PyNumber_Divide",),
+ (2,1), One,
+ "divides the arguments elementwise."
+ ],
+'floor_divide' : [nobool, cmplxO,
+ ("floor_quot,"*3,
+ "PyNumber_FloorDivide"),
+ (2,1), One,
+ "floor divides the arguments elementwise."
+ ],
+'true_divide' : [nobool, cmplxO,
+ ("quot,"*3,"PyNumber_TrueDivide"),
+ (2,1), One,
+ "true divides the arguments elementwise.",
+ 'f'*4+'d'*6+flts+cmplxO
+ ],
+'conjugate' : [nobool_or_obj, 'M',
+ ('"conjugate"',),
+ (1,1), None,
+ "takes the conjugate of x elementwise."
+ ],
+
+'fmod' : [intflt,fltsM,
+ ("fmod,"*3, "fmod"),
+ (2,1), Zero,
+ "computes (C-like) x1 % x2 elementwise."
+ ],
+'power' : [nobool,noint,
+ ("pow,"*6,
+ "PyNumber_Power"),
+ (2,1), One,
+ "computes x1**x2 elementwise."
+ ],
+'absolute' : [all,'O',
+ ("PyNumber_Absolute",),
+ (1,1), None,
+ "takes |x| elementwise.",
+ nocmplx+fltsO
+ ],
+'negative' : [all,cmplxO,
+ ("neg,"*3,"PyNumber_Negative"),
+ (1,1), None,
+ "determines -x elementwise",
+ ],
+'greater' : [all,'',(),(2,1), None,
+ "returns elementwise x1 > x2 in a bool array.",
+ '?'*len(all)
+ ],
+'greater_equal' : [all,'',(),(2,1), None,
+ "returns elementwise x1 >= x2 in a bool array.",
+ '?'*len(all)
+ ],
+'less' : [all,'',(),(2,1), None,
+ "returns elementwise x1 < x2 in a bool array.",
+ '?'*len(all)
+ ],
+'less_equal' : [all,'',(),(2,1), None,
+ "returns elementwise x1 <= x2 in a bool array",
+ '?'*len(all)
+ ],
+'equal' : [all, '', (), (2,1), None,
+ "returns elementwise x1 == x2 in a bool array",
+ '?'*len(all)
+ ],
+'not_equal' : [all, '', (), (2,1), None,
+ "returns elementwise x1 |= x2",
+ '?'*len(all)
+ ],
+'logical_and': [allM,'M',('"logical_and"',),
+ (2,1), One,
+ "returns x1 and x2 elementwise.",
+ '?'*len(nocmplxM+cmplx)
+ ],
+'logical_or': [allM,'M',('"logical_or"',),
+ (2,1), Zero,
+ "returns x1 or x2 elementwise.",
+ '?'*len(nocmplxM+cmplx)
+ ],
+'logical_xor': [allM, 'M', ('"logical_xor"',),
+ (2,1), None,
+ "returns x1 xor x2 elementwise.",
+ '?'*len(nocmplxM+cmplx)
+ ],
+'logical_not' : [allM, 'M', ('"logical_not"',),
+ (1,1), None,
+ "returns not x elementwise.",
+ '?'*len(nocmplxM+cmplx)
+ ],
+'maximum' : [noobj,'',(),
+ (2,1), None,
+ "returns maximum (if x1 > x2: x1; else: x2) elementwise."],
+'minimum' : [noobj,'',(),
+ (2,1), None,
+ "returns minimum (if x1 < x2: x1; else: x2) elementwise"],
+'bitwise_and' : [bintsO,'O',("PyNumber_And",),
+ (2,1), One,
+ "computes x1 & x2 elementwise."],
+'bitwise_or' : [bintsO, 'O', ("PyNumber_Or",),
+ (2,1), Zero,
+ "computes x1 | x2 elementwise."],
+'bitwise_xor' : [bintsO, 'O', ("PyNumber_Xor",),
+ (2,1), None,
+ "computes x1 ^ x2 elementwise."],
+'invert' : [bintsO,'O', ("PyNumber_Invert",),
+ (1,1), None,
+ "computes ~x (bit inversion) elementwise."
+ ],
+'left_shift' : [intsO, 'O', ("PyNumber_Lshift",),
+ (2,1), None,
+ "computes x1 << x2 (x1 shifted to left by x2 bits) elementwise."
+ ],
+'right_shift' : [intsO, 'O', ("PyNumber_Rshift",),
+ (2,1), None,
+ "computes x1 >> x2 (x1 shifted to right by x2 bits) elementwise."
+ ],
+'arccos' : [nointM, nointM,
+ ("acos,"*6, '"arccos"'),
+ (1, 1), None,
+ "inverse cosine elementwise."
+ ],
+'arcsin': [nointM, nointM,
+ ("asin,"*6, '"arcsin"'),
+ (1, 1), None,
+ "inverse sine elementwise."
+ ],
+'arctan': [nointM, nointM,
+ ("atan,"*6, '"arctan"'),
+ (1, 1), None,
+ "inverse tangent elementwise."
+ ],
+'arccosh' : [nointM, nointM,
+ ("acosh,"*6, '"arccosh"'),
+ (1, 1), None,
+ "inverse hyperbolic cosine elementwise."
+ ],
+'arcsinh': [nointM, nointM,
+ ("asinh,"*6, '"arcsinh"'),
+ (1, 1), None,
+ "inverse hyperbolic sine elementwise."
+ ],
+'arctanh': [nointM, nointM,
+ ("atanh,"*6, '"arctanh"'),
+ (1, 1), None,
+ "inverse hyperbolic tangent elementwise."
+ ],
+'cos': [nointM, nointM,
+ ("cos,"*6, '"cos"'),
+ (1, 1), None,
+ "cosine elementwise."
+ ],
+'sin': [nointM, nointM,
+ ("sin,"*6, '"sin"'),
+ (1, 1), None,
+ "sine elementwise."
+ ],
+'tan': [nointM, nointM,
+ ("tan,"*6, '"tan"'),
+ (1, 1), None,
+ "tangent elementwise."
+ ],
+'cosh': [nointM, nointM,
+ ("cosh,"*6, '"cosh"'),
+ (1, 1), None,
+ "hyperbolic cosine elementwise."
+ ],
+'sinh': [nointM, nointM,
+ ("sinh,"*6, '"sinh"'),
+ (1, 1), None,
+ "hyperbolic sine elementwise."
+ ],
+'tanh': [nointM, nointM,
+ ("tanh,"*6, '"tanh"'),
+ (1, 1), None,
+ "hyperbolic tangent elementwise."
+ ],
+'exp' : [nointM, nointM,
+ ("exp,"*6, '"exp"'),
+ (1, 1), None,
+ "e**x elementwise."
+ ],
+'log' : [nointM, nointM,
+ ("log,"*6, '"log"'),
+ (1, 1), None,
+ "logarithm base e elementwise."
+ ],
+'log10' : [nointM, nointM,
+ ("log10,"*6, '"log10"'),
+ (1, 1), None,
+ "logarithm base 10 elementwise."
+ ],
+'sqrt' : [nointM, nointM,
+ ("sqrt,"*6, '"sqrt"'),
+ (1,1), None,
+ "square-root elementwise."
+ ],
+'ceil' : [fltsM, fltsM,
+ ("ceil,"*3, '"ceil"'),
+ (1,1), None,
+ "elementwise smallest integer >= x."
+ ],
+'fabs' : [fltsM, fltsM,
+ ("fabs,"*3, '"fabs"'),
+ (1,1), None,
+ "absolute values."
+ ],
+'floor' : [fltsM, fltsM,
+ ("floor,"*3, '"floor"'),
+ (1,1), None,
+ "elementwise largest integer <= x"
+ ],
+'arctan2' : [fltsM, fltsM,
+ ("atan2,"*3, '"arctan2"'),
+ (2,1), None,
+ "a safe and correct arctan(x1/x2)"
+ ],
+
+'remainder' : [intflt, 'O',
+ ("PyObject_Remainder"),
+ (2,1), None,
+ "computes x1-n*x2 where n is floor(x1 / x2)"],
+
+'hypot' : [fltsM, fltsM,
+ ("hypot,"*3, '"hypot"'),
+ (2,1), None,
+ "sqrt(x1**2 + x2**2) elementwise"
+ ],
+
+'isnan' : [flts+cmplx, '',
+ (), (1,1), None,
+ "returns True where x is Not-A-Number",
+ '?'*len(flts+cmplx)
+ ],
+
+'isinf' : [flts+cmplx, '',
+ (), (1,1), None,
+ "returns True where x is +inf or -inf",
+ '?'*len(flts+cmplx)
+ ],
+
+'isfinite' : [flts+cmplx, '',
+ (), (1,1), None,
+ "returns True where x is finite",
+ '?'*len(flts+cmplx)
+ ],
+
+'signbit' : [flts,'',
+ (),(1,1),None,
+ "returns True where signbit of x is set (x<0).",
+ '?'*len(flts)
+ ],
+
+'modf' : [flts,'',
+ (),(1,2),None,
+ "breaks x into fractional (y1) and integral (y2) parts.\\n\\n Each output has the same sign as the input."
+ ]
+}
+
+
+def indent(st,spaces):
+ indention = ' '*spaces
+ indented = indention + string.replace(st,'\n','\n'+indention)
+ # trim off any trailing spaces
+ indented = re.sub(r' +$',r'',indented)
+ return indented
+
# Typecode -> C type name; upper-cased into PyArray_<NAME> signature
# entries by make_arrays.
chartoname = {'?': 'bool',
              'b': 'byte',
              'B': 'ubyte',
              'h': 'short',
              'H': 'ushort',
              'i': 'int',
              'I': 'uint',
              'l': 'long',
              'L': 'ulong',
              'q': 'longlong',
              'Q': 'ulonglong',
              'f': 'float',
              'd': 'double',
              'g': 'longdouble',
              'F': 'cfloat',
              'D': 'cdouble',
              'G': 'clongdouble',
              'O': 'OBJECT',
              'M': 'OBJECT',
              }

# Typecode -> generic one-input loop suffix (PyUFunc_<suffix>); 'M' maps
# to the method-calling object loop.
chartotype1 = {'f': 'f_f',
               'd': 'd_d',
               'g': 'g_g',
               'F': 'F_F',
               'D': 'D_D',
               'G': 'G_G',
               'O': 'O_O',
               'M': 'O_O_method'}

# Typecode -> generic two-input loop suffix.
chartotype2 = {'f': 'ff_f',
               'd': 'dd_d',
               'g': 'gg_g',
               'F': 'FF_F',
               'D': 'DD_D',
               'G': 'GG_G',
               'O': 'OO_O',
               'M': 'O_O_method'}
+#for each name
+# 1) create functions, data, and signature
+# 2) fill in functions and data in InitOperators
+# 3) add function.
+
def make_arrays(funcdict):
    """Build the static C arrays for every ufunc in *funcdict*.

    Returns a pair of strings: the static declarations
    (<name>_functions / _data / _signatures) and the fill-in statements
    that assign the generic PyUFunc_* loops at init time.
    """
    # functions array contains an entry for every type implemented
    # NULL should be placed where PyUfunc_ style function will be filled in later
    #
    code1list = []
    code2list = []
    for name, vals in funcdict.iteritems():
        funclist = []
        datalist = []
        siglist = []
        # k walks the typecodes of vals[0]; sub walks vals[2] (the data
        # tuple), advancing only when the generic interface is used.
        k=0;
        sub=0;
        numin, numout = vals[3]

        if numin > 1:
            thedict = chartotype2 # two inputs and one output
        else:
            thedict = chartotype1 # one input and one output

        instr = ''.join([x*numin for x in list(vals[0])])
        if len(vals) > 6:
            if isinstance(vals[6],type('')):
                outstr = vals[6]
            else: # a tuple specifying input signature, output signature
                instr, outstr = vals[6]
        else:
            outstr = ''.join([x*numout for x in list(vals[0])])

        _valslen = len(vals[0])
        assert _valslen*numout == len(outstr), "input/output signature doesn't match"
        assert len(instr) == _valslen*numin, "input/output signature doesn't match"

        for char in vals[0]:
            if char in vals[1]: # use generic function-based interface
                funclist.append('NULL')
                astr = '%s_functions[%d] = PyUFunc_%s;' % \
                       (name, k, thedict[char])
                code2list.append(astr)
                thisfunc = vals[2][sub]
                # PyNumber_* slots are function pointers stored in _data,
                # assigned at init time (they are not compile-time consts).
                if len(thisfunc) > 8 and thisfunc[:8] == "PyNumber":
                    astr = '%s_data[%d] = (void *) %s;' % \
                           (name, k, thisfunc)
                    code2list.append(astr)
                    datalist.append('(void *)NULL');
                else:
                    datalist.append('(void *)%s' % thisfunc)
                sub += 1
            else: # individual wrapper interface
                datalist.append('(void *)NULL');
                funclist.append('%s_%s' % (chartoname[char].upper(), name))

            insubstr = instr[numin*k:numin*(k+1)]
            outsubstr = outstr[numout*k:numout*(k+1)]
            siglist.extend(['PyArray_%s' % chartoname[x].upper() for x in insubstr])
            siglist.extend(['PyArray_%s' % chartoname[x].upper() for x in outsubstr])
            k += 1
        funcnames = ', '.join(funclist)
        signames = ', '.join(siglist)
        datanames = ', '.join(datalist)
        code1list.append("static PyUFuncGenericFunction %s_functions[] = { %s };" \
                         % (name, funcnames))
        code1list.append("static void * %s_data[] = { %s };" \
                         % (name, datanames))
        code1list.append("static char %s_signatures[] = { %s };" \
                         % (name, signames))
    return "\n".join(code1list),"\n".join(code2list)
+
def make_ufuncs(funcdict):
    """Emit the InitOperators body: create each ufunc object with
    PyUFunc_FromFuncAndData and register it in the module dictionary."""
    creation = r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
                                %d, %d, %s, "%s",
                                "%s", 0);"""
    fragments = []
    for name, vals in funcdict.items():
        nin, nout = vals[3]
        lines = [creation % (name, name, name, len(vals[0]),
                             nin, nout, vals[4], name, vals[5]),
                 r"""PyDict_SetItemString(dictionary, "%s", f);""" % name,
                 r"""Py_DECREF(f);"""]
        fragments.append('\n'.join(lines))
    return '\n'.join(fragments)
+
+
def convert_vals(funcdict):
    """Normalize each defdict entry in place.

    Replaces a None identity with the PyUFunc_None marker and expands the
    '"name,"*N' shorthand in vals[2] into per-type C function names:
    N==3 yields float/double/longdouble variants (f/''/l suffixes),
    N==6 additionally yields the nc_-prefixed complex variants.
    """
    for name, vals in funcdict.iteritems():
        if vals[4] is None:
            vals[4] = None_
        vals2 = vals[2]
        if len(vals2) > 0:
            # "fmod,"*3 splits into 4 pieces (trailing ''), "pow,"*6 into 7.
            alist = vals2[0].split(',')
            if len(alist) == 4:
                a = alist[0]
                # entries whose func-interface column includes real floats
                # use the plain libm names; otherwise the complex nc_ ones
                if 'f' in vals[1]:
                    newlist = [ a+'f', a, a+'l']
                else:
                    newlist = ['nc_'+a+'f', 'nc_'+a, 'nc_'+a+'l']
            elif len(alist) == 7:
                a = alist[0]
                newlist = [a+'f', a, a+'l','nc_'+a+'f', 'nc_'+a, 'nc_'+a+'l']
            else:
                newlist = alist
            newlist = newlist + list(vals2[1:])
            vals[2] = tuple(newlist)
        funcdict[name] = vals
+
+
def make_code(funcdict, filename):
    """Return the complete generated C source for the umath operators."""
    convert_vals(funcdict)
    decls, fills = make_arrays(funcdict)
    regs = make_ufuncs(funcdict)
    fills = indent(fills, 4)
    regs = indent(regs, 4)
    template = r"""

/** Warning this file is autogenerated!!!

    Please make changes to the code generator program (%s)
**/

%s

static void
InitOperators(PyObject *dictionary) {
    PyObject *f;

%s
%s
}
"""
    return template % (filename, decls, fills, regs)
+
+
if __name__ == "__main__":
    # Regenerate __umath_generated.c in the current directory from defdict.
    filename = __file__
    fid = open('__umath_generated.c','w')
    code = make_code(defdict, filename)
    fid.write(code)
    fid.close()
diff --git a/numpy/base/code_generators/multiarray_api_order.txt b/numpy/base/code_generators/multiarray_api_order.txt
new file mode 100644
index 000000000..8dbb86882
--- /dev/null
+++ b/numpy/base/code_generators/multiarray_api_order.txt
@@ -0,0 +1,66 @@
+PyArray_Transpose
+PyArray_Take
+PyArray_Put
+PyArray_PutMask
+PyArray_Repeat
+PyArray_Choose
+PyArray_Sort
+PyArray_ArgSort
+PyArray_SearchSorted
+PyArray_ArgMax
+PyArray_ArgMin
+PyArray_Reshape
+PyArray_Newshape
+PyArray_Squeeze
+PyArray_View
+PyArray_SwapAxes
+PyArray_Max
+PyArray_Min
+PyArray_Ptp
+PyArray_Mean
+PyArray_Trace
+PyArray_Diagonal
+PyArray_Clip
+PyArray_Conjugate
+PyArray_Nonzero
+PyArray_Std
+PyArray_Sum
+PyArray_CumSum
+PyArray_Prod
+PyArray_CumProd
+PyArray_All
+PyArray_Any
+PyArray_Compress
+PyArray_Flatten
+PyArray_Ravel
+PyArray_MultiplyList
+PyArray_MultiplyIntList
+PyArray_GetPtr
+PyArray_CompareLists
+PyArray_AsCArray
+PyArray_As1D
+PyArray_As2D
+PyArray_Free
+PyArray_Converter
+PyArray_IntpFromSequence
+PyArray_Concatenate
+PyArray_InnerProduct
+PyArray_MatrixProduct
+PyArray_CopyAndTranspose
+PyArray_Correlate
+PyArray_TypestrConvert
+PyArray_DescrConverter
+PyArray_DescrConverter2
+PyArray_IntpConverter
+PyArray_BufferConverter
+PyArray_AxisConverter
+PyArray_BoolConverter
+PyArray_ByteorderConverter
+PyArray_EquivTypes
+PyArray_Zeros
+PyArray_Empty
+PyArray_Where
+PyArray_Arange
+PyArray_ArangeObj
+PyArray_SortkindConverter
+PyArray_LexSort
diff --git a/numpy/base/code_generators/ufunc_api_order.txt b/numpy/base/code_generators/ufunc_api_order.txt
new file mode 100644
index 000000000..805765313
--- /dev/null
+++ b/numpy/base/code_generators/ufunc_api_order.txt
@@ -0,0 +1,26 @@
+PyUFunc_FromFuncAndData
+PyUFunc_RegisterLoopForType
+PyUFunc_GenericFunction
+PyUFunc_f_f_As_d_d
+PyUFunc_d_d
+PyUFunc_f_f
+PyUFunc_g_g
+PyUFunc_F_F_As_D_D
+PyUFunc_F_F
+PyUFunc_D_D
+PyUFunc_G_G
+PyUFunc_O_O
+PyUFunc_ff_f_As_dd_d
+PyUFunc_ff_f
+PyUFunc_dd_d
+PyUFunc_gg_g
+PyUFunc_FF_F_As_DD_D
+PyUFunc_DD_D
+PyUFunc_FF_F
+PyUFunc_GG_G
+PyUFunc_OO_O
+PyUFunc_O_O_method
+PyUFunc_On_Om
+PyUFunc_GetPyValues
+PyUFunc_checkfperr
+PyUFunc_clearfperr
diff --git a/numpy/base/convertcode.py b/numpy/base/convertcode.py
new file mode 100644
index 000000000..5c532b394
--- /dev/null
+++ b/numpy/base/convertcode.py
@@ -0,0 +1,147 @@
+
+# This module converts code written for Numeric to run with scipy.base
+
+# Makes the following changes:
+# * Converts typecharacters
+# * Changes import statements (warns of use of from Numeric import *)
+# * Changes import statements (using numerix) ...
+# * Makes search and replace changes to:
+# - .typecode()
+# - .iscontiguous()
+# - .byteswapped()
+# - .itemsize()
+# * Converts .flat to .ravel() except for .flat = xxx or .flat[xxx]
+# * Change typecode= to dtype=
+# * Eliminates savespace=xxx
+# * Replace xxx.spacesaver() with True
+# * Convert xx.savespace(?) to pass + ## xx.savespace(?)
+# #### -- not * Convert a.shape = ? to a.reshape(?)
+# * Prints warning for use of bool, int, float, copmlex, object, and unicode
+#
+
+__all__ = ['fromfile', 'fromstr']
+
+import sys
+import os
+import re
+import glob
+
+flatindex_re = re.compile('([.]flat(\s*?[[=]))')
+
def replacetypechars(astr):
    """Translate Numeric single-character typecodes to scipy.base ones.

    The 's'->'h', 'c'->'S1' and 'w'->'H' conversions are intentionally
    disabled (they were commented out in the original table).
    """
    for old, new in (("'b'", "'B'"),
                     ("'1'", "'b'"),
                     ("'u'", "'I'")):
        astr = astr.replace(old, new)
    return astr
+
def changeimports(fstr, name, newname):
    """Rewrite imports of module *name* to import *newname* instead.

    Handles 'import name', 'import name as x' and 'from name import y'.
    Returns (new_source, fromall): fromall is 1 when a
    'from name import *' was seen; the star import itself is left
    untouched so the caller can warn about it (previously the flag was
    never set).
    """
    importstr = 'import %s' % name
    importasstr = 'import %s as ' % name
    fromstr = 'from %s import ' % name
    fromall = 0

    fstr = fstr.replace(importasstr, 'import %s as ' % newname)
    fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name))

    ind = 0
    Nlen = len(fromstr)
    Nlen2 = len("from %s import " % newname)
    while 1:
        found = fstr.find(fromstr,ind)
        if (found < 0):
            break
        ind = found + Nlen
        if ind >= len(fstr):
            # guard: source ends right after 'from name import '
            # (previously an IndexError)
            break
        if fstr[ind] == '*':
            fromall = 1
            continue
        fstr = "%sfrom %s import %s" % (fstr[:found], newname, fstr[ind:])
        ind += Nlen2 - Nlen
    return fstr, fromall
+
def replaceattr(astr):
    """Rewrite Numeric-era array attribute/method spellings.

    '.flat =' and '.flat[' stay valid, so they are shielded behind a
    placeholder before the blanket '.flat' -> '.ravel()' replacement.
    """
    for old, new in ((".typecode()", ".dtypechar"),
                     (".iscontiguous()", ".flags.contiguous"),
                     (".byteswapped()", ".byteswap()"),
                     (".toscalar()", ".item()"),
                     (".itemsize()", ".itemsize")):
        astr = astr.replace(old, new)
    shielded = re.sub('([.]flat(\s*?[[=]))', "@@@@\\2", astr)
    shielded = shielded.replace(".flat", ".ravel()")
    return shielded.replace("@@@@", ".flat")
+
svspc = re.compile(r'(\S+\s*[(].+),\s*savespace\s*=.+\s*[)]')
svspc2 = re.compile(r'([^,(\s]+[.]spacesaver[(][)])')
svspc3 = re.compile(r'(\S+[.]savespace[(].*[)])')
#shpe = re.compile(r'(\S+\s*)[.]shape\s*=[^=]\s*(.+)')
def replaceother(astr):
    """Rewrite keyword and type-name constructs that changed between
    Numeric and scipy.base (typecode=, UserArray, savespace)."""
    for old, new in (("typecode=", "dtype="),
                     ("UserArray", "ndarray")):
        astr = astr.replace(old, new)
    astr = svspc.sub('\\1)', astr)          # drop savespace= keyword args
    astr = svspc2.sub('True', astr)         # x.spacesaver() -> True
    astr = svspc3.sub('pass ## \\1', astr)  # neutralize x.savespace(...)
    #astr = shpe.sub('\\1=\\1.reshape(\\2)', astr)
    return astr
+
import datetime
def fromstr(filestr):
    """Run the full Numeric -> scipy.base conversion on a source string.

    Applies typecode translation, import rewriting for every old module,
    attribute/method renames, keyword fixes, and prepends a dated
    'Automatically adapted' banner.  Returns the converted source.
    """
    filestr = replacetypechars(filestr)
    filestr, fromall1 = changeimports(filestr, 'Numeric', 'scipy')
    filestr, fromall1 = changeimports(filestr, 'multiarray',
                                      'scipy.base.multiarray')
    filestr, fromall1 = changeimports(filestr, 'umath',
                                      'scipy.base.umath')
    filestr, fromall1 = changeimports(filestr, 'Precision', 'scipy.base')
    filestr, fromall2 = changeimports(filestr, 'numerix', 'scipy.base')
    filestr, fromall3 = changeimports(filestr, 'scipy_base', 'scipy.base')
    filestr, fromall3 = changeimports(filestr, 'MLab', 'scipy.base.mlab')
    filestr, fromall3 = changeimports(filestr, 'LinearAlgebra', 'scipy.corelinalg')
    filestr, fromall3 = changeimports(filestr, 'RNG', 'scipy.random')
    filestr, fromall3 = changeimports(filestr, 'RandomArray', 'scipy.random')
    filestr, fromall3 = changeimports(filestr, 'FFT', 'scipy.corefft')
    filestr, fromall3 = changeimports(filestr, 'MA', 'scipy.base.ma')
    # NOTE(review): the combined star-import flag is computed but never
    # used; the promised "from X import *" warning is not emitted.
    fromall = fromall1 or fromall2 or fromall3
    filestr = replaceattr(filestr)
    filestr = replaceother(filestr)
    today = datetime.date.today().strftime('%b %d, %Y')
    name = os.path.split(sys.argv[0])[-1]
    filestr = '## Automatically adapted for '\
              'scipy %s by %s\n\n%s' % (today, name, filestr)
    return filestr
+
def makenewfile(name, filestr):
    """Write *filestr* to the file *name*, overwriting any old content."""
    fid = open(name, 'w')
    fid.write(filestr)
    fid.close()
+
def getandcopy(name):
    """Read file *name*, save a '<base>.orig' backup, return its text."""
    # NOTE(review): `file()` is the Python 2 builtin alias of open().
    fid = file(name)
    filestr = fid.read()
    fid.close()
    # back up the original next to the file before it gets overwritten
    base, ext = os.path.splitext(name)
    makenewfile(base+'.orig', filestr)
    return filestr
+
def fromfile(filename):
    """Convert one file in place (a .orig backup is kept by getandcopy)."""
    filestr = getandcopy(filename)
    filestr = fromstr(filestr)
    makenewfile(filename, filestr)

def fromargs(args):
    """Command-line entry point: convert the file named in args[1]."""
    filename = args[1]
    fromfile(filename)

def convertall(direc=''):
    """Convert every *.py file found in directory *direc* in place."""
    files = glob.glob(os.path.join(direc,'*.py'))
    for afile in files:
        fromfile(afile)
+
+if __name__ == '__main__':
+ fromargs(sys.argv)
+
+
+
diff --git a/numpy/base/function_base.py b/numpy/base/function_base.py
new file mode 100644
index 000000000..60e4b4be0
--- /dev/null
+++ b/numpy/base/function_base.py
@@ -0,0 +1,815 @@
+
# Public API of this module.  BUG FIX: the name was misspelled
# 'l__all__', so the export list had no effect.
__all__ = ['logspace', 'linspace', 'round_',
           'select', 'piecewise', 'trim_zeros',
           'copy', 'iterable', 'base_repr', 'binary_repr',
           'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
           'unique', 'extract', 'insert', 'nansum', 'nanmax', 'nanargmax',
           'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
           'histogram', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort',
           'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman',
           'kaiser', 'trapz'
           ]
+
+import types
+import math
+import numeric as _nx
+from numeric import ones, zeros, arange, concatenate, array, asarray, empty
+from numeric import ScalarType, dot, where, newaxis
+from umath import pi, multiply, add, arctan2, maximum, minimum, frompyfunc, \
+ isnan, absolute, cos, less_equal, sqrt, sin, mod
+from oldnumeric import ravel, nonzero, choose, \
+ sometrue, alltrue, reshape, any, all, typecodes, ArrayType, squeeze,\
+ sort
+from type_check import ScalarType, isscalar
+from shape_base import atleast_1d
+from twodim_base import diag
+from _compiled_base import digitize, bincount, _insert
+from ufunclike import sign
+
# Octal digit -> three binary digits; the 'L' long-literal suffix
# contributes nothing.
_lkup = {'0':'000',
         '1':'001',
         '2':'010',
         '3':'011',
         '4':'100',
         '5':'101',
         '6':'110',
         '7':'111',
         'L':''}

def binary_repr(num):
    """Return the binary representation of the input number as a string.

    This is equivalent to using base_repr with base 2, but about 25x
    faster.  *num* must be a non-negative integer.
    """
    if num == 0:
        # BUG FIX: the old leading-zero-stripping loop ran off the end
        # of the string for num == 0 (IndexError).
        return '0'
    ostr = oct(num)
    # strip the base prefix: '0...' on Python 2, '0o...' on Python 3
    if ostr[:2] in ('0o', '0O'):
        ostr = ostr[2:]
    else:
        ostr = ostr[1:]
    bin = ''
    for ch in ostr:
        bin += _lkup[ch]
    # drop leading zeros introduced by the octal->binary expansion
    return bin.lstrip('0')
+
def base_repr (number, base=2, padding=0):
    """Return the representation of a number in any given base.

    Digits beyond 9 use the letters A..Z; *padding* prepends that many
    leading zero characters.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'

    lnb = math.log(base)
    res = padding * chars[0]
    if number == 0:
        return res + chars[0]
    # highest power of `base` not exceeding `number`
    exponent = int(math.log(number) / lnb)
    while exponent >= 0:
        place = base ** exponent
        digit = int(number / place)
        res += chars[digit]
        number -= place * digit
        exponent -= 1
    return res
+#end Fernando's utilities
+
+
def linspace(start, stop, num=50, endpoint=True, retstep=False):
    """Return evenly spaced numbers.

    Return 'num' evenly spaced samples from 'start' to 'stop'.  If
    'endpoint' is True, the last sample is 'stop'.  If 'retstep' is
    True then return the step value used as a second result.
    """
    num = int(num)
    if num <= 0:
        return array([])
    if endpoint:
        if num == 1:
            return array([start])
        denom = num - 1
    else:
        denom = num
    step = (stop - start) / float(denom)
    samples = _nx.arange(0, num) * step + start
    if retstep:
        return samples, step
    return samples
+
def logspace(start, stop, num=50, endpoint=True, base=10.0):
    """Evenly spaced numbers on a logarithmic scale.

    Computes int(num) evenly spaced exponents from start to stop.
    If endpoint=True, then last exponent is stop.
    Returns base**exponents.
    """
    exponents = linspace(start, stop, num=num, endpoint=endpoint)
    return _nx.power(base, exponents)
+
def iterable(y):
    """Return 1 if *y* can be iterated over, else 0 (legacy int flags)."""
    try:
        iter(y)
    except TypeError:
        # iter() raises TypeError for non-iterables; the original bare
        # except also hid unrelated errors (e.g. KeyboardInterrupt).
        return 0
    return 1
+
def histogram(a, bins=10, range=None, normed=False):
    """Count the elements of *a* falling into *bins*.

    If *bins* is a scalar, that many equal bins spanning *range*
    (default: data min..max) are built with linspace(endpoint=False).
    Returns (counts, bins); with normed=True the counts are scaled so
    the histogram integrates to one over equal-width bins.
    """
    data = asarray(a).ravel()
    if not iterable(bins):
        if range is None:
            range = (data.min(), data.max())
        lo, hi = [edge + 0.0 for edge in range]
        if lo == hi:
            # degenerate range: widen it so a single value still bins
            lo -= 0.5
            hi += 0.5
        bins = linspace(lo, hi, bins, endpoint=False)

    counts = sort(data).searchsorted(bins)
    counts = concatenate([counts, [len(data)]])
    counts = counts[1:] - counts[:-1]

    if normed:
        db = bins[1] - bins[0]
        return 1.0/(data.size*db) * counts, bins
    return counts, bins
+
def average(a, axis=0, weights=None, returned=False):
    """average(a, axis=0, weights=None, returned=False)

    Average the array over the given axis. If the axis is None, average
    over all dimensions of the array. Equivalent to a.mean(axis), but
    with a default axis of 0 instead of None.

    If weights are given, result is sum(a*weights)/sum(weights); the
    weights must have a's shape or be 1D with the length of a's given
    axis.  Integer weights are converted to float.

    If 'returned' is True, return a tuple: the result and the sum of
    the weights or count of values.

    Raises ZeroDivisionError for a scalar zero denominator.  (The
    version in MA does not -- it returns masked values.)
    """
    if axis is None:
        a = array(a).ravel()
        if weights is None:
            n = add.reduce(a)
            d = len(a) * 1.0
        else:
            w = array(weights).ravel() * 1.0
            n = add.reduce(multiply(a, w))
            d = add.reduce(w)
    else:
        a = array(a)
        ash = a.shape
        if ash == ():
            a.shape = (1,)
        if weights is None:
            n = add.reduce(a, axis)
            d = ash[axis] * 1.0
            if returned:
                d = ones(n.shape) * d
        else:
            # asarray keeps the original no-copy intent of
            # array(weights, copy=False); *1.0 copies/promotes anyway
            w = asarray(weights) * 1.0
            wsh = w.shape
            if wsh == ():
                wsh = (1,)
            if wsh == ash:
                n = add.reduce(a*w, axis)
                d = add.reduce(w, axis)
            elif wsh == (ash[axis],):
                # Reshape the 1D weights so they broadcast along `axis`.
                # BUG FIX: the index template needs one entry per
                # dimension of `a` (was ash[axis] entries), and the old
                # eval() used the undefined Numeric name `Float`.
                r = [newaxis] * len(ash)
                r[axis] = slice(None, None, 1)
                w1 = w[tuple(r)] * ones(ash, 'd')
                n = add.reduce(a*w1, axis)
                d = add.reduce(w1, axis)
            else:
                raise ValueError('averaging weights have wrong shape')

    if not isinstance(d, ArrayType):
        if d == 0.0:
            raise ZeroDivisionError('zero denominator in average()')
    if returned:
        return n/d, d
    else:
        return n/d
+
def asarray_chkfinite(a):
    """Like asarray, but check that no NaNs or Infs are present.
    """
    a = asarray(a)
    # only floating/complex dtypes can hold non-finite values;
    # NOTE(review): .dtypechar is the pre-1.0 spelling of .dtype.char
    if (a.dtypechar in _nx.typecodes['AllFloat']) \
           and (_nx.isnan(a).any() or _nx.isinf(a).any()):
        raise ValueError, "array must not contain infs or NaNs"
    return a
+
+
+
+
def piecewise(x, condlist, funclist, *args, **kw):
    """Return a piecewise-defined function.

    x is the domain

    condlist is a list of boolean arrays or a single boolean array
      The length of the condition list must be n2 or n2-1 where n2
      is the length of the function list.  If len(condlist)==n2-1, then
      an 'otherwise' condition is formed by |'ing all the conditions
      and inverting.

    funclist is a list of functions to call of length (n2).
      Each function should return an array output for an array input
      Each function can take (the same set) of extra arguments and
      keyword arguments which are passed in after the function list.
      A constant (non-callable) entry is assigned directly.

    The output is the same shape and type as x and is found by
      calling the functions on the appropriate portions of x.

    Note: This is similar to choose or select, except
      the functions are only evaluated on elements of x
      that satisfy the corresponding condition.
    """
    n2 = len(funclist)
    if not isinstance(condlist, type([])):
        condlist = [condlist]
    n = len(condlist)
    if n == n2-1:  # compute the "otherwise" condition.
        totlist = condlist[0]
        for k in range(1, n):
            # BUG FIX: OR in each condition array (was `|= condlist`,
            # i.e. the whole list, a TypeError)
            totlist = totlist | condlist[k]
        condlist.append(~totlist)
        n += 1
    if (n != n2):
        raise ValueError("function list and condition list must be the same")
    y = empty(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not callable(item):
            y[condlist[k]] = item
        else:
            y[condlist[k]] = item(x[condlist[k]], *args, **kw)
    return y
+
def select(condlist, choicelist, default=0):
    """ Return an array composed of different elements of choicelist
    depending on the list of conditions.

    condlist is a list of condition arrays containing ones or zeros

    choicelist is a list of choice arrays (of the "same" size as the
    arrays in condlist).  The result array has the "same" size as the
    arrays in choicelist.  If condlist is [c0, ..., cN-1] then choicelist
    must be of length N.  The elements of the choicelist can then be
    represented as [v0, ..., vN-1]. The default choice if none of the
    conditions are met is given as the default argument.

    The conditions are tested in order and the first one statisfied is
    used to select the choice. In other words, the elements of the
    output array are found from the following tree (notice the order of
    the conditions matters):

    if c0: v0
    elif c1: v1
    elif c2: v2
    ...
    elif cN-1: vN-1
    else: default

    Note that one of the condition arrays must be large enough to handle
    the largest array in the choice list.
    """
    n = len(condlist)
    n2 = len(choicelist)
    if n2 != n:
        raise ValueError, "list of cases must be same length as list of conditions"
    # index 0 selects `default`; condition k selects choicelist[k]
    choicelist.insert(0, default)
    # S encodes, per element, the index of the FIRST satisfied condition:
    # pfac zeroes out positions already claimed by an earlier condition.
    S = 0
    pfac = 1
    for k in range(1, n+1):
        S += k * pfac * asarray(condlist[k-1])
        if k < n:
            pfac *= (1-asarray(condlist[k-1]))
    # handle special case of a 1-element condition but
    # a multi-element choice
    if type(S) in ScalarType or max(asarray(S).shape)==1:
        # summing the choices is only done to obtain the broadcast shape
        pfac = asarray(1)
        for k in range(n2+1):
            pfac = pfac + asarray(choicelist[k])
        S = S*ones(asarray(pfac).shape)
    return choose(S, tuple(choicelist))
+
+def _asarray1d(arr, copy=False):
+ """Ensure 1D array for one array.
+ """
+ if copy:
+ return asarray(arr).flatten()
+ else:
+ return asarray(arr).ravel()
+
def copy(a):
    """Return a new array holding a copy of *a*'s data."""
    return array(a, copy=True)
+
+# Basic operations
+
def gradient(f, *varargs):
    """Calculate the gradient of an N-dimensional scalar function.

    Uses central differences on the interior and first differences on boundaries
    to give the same shape.

    Inputs:

      f -- An N-dimensional array giving samples of a scalar function

      varargs -- 0, 1, or N scalars giving the sample distances in each direction

    Outputs:

      N arrays of the same shape as f giving the derivative of f with respect
      to each dimension.
    """
    N = len(f.shape)  # number of dimensions
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        dx = [varargs[0]]*N
    elif n == N:
        dx = list(varargs)
    else:
        raise SyntaxError("invalid number of arguments")

    # use central differences on interior and first differences on endpoints
    # (a stray debugging `print dx` was removed here)

    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N

    # promote integer inputs: derivatives are fractional
    otype = f.dtypechar
    if otype not in ['f', 'd', 'F', 'D']:
        otype = 'd'

    for axis in range(N):
        # select out appropriate parts for this dimension
        # BUG FIX: allocate with the promoted type `otype` (it was
        # computed but unused, so integer inputs truncated the /2.0)
        out = zeros(f.shape, otype)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)
        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        out[slice1] = (f[slice2] - f[slice3])/2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[slice1] = (f[slice2] - f[slice3])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[slice1] = (f[slice2] - f[slice3])

        # divide by step size
        outvals.append(out / dx[axis])

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)

    if N == 1:
        return outvals[0]
    else:
        return outvals
+
+
def diff(a, n=1, axis=-1):
    """Calculate the nth order discrete difference along given axis.
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError('order must be non-negative but got ' + repr(n))
    a = asarray(a)
    ndim = len(a.shape)
    upper = [slice(None)]*ndim
    lower = [slice(None)]*ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    # first difference along `axis`; recurse for higher orders
    result = a[tuple(upper)] - a[tuple(lower)]
    if n > 1:
        return diff(result, n-1, axis=axis)
    return result
+
def angle(z, deg=0):
    """Return the angle of the complex argument z.

    With deg non-zero the result is in degrees (scaled by 180/pi),
    otherwise radians.
    """
    if deg:
        fact = 180/pi
    else:
        fact = 1.0
    z = asarray(z)
    # NOTE(review): in this pre-1.0 API, z.dtype is the scalar *type*
    # object, so issubclass() works directly; modern numpy needs
    # z.dtype.type here.
    if (issubclass(z.dtype, _nx.complexfloating)):
        zimag = z.imag
        zreal = z.real
    else:
        zimag = 0
        zreal = z
    return arctan2(zimag, zreal) * fact
+
def unwrap(p, discont=pi, axis=-1):
    """Unwrap radian phase p by changing absolute jumps greater than
    'discont' to their 2*pi complement along the given axis.
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    # wrap each first difference into (-pi, pi]
    ddmod = mod(dd+pi, 2*pi)-pi
    # map a -pi wrap of a positive jump back to +pi
    _nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
    ph_correct = ddmod - dd;
    # jumps smaller than `discont` need no correction
    _nx.putmask(ph_correct, abs(dd)<discont, 0)
    # accumulate corrections from the second sample onward
    up = array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
+
def sort_complex(a):
    """ Sort 'a' as a complex array using the real part first and then
    the imaginary part if the real part is equal (the default sort order
    for complex arrays). This function is a wrapper ensuring a complex
    return type.
    """
    b = array(a,copy=True)
    b.sort()
    if not issubclass(b.dtype, _nx.complexfloating):
        # promote real results to a complex type of matching precision:
        # small ints -> single complex, longdouble -> clongdouble,
        # everything else -> double complex
        if b.dtypechar in 'bhBH':
            return b.astype('F')
        elif b.dtypechar == 'g':
            return b.astype('G')
        else:
            return b.astype('D')
    else:
        return b
+
def trim_zeros(filt, trim='fb'):
    """Trim zeros from the ends of a 1D array or sequence.

    trim may contain 'f' (trim from the front) and/or 'b' (trim from
    the back); matching is case-insensitive.  Example:

    >>> trim_zeros(array((0, 0, 0, 1, 2, 3, 2, 1, 0)))
    array([1, 2, 3, 2, 1])
    """
    trim = trim.upper()
    start = 0
    if 'F' in trim:
        # Count leading zeros; stop at the first nonzero element.
        for value in filt:
            if value != 0.:
                break
            start = start + 1
    stop = len(filt)
    if 'B' in trim:
        # Count trailing zeros by walking the reversed sequence.
        for value in filt[::-1]:
            if value != 0.:
                break
            stop = stop - 1
    return filt[start:stop]
+
def unique(inseq):
    """Return the unique items of a 1-dimensional sequence as an array.

    The order of the returned items is unspecified.
    """
    # Dictionary insertion gives O(1) membership tests.  The local is
    # named ``seen`` so the builtin ``set`` is not shadowed, and
    # keys() is wrapped in list() so this also works where keys()
    # returns a view rather than a list.
    seen = {}
    for item in inseq:
        seen[item] = None
    return asarray(list(seen.keys()))
+
+def extract(condition, arr):
+ """Return the elements of ravel(arr) where ravel(condition) is True
+ (in 1D).
+
+ Equivalent to compress(ravel(condition), ravel(arr)).
+ """
+ return _nx.take(ravel(arr), nonzero(ravel(condition)))
+
def insert(arr, mask, vals):
    """Similar to putmask arr[mask] = vals but the 1D array vals has the
    same number of elements as the non-zero values of mask. Inverse of
    extract.
    """
    # Thin wrapper: the real work is done by the compiled _insert
    # routine imported at the top of this module.
    return _insert(arr, mask, vals)
+
def nansum(a, axis=-1):
    """Sum the array over the given axis, treating NaNs as 0."""
    out = array(a)
    # Integer arrays cannot hold NaN, so only inexact types are patched.
    if not issubclass(out.dtype, _nx.integer):
        out[isnan(a)] = 0
    return out.sum(axis)
+
def nanmin(a, axis=-1):
    """Find the minimum over the given axis, ignoring NaNs."""
    out = array(a)
    # Replace NaNs with +inf so they can never win the minimum.
    if not issubclass(out.dtype, _nx.integer):
        out[isnan(a)] = _nx.inf
    return out.min(axis)
+
def nanargmin(a, axis=-1):
    """Find the indices of the minimum over the given axis, ignoring
    NaNs."""
    out = array(a)
    # Replace NaNs with +inf so they can never win the minimum.
    if not issubclass(out.dtype, _nx.integer):
        out[isnan(a)] = _nx.inf
    return out.argmin(axis)
+
def nanmax(a, axis=-1):
    """Find the maximum over the given axis, ignoring NaNs."""
    out = array(a)
    # Replace NaNs with -inf so they can never win the maximum.
    if not issubclass(out.dtype, _nx.integer):
        out[isnan(a)] = -_nx.inf
    return out.max(axis)
+
def nanargmax(a, axis=-1):
    """Find the indices of the maximum over the given axis ignoring NaNs.
    """
    y = array(a)
    # Integer arrays cannot hold NaN; -inf sentinels keep NaNs from
    # ever winning the maximum in inexact arrays.
    if not issubclass(y.dtype, _nx.integer):
        y[isnan(a)] = -_nx.inf
    return y.argmax(axis)
+
def disp(mesg, device=None, linefeed=True):
    """Write mesg to device (sys.stdout when device is None), append a
    linefeed when linefeed is true, and flush the device.
    """
    if device is None:
        import sys
        device = sys.stdout
    if not linefeed:
        device.write('%s' % mesg)
    else:
        device.write('%s\n' % mesg)
    device.flush()
+
class vectorize(object):
    """
    vectorize(somefunction, otypes='', doc=None)
    Generalized Function class.

    Description:

        Define a vectorized function which takes nested sequence
        objects or scipy arrays as inputs and returns a
        scipy array as output, evaluating the function over successive
        tuples of the input arrays like the python map function except it uses
        the broadcasting rules of scipy.

    Input:

        somefunction -- a Python function or method

    Example:

        def myfunc(a, b):
            if a > b:
                return a-b
            else:
                return a+b

        vfunc = vectorize(myfunc)

        >>> vfunc([1, 2, 3, 4], 2)
        array([3, 4, 1, 2])

    """
    def __init__(self, pyfunc, otypes='', doc=None):
        # The wrapped object must expose a code object (i.e. be a
        # plain Python function); it supplies the argument count.
        try:
            fcode = pyfunc.func_code
        except AttributeError:
            raise TypeError, "object is not a callable Python object"

        self.thefunc = pyfunc
        # The underlying ufunc is built lazily on the first call.
        self.ufunc = None
        self.nin = fcode.co_argcount
        # Arguments with defaults may be omitted by the caller.
        if pyfunc.func_defaults:
            self.nin_wo_defaults = self.nin - len(pyfunc.func_defaults)
        else:
            self.nin_wo_defaults = self.nin
        # Number of outputs is discovered on the first call.
        self.nout = None
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        if isinstance(otypes, types.StringType):
            self.otypes=otypes
        else:
            raise ValueError, "output types must be a string"
        for char in self.otypes:
            if char not in typecodes['All']:
                raise ValueError, "invalid typecode specified"
        # Argument count used when the cached ufunc was last built.
        self.lastcallargs = 0

    def __call__(self, *args):
        # get number of outputs and output types by calling
        # the function on the first entries of args
        nargs = len(args)
        if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
            raise ValueError, "mismatch between python function inputs"\
                  " and received arguments"
        if self.nout is None or self.otypes == '':
            # Probe the scalar function once with the first element of
            # each argument to learn output count and typecodes.
            newargs = []
            for arg in args:
                newargs.append(asarray(arg).flat[0])
            theout = self.thefunc(*newargs)
            if isinstance(theout, types.TupleType):
                self.nout = len(theout)
            else:
                self.nout = 1
                theout = (theout,)
            if self.otypes == '':
                otypes = []
                for k in range(self.nout):
                    otypes.append(asarray(theout[k]).dtypechar)
                self.otypes = ''.join(otypes)

        # Rebuild the ufunc when first used or when the number of
        # supplied arguments changed since the previous call.
        if (self.ufunc is None) or (self.lastcallargs != nargs):
            self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
            self.lastcallargs = nargs

        if self.nout == 1:
            return self.ufunc(*args).astype(self.otypes[0])
        else:
            return tuple([x.astype(c) for x, c in zip(self.ufunc(*args), self.otypes)])
+
+
def round_(a, decimals=0):
    """Round 'a' to the given number of decimal places. Rounding
    behaviour is equivalent to Python.

    Return 'a' if the array is not floating point. Round both the real
    and imaginary parts separately if the array is complex.
    """
    a = asarray(a)
    if not issubclass(a.dtype, _nx.inexact):
        return a
    if issubclass(a.dtype, _nx.complexfloating):
        return round_(a.real, decimals) + 1j*round_(a.imag, decimals)
    # The original tested ``decimals is not 0`` -- an identity check
    # against a literal that only works because CPython caches small
    # ints.  Decide by value, once, before any conversion.
    scaling = not (isinstance(decimals, int) and decimals == 0)
    if scaling:
        decimals = asarray(decimals)
        s = sign(a)
        a = absolute(multiply(a, 10.**decimals))
    else:
        s = sign(a)
        a = absolute(a)
    # Fractional part decides the rounding direction.
    rem = a - asarray(a).astype(_nx.intp)
    a = _nx.where(_nx.less(rem, 0.5), _nx.floor(a), _nx.ceil(a))
    # convert back
    if scaling:
        return multiply(a, s/(10.**decimals))
    else:
        return multiply(a, s)
+
+
def cov(m, y=None, rowvar=0, bias=0):
    """Estimate the covariance matrix.

    If m is a vector, return the variance.  For matrices where each row
    is an observation, and each column a variable, return the covariance
    matrix.  Note that in this case diag(cov(m)) is a vector of
    variances for each column.

    cov(m) is the same as cov(m, m)

    Normalization is by (N-1) where N is the number of observations
    (unbiased estimate).  If bias is 1 then normalization is by N.

    If rowvar is nonzero, then each row is a variable with
    observations in the columns.
    """
    if y is None:
        y = asarray(m)
    else:
        y = asarray(y)
    m = asarray(m)
    if rowvar:
        # Put observations in rows, variables in columns.
        m = m.transpose()
        y = y.transpose()
    if (m.shape[0] == 1):
        m = m.transpose()
    if (y.shape[0] == 1):
        y = y.transpose()
    N = m.shape[0]
    if (y.shape[0] != N):
        # Message fixed to name the actual parameters (m and y); the
        # original said "x and y" and used Python-2-only raise syntax.
        raise ValueError("m and y must have the same number of observations.")
    # Center each variable on its mean.
    m = m - m.mean(axis=0)
    y = y - y.mean(axis=0)
    if bias:
        fact = N*1.0
    else:
        fact = N-1.0

    val = squeeze(dot(m.transpose(), y.conj()) / fact)
    return val
+
def corrcoef(x, y=None):
    """Return the matrix of correlation coefficients derived from
    cov(x, y)."""
    covariance = cov(x, y)
    variances = diag(covariance)
    # Normalize each covariance by the product of standard deviations.
    norm = sqrt(multiply.outer(variances, variances))
    return covariance / norm
+
def blackman(M):
    """Return the M-point Blackman window."""
    # Sample positions 0 .. M-1.
    k = arange(0, M)
    return 0.42 - 0.5*cos(2.0*pi*k/(M-1)) + 0.08*cos(4.0*pi*k/(M-1))
+
def bartlett(M):
    """Return the M-point Bartlett (triangular) window."""
    k = arange(0, M)
    # Ramp up to the midpoint, then back down.
    half = (M-1)/2.0
    return where(less_equal(k, half), 2.0*k/(M-1), 2.0 - 2.0*k/(M-1))
+
def hanning(M):
    """Return the M-point Hanning window."""
    # Raised cosine over sample positions 0 .. M-1.
    k = arange(0, M)
    return 0.5 - 0.5*cos(2.0*pi*k/(M-1))
+
def hamming(M):
    """Return the M-point Hamming window."""
    k = arange(0, M)
    phase = 2.0*pi*k/(M-1)
    return 0.54 - 0.46*cos(phase)
+
def kaiser(M, beta):
    """Return an M-point Kaiser window with shape parameter beta.

    Depends on scipy.special (in full scipy) for the modified bessel
    function i0.
    """
    from scipy.special import i0
    k = arange(0, M)
    alpha = (M-1)/2.0
    arg = beta * sqrt(1 - ((k-alpha)/alpha)**2.0)
    return i0(arg) / i0(beta)
+
def sinc(x):
    """Return sin(pi*x)/(pi*x) at all points of array x.

    The singularity at 0 is sidestepped by substituting the tiny
    value 1.0e-20 there.
    """
    safe = where(x == 0, 1.0e-20, x)
    y = pi * safe
    return sin(y)/y
+
def msort(a):
    """Return a copy of a sorted along its first axis."""
    out = array(a, copy=True)
    out.sort(0)
    return out
+
def median(m):
    """median(m) returns a median of m along the first dimension of m.
    """
    # Sort once -- the original called msort a second time in the
    # even-length branch.  The local avoids shadowing builtin sorted.
    sorted_ = msort(m)
    n = sorted_.shape[0]
    if n % 2 == 1:
        return sorted_[n // 2]
    else:
        # Floor division keeps the index an integer even under true
        # division semantics.
        index = n // 2
        return (sorted_[index-1] + sorted_[index]) / 2.0
+
def trapz(y, x=None, dx=1.0, axis=-1):
    """Integrate y(x) using samples along the given axis and the composite
    trapezoidal rule.  If x is None, spacing given by dx is assumed.
    """
    y = asarray(y)
    if x is None:
        d = dx
    else:
        # Spacing between successive sample points along the axis.
        d = diff(x, axis=axis)
    nd = len(y.shape)
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Index with tuples, consistent with diff() -- a bare list of
    # slices is not a valid multi-dimensional index.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    return add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
diff --git a/numpy/base/getlimits.py b/numpy/base/getlimits.py
new file mode 100644
index 000000000..41030af2d
--- /dev/null
+++ b/numpy/base/getlimits.py
@@ -0,0 +1,118 @@
+""" Machine limits for Float32 and Float64 and (long double) if available...
+"""
+
+__all__ = ['finfo']
+
+from machar import MachAr
+import numeric
+from numeric import array
+
def _frz(a):
    """Promote a rank-0 array to rank-1, in place, and return it."""
    if a.ndim == 0:
        a.shape = (1,)
    return a
+
# Map each complex scalar type to the real type of matching precision;
# finfo uses this to reduce complex dtypes to the float they are built
# from before probing machine limits.
_convert_to_float = {
    numeric.csingle: numeric.single,
    numeric.complex_: numeric.float_,
    numeric.clongfloat: numeric.longfloat
    }
+
class finfo(object):
    """Machine limits (eps, max, tiny, ...) for a floating point type.

    Instances are computed once per type via MachAr and cached, so
    repeated construction for the same type is cheap.
    """

    # Cache of built instances, keyed by every alias of the requested
    # type seen during normalization.
    _finfo_cache = {}

    def __new__(cls, dtype):
        # Fast path: this exact key was requested before.
        obj = cls._finfo_cache.get(dtype,None)
        if obj is not None:
            return obj
        dtypes = [dtype]
        # Normalize the argument (python type, typecode, ...) to a
        # scalar type object and re-check the cache after each step.
        newdtype = numeric.obj2dtype(dtype)
        if newdtype is not dtype:
            dtypes.append(newdtype)
            dtype = newdtype
        if not issubclass(dtype, numeric.inexact):
            raise ValueError, "data type %r not inexact" % (dtype)
        obj = cls._finfo_cache.get(dtype,None)
        if obj is not None:
            return obj
        # Reduce complex types to the real type of the same precision.
        if not issubclass(dtype, numeric.floating):
            newdtype = _convert_to_float[dtype]
            if newdtype is not dtype:
                dtypes.append(newdtype)
                dtype = newdtype
            obj = cls._finfo_cache.get(dtype,None)
            if obj is not None:
                return obj
        obj = object.__new__(cls)._init(dtype)
        # Store the instance under every alias encountered above.
        for dt in dtypes:
            cls._finfo_cache[dt] = obj
        return obj

    def _init(self, dtype):
        # Build a MachAr probe with conversion/formatting helpers for
        # the precision being measured, then copy its attributes here.
        self.dtype = dtype
        if dtype is numeric.float_:
            machar = MachAr(lambda v:array([v],'d'),
                            lambda v:_frz(v.astype('i'))[0],
                            lambda v:array(_frz(v)[0],'d'),
                            lambda v:'%24.16e' % array(_frz(v)[0],'d'),
                            'scipy float precision floating point '\
                            'number')
        elif dtype is numeric.single:
            machar = MachAr(lambda v:array([v],'f'),
                            lambda v:_frz(v.astype('i'))[0],
                            lambda v:array(_frz(v)[0],'f'),  #
                            lambda v:'%15.7e' % array(_frz(v)[0],'f'),
                            "scipy single precision floating "\
                            "point number")
        elif dtype is numeric.longfloat:
            machar = MachAr(lambda v:array([v],'g'),
                            lambda v:_frz(v.astype('i'))[0],
                            lambda v:array(_frz(v)[0],'g'),  #
                            lambda v:str(array(_frz(v)[0],'g')),
                            "scipy longfloat precision floating "\
                            "point number")
        else:
            raise ValueError,`dtype`

        # Expose the interesting MachAr results as plain attributes.
        for word in ['tiny', 'precision', 'resolution','iexp',
                     'maxexp','minexp','epsneg','negep',
                     'machep']:
            setattr(self,word,getattr(machar, word))
        self.max = machar.huge
        self.min = -self.max
        self.eps = machar.epsilon
        self.nexp = machar.iexp
        self.nmant = machar.it
        self.machar = machar
        # Pre-formatted strings used by __str__.
        self._str_tiny = machar._str_xmin
        self._str_max = machar._str_xmax
        self._str_epsneg = machar._str_epsneg
        self._str_eps = machar._str_eps
        self._str_resolution = machar._str_resolution
        return self

    def __str__(self):
        # Tabular summary filled from the instance attributes above.
        return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
precision=%(precision)3s resolution=%(_str_resolution)s
machep=%(machep)6s eps= %(_str_eps)s
negep =%(negep)6s epsneg= %(_str_epsneg)s
minexp=%(minexp)6s tiny= %(_str_tiny)s
maxexp=%(maxexp)6s max= %(_str_max)s
nexp =%(nexp)6s min= -max
---------------------------------------------------------------------
''' % self.__dict__
+
if __name__ == '__main__':
    # Smoke test (Python 2 print syntax): show eps and tiny for each
    # supported floating point precision.
    f = finfo(numeric.single)
    print 'single epsilon:',f.eps
    print 'single tiny:',f.tiny
    f = finfo(numeric.float)
    print 'float epsilon:',f.eps
    print 'float tiny:',f.tiny
    f = finfo(numeric.longfloat)
    print 'longfloat epsilon:',f.eps
    print 'longfloat tiny:',f.tiny
+
diff --git a/numpy/base/include/scipy/arrayobject.h b/numpy/base/include/scipy/arrayobject.h
new file mode 100644
index 000000000..bc9f685fc
--- /dev/null
+++ b/numpy/base/include/scipy/arrayobject.h
@@ -0,0 +1,1484 @@
+
+/* This expects the following variables to be defined (besides
+ the usual ones from pyconfig.h
+
+ SIZEOF_LONG_DOUBLE -- sizeof(long double) or sizeof(double) if no
+ long double is present on platform.
+ CHAR_BIT -- number of bits in a char (usually 8)
+ (should be in limits.h)
+*/
+
+#ifndef Py_ARRAYOBJECT_H
+#define Py_ARRAYOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "config.h"
+
+#ifdef PY_ARRAY_TYPES_PREFIX
+# define CAT2(x,y) x ## y
+# define CAT(x,y) CAT2(x,y)
+# define NS(name) CAT(PY_ARRAY_TYPES_PREFIX, name)
+# define longlong NS(longlong)
+# define ulonglong NS(ulonglong)
+# define Bool NS(Bool)
+# define longdouble NS(longdouble)
+# define byte NS(byte)
+# define ubyte NS(ubyte)
+# define ushort NS(ushort)
+# define uint NS(uint)
+# define ulong NS(ulong)
+# define cfloat NS(cfloat)
+# define cdouble NS(cdouble)
+# define clongdouble NS(clongdouble)
+# define Int8 NS(Int8)
+# define UInt8 NS(UInt8)
+# define Int16 NS(Int16)
+# define UInt16 NS(UInt16)
+# define Int32 NS(Int32)
+# define UInt32 NS(UInt32)
+# define Int64 NS(Int64)
+# define UInt64 NS(UInt64)
+# define Int128 NS(Int128)
+# define UInt128 NS(UInt128)
+# define Int256 NS(Int256)
+# define UInt256 NS(UInt256)
+# define Float16 NS(Float16)
+# define Complex32 NS(Complex32)
+# define Float32 NS(Float32)
+# define Complex64 NS(Complex64)
+# define Float64 NS(Float64)
+# define Complex128 NS(Complex128)
+# define Float80 NS(Float80)
+# define Complex160 NS(Complex160)
+# define Float96 NS(Float96)
+# define Complex192 NS(Complex192)
+# define Float128 NS(Float128)
+# define Complex256 NS(Complex256)
+# define intp NS(intp)
+# define uintp NS(uintp)
+#endif
+
+/* There are several places in the code where an array of dimensions is */
+/* allocated statically. This is the size of that static allocation. */
+
+#define MAX_DIMS 40
+
+/* Used for Converter Functions "O&" code in ParseTuple */
+#define PY_FAIL 0
+#define PY_SUCCEED 1
+
+ /* Helpful to distinguish what is installed */
+#define NDARRAY_VERSION 0x0802
+
+ /* Some platforms don't define bool, long long, or long double.
+ Handle that here.
+ */
+
+#ifdef PY_LONG_LONG
+typedef PY_LONG_LONG longlong;
+typedef unsigned PY_LONG_LONG ulonglong;
+# ifdef _MSC_VER
+# define LONGLONG_FMT "I64d"
+# define ULONGLONG_FMT "I64u"
+# define LONGLONG_SUFFIX(x) (x##i64)
+# define ULONGLONG_SUFFIX(x) (x##Ui64)
+# else
+ /* #define LONGLONG_FMT "lld" Another possible variant
+ #define ULONGLONG_FMT "llu"
+
+ #define LONGLONG_FMT "qd" -- BSD perhaps?
+ #define ULONGLONG_FMT "qu"
+ */
+# define LONGLONG_FMT "Ld"
+# define ULONGLONG_FMT "Lu"
+# define LONGLONG_SUFFIX(x) (x##LL)
+# define ULONGLONG_SUFFIX(x) (x##ULL)
+# endif
+#else
+typedef long longlong;
+typedef unsigned long ulonglong;
+# define LONGLONG_SUFFIX(x) (x##L)
+# define ULONGLONG_SUFFIX(x) (x##UL)
+#endif
+
+typedef unsigned char Bool;
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#if SIZEOF_LONG_DOUBLE==SIZEOF_DOUBLE
+ typedef double longdouble;
+ #define LONGDOUBLE_FMT "g"
+#else
+ typedef long double longdouble;
+ #define LONGDOUBLE_FMT "Lg"
+#endif
+
+#ifndef Py_USING_UNICODE
+#define Py_UNICODE char
+#endif
+
+
+typedef signed char byte;
+typedef unsigned char ubyte;
+#ifndef _BSD_SOURCE
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+#endif
+
+typedef struct { float real, imag; } cfloat;
+typedef struct { double real, imag; } cdouble;
+typedef struct {longdouble real, imag;} clongdouble;
+
/* Enumerated type numbers for the built-in array data types.  The
   values run consecutively from BOOL=0 through CLONGDOUBLE=16;
   OBJECT is pinned explicitly at 17, and USERDEF starts at 256 to
   leave the range below it free for character codes. */
enum PyArray_TYPES { PyArray_BOOL=0,
                     PyArray_BYTE, PyArray_UBYTE,
                     PyArray_SHORT, PyArray_USHORT,
                     PyArray_INT, PyArray_UINT,
                     PyArray_LONG, PyArray_ULONG,
                     PyArray_LONGLONG, PyArray_ULONGLONG,
                     PyArray_FLOAT, PyArray_DOUBLE, PyArray_LONGDOUBLE,
                     PyArray_CFLOAT, PyArray_CDOUBLE, PyArray_CLONGDOUBLE,
                     PyArray_OBJECT=17,
                     PyArray_STRING, PyArray_UNICODE,
                     PyArray_VOID,
                     PyArray_NTYPES,
                     PyArray_NOTYPE,
                     PyArray_USERDEF=256 /* leave room for characters */
};
+
+ /* basetype array priority */
+#define PyArray_PRIORITY 0.0
+#define PyArray_BIG_PRIORITY 0.1
+ /* default subtype priority */
+#define PyArray_SUBTYPE_PRIORITY 1.0
+
+ /* How many floating point types are there */
+#define PyArray_NUM_FLOATTYPE 3
+
+
+ /* We need to match intp to a signed integer of the same size as
+ a pointer variable. uintp to the equivalent unsigned integer
+ */
+
+
+ /* These characters correspond to the array type and the
+ struct module */
+
+ /* except 'p' -- signed integer for pointer type */
+
+enum PyArray_TYPECHAR { PyArray_BOOLLTR = '?',
+ PyArray_BYTELTR = 'b',
+ PyArray_UBYTELTR = 'B',
+ PyArray_SHORTLTR = 'h',
+ PyArray_USHORTLTR = 'H',
+ PyArray_INTLTR = 'i',
+ PyArray_UINTLTR = 'I',
+ PyArray_LONGLTR = 'l',
+ PyArray_ULONGLTR = 'L',
+ PyArray_LONGLONGLTR = 'q',
+ PyArray_ULONGLONGLTR = 'Q',
+ PyArray_FLOATLTR = 'f',
+ PyArray_DOUBLELTR = 'd',
+ PyArray_LONGDOUBLELTR = 'g',
+ PyArray_CFLOATLTR = 'F',
+ PyArray_CDOUBLELTR = 'D',
+ PyArray_CLONGDOUBLELTR = 'G',
+ PyArray_OBJECTLTR = 'O',
+ PyArray_STRINGLTR = 'S',
+ PyArray_STRINGLTR2 = 'a',
+ PyArray_UNICODELTR = 'U',
+ PyArray_VOIDLTR = 'V',
+
+ /* No Descriptor, just a define -- this let's
+ Python users specify an array of integers
+ large enough to hold a pointer on the platform*/
+ PyArray_INTPLTR = 'p',
+ PyArray_UINTPLTR = 'P',
+
+ PyArray_GENBOOLLTR ='b',
+ PyArray_SIGNEDLTR = 'i',
+ PyArray_UNSIGNEDLTR = 'u',
+ PyArray_FLOATINGLTR = 'f',
+ PyArray_COMPLEXLTR = 'c'
+};
+
/* Sorting algorithm selectors accepted by the sort routines. */
typedef enum {
        PyArray_QUICKSORT=0,
        PyArray_HEAPSORT=1,
        PyArray_MERGESORT=2,
        PyArray_TIMSORT=3  /* the sort Python uses -- specialized */
} PyArray_SORTKIND;
/* Number of sort kinds; parenthesized so the macro expands safely
   inside larger expressions. */
#define PyArray_NSORTS (PyArray_TIMSORT + 1)
+
+ /* Define bit-width array types and typedefs */
+
+#define MAX_INT8 127
+#define MIN_INT8 -128
+#define MAX_UINT8 255
+#define MAX_INT16 32767
+#define MIN_INT16 -32768
+#define MAX_UINT16 65535
+#define MAX_INT32 2147483647
+#define MIN_INT32 (-MAX_INT32 - 1)
+#define MAX_UINT32 4294967295U
+#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807)
+#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1))
+#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615)
+#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1))
+#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1))
+#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+
+ /* Need to find the number of bits for each type and
+ make definitions accordingly.
+
+ C states that sizeof(char) == 1 by definition
+
+ So, just using the sizeof keyword won't help.
+
+ It also looks like Python itself uses sizeof(char) quite a
+ bit, which by definition should be 1 all the time.
+
+ Idea: Make Use of CHAR_BIT which should tell us how many
+ BITS per CHARACTER
+ */
+
+ /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define MAX_BYTE SCHAR_MAX
+#define MIN_BYTE SCHAR_MIN
+#define MAX_UBYTE UCHAR_MAX
+#define MAX_SHORT SHRT_MAX
+#define MIN_SHORT SHRT_MIN
+#define MAX_USHORT USHRT_MAX
+#define MAX_INT INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define MIN_INT INT_MIN
+#define MAX_UINT UINT_MAX
+#define MAX_LONG LONG_MAX
+#define MIN_LONG LONG_MIN
+#define MAX_ULONG ULONG_MAX
+
+#define SIZEOF_LONGDOUBLE SIZEOF_LONG_DOUBLE
+#define SIZEOF_LONGLONG SIZEOF_LONG_LONG
+#define BITSOF_BOOL sizeof(Bool)*CHAR_BIT
+#define BITSOF_CHAR CHAR_BIT
+#define BITSOF_SHORT (SIZEOF_SHORT*CHAR_BIT)
+#define BITSOF_INT (SIZEOF_INT*CHAR_BIT)
+#define BITSOF_LONG (SIZEOF_LONG*CHAR_BIT)
+#define BITSOF_LONGLONG (SIZEOF_LONGLONG*CHAR_BIT)
+#define BITSOF_FLOAT (SIZEOF_FLOAT*CHAR_BIT)
+#define BITSOF_DOUBLE (SIZEOF_DOUBLE*CHAR_BIT)
+#define BITSOF_LONGDOUBLE (SIZEOF_LONGDOUBLE*CHAR_BIT)
+
+
+#if BITSOF_LONG == 8
+#define PyArray_INT8 PyArray_LONG
+#define PyArray_UINT8 PyArray_ULONG
+ typedef long Int8;
+ typedef unsigned long UInt8;
+#define STRBITSOF_LONG "8"
+#elif BITSOF_LONG == 16
+#define PyArray_INT16 PyArray_LONG
+#define PyArray_UINT16 PyArray_ULONG
+ typedef long Int16;
+ typedef unsigned long UInt16;
+#define STRBITSOF_LONG "16"
+#elif BITSOF_LONG == 32
+#define PyArray_INT32 PyArray_LONG
+#define PyArray_UINT32 PyArray_ULONG
+ typedef long Int32;
+ typedef unsigned long UInt32;
+#define STRBITSOF_LONG "32"
+#elif BITSOF_LONG == 64
+#define PyArray_INT64 PyArray_LONG
+#define PyArray_UINT64 PyArray_ULONG
+ typedef long Int64;
+ typedef unsigned long UInt64;
+#define STRBITSOF_LONG "64"
+#elif BITSOF_LONG == 128
+#define PyArray_INT128 PyArray_LONG
+#define PyArray_UINT128 PyArray_ULONG
+ typedef long Int128;
+ typedef unsigned long UInt128;
+#define STRBITSOF_LONG "128"
+#endif
+
+#if BITSOF_LONGLONG == 8
+# ifndef PyArray_INT8
+# define PyArray_INT8 PyArray_LONGLONG
+# define PyArray_UINT8 PyArray_ULONGLONG
+ typedef longlong Int8;
+ typedef ulonglong UInt8;
+# endif
+# define MAX_LONGLONG MAX_INT8
+# define MIN_LONGLONG MIN_INT8
+# define MAX_ULONGLONG MAX_UINT8
+#define STRBITSOF_LONGLONG "8"
+#elif BITSOF_LONGLONG == 16
+# ifndef PyArray_INT16
+# define PyArray_INT16 PyArray_LONGLONG
+# define PyArray_UINT16 PyArray_ULONGLONG
+ typedef longlong Int16;
+ typedef ulonglong UInt16;
+# endif
+# define MAX_LONGLONG MAX_INT16
+# define MIN_LONGLONG MIN_INT16
+# define MAX_ULONGLONG MAX_UINT16
+#define STRBITSOF_LONGLONG "16"
+#elif BITSOF_LONGLONG == 32
+# ifndef PyArray_INT32
+# define PyArray_INT32 PyArray_LONGLONG
+# define PyArray_UINT32 PyArray_ULONGLONG
+ typedef longlong Int32;
+ typedef ulonglong UInt32;
+# endif
+# define MAX_LONGLONG MAX_INT32
+# define MIN_LONGLONG MIN_INT32
+# define MAX_ULONGLONG MAX_UINT32
+#define STRBITSOF_LONGLONG "32"
+#elif BITSOF_LONGLONG == 64
+# ifndef PyArray_INT64
+# define PyArray_INT64 PyArray_LONGLONG
+# define PyArray_UINT64 PyArray_ULONGLONG
+ typedef longlong Int64;
+ typedef ulonglong UInt64;
+# endif
+# define MAX_LONGLONG MAX_INT64
+# define MIN_LONGLONG MIN_INT64
+# define MAX_ULONGLONG MAX_UINT64
+#define STRBITSOF_LONGLONG "64"
+#elif BITSOF_LONGLONG == 128
+# ifndef PyArray_INT128
+# define PyArray_INT128 PyArray_LONGLONG
+# define PyArray_UINT128 PyArray_ULONGLONG
+ typedef longlong Int128;
+ typedef ulonglong UInt128;
+# endif
+# define MAX_LONGLONG MAX_INT128
+# define MIN_LONGLONG MIN_INT128
+# define MAX_ULONGLONG MAX_UINT128
+#define STRBITSOF_LONGLONG "128"
+#elif BITSOF_LONGLONG == 256
+# define PyArray_INT256 PyArray_LONGLONG
+# define PyArray_UINT256 PyArray_ULONGLONG
+ typedef longlong Int256;
+ typedef ulonglong UInt256;
+# define MAX_LONGLONG MAX_INT256
+# define MIN_LONGLONG MIN_INT256
+# define MAX_ULONGLONG MAX_UINT256
+#define STRBITSOF_LONGLONG "256"
+#endif
+
+#if BITSOF_INT == 8
+#ifndef PyArray_INT8
+#define PyArray_INT8 PyArray_INT
+#define PyArray_UINT8 PyArray_UINT
+ typedef int Int8;
+ typedef unsigned int UInt8;
+#endif
+#define STRBITSOF_INT "8"
+#elif BITSOF_INT == 16
+#ifndef PyArray_INT16
+#define PyArray_INT16 PyArray_INT
+#define PyArray_UINT16 PyArray_UINT
+ typedef int Int16;
+ typedef unsigned int UInt16;
+#endif
+#define STRBITSOF_INT "16"
+#elif BITSOF_INT == 32
+#ifndef PyArray_INT32
+#define PyArray_INT32 PyArray_INT
+#define PyArray_UINT32 PyArray_UINT
+ typedef int Int32;
+ typedef unsigned int UInt32;
+#endif
+#define STRBITSOF_INT "32"
+#elif BITSOF_INT == 64
+#ifndef PyArray_INT64
+#define PyArray_INT64 PyArray_INT
+#define PyArray_UINT64 PyArray_UINT
+ typedef int Int64;
+ typedef unsigned int UInt64;
+#endif
+#define STRBITSOF_INT "64"
+#elif BITSOF_INT == 128
+#ifndef PyArray_INT128
+#define PyArray_INT128 PyArray_INT
+#define PyArray_UINT128 PyArray_UINT
+ typedef int Int128;
+ typedef unsigned int UInt128;
+#endif
+#define STRBITSOF_INT "128"
+#endif
+
+#if BITSOF_SHORT == 8
+#ifndef PyArray_INT8
+#define PyArray_INT8 PyArray_SHORT
+#define PyArray_UINT8 PyArray_USHORT
+ typedef short Int8;
+ typedef unsigned short UInt8;
+#endif
+#define STRBITSOF_SHORT "8"
+#elif BITSOF_SHORT == 16
+#ifndef PyArray_INT16
+#define PyArray_INT16 PyArray_SHORT
+#define PyArray_UINT16 PyArray_USHORT
+ typedef short Int16;
+ typedef unsigned short UInt16;
+#endif
+#define STRBITSOF_SHORT "16"
+#elif BITSOF_SHORT == 32
+#ifndef PyArray_INT32
+#define PyArray_INT32 PyArray_SHORT
+#define PyArray_UINT32 PyArray_USHORT
+ typedef short Int32;
+ typedef unsigned short UInt32;
+#endif
+#define STRBITSOF_SHORT "32"
+#elif BITSOF_SHORT == 64
+#ifndef PyArray_INT64
+#define PyArray_INT64 PyArray_SHORT
+#define PyArray_UINT64 PyArray_USHORT
+ typedef short Int64;
+ typedef unsigned short UInt64;
+#endif
+#define STRBITSOF_SHORT "64"
+#elif BITSOF_SHORT == 128
+#ifndef PyArray_INT128
+#define PyArray_INT128 PyArray_SHORT
+#define PyArray_UINT128 PyArray_USHORT
+ typedef short Int128;
+ typedef unsigned short UInt128;
+#endif
+#define STRBITSOF_SHORT "128"
+#endif
+
+
+#if BITSOF_CHAR == 8
+#ifndef PyArray_INT8
+#define PyArray_INT8 PyArray_BYTE
+#define PyArray_UINT8 PyArray_UBYTE
+ typedef signed char Int8;
+ typedef unsigned char UInt8;
+#endif
+#define STRBITSOF_CHAR "8"
+#elif BITSOF_CHAR == 16
+#ifndef PyArray_INT16
+#define PyArray_INT16 PyArray_BYTE
+#define PyArray_UINT16 PyArray_UBYTE
+ typedef signed char Int16;
+ typedef unsigned char UInt16;
+#endif
+#define STRBITSOF_CHAR "16"
+#elif BITSOF_CHAR == 32
+#ifndef PyArray_INT32
+#define PyArray_INT32 PyArray_BYTE
+#define PyArray_UINT32 PyArray_UBYTE
+ typedef signed char Int32;
+ typedef unsigned char UInt32;
+#endif
+#define STRBITSOF_CHAR "32"
+#elif BITSOF_CHAR == 64
+#ifndef PyArray_INT64
+#define PyArray_INT64 PyArray_BYTE
+#define PyArray_UINT64 PyArray_UBYTE
+ typedef signed char Int64;
+ typedef unsigned char UInt64;
+#endif
+#define STRBITSOF_CHAR "64"
+#elif BITSOF_CHAR == 128
+#ifndef PyArray_INT128
+#define PyArray_INT128 PyArray_BYTE
+#define PyArray_UINT128 PyArray_UBYTE
+ typedef signed char Int128;
+ typedef unsigned char UInt128;
+#endif
+#define STRBITSOF_CHAR "128"
+#endif
+
+
+
+#if BITSOF_DOUBLE == 16
+#define STRBITSOF_DOUBLE "16"
+#define STRBITSOF_CDOUBLE "32"
+#ifndef PyArray_FLOAT16
+#define PyArray_FLOAT16 PyArray_DOUBLE
+#define PyArray_COMPLEX32 PyArray_CDOUBLE
+ typedef double Float16;
+ typedef cdouble Complex32;
+#endif
+#elif BITSOF_DOUBLE == 32
+#define STRBITSOF_DOUBLE "32"
+#define STRBITSOF_CDOUBLE "64"
+#ifndef PyArray_FLOAT32
+#define PyArray_FLOAT32 PyArray_DOUBLE
+#define PyArray_COMPLEX64 PyArray_CDOUBLE
+ typedef double Float32;
+ typedef cdouble Complex64;
+#endif
+#elif BITSOF_DOUBLE == 64
+#define STRBITSOF_DOUBLE "64"
+#define STRBITSOF_CDOUBLE "128"
+#ifndef PyArray_FLOAT64
+#define PyArray_FLOAT64 PyArray_DOUBLE
+#define PyArray_COMPLEX128 PyArray_CDOUBLE
+ typedef double Float64;
+ typedef cdouble Complex128;
+#endif
+#elif BITSOF_DOUBLE == 80
+#define STRBITSOF_DOUBLE "80"
+#define STRBITSOF_CDOUBLE "160"
+#ifndef PyArray_FLOAT80
+#define PyArray_FLOAT80 PyArray_DOUBLE
+#define PyArray_COMPLEX160 PyArray_CDOUBLE
+ typedef double Float80;
+ typedef cdouble Complex160;
+#endif
+#elif BITSOF_DOUBLE == 96
+#define STRBITSOF_DOUBLE "96"
+#define STRBITSOF_CDOUBLE "192"
+#ifndef PyArray_FLOAT96
+#define PyArray_FLOAT96 PyArray_DOUBLE
+#define PyArray_COMPLEX192 PyArray_CDOUBLE
+ typedef double Float96;
+ typedef cdouble Complex192;
+#endif
+#elif BITSOF_DOUBLE == 128
+#define STRBITSOF_DOUBLE "128"
+#define STRBITSOF_CDOUBLE "256"
+#ifndef PyArray_FLOAT128
+#define PyArray_FLOAT128 PyArray_DOUBLE
+#define PyArray_COMPLEX256 PyArray_CDOUBLE
+ typedef double Float128;
+ typedef cdouble Complex256;
+#endif
+#endif
+
+
+
+#if BITSOF_FLOAT == 16
+#define STRBITSOF_FLOAT "16"
+#define STRBITSOF_CFLOAT "32"
+#ifndef PyArray_FLOAT16
+#define PyArray_FLOAT16 PyArray_FLOAT
+#define PyArray_COMPLEX32 PyArray_CFLOAT
+ typedef float Float16;
+ typedef cfloat Complex32;
+#endif
+#elif BITSOF_FLOAT == 32
+#define STRBITSOF_FLOAT "32"
+#define STRBITSOF_CFLOAT "64"
+#ifndef PyArray_FLOAT32
+#define PyArray_FLOAT32 PyArray_FLOAT
+#define PyArray_COMPLEX64 PyArray_CFLOAT
+ typedef float Float32;
+ typedef cfloat Complex64;
+#endif
+#elif BITSOF_FLOAT == 64
+#define STRBITSOF_FLOAT "64"
+#define STRBITSOF_CFLOAT "128"
+#ifndef PyArray_FLOAT64
+#define PyArray_FLOAT64 PyArray_FLOAT
+#define PyArray_COMPLEX128 PyArray_CFLOAT
+ typedef float Float64;
+ typedef cfloat Complex128;
+#endif
+#elif BITSOF_FLOAT == 80
+#define STRBITSOF_FLOAT "80"
+#define STRBITSOF_CFLOAT "160"
+#ifndef PyArray_FLOAT80
+#define PyArray_FLOAT80 PyArray_FLOAT
+#define PyArray_COMPLEX160 PyArray_CFLOAT
+ typedef float Float80;
+ typedef cfloat Complex160;
+#endif
+#elif BITSOF_FLOAT == 96
+#define STRBITSOF_FLOAT "96"
+#define STRBITSOF_CFLOAT "192"
+#ifndef PyArray_FLOAT96
+#define PyArray_FLOAT96 PyArray_FLOAT
+#define PyArray_COMPLEX192 PyArray_CFLOAT
+ typedef float Float96;
+ typedef cfloat Complex192;
+#endif
+#elif BITSOF_FLOAT == 128
+#define STRBITSOF_FLOAT "128"
+#define STRBITSOF_CFLOAT "256"
+#ifndef PyArray_FLOAT128
+#define PyArray_FLOAT128 PyArray_FLOAT
+#define PyArray_COMPLEX256 PyArray_CFLOAT
+ typedef float Float128;
+ typedef cfloat Complex256;
+#endif
+#endif
+
+
+#if BITSOF_LONGDOUBLE == 16
+#define STRBITSOF_LONGDOUBLE "16"
+#define STRBITSOF_CLONGDOUBLE "32"
+#ifndef PyArray_FLOAT16
+#define PyArray_FLOAT16 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX32 PyArray_CLONGDOUBLE
+ typedef longdouble Float16;
+ typedef clongdouble Complex32;
+#endif
+#elif BITSOF_LONGDOUBLE == 32
+#define STRBITSOF_LONGDOUBLE "32"
+#define STRBITSOF_CLONGDOUBLE "64"
+#ifndef PyArray_FLOAT32
+#define PyArray_FLOAT32 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX64 PyArray_CLONGDOUBLE
+ typedef longdouble Float32;
+ typedef clongdouble Complex64;
+#endif
+#elif BITSOF_LONGDOUBLE == 64
+#define STRBITSOF_LONGDOUBLE "64"
+#define STRBITSOF_CLONGDOUBLE "128"
+#ifndef PyArray_FLOAT64
+#define PyArray_FLOAT64 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX128 PyArray_CLONGDOUBLE
+ typedef longdouble Float64;
+ typedef clongdouble Complex128;
+#endif
+#elif BITSOF_LONGDOUBLE == 80
+#define STRBITSOF_LONGDOUBLE "80"
+#define STRBITSOF_CLONGDOUBLE "160"
+#ifndef PyArray_FLOAT80
+#define PyArray_FLOAT80 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX160 PyArray_CLONGDOUBLE
+ typedef longdouble Float80;
+ typedef clongdouble Complex160;
+#endif
+#elif BITSOF_LONGDOUBLE == 96
+#define STRBITSOF_LONGDOUBLE "96"
+#define STRBITSOF_CLONGDOUBLE "192"
+#ifndef PyArray_FLOAT96
+#define PyArray_FLOAT96 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX192 PyArray_CLONGDOUBLE
+ typedef longdouble Float96;
+ typedef clongdouble Complex192;
+#endif
+#elif BITSOF_LONGDOUBLE == 128
+#define STRBITSOF_LONGDOUBLE "128"
+#define STRBITSOF_CLONGDOUBLE "256"
+#ifndef PyArray_FLOAT128
+#define PyArray_FLOAT128 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX256 PyArray_CLONGDOUBLE
+ typedef longdouble Float128;
+ typedef clongdouble Complex256;
+#endif
+#elif BITSOF_LONGDOUBLE == 256
+#define STRBITSOF_LONGDOUBLE "256"
+#define STRBITSOF_CLONGDOUBLE "512"
+#define PyArray_FLOAT256 PyArray_LONGDOUBLE
+#define PyArray_COMPLEX512 PyArray_CLONGDOUBLE
+ typedef longdouble Float256;
+ typedef clongdouble Complex512;
+#endif
+
+
+ /* End of typedefs for numarray style bit-width names */
+
+/* This is to typedef Intp to the appropriate pointer size for this platform.
+ * Py_intptr_t, Py_uintptr_t are defined in pyport.h. */
+typedef Py_intptr_t intp;
+typedef Py_uintptr_t uintp;
+
+#define INTP_FMT "d"
+
+#if SIZEOF_PY_INTPTR_T == SIZEOF_INT
+ #define PyArray_INTP PyArray_INT
+ #define PyArray_UINTP PyArray_UINT
+ #define PyIntpArrType_Type PyIntArrType_Type
+ #define PyUIntpArrType_Type PyUIntArrType_Type
+ #define MAX_INTP MAX_INT
+ #define MIN_INTP MIN_INT
+ #define MAX_UINTP MAX_UINT
+#elif SIZEOF_PY_INTPTR_T == SIZEOF_LONG
+ #define PyArray_INTP PyArray_LONG
+ #define PyArray_UINTP PyArray_ULONG
+ #define PyIntpArrType_Type PyLongArrType_Type
+ #define PyUIntpArrType_Type PyULongArrType_Type
+ #define MAX_INTP MAX_LONG
+ #define MIN_INTP MIN_LONG
+ #define MAX_UINTP MAX_ULONG
+ #undef INTP_FMT
+ #define INTP_FMT "ld"
+#elif defined(PY_LONG_LONG) && (SIZEOF_PY_INTPTR_T == SIZEOF_LONG_LONG)
+ #define PyArray_INTP PyArray_LONGLONG
+ #define PyArray_UINTP PyArray_ULONGLONG
+ #define PyIntpArrType_Type PyLongLongArrType_Type
+ #define PyUIntpArrType_Type PyULongLongArrType_Type
+ #define MAX_INTP MAX_LONGLONG
+ #define MIN_INTP MIN_LONGLONG
+ #define MAX_UINTP MAX_ULONGLONG
+ #undef INTP_FMT
+ #define INTP_FMT "Ld"
+#endif
+
+#define ERR(str) fprintf(stderr, #str); fflush(stderr);
+#define ERR2(str) fprintf(stderr, str); fflush(stderr);
+
+ /* Macros to define how array, and dimension/strides data is
+ allocated.
+ */
+
+ /* Data buffer */
+#define PyDataMem_NEW(size) ((char *)malloc(size))
+ /* #define PyArrayMem_NEW(size) PyMem_NEW(char, size)*/
+#define PyDataMem_FREE(ptr) free(ptr)
+ /* #define PyArrayMem_FREE(ptr) PyMem_Free(ptr) */
+#define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size))
+
+#define PyArray_USE_PYMEM 0
+
+#if PyArray_USE_PYMEM == 1
+#define _pya_malloc PyObject_Malloc
+#define _pya_free PyObject_Free
+#define _pya_realloc PyObject_Realloc
+#else
+#define _pya_malloc malloc
+#define _pya_free free
+#define _pya_realloc realloc
+#endif
+
+/* Dimensions and strides */
+#define PyDimMem_NEW(size) ((intp *)_pya_malloc(size*sizeof(intp)))
+#define PyDimMem_FREE(ptr) _pya_free(ptr)
+#define PyDimMem_RENEW(ptr,size) ((intp *)_pya_realloc(ptr,size*sizeof(intp)))
+
+
+ /* These must deal with unaligned and swapped data if necessary */
+typedef PyObject * (PyArray_GetItemFunc) (void *, void *);
+typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);
+
+typedef void (PyArray_CopySwapNFunc)(void *, void *, intp, int, int);
+typedef void (PyArray_CopySwapFunc)(void *, void *, int, int);
+typedef Bool (PyArray_NonzeroFunc)(void *, void *);
+
+
+ /* These assume aligned and notswapped data -- a buffer will be
+ used before or contiguous data will be obtained
+ */
+typedef int (PyArray_CompareFunc)(const void *, const void *, void *);
+typedef int (PyArray_ArgFunc)(void*, intp, intp*, void *);
+typedef void (PyArray_DotFunc)(void *, intp, void *, intp, void *, intp,
+ void *);
+typedef void (PyArray_VectorUnaryFunc)(void *, void *, intp, void *, void *);
+typedef int (PyArray_ScanFunc)(FILE *, void *, void *, void *);
+
+typedef int (PyArray_FillFunc)(void *, intp, void *);
+
+typedef int (PyArray_SortFunc)(void *, intp, void *);
+typedef int (PyArray_ArgSortFunc)(void *, intp *, intp, void *);
+
+typedef struct {
+ intp *ptr;
+ int len;
+} PyArray_Dims;
+
+typedef struct {
+ /* Functions to cast to all other standard types*/
+ PyArray_VectorUnaryFunc *cast[PyArray_NTYPES];
+
+ /* Functions to get and set items with standard
+ Python types -- not array scalars */
+ PyArray_GetItemFunc *getitem;
+ PyArray_SetItemFunc *setitem;
+
+ /* Function to compare items */
+ PyArray_CompareFunc *compare;
+
+ /* Function to select largest */
+ PyArray_ArgFunc *argmax;
+
+ /* Function to compute dot product */
+ PyArray_DotFunc *dotfunc;
+
+ /* Function to scan an ASCII file and
+ place a single value plus possible separator */
+ PyArray_ScanFunc *scanfunc;
+
+ /* Copy and/or swap data. Memory areas may not overlap */
+ /* Use memmove first if they might */
+ PyArray_CopySwapNFunc *copyswapn;
+ PyArray_CopySwapFunc *copyswap;
+
+ /* Function to determine if data is zero or not */
+ PyArray_NonzeroFunc *nonzero;
+
+ /* Used for arange */
+ PyArray_FillFunc *fill;
+
+ /* Sorting functions */
+ PyArray_SortFunc *sort[PyArray_NSORTS];
+ PyArray_ArgSortFunc *argsort[PyArray_NSORTS];
+
+} PyArray_ArrFuncs;
+
+
+typedef struct {
+ PyObject_HEAD
+ PyTypeObject *typeobj; /* the type object representing an
+ instance of this type */
+ char kind; /* kind for this type */
+ char type; /* unique-character representing this type */
+ char byteorder; /* '>' (big), '<' (little), '|'
+ (not-applicable), or '=' (native). */
+ int type_num; /* number representing this type */
+ int elsize; /* element size for this type */
+ int alignment; /* alignment needed for this type */
+ struct _arr_descr \
+ *subarray; /* Non-NULL if this type
+ is an array (C-contiguous)
+ of some other type
+ */
+ PyObject *fields; /* The fields dictionary for this type */
+ /* For statically defined descr this
+ is always Py_None */
+
+ PyArray_ArrFuncs *f; /* a table of functions specific for each
+ basic data descriptor */
+} PyArray_Descr;
+
+typedef struct _arr_descr {
+ PyArray_Descr *base;
+ PyObject *shape; /* a tuple */
+} PyArray_ArrayDescr;
+
+
+typedef struct PyArrayObject {
+ PyObject_HEAD
+ char *data; /* pointer to raw data buffer */
+ int nd; /* number of dimensions, also called ndim */
+ intp *dimensions; /* size in each dimension */
+ intp *strides; /* bytes to jump to get to the
+ next element in each dimension */
+ PyObject *base; /* This object should be decref'd
+ upon deletion of array */
+ /* For views it points to the original array */
+ /* For creation from buffer object it points
+ to an object that should be decref'd on
+ deletion */
+ /* For UPDATEIFCOPY flag this is an array
+ to-be-updated upon deletion of this one */
+ PyArray_Descr *descr; /* Pointer to type structure */
+ int flags; /* Flags describing array -- see below*/
+ PyObject *weakreflist; /* For weakreferences */
+} PyArrayObject;
+
+#define fortran fortran_ /* For some compilers */
+
+/* Mirrors buffer object to ptr */
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *base;
+ void *ptr;
+ intp len;
+ int flags;
+} PyArray_Chunk;
+
+/* Array flags */
+#define CONTIGUOUS 1 /* means c-style contiguous (last index
+ varies the fastest) data elements right
+ after each other. */
+
+ /* All 0-d arrays are CONTIGUOUS and FORTRAN
+ contiguous. If a 1-d array is CONTIGUOUS
+ it is also FORTRAN contiguous
+ */
+
+#define FORTRAN 2 /* set if array is a contiguous Fortran array */
+ /* first index varies the fastest in memory
+ (strides array is reverse of C-contiguous
+ array)*/
+
+#define OWNDATA 4
+#define OWN_DATA OWNDATA
+
+ /* array never has these three set -- FromAny flags only */
+#define FORCECAST 0x010
+#define ENSURECOPY 0x020
+#define ENSUREARRAY 0x040
+
+#define ALIGNED 0x100
+#define WRITEABLE 0x400
+
+
+ /* If this flags is set, then base contains a pointer to
+ an array of the same size that should be updated with the
+ current contents of this array when this array is deallocated
+ */
+#define UPDATEIFCOPY 0x1000
+
+
+/* Size of internal buffers used for alignment */
+#define PyArray_BUFSIZE 10000
+#define PyArray_MIN_BUFSIZE 5
+#define PyArray_MAX_BUFSIZE 100000000
+
+#define BEHAVED_FLAGS (ALIGNED | WRITEABLE)
+#define CARRAY_FLAGS (CONTIGUOUS | BEHAVED_FLAGS)
+#define CARRAY_FLAGS_RO (CONTIGUOUS | ALIGNED)
+#define FARRAY_FLAGS (FORTRAN | BEHAVED_FLAGS)
+#define FARRAY_FLAGS_RO (FORTRAN | ALIGNED)
+#define DEFAULT_FLAGS CARRAY_FLAGS
+
+#define UPDATE_ALL_FLAGS (CONTIGUOUS | FORTRAN | ALIGNED)
+
+
+
+/*
+ * C API: consists of Macros and functions. The MACROS are defined here.
+ */
+
+
+#define PyArray_CHKFLAGS(m, FLAGS) \
+ ((((PyArrayObject *)(m))->flags & (FLAGS)) == (FLAGS))
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, ALIGNED)
+
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+ /* Useful if a and b have to be evaluated. */
+
+#define tMAX(a,b,typ) {typ _x_=(a); typ _y_=(b); _x_>_y_ ? _x_ : _y_}
+#define tMIN(a,b,typ) {typ _x_=(a); typ _y_=(b); _x_<_y_ ? _x_ : _y_}
+
+#if defined(ALLOW_THREADS)
+#define BEGIN_THREADS_DEF PyThreadState *_save;
+#define BEGIN_THREADS _save = PyEval_SaveThread();
+#define END_THREADS PyEval_RestoreThread(_save);
+#define ALLOW_C_API_DEF PyGILState_STATE __save__;
+#define ALLOW_C_API __save__ = PyGILState_Ensure();
+#define DISABLE_C_API PyGILState_Release(__save__);
+#else
+#define BEGIN_THREADS_DEF
+#define BEGIN_THREADS
+#define END_THREADS
+#define ALLOW_C_API_DEF
+#define ALLOW_C_API
+#define DISABLE_C_API
+#endif
+
+typedef struct {
+ PyObject_HEAD
+ int nd_m1; /* number of dimensions - 1 */
+ intp index, size;
+ intp coordinates[MAX_DIMS];/* N-dimensional loop */
+ intp dims_m1[MAX_DIMS]; /* ao->dimensions - 1 */
+ intp strides[MAX_DIMS]; /* ao->strides or fake */
+ intp backstrides[MAX_DIMS];/* how far to jump back */
+ intp factors[MAX_DIMS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ Bool contiguous;
+} PyArrayIterObject;
+
+
+/* Iterator API */
+#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type)
+
+#define PyArray_ITER_RESET(it) { \
+ it->index = 0; \
+ it->dataptr = it->ao->data; \
+ memset(it->coordinates, 0, (it->nd_m1+1)*sizeof(intp)); \
+}
+
+
+#define PyArray_ITER_NEXT(it) { \
+ it->index++; \
+ if (it->contiguous) it->dataptr += it->ao->descr->elsize; \
+ else { \
+ int _i_; \
+ for (_i_ = it->nd_m1; _i_ >= 0; _i_--) { \
+ if (it->coordinates[_i_] < \
+ it->dims_m1[_i_]) { \
+ it->coordinates[_i_]++; \
+ it->dataptr += it->strides[_i_]; \
+ break; \
+ } \
+ else { \
+ it->coordinates[_i_] = 0; \
+ it->dataptr -= it->backstrides[_i_]; \
+ } \
+ } \
+ } \
+}
+
+#define PyArray_ITER_GOTO(it, destination) { \
+ int _i_; \
+ it->index = 0; \
+ it->dataptr = it->ao->data; \
+ for (_i_ = it->nd_m1; _i_>=0; _i_--) { \
+ it->dataptr += destination[_i_] * \
+ it->strides[_i_]; \
+ it->coordinates[_i_] = destination[_i_]; \
+ it->index += destination[_i_] * \
+ ( _i_==it->nd_m1 ? 1 : \
+ it->dims_m1[_i_+1]+1) ; \
+ } \
+ }
+
+#define PyArray_ITER_GOTO1D(it, ind) { \
+ int _i_; \
+ intp _lind_ = (intp) (ind); \
+ it->index = _lind_; \
+ if (it->contiguous) \
+ it->dataptr = it->ao->data + (ind) * \
+ it->ao->descr->elsize; \
+ else { \
+ it->dataptr = it->ao->data; \
+ for (_i_ = 0; _i_<=it->nd_m1; _i_++) { \
+ it->dataptr += (_lind_ / it->factors[_i_]) \
+ * it->strides[_i_]; \
+ _lind_ %= it->factors[_i_]; \
+ } \
+ } \
+}
+
+#define PyArray_ITER_DATA(it) ((PyArrayIterObject *)it)->dataptr
+
+
+/*
+ Any object passed to PyArray_Broadcast must be binary compatible with
+ this structure.
+*/
+
+typedef struct {
+ PyObject_HEAD
+
+ int numiter; /* number of iters */
+ intp size; /* broadcasted size */
+ intp index; /* current index */
+ int nd; /* number of dims */
+ intp dimensions[MAX_DIMS]; /* dimensions */
+ PyArrayIterObject *iters[MAX_DIMS]; /* iterators */
+} PyArrayMultiIterObject;
+
+#define PyArray_MultiIter_RESET(multi) { \
+ int _mi_; \
+ PyArrayMultiIterObject *_mul_ = (multi); \
+ _mul_->index = 0; \
+ for (_mi_ = 0; _mi_ < _mul_->numiter; _mi_++) { \
+ PyArray_ITER_RESET(_mul_->iters[_mi_]); \
+ } \
+ }
+
+#define PyArray_MultiIter_NEXT(multi) { \
+ int _mi_; \
+ PyArrayMultiIterObject *_mul_ = (multi); \
+ _mul_->index += 1; \
+ for (_mi_=0; _mi_<_mul_->numiter; _mi_++) { \
+ PyArray_ITER_NEXT(_mul_->iters[_mi_]); \
+ } \
+ }
+
+#define PyArray_MultiIter_GOTO(multi, dest) { \
+ int _mi_; \
+ PyArrayMultiIterObject *_mul_ = (multi); \
+ for (_mi_=0; _mi_<_mul_->numiter; _mi_++) { \
+ PyArray_ITER_GOTO(_mul_->iters[_mi_], dest); \
+ } \
+ _mul_->index = _mul_->iters[0]->index; \
+ }
+
+#define PyArray_MultiIter_GOTO1D(multi, ind) { \
+ int _mi_; \
+ PyArrayMultiIterObject *_mul_ = (multi); \
+ for (_mi_=0; _mi_<_mul_->numiter; _mi_++) { \
+ PyArray_ITER_GOTO1D(_mul_->iters[_mi_], ind); \
+ } \
+ _mul_->index = _mul_->iters[0]->index; \
+ }
+
+#define PyArray_MultiIter_DATA(multi, i) \
+ ((PyArrayMultiIterObject *)multi)->iters[i]->dataptr
+
+#define PyArray_MultiIter_SIZE(multi) \
+ ((PyArrayMultiIterObject *)multi)->size
+
+
+/* Store the information needed for fancy-indexing over an array */
+
+typedef struct {
+ PyObject_HEAD
+ /* Multi-iterator portion --- needs to be present in this order to
+ work with PyArray_Broadcast */
+
+ int numiter; /* number of index-array
+ iterators */
+ intp size; /* size of broadcasted
+ result */
+ intp index; /* current index */
+ int nd; /* number of dims */
+ intp dimensions[MAX_DIMS]; /* dimensions */
+ PyArrayIterObject *iters[MAX_DIMS]; /* index object
+ iterators */
+ PyArrayIterObject *ait; /* flat Iterator for
+ underlying array */
+
+ /* flat iterator for subspace (when numiter < nd) */
+ PyArrayIterObject *subspace;
+
+ /* if subspace iteration, then this is the array of
+ axes in the underlying array represented by the
+ index objects */
+ int iteraxes[MAX_DIMS];
+ /* if subspace iteration, then these are the coordinates
+ to the start of the subspace.
+ */
+ intp bscoord[MAX_DIMS];
+
+
+ PyObject *indexobj; /* reference to
+ creating obj */
+ int view;
+ int consec;
+ char *dataptr;
+
+} PyArrayMapIterObject;
+
+
+#define PyArray_NDIM(obj) (((PyArrayObject *)(obj))->nd)
+#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || PyArray_CHKFLAGS(m, CONTIGUOUS) || \
+ PyArray_CHKFLAGS(m, FORTRAN))
+#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, FORTRAN) && (PyArray_NDIM(m) > 1))
+#define FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, FORTRAN) ? FORTRAN : 0))
+#define PyArray_DATA(obj) (((PyArrayObject *)(obj))->data)
+#define PyArray_DIMS(obj) (((PyArrayObject *)(obj))->dimensions)
+#define PyArray_STRIDES(obj) (((PyArrayObject *)(obj))->strides)
+#define PyArray_DIM(obj,n) (((PyArrayObject *)(obj))->dimensions[n])
+#define PyArray_STRIDE(obj,n) (((PyArrayObject *)(obj))->strides[n])
+#define PyArray_BASE(obj) (((PyArrayObject *)(obj))->base)
+#define PyArray_DESCR(obj) (((PyArrayObject *)(obj))->descr)
+#define PyArray_FLAGS(obj) (((PyArrayObject *)(obj))->flags)
+#define PyArray_ITEMSIZE(obj) (((PyArrayObject *)(obj))->descr->elsize)
+#define PyArray_TYPE(obj) (((PyArrayObject *)(obj))->descr->type_num)
+#define PyArray_GETITEM(obj,itemptr) \
+ ((PyArrayObject *)(obj))->descr->getitem((char *)itemptr, \
+ (PyArrayObject *)obj)
+#define PyArray_SETITEM(obj,itemptr,v) \
+ (obj)->descr->setitem((PyObject *)v,(char *)(itemptr), \
+ (PyArrayObject *)(obj))
+
+
+#define PyTypeNum_ISBOOL(type) (type == PyArray_BOOL)
+#define PyTypeNum_ISUNSIGNED(type) ((type == PyArray_UBYTE) || \
+ (type == PyArray_USHORT) || \
+ (type == PyArray_UINT) || \
+ (type == PyArray_ULONG) || \
+ (type == PyArray_ULONGLONG))
+
+#define PyTypeNum_ISSIGNED(type) ((type == PyArray_BYTE) || \
+ (type == PyArray_SHORT) || \
+ (type == PyArray_INT) || \
+ (type == PyArray_LONG) || \
+ (type == PyArray_LONGLONG))
+
+#define PyTypeNum_ISINTEGER(type) ((type >= PyArray_BYTE) && \
+ (type <= PyArray_ULONGLONG))
+
+#define PyTypeNum_ISFLOAT(type) ((type >= PyArray_FLOAT) && \
+ (type <= PyArray_LONGDOUBLE))
+
+#define PyTypeNum_ISNUMBER(type) (type <= PyArray_CLONGDOUBLE)
+
+#define PyTypeNum_ISSTRING(type) ((type == PyArray_UCHAR) || \
+ (type == PyArray_UNICODE))
+
+#define PyTypeNum_ISCOMPLEX(type) ((type >= PyArray_CFLOAT) && \
+ (type <= PyArray_CLONGDOUBLE))
+
+#define PyTypeNum_ISPYTHON(type) ((type == PyArray_LONG) || \
+ (type == PyArray_DOUBLE) || \
+ (type == PyArray_CDOUBLE) || \
+ (type == PyArray_BOOL) || \
+ (type == PyArray_OBJECT ))
+
+#define PyTypeNum_ISFLEXIBLE(type) ((type>=PyArray_STRING) && \
+ (type<=PyArray_VOID))
+
+#define PyTypeNum_ISUSERDEF(type) ((type >= PyArray_USERDEF) && \
+ (type < PyArray_USERDEF+\
+ PyArray_NUMUSERTYPES))
+
+#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \
+ PyTypeNum_ISUSERDEF(type))
+
+#define PyTypeNum_ISOBJECT(type) ((type) == PyArray_OBJECT)
+
+#define _PyADt(o) ((PyArray_Descr *)o)->type_num
+#define PyDescr_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj))
+#define PyDescr_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(_PyADt(obj))
+#define PyDescr_ISSIGNED(obj) PyTypeNum_ISSIGNED(_PyADt(obj))
+#define PyDescr_ISINTEGER(obj) PyTypeNum_ISINTEGER(_PyADt(obj))
+#define PyDescr_ISFLOAT(obj) PyTypeNum_ISFLOAT(_PyADt(obj))
+#define PyDescr_ISNUMBER(obj) PyTypeNum_ISNUMBER(_PyADt(obj))
+#define PyDescr_ISSTRING(obj) PyTypeNum_ISSTRING(_PyADt(obj))
+#define PyDescr_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(_PyADt(obj))
+#define PyDescr_ISPYTHON(obj) PyTypeNum_ISPYTHON(_PyADt(obj))
+#define PyDescr_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(_PyADt(obj))
+#define PyDescr_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(_PyADt(obj))
+#define PyDescr_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(_PyADt(obj))
+#define PyDescr_ISOBJECT(obj) PyTypeNum_ISOBJECT(_PyADt(obj))
+#undef _PyADt
+
+#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
+#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj))
+#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj))
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))
+#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj))
+#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+
+#define PyArray_LITTLE '<'
+#define PyArray_BIG '>'
+#define PyArray_NATIVE '='
+#define PyArray_SWAP 's'
+#define PyArray_IGNORE '|'
+
+#ifdef WORDS_BIGENDIAN
+#define PyArray_NATBYTE PyArray_BIG
+#define PyArray_OPPBYTE PyArray_LITTLE
+#else
+#define PyArray_NATBYTE PyArray_LITTLE
+#define PyArray_OPPBYTE PyArray_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != PyArray_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \
+ PyArray_ISNOTSWAPPED(m))
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, CARRAY_FLAGS)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, CARRAY_FLAGS_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, FARRAY_FLAGS)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, FARRAY_FLAGS_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, BEHAVED_FLAGS)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, ALIGNED)
+
+
+typedef struct {
+ int version; /* contains the integer 2 as a sanity check */
+ int nd; /* number of dimensions */
+ char typekind; /* kind in array --- character code of typestr */
+ int itemsize; /* size of each element */
+ int flags; /* how should be data interpreted */
+ intp *shape; /* A length-nd array of shape information */
+ intp *strides; /* A length-nd array of stride information */
+ void *data; /* A pointer to the first element of the array */
+} PyArrayInterface;
+#define NOTSWAPPED 0x200 /* part of the array interface */
+
+ /* Includes the "function" C-API -- these are all stored in a
+ list of pointers --- one for each file
+ The two lists are concatenated into one in multiarray.
+
+ They are available as import_array()
+ */
+
+
+#include "__multiarray_api.h"
+
+
+ /* C-API that requires previous API to be defined */
+
+#define PyArray_DescrCheck(op) ((op)->ob_type == &PyArrayDescr_Type)
+
+#define PyArray_Check(op) ((op)->ob_type == &PyArray_Type || \
+ PyObject_TypeCheck((op), &PyBigArray_Type))
+#define PyBigArray_CheckExact(op) ((op)->ob_type == &PyBigArray_Type)
+#define PyArray_CheckExact(op) ((op)->ob_type == &PyArray_Type)
+
+#define PyArray_IsZeroDim(op) (PyArray_Check(op) && (PyArray_NDIM(op) == 0))
+#define PyArray_IsScalar(obj, cls) \
+ (PyObject_TypeCheck((obj), &Py##cls##ArrType_Type))
+#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
+ PyArray_IsZeroDim(m))
+#define PyArray_IsPythonScalar(obj) \
+ (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
+ PyLong_Check(obj) || PyBool_Check(obj) || PyString_Check(obj) || \
+ PyUnicode_Check(obj))
+#define PyArray_IsAnyScalar(obj) \
+ (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
+#define PyArray_CheckAnyScalar(obj) (PyArray_CheckScalar(obj) || \
+ PyArray_IsPythonScalar(obj))
+
+#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? Py_INCREF(m), m : \
+ (PyArrayObject *)(PyArray_Copy(m)))
+
+#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
+#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
+#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0)
+#define PyArray_FROM_OF(m,flags) PyArray_FromAny(m, NULL, 0, 0, flags)
+#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, PyArray_DescrFromType(type),\
+ 0, 0, 0)
+#define PyArray_FROM_OTF(m, type, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, flags)
+#define PyArray_FROMANY(m, type, min, max, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, flags)
+
+#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), (val), PyArray_NBYTES(obj))
+
+#define REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt)
+#define MAX_ELSIZE (2*SIZEOF_LONGDOUBLE)
+
+
+#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, DEFAULT_FLAGS)
+
+#define PyArray_EquivArrTypes(a1, a2) \
+ PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
+#define PyArray_EquivTypenums(typenum1, typenum2) \
+ PyArray_EquivTypes(PyArray_DescrFromType(typenum1), \
+ PyArray_DescrFromType(typenum2))
+
+#define PyArray_EquivByteorders(b1, b2) \
+ ((b1 == b2) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
+
+#define PyArray_SimpleNew(nd, dims, typenum) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
+#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, data, 0, CARRAY_FLAGS, NULL)
+#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, NULL, NULL, 0, NULL)
+
+
+ /* These might be faster without the dereferencing of obj
+ going on inside -- of course an optimizing compiler should
+ inline the constants inside a for loop making it a moot point
+ */
+
+#define PyArray_GETPTR1(obj, i) (PyArray_DATA(obj) + \
+ i*PyArray_STRIDE(obj, 0))
+
+#define PyArray_GETPTR2(obj, i, j) (PyArray_DATA(obj) + \
+ i*PyArray_STRIDE(obj, 0) + \
+ j*PyArray_STRIDE(obj, 1))
+
+#define PyArray_GETPTR3(obj, i, j, k) (PyArray_DATA(obj) + \
+ i*PyArray_STRIDE(obj, 0) + \
+ j*PyArray_STRIDE(obj, 1) + \
+ k*PyArray_STRIDE(obj, 2)) \
+
+#define PyArray_GETPTR4(obj, i, j, k, l) (PyArray_DATA(obj) + \
+ i*PyArray_STRIDE(obj, 0) + \
+ j*PyArray_STRIDE(obj, 1) + \
+ k*PyArray_STRIDE(obj, 2) + \
+ l*PyArray_STRIDE(obj, 3))
+
+#define PyArray_DESCR_REPLACE(descr) do { \
+ PyArray_Descr *_new_; \
+ _new_ = PyArray_DescrNew(descr); \
+ Py_XDECREF(descr); \
+ descr = _new_; \
+ } while(0)
+
+ /* Copy should always return contiguous array */
+#define PyArray_Copy(obj) PyArray_NewCopy(obj, 0)
+
+#define PyArray_FromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, BEHAVED_FLAGS | ENSUREARRAY)
+
+#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, DEFAULT_FLAGS | ENSUREARRAY)
+
+#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, ENSURECOPY | ENSUREARRAY)
+
+#define PyArray_Cast(mp, type_num) \
+ PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
+
+ /*Compatibility with old Numeric stuff -- don't use in new code */
+
+#define PyArray_FromDimsAndData(nd, d, type, data) \
+ PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \
+ data)
+
+#define PyArray_UNSIGNED_TYPES
+#define PyArray_SBYTE PyArray_BYTE
+#define PyArray_CHAR PyArray_BYTE
+#define PyArray_CopyArray PyArray_CopyInto
+#define _PyArray_multiply_list PyArray_MultiplyIntList
+#define PyArray_ISSPACESAVER(m) FALSE
+#define PyScalarArray_Check PyArray_CheckScalar
+
+#ifdef PY_ARRAY_TYPES_PREFIX
+# undef CAT
+# undef CAT2
+# undef NS
+# undef longlong
+# undef ulonglong
+# undef Bool
+# undef longdouble
+# undef byte
+# undef ubyte
+# undef ushort
+# undef uint
+# undef ulong
+# undef cfloat
+# undef cdouble
+# undef clongdouble
+# undef Int8
+# undef UInt8
+# undef Int16
+# undef UInt16
+# undef Int32
+# undef UInt32
+# undef Int64
+# undef UInt64
+# undef Int128
+# undef UInt128
+# undef Int256
+# undef UInt256
+# undef Float16
+# undef Complex32
+# undef Float32
+# undef Complex64
+# undef Float64
+# undef Complex128
+# undef Float80
+# undef Complex160
+# undef Float96
+# undef Complex192
+# undef Float128
+# undef Complex256
+# undef intp
+# undef uintp
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !Py_ARRAYOBJECT_H */
diff --git a/numpy/base/include/scipy/ufuncobject.h b/numpy/base/include/scipy/ufuncobject.h
new file mode 100644
index 000000000..34e5ca061
--- /dev/null
+++ b/numpy/base/include/scipy/ufuncobject.h
@@ -0,0 +1,319 @@
+#ifndef Py_UFUNCOBJECT_H
+#define Py_UFUNCOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_ARGS 40
+
+typedef void (*PyUFuncGenericFunction) (char **, intp *, intp *, void *);
+
+typedef struct {
+ PyObject_HEAD
+ int nin, nout, nargs;
+ int identity;
+ PyUFuncGenericFunction *functions;
+ void **data;
+ int ntypes;
+ int check_return;
+ char *name, *types;
+ char *doc;
+ void *ptr;
+ PyObject *obj;
+ PyObject *userloops;
+} PyUFuncObject;
+
+#include "arrayobject.h"
+
+#ifdef PY_ARRAY_TYPES_PREFIX
+# define CAT2(x,y) x ## y
+# define CAT(x,y) CAT2(x,y)
+# define NS(name) CAT(PY_ARRAY_TYPES_PREFIX, name)
+# define intp NS(intp)
+#endif
+
+#define UFUNC_ERR_IGNORE 0
+#define UFUNC_ERR_WARN 1
+#define UFUNC_ERR_RAISE 2
+#define UFUNC_ERR_CALL 3
+
+ /* Python side integer mask */
+
+#define UFUNC_MASK_DIVIDEBYZERO 0x03
+#define UFUNC_MASK_OVERFLOW 0x0c
+#define UFUNC_MASK_UNDERFLOW 0x30
+#define UFUNC_MASK_INVALID 0xc0
+
+#define UFUNC_SHIFT_DIVIDEBYZERO 0
+#define UFUNC_SHIFT_OVERFLOW 2
+#define UFUNC_SHIFT_UNDERFLOW 4
+#define UFUNC_SHIFT_INVALID 6
+
+
+/* platform-dependent code translates floating point
+ status to an integer sum of these values
+*/
+#define UFUNC_FPE_DIVIDEBYZERO 1
+#define UFUNC_FPE_OVERFLOW 2
+#define UFUNC_FPE_UNDERFLOW 4
+#define UFUNC_FPE_INVALID 8
+
+#define UFUNC_ERR_DEFAULT 0 /* Default error mode */
+
+ /* Only internal -- not exported, yet*/
+typedef struct {
+ /* Multi-iterator portion --- needs to be present in this order
+ to work with PyArray_Broadcast */
+ PyObject_HEAD
+ int numiter;
+ intp size;
+ intp index;
+ int nd;
+ intp dimensions[MAX_DIMS];
+ PyArrayIterObject *iters[MAX_ARGS];
+ /* End of Multi-iterator portion */
+
+ /* The ufunc */
+ PyUFuncObject *ufunc;
+
+ /* The error handling */
+ int errormask; /* Integer showing desired error handling */
+ PyObject *errobj; /* currently a tuple with
+ (string, func or None)
+ */
+
+ /* Specific function and data to use */
+ PyUFuncGenericFunction function;
+ void *funcdata;
+
+ /* Loop method */
+ int meth;
+
+ /* Whether we need to copy to a buffer or not.*/
+ int needbuffer[MAX_ARGS];
+ int leftover;
+ int ninnerloops;
+ int lastdim;
+
+ /* Whether or not to swap */
+ int swap[MAX_ARGS];
+
+ /* Buffers for the loop */
+ void *buffer[MAX_ARGS];
+ int bufsize;
+ intp bufcnt;
+ void *dptr[MAX_ARGS];
+
+ /* For casting */
+ void *castbuf[MAX_ARGS];
+ PyArray_VectorUnaryFunc *cast[MAX_ARGS];
+
+ /* usually points to buffer but when a cast is to be
+ done it switches for that argument to castbuf.
+ */
+ void *bufptr[MAX_ARGS];
+
+ /* Steps filled in from iters or sizeof(item)
+ depending on loop method.
+ */
+ intp steps[MAX_ARGS];
+
+ int obj; /* This loop calls object functions */
+ int notimplemented; /* The loop caused notimplemented */
+
+} PyUFuncLoopObject;
+
+/* Could make this more clever someday */
+#define UFUNC_MAXIDENTITY 32
+
+typedef struct {
+ PyObject_HEAD
+ PyArrayIterObject *it;
+ PyArrayObject *ret;
+ PyArrayIterObject *rit; /* Needed for Accumulate */
+ int outsize;
+ intp index;
+ intp size;
+ char idptr[UFUNC_MAXIDENTITY];
+
+ /* The ufunc */
+ PyUFuncObject *ufunc;
+
+ /* The error handling */
+ int errormask;
+ PyObject *errobj;
+
+ PyUFuncGenericFunction function;
+ void *funcdata;
+ int meth;
+ int swap;
+
+ void *buffer;
+ int bufsize;
+
+ void *castbuf;
+ PyArray_VectorUnaryFunc *cast;
+
+ void *bufptr[3];
+ intp steps[3];
+
+ intp N;
+ int instrides;
+ int insize;
+ char *inptr;
+
+ /* For copying small arrays */
+ PyObject *decref;
+
+ int obj;
+
+} PyUFuncReduceObject;
+
+
+#if defined(ALLOW_THREADS)
+#define LOOP_BEGIN_THREADS if (!(loop->obj)) {_save = PyEval_SaveThread();}
+#define LOOP_END_THREADS if (!(loop->obj)) {PyEval_RestoreThread(_save);}
+#else
+#define LOOP_BEGIN_THREADS
+#define LOOP_END_THREADS
+#endif
+
+#define PyUFunc_One 1
+#define PyUFunc_Zero 0
+#define PyUFunc_None -1
+
+#define UFUNC_REDUCE 0
+#define UFUNC_ACCUMULATE 1
+#define UFUNC_REDUCEAT 2
+#define UFUNC_OUTER 3
+
+
+typedef struct {
+ int nin;
+ int nout;
+ PyObject *callable;
+} PyUFunc_PyFuncData;
+
+
+#include "__ufunc_api.h"
+
+#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
+
+#define UFUNC_CHECK_ERROR(arg) \
+ if (((arg)->obj && PyErr_Occurred()) || \
+ ((arg)->errormask && \
+ PyUFunc_checkfperr((arg)->errormask, \
+ (arg)->errobj))) \
+ goto fail
+
+/* This code checks the IEEE status flags in a platform-dependent way */
+/* Adapted from Numarray */
+
+/* OSF/Alpha (Tru64) ---------------------------------------------*/
+#if defined(__osf__) && defined(__alpha)
+
+#include <machine/fpu.h>
+
+#define UFUNC_CHECK_STATUS(ret) { \
+ unsigned long fpstatus; \
+ \
+ fpstatus = ieee_get_fp_control(); \
+ /* clear status bits as well as disable exception mode if on */ \
+ ieee_set_fp_control( 0 ); \
+ ret = ((IEEE_STATUS_DZE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
+ | ((IEEE_STATUS_OVF & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \
+ | ((IEEE_STATUS_UNF & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
+ | ((IEEE_STATUS_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \
+ }
+
+/* MS Windows -----------------------------------------------------*/
+#elif defined(_MSC_VER)
+
+#include <float.h>
+
+#define UFUNC_CHECK_STATUS(ret) { \
+ int fpstatus = (int) _clear87(); \
+ \
+ ret = ((SW_ZERODIVIDE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
+ | ((SW_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \
+ | ((SW_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
+ | ((SW_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \
+ }
+
+
+/* Solaris --------------------------------------------------------*/
+/* --------ignoring SunOS ieee_flags approach, someone else can
+** deal with that! */
+#elif defined(sun)
+#include <ieeefp.h>
+
+#define UFUNC_CHECK_STATUS(ret) { \
+ int fpstatus; \
+ \
+ fpstatus = (int) fpgetsticky(); \
+ ret = ((FP_X_DZ & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
+ | ((FP_X_OFL & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \
+ | ((FP_X_UFL & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
+ | ((FP_X_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \
+ (void) fpsetsticky(0); \
+ }
+
+#elif defined(linux) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__MINGW32__)
+
+#if defined(__GLIBC__) || defined(__APPLE__) || defined(__MINGW32__)
+#include <fenv.h>
+#elif defined(__CYGWIN__)
+#include <mingw/fenv.h>
+#endif
+
+#define UFUNC_CHECK_STATUS(ret) { \
+ int fpstatus = (int) fetestexcept(FE_DIVBYZERO | FE_OVERFLOW | \
+ FE_UNDERFLOW | FE_INVALID); \
+ ret = ((FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
+ | ((FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \
+ | ((FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
+ | ((FE_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \
+ (void) feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | \
+ FE_UNDERFLOW | FE_INVALID); \
+}
+
+#define generate_divbyzero_error() feraiseexcept(FE_DIVBYZERO)
+#define generate_overflow_error() feraiseexcept(FE_OVERFLOW)
+
+#elif defined(AIX)
+
+#include <float.h>
+#include <fpxcp.h>
+
+#define UFUNC_CHECK_STATUS(ret) { \
+ fpflag_t fpstatus; \
+ \
+ fpstatus = fp_read_flag(); \
+ ret = ((FP_DIV_BY_ZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \
+ | ((FP_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \
+ | ((FP_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \
+ | ((FP_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \
+ fp_clr_flag( FP_DIV_BY_ZERO | FP_OVERFLOW | FP_UNDERFLOW | FP_INVALID); \
+}
+
+#else
+
+#define UFUNC_CHECK_STATUS(ret) { \
+ printf("floating point flags not supported on this platform\n"); \
+ ret = 0; \
+ }
+
+#endif
+
+#ifdef PY_ARRAY_TYPES_PREFIX
+# undef CAT
+# undef CAT2
+# undef NS
+# undef intp
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_UFUNCOBJECT_H */
diff --git a/numpy/base/index_tricks.py b/numpy/base/index_tricks.py
new file mode 100644
index 000000000..71d30a387
--- /dev/null
+++ b/numpy/base/index_tricks.py
@@ -0,0 +1,291 @@
+## Automatically adapted for scipy Sep 19, 2005 by convertcode.py
+
+__all__ = ['mgrid','ogrid','r_', 'c_', 'index_exp', 'ix_','ndenumerate']
+
+import sys
+import types
+import numeric as _nx
+from numeric import asarray
+
+from type_check import ScalarType
+import function_base
+import twodim_base as matrix_base
+import matrix
+makemat = matrix.matrix
+
+def ix_(*args):
+ """ Construct an open mesh from multiple sequences.
+
+ This function takes n 1-d sequences and returns n outputs with n
+ dimensions each such that the shape is 1 in all but one dimension and
+ the dimension with the non-unit shape value cycles through all n
+ dimensions.
+
+ Using ix_() one can quickly construct index arrays that will index
+ the cross product.
+
+ a[ix_([1,3,7],[2,5,8])] returns the array
+
+ a[1,2] a[1,5] a[1,8]
+ a[3,2] a[3,5] a[3,8]
+ a[7,2] a[7,5] a[7,8]
+ """
+ out = []
+ nd = len(args)
+ baseshape = [1]*nd
+ for k in range(nd):
+ new = _nx.array(args[k])
+ if (new.ndim <> 1):
+ raise ValueError, "Cross index must be 1 dimensional"
+ baseshape[k] = len(new)
+ new.shape = tuple(baseshape)
+ out.append(new)
+ baseshape[k] = 1
+ return tuple(out)
+
+class nd_grid(object):
+    """ Construct a "meshgrid" in N-dimensions.
+
+    grid = nd_grid() creates an instance which will return a mesh-grid
+    when indexed.  The dimension and number of the output arrays are equal
+    to the number of indexing dimensions.  If the step length is not a
+    complex number, then the stop is not inclusive.
+
+    However, if the step length is a COMPLEX NUMBER (e.g. 5j), then the
+    integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values, where
+    the stop value IS INCLUSIVE.
+
+    If instantiated with an argument of 1, the mesh-grid is open or not
+    fleshed out so that only one-dimension of each returned argument is
+    greater than 1
+
+    Example:
+
+       >>> mgrid = nd_grid()
+       >>> mgrid[0:5,0:5]
+       array([[[0, 0, 0, 0, 0],
+               [1, 1, 1, 1, 1],
+               [2, 2, 2, 2, 2],
+               [3, 3, 3, 3, 3],
+               [4, 4, 4, 4, 4]],
+              [[0, 1, 2, 3, 4],
+               [0, 1, 2, 3, 4],
+               [0, 1, 2, 3, 4],
+               [0, 1, 2, 3, 4],
+               [0, 1, 2, 3, 4]]])
+       >>> mgrid[-1:1:5j]
+       array([-1. , -0.5, 0. , 0.5, 1. ])
+
+       >>> ogrid = nd_grid(1)
+       >>> ogrid[0:5,0:5]
+       [array([[0],[1],[2],[3],[4]]), array([[0, 1, 2, 3, 4]])]
+    """
+    def __init__(self, sparse=False):
+        # sparse=True (ogrid) returns one broadcastable 1-d-per-axis
+        # array per dimension instead of fully fleshed-out arrays (mgrid).
+        self.sparse = sparse
+    def __getitem__(self,key):
+        try:
+            # Multi-axis case: key is a tuple of slice objects.
+            size = []
+            typecode = _nx.Int
+            for k in range(len(key)):
+                step = key[k].step
+                start = key[k].start
+                if start is None: start=0
+                if step is None: step=1
+                if type(step) is type(1j):
+                    # Imaginary step: its magnitude is the point count,
+                    # and the stop value is inclusive.
+                    size.append(int(abs(step)))
+                    typecode = _nx.Float
+                else:
+                    size.append(int((key[k].stop - start)/(step*1.0)))
+                if isinstance(step,types.FloatType) or \
+                   isinstance(start, types.FloatType) or \
+                   isinstance(key[k].stop, types.FloatType):
+                    typecode = _nx.Float
+            if self.sparse:
+                nn = map(lambda x,t: _nx.arange(x,dtype=t),size,(typecode,)*len(size))
+            else:
+                nn = _nx.indices(size,typecode)
+            for k in range(len(size)):
+                # Rescale the raw index grid to the requested start/step.
+                step = key[k].step
+                start = key[k].start
+                if start is None: start=0
+                if step is None: step=1
+                if type(step) is type(1j):
+                    step = int(abs(step))
+                    # NOTE(review): a complex step of magnitude 1 divides
+                    # by zero here (step-1 == 0) -- confirm callers never
+                    # request a single-point axis.
+                    step = (key[k].stop - start)/float(step-1)
+                nn[k] = (nn[k]*step+start)
+            if self.sparse:
+                # Insert NewAxis on every axis except k so that the
+                # returned pieces broadcast against each other.
+                slobj = [_nx.NewAxis]*len(size)
+                for k in range(len(size)):
+                    slobj[k] = slice(None,None)
+                    nn[k] = nn[k][slobj]
+                    slobj[k] = _nx.NewAxis
+            return nn
+        except (IndexError, TypeError):
+            # Single-slice case: key is one slice object.
+            step = key.step
+            stop = key.stop
+            start = key.start
+            if start is None: start = 0
+            if type(step) is type(1j):
+                step = abs(step)
+                length = int(step)
+                step = (key.stop-start)/float(step-1)
+                stop = key.stop+step
+                return _nx.arange(0,length,1,_nx.Float)*step + start
+            else:
+                return _nx.arange(start, stop, step)
+
+    def __getslice__(self,i,j):
+        # Old-style slicing protocol: mgrid[i:j] with default step.
+        return _nx.arange(i,j)
+
+    def __len__(self):
+        return 0
+
+mgrid = nd_grid()
+ogrid = nd_grid(1)
+
+class concatenator(object):
+ """ Translates slice objects to concatenation along an axis.
+ """
+ def _retval(self, res):
+ if self.matrix:
+ oldndim = res.ndim
+ res = makemat(res)
+ if oldndim == 1 and self.col:
+ res = res.T
+ self.axis=self._axis
+ self.matrix=self._matrix
+ self.col=0
+ return res
+
+ def __init__(self, axis=0, matrix=False):
+ self._axis = axis
+ self._matrix = matrix
+ self.axis = axis
+ self.matrix = matrix
+ self.col = 0
+
+ def __getitem__(self,key):
+ if isinstance(key,types.StringType):
+ frame = sys._getframe().f_back
+ mymat = matrix.bmat(key,frame.f_globals,frame.f_locals)
+ return mymat
+ if type(key) is not types.TupleType:
+ key = (key,)
+ objs = []
+ for k in range(len(key)):
+ if type(key[k]) is types.SliceType:
+ step = key[k].step
+ start = key[k].start
+ stop = key[k].stop
+ if start is None: start = 0
+ if step is None:
+ step = 1
+ if type(step) is type(1j):
+ size = int(abs(step))
+ newobj = function_base.linspace(start, stop, num=size)
+ else:
+ newobj = _nx.arange(start, stop, step)
+ elif type(key[k]) is types.StringType:
+ if (key[k] in 'rc'):
+ self.matrix = True
+ self.col = (key[k] == 'c')
+ continue
+ try:
+ self.axis = int(key[k])
+ continue
+ except:
+ raise ValueError, "Unknown special directive."
+ elif type(key[k]) in ScalarType:
+ newobj = asarray([key[k]])
+ else:
+ newobj = key[k]
+ objs.append(newobj)
+ res = _nx.concatenate(tuple(objs),axis=self.axis)
+ return self._retval(res)
+
+ def __getslice__(self,i,j):
+ res = _nx.arange(i,j)
+ return self._retval(res)
+
+ def __len__(self):
+ return 0
+
+r_=concatenator(0)
+c_=concatenator(-1)
+#row = concatenator(0,1)
+#col = concatenator(-1,1)
+
+
+# A simple nd index iterator over an array:
+
+class ndenumerate(object):
+    """Iterator yielding (index_tuple, value) pairs over an array.
+
+    Walks arr.flat and converts each flat position back into an N-d
+    index with precomputed per-axis element strides.
+    """
+    def __init__(self, arr):
+        arr = asarray(arr)
+        self.iter = enumerate(arr.flat)
+        self.ashape = arr.shape
+        self.nd = arr.ndim
+        # factors[i] == product of ashape[i+1:]: the row-major (C order)
+        # stride of axis i, counted in elements.
+        self.factors = [None]*(self.nd-1)
+        val = self.ashape[-1]
+        for i in range(self.nd-1,0,-1):
+            self.factors[i-1] = val
+            val *= self.ashape[i-1]
+
+    def next(self):
+        # Python 2 iterator protocol.  Decompose the flat position into
+        # one index per axis by repeated div/mod with the strides.
+        res = self.iter.next()
+        indxs = [None]*self.nd
+        val = res[0]
+        for i in range(self.nd-1):
+            indxs[i] = val / self.factors[i]
+            val = val % self.factors[i]
+        indxs[self.nd-1] = val
+        return tuple(indxs), res[1]
+
+    def __iter__(self):
+        return self
+
+
+
+# A nicer way to build up index tuples for arrays.
+#
+# You can do all this with slice() plus a few special objects,
+# but there's a lot to remember. This version is simpler because
+# it uses the standard array indexing syntax.
+#
+# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
+# last revision: 1999-7-23
+#
+# Cosmetic changes by T. Oliphant 2001
+#
+#
+# This module provides a convenient method for constructing
+# array indices algorithmically. It provides one importable object,
+# 'index_expression'.
+#
+# For any index combination, including slicing and axis insertion,
+# 'a[indices]' is the same as 'a[index_expression[indices]]' for any
+# array 'a'. However, 'index_expression[indices]' can be used anywhere
+# in Python code and returns a tuple of slice objects that can be
+# used in the construction of complex index expressions.
+
+class _index_expression_class(object):
+    """Konrad Hinsen's index-expression helper.
+
+    a[index_exp[indices]] == a[indices] for any array a, but
+    index_exp[indices] is an ordinary tuple of slice objects that can
+    be stored and passed around like any other value.
+    """
+    maxint = sys.maxint
+
+    def __getitem__(self, item):
+        # Always return a tuple so the result can be used directly as a
+        # (possibly multi-dimensional) index.
+        if type(item) != type(()):
+            return (item,)
+        else:
+            return item
+
+    def __len__(self):
+        # Huge __len__ so the old-style __getslice__ protocol passes
+        # sys.maxint for an unspecified stop.
+        return self.maxint
+
+    def __getslice__(self, start, stop):
+        if stop == self.maxint:
+            stop = None
+        return self[start:stop:None]
+
+index_exp = _index_expression_class()
+
+# End contribution from Konrad.
+
diff --git a/numpy/base/info.py b/numpy/base/info.py
new file mode 100644
index 000000000..b4c4b4e8c
--- /dev/null
+++ b/numpy/base/info.py
@@ -0,0 +1,208 @@
+__doc__ = """Defines a multi-dimensional array and useful procedures for Numerical computation.
+
+Functions
+
+- array - NumPy Array construction
+- zeros - Return an array of all zeros
+- empty - Return an uninitialized array
+- shape - Return shape of sequence or array
+- rank - Return number of dimensions
+- size - Return number of elements in entire array or a
+ certain dimension
+- fromstring - Construct array from (byte) string
+- take - Select sub-arrays using sequence of indices
+- put - Set sub-arrays using sequence of 1-D indices
+- putmask - Set portion of arrays using a mask
+- reshape - Return array with new shape
+- repeat - Repeat elements of array
+- choose - Construct new array from indexed array tuple
+- cross_correlate - Correlate two 1-d arrays
+- searchsorted - Search for element in 1-d array
+- sum - Total sum over a specified dimension
+- average - Average, possibly weighted, over axis or array.
+- cumsum - Cumulative sum over a specified dimension
+- product - Total product over a specified dimension
+- cumproduct - Cumulative product over a specified dimension
+- alltrue - Logical and over an entire axis
+- sometrue - Logical or over an entire axis
+- allclose - Tests if sequences are essentially equal
+
+More Functions:
+
+- arrayrange (arange) - Return regularly spaced array
+- asarray - Guarantee NumPy array
+- sarray - Guarantee a NumPy array that keeps precision
+- convolve - Convolve two 1-d arrays
+- swapaxes - Exchange axes
+- concatenate - Join arrays together
+- transpose - Permute axes
+- sort - Sort elements of array
+- argsort - Indices of sorted array
+- argmax - Index of largest value
+- argmin - Index of smallest value
+- innerproduct - Innerproduct of two arrays
+- dot - Dot product (matrix multiplication)
+- outerproduct - Outerproduct of two arrays
+- resize - Return array with arbitrary new shape
+- indices - Tuple of indices
+- fromfunction - Construct array from universal function
+- diagonal - Return diagonal array
+- trace - Trace of array
+- dump - Dump array to file object (pickle)
+- dumps - Return pickled string representing data
+- load - Return array stored in file object
+- loads - Return array from pickled string
+- ravel - Return array as 1-D
+- nonzero - Indices of nonzero elements for 1-D array
+- shape - Shape of array
+- where - Construct array from binary result
+- compress - Elements of array where condition is true
+- clip - Clip array between two values
+- ones - Array of all ones
+- identity - 2-D identity array (matrix)
+
+(Universal) Math Functions
+
+ add logical_or exp
+ subtract logical_xor log
+ multiply logical_not log10
+ divide maximum sin
+ divide_safe minimum sinh
+ conjugate bitwise_and sqrt
+ power bitwise_or tan
+ absolute bitwise_xor tanh
+ negative invert ceil
+ greater left_shift fabs
+ greater_equal right_shift floor
+ less arccos arctan2
+ less_equal arcsin fmod
+ equal arctan hypot
+ not_equal cos around
+ logical_and cosh sign
+ arccosh arcsinh arctanh
+
+"""
+__doc__ += \
+""" Basic functions used by several sub-packages and useful to have in the
+main name-space
+
+Type handling
+==============
+iscomplexobj -- Test for complex object, scalar result
+isrealobj -- Test for real object, scalar result
+iscomplex -- Test for complex elements, array result
+isreal -- Test for real elements, array result
+imag -- Imaginary part
+real -- Real part
+real_if_close -- Turns complex number with tiny imaginary part to real
+isneginf -- Tests for negative infinity ---|
+isposinf -- Tests for positive infinity |
+isnan -- Tests for nans |---- array results
+isinf -- Tests for infinity |
+isfinite -- Tests for finite numbers ---|
+isscalar -- True if argument is a scalar
+nan_to_num -- Replaces NaN's with 0 and infinities with large numbers
+cast -- Dictionary of functions to force cast to each type
+common_type -- Determine the 'minimum common type code' for a group
+ of arrays
+mintypecode -- Return minimal allowed common typecode.
+
+Index tricks
+==================
+mgrid -- Method which allows easy construction of N-d 'mesh-grids'
+r_ -- Append and construct arrays: turns slice objects into
+ ranges and concatenates them, for 2d arrays appends
+ rows.
+index_exp -- Konrad Hinsen's index_expression class instance which
+ can be useful for building complicated slicing syntax.
+
+Useful functions
+==================
+select -- Extension of where to multiple conditions and choices
+extract -- Extract 1d array from flattened array according to mask
+insert -- Insert 1d array of values into Nd array according to mask
+linspace -- Evenly spaced samples in linear space
+logspace -- Evenly spaced samples in logarithmic space
+fix -- Round x to nearest integer towards zero
+mod -- Modulo mod(x,y) = x % y except keeps sign of y
+amax -- Array maximum along axis
+amin -- Array minimum along axis
+ptp -- Array max-min along axis
+cumsum -- Cumulative sum along axis
+prod -- Product of elements along axis
+cumprod -- Cumulative product along axis
+diff -- Discrete differences along axis
+angle -- Returns angle of complex argument
+unwrap -- Unwrap phase along given axis (1-d algorithm)
+sort_complex -- Sort a complex-array (based on real, then imaginary)
+trim_zeros -- trim the leading and trailing zeros from 1D array.
+
+vectorize -- a class that wraps a Python function taking scalar
+ arguments into a generalized function which
+ can handle arrays of arguments using the broadcast
+ rules of numerix Python.
+
+alter_numeric -- enhance numeric array behavior
+restore_numeric -- restore alterations done by alter_numeric
+
+Shape manipulation
+===================
+squeeze -- Return a with length-one dimensions removed.
+atleast_1d -- Force arrays to be > 1D
+atleast_2d -- Force arrays to be > 2D
+atleast_3d -- Force arrays to be > 3D
+vstack -- Stack arrays vertically (row on row)
+hstack -- Stack arrays horizontally (column on column)
+column_stack -- Stack 1D arrays as columns into 2D array
+dstack -- Stack arrays depthwise (along third dimension)
+split -- Divide array into a list of sub-arrays
+hsplit -- Split into columns
+vsplit -- Split into rows
+dsplit -- Split along third dimension
+
+Matrix (2d array) manipulations
+===============================
+fliplr -- 2D array with columns flipped
+flipud -- 2D array with rows flipped
+rot90 -- Rotate a 2D array a multiple of 90 degrees
+eye -- Return a 2D array with ones down a given diagonal
+diag -- Construct a 2D array from a vector, or return a given
+ diagonal from a 2D array.
+mat -- Construct a Matrix
+bmat -- Build a Matrix from blocks
+
+Polynomials
+============
+poly1d -- A one-dimensional polynomial class
+
+poly -- Return polynomial coefficients from roots
+roots -- Find roots of polynomial given coefficients
+polyint -- Integrate polynomial
+polyder -- Differentiate polynomial
+polyadd -- Add polynomials
+polysub -- Subtract polynomials
+polymul -- Multiply polynomials
+polydiv -- Divide polynomials
+polyval -- Evaluate polynomial at given argument
+
+Import tricks
+=============
+ppimport -- Postpone module import until trying to use it
+ppimport_attr -- Postpone module import until trying to use its
+ attribute
+ppresolve -- Import postponed module and return it.
+
+Machine arithmetics
+===================
+machar_single -- MachAr instance storing the parameters of system
+ single precision floating point arithmetics
+machar_double -- MachAr instance storing the parameters of system
+ double precision floating point arithmetics
+
+Threading tricks
+================
+ParallelExec -- Execute commands in parallel thread.
+"""
+
+depends = ['testing']
+global_symbols = ['*']
diff --git a/numpy/base/ma.py b/numpy/base/ma.py
new file mode 100644
index 000000000..245351349
--- /dev/null
+++ b/numpy/base/ma.py
@@ -0,0 +1,2062 @@
+"""MA: a facility for dealing with missing observations
+MA is generally used as a scipy.array look-alike.
+by Paul F. Dubois.
+
+Copyright 1999, 2000, 2001 Regents of the University of California.
+Released for unlimited redistribution.
+Adapted for scipy_core 2005 by Travis Oliphant and
+(mainly) Paul Dubois.
+"""
+import string, types, sys
+
+import umath
+import oldnumeric
+import function_base
+from numeric import e, pi, newaxis, ndarray, inf
+from oldnumeric import typecodes, amax, amin
+from numerictypes import *
+import numeric
+
+
+MaskType=bool_
+divide_tolerance = 1.e-35
+
+class MAError (Exception):
+    """Base exception raised by the MA (masked array) module."""
+    def __init__ (self, args=None):
+        "Create an exception"
+        # NOTE(review): assigning arbitrary objects to self.args departs
+        # from the standard Exception.args tuple contract -- confirm no
+        # caller unpacks it.
+        self.args = args
+    def __str__(self):
+        "Calculate the string representation"
+        return str(self.args)
+    __repr__ = __str__
+
+class _MaskedPrintOption:
+    "One instance of this class, masked_print_option, is created."
+    def __init__ (self, display):
+        "Create the masked print option object."
+        self.set_display(display)
+        # Enabled by default: masked values print as the display string.
+        self._enabled = 1
+
+    def display (self):
+        "Show what prints for masked values."
+        return self._display
+
+    def set_display (self, s):
+        "set_display(s) sets what prints for masked values."
+        self._display = s
+
+    def enabled (self):
+        "Is the use of the display value enabled?"
+        return self._enabled
+
+    def enable(self, flag=1):
+        "Set the enabling flag to flag."
+        self._enabled = flag
+
+    def __str__ (self):
+        return str(self._display)
+
+#if you single index into a masked location you get this object.
+masked_print_option = _MaskedPrintOption('--')
+
+# Use single element arrays or scalars.
+default_real_fill_value = 1.e20
+default_complex_fill_value = 1.e20 + 0.0j
+default_character_fill_value = '-'
+default_integer_fill_value = 999999
+default_object_fill_value = '?'
+
+def default_fill_value (obj):
+    "Function to calculate default fill value for an object."
+    # Scalars dispatch on their Python type; arrays and masked arrays
+    # dispatch on the dtype character via the typecodes table.
+    if isinstance(obj, types.FloatType):
+        return default_real_fill_value
+    elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
+        return default_integer_fill_value
+    elif isinstance(obj, types.StringType):
+        return default_character_fill_value
+    elif isinstance(obj, types.ComplexType):
+        return default_complex_fill_value
+    elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
+        x = obj.dtypechar
+        if x in typecodes['Float']:
+            return default_real_fill_value
+        if x in typecodes['Integer']:
+            return default_integer_fill_value
+        if x in typecodes['Complex']:
+            return default_complex_fill_value
+        if x in typecodes['Character']:
+            return default_character_fill_value
+        if x in typecodes['UnsignedInteger']:
+            return umath.absolute(default_integer_fill_value)
+        # Unrecognized dtype character: fall back to the object fill.
+        return default_object_fill_value
+    else:
+        return default_object_fill_value
+
+def minimum_fill_value (obj):
+    "Function to calculate default fill value suitable for taking minima."
+    # Returns the largest representable value so masked entries never
+    # win a minimum computation.
+    if isinstance(obj, types.FloatType):
+        return numeric.inf
+    elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
+        return sys.maxint
+    elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
+        x = obj.dtypechar
+        if x in typecodes['Float']:
+            return numeric.inf
+        if x in typecodes['Integer']:
+            return sys.maxint
+        if x in typecodes['UnsignedInteger']:
+            return sys.maxint
+        # NOTE(review): an array dtype matching none of the branches
+        # above falls through and returns None -- confirm intended.
+    else:
+        raise TypeError, 'Unsuitable type for calculating minimum.'
+
+def maximum_fill_value (obj):
+    "Function to calculate default fill value suitable for taking maxima."
+    # Returns the smallest representable value so masked entries never
+    # win a maximum computation (0 is the unsigned minimum).
+    if isinstance(obj, types.FloatType):
+        return -inf
+    elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
+        return -sys.maxint
+    elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
+        x = obj.dtypechar
+        if x in typecodes['Float']:
+            return -inf
+        if x in typecodes['Integer']:
+            return -sys.maxint
+        if x in typecodes['UnsignedInteger']:
+            return 0
+        # NOTE(review): unrecognized array dtypes fall through and
+        # return None -- confirm intended.
+    else:
+        raise TypeError, 'Unsuitable type for calculating maximum.'
+
+def set_fill_value (a, fill_value):
+    "Set fill value of a if it is a masked array."
+    # Deliberately a silent no-op for anything else.
+    if isMaskedArray(a):
+        a.set_fill_value (fill_value)
+
+def getmask (a):
+    """Mask of values in a; could be None.
+    Returns None if a is not a masked array.
+    To get an array for sure use getmaskarray."""
+    # Returns the raw (possibly shared) mask, not a copy.
+    if isinstance(a, MaskedArray):
+        return a.raw_mask()
+    else:
+        return None
+
+def getmaskarray (a):
+    """Mask of values in a; an array of zeros if mask is None
+    or not a masked array, and is a byte-sized integer.
+    Do not try to add up entries, for example.
+    """
+    m = getmask(a)
+    if m is None:
+        # No mask: synthesize an all-false mask of matching shape.
+        return make_mask_none(shape(a))
+    else:
+        return m
+
+def is_mask (m):
+    """Is m a legal mask? Does not check contents, only type.
+    """
+    # NOTE(review): "m.dtype is MaskType" is an identity test on the
+    # dtype object -- confirm dtypes are interned in this version so
+    # identity is equivalent to equality.
+    if m is None or (isinstance(m, ndarray) and \
+                     m.dtype is MaskType):
+        return 1
+    else:
+        return 0
+
+def make_mask (m, copy=0, flag=0):
+    """make_mask(m, copy=0, flag=0)
+    return m as a mask, creating a copy if necessary or requested.
+    Can accept any sequence of integers or None. Does not check
+    that contents must be 0s and 1s.
+    if flag, return None if m contains no true elements.
+    """
+    if m is None:
+        return None
+    elif isinstance(m, ndarray):
+        if m.dtype is MaskType:
+            if copy:
+                result = numeric.array(m, dtype=MaskType, copy=copy)
+            else:
+                # Already a boolean mask: reuse it without copying.
+                result = m
+        else:
+            result = m.astype(MaskType)
+    else:
+        # Arbitrary sequence: fill (handles masked arrays) then cast.
+        result = filled(m,True).astype(MaskType)
+
+    # With flag set, an all-false mask collapses to None ("no mask").
+    if flag and not oldnumeric.sometrue(oldnumeric.ravel(result)):
+        return None
+    else:
+        return result
+
+def make_mask_none (s):
+    "Return a mask of all zeros of shape s."
+    result = numeric.zeros(s, dtype=MaskType)
+    # Re-assign shape so a scalar s also yields the requested shape.
+    result.shape = s
+    return result
+
+def mask_or (m1, m2):
+    """Logical or of the mask candidates m1 and m2, treating None as false.
+    Result may equal m1 or m2 if the other is None.
+    """
+    if m1 is None: return make_mask(m2)
+    if m2 is None: return make_mask(m1)
+    # Identical legal masks are returned as-is (no copy).
+    if m1 is m2 and is_mask(m1): return m1
+    return make_mask(umath.logical_or(m1, m2))
+
+def filled (a, value = None):
+    """a as a contiguous numeric array with any masked areas replaced by value
+    if value is None or the special element "masked", get_fill_value(a)
+    is used instead.
+
+    If a is already a contiguous numeric array, a itself is returned.
+
+    filled(a) can be used to be sure that the result is numeric when
+    passing an object a to other software ignorant of MA, in particular to
+    numeric itself.
+    """
+    if isinstance(a, MaskedArray):
+        return a.filled(value)
+    elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']:
+        return a
+    elif isinstance(a, types.DictType):
+        # Dictionaries are wrapped as object ('O') arrays.
+        return numeric.array(a, 'O')
+    else:
+        return numeric.array(a)
+
+def get_fill_value (a):
+    """
+    The fill value of a, if it has one; otherwise, the default fill value
+    for that type.
+    """
+    if isMaskedArray(a):
+        result = a.fill_value()
+    else:
+        result = default_fill_value(a)
+    return result
+
+def common_fill_value (a, b):
+    "The common fill_value of a and b, if there is one, or None"
+    t1 = get_fill_value(a)
+    t2 = get_fill_value(b)
+    # Equal fill values (by ==, not identity) are considered shared.
+    if t1 == t2: return t1
+    return None
+
+# Domain functions return 1 where the argument(s) are not in the domain.
+class domain_check_interval:
+    "domain_check_interval(a,b)(x) = true where x < a or x > b"
+    def __init__(self, y1, y2):
+        "domain_check_interval(a,b)(x) = true where x < a or x > b"
+        self.y1 = y1
+        self.y2 = y2
+
+    def __call__ (self, x):
+        "Execute the call behavior."
+        # True (out of domain) outside the closed interval [y1, y2].
+        return umath.logical_or(umath.greater (x, self.y2),
+                                umath.less(x, self.y1)
+                               )
+
+class domain_tan:
+    "domain_tan(eps) = true where abs(cos(x)) < eps)"
+    def __init__(self, eps):
+        "domain_tan(eps) = true where abs(cos(x)) < eps)"
+        self.eps = eps
+
+    def __call__ (self, x):
+        "Execute the call behavior."
+        # Flags x too close to an odd multiple of pi/2, where tan blows up.
+        return umath.less(umath.absolute(umath.cos(x)), self.eps)
+
+class domain_greater:
+    "domain_greater(v)(x) = true where x <= v"
+    def __init__(self, critical_value):
+        "domain_greater(v)(x) = true where x <= v"
+        self.critical_value = critical_value
+
+    def __call__ (self, x):
+        "Execute the call behavior."
+        # Out of domain when x is NOT strictly greater than the bound.
+        return umath.less_equal (x, self.critical_value)
+
+class domain_greater_equal:
+    "domain_greater_equal(v)(x) = true where x < v"
+    def __init__(self, critical_value):
+        "domain_greater_equal(v)(x) = true where x < v"
+        self.critical_value = critical_value
+
+    def __call__ (self, x):
+        "Execute the call behavior."
+        # Out of domain when x is below the bound.
+        return umath.less (x, self.critical_value)
+
+class masked_unary_operation:
+    def __init__ (self, aufunc, fill=0, domain=None):
+        """ masked_unary_operation(aufunc, fill=0, domain=None)
+            aufunc(fill) must be defined
+            self(x) returns aufunc(x)
+            with masked values where domain(x) is true or getmask(x) is true.
+        """
+        self.f = aufunc
+        self.fill = fill
+        self.domain = domain
+        # Mirror the wrapped ufunc's docstring on the wrapper.
+        self.__doc__ = getattr(aufunc, "__doc__", str(aufunc))
+
+    def __call__ (self, a, *args, **kwargs):
+        "Execute the call behavior."
+# numeric tries to return scalars rather than arrays when given scalars.
+        m = getmask(a)
+        d1 = filled(a, self.fill)
+        if self.domain is not None:
+            # Mask any input outside the function's domain as well.
+            m = mask_or(m, self.domain(d1))
+        if m is None:
+            # Nothing masked: plain ufunc call; rewrap array results.
+            result = self.f(d1, *args, **kwargs)
+            if type(result) is ndarray:
+                return masked_array (result)
+            else:
+                return result
+        else:
+            dx = masked_array(d1, m)
+            result = self.f(filled(dx, self.fill), *args, **kwargs)
+            if type(result) is ndarray:
+                return masked_array(result, m)
+            elif m[...]:
+                # Fully masked scalar result: the module's masked sentinel.
+                return masked
+            else:
+                return result
+
+    def __str__ (self):
+        return "Masked version of " + str(self.f)
+
+
+class domain_safe_divide:
+    """Domain for division: true where a/b would overflow.
+
+    Flags pairs where |a| * tolerance >= |b|, i.e. the denominator is
+    too small relative to the numerator.
+    """
+    def __init__ (self, tolerance=divide_tolerance):
+        self.tolerance = tolerance
+    def __call__ (self, a, b):
+        return umath.absolute(a) * self.tolerance >= umath.absolute(b)
+
+class domained_binary_operation:
+    """Binary operations that have a domain, like divide. These are
+    complicated so they are a separate class. They have no reduce,
+    outer or accumulate.
+    """
+    def __init__ (self, abfunc, domain, fillx=0, filly=0):
+        """abfunc(fillx, filly) must be defined.
+           abfunc(x, filly) = x for all x to enable reduce.
+        """
+        self.f = abfunc
+        self.domain = domain
+        self.fillx = fillx
+        self.filly = filly
+        # Mirror the wrapped ufunc's docstring on the wrapper.
+        self.__doc__ = getattr(abfunc, "__doc__", str(abfunc))
+
+    def __call__(self, a, b):
+        "Execute the call behavior."
+        ma = getmask(a)
+        mb = getmask(b)
+        d1 = filled(a, self.fillx)
+        d2 = filled(b, self.filly)
+        t = self.domain(d1, d2)
+
+        if oldnumeric.sometrue(t, None):
+            # Replace out-of-domain denominators by the fill value and
+            # mask those positions in the result.
+            d2 = where(t, self.filly, d2)
+            mb = mask_or(mb, t)
+        m = mask_or(ma, mb)
+        if m is None:
+            result = self.f(d1, d2)
+            if type(result) is ndarray:
+                return masked_array(result)
+            else:
+                return result
+        result = self.f(d1, d2)
+        if type(result) is ndarray:
+            if m.shape != result.shape:
+                # Mask didn't broadcast with the data; rebuild it from
+                # the full (dense) mask arrays.
+                m = mask_or(getmaskarray(a), getmaskarray(b))
+            return masked_array(result, m)
+        elif m[...]:
+            # Fully masked scalar result.
+            return masked
+        else:
+            return result
+    def __str__ (self):
+        return "Masked version of " + str(self.f)
+
+class masked_binary_operation:
+    def __init__ (self, abfunc, fillx=0, filly=0):
+        """abfunc(fillx, filly) must be defined.
+           abfunc(x, filly) = x for all x to enable reduce.
+        """
+        self.f = abfunc
+        self.fillx = fillx
+        self.filly = filly
+        # Mirror the wrapped ufunc's docstring on the wrapper.
+        self.__doc__ = getattr(abfunc, "__doc__", str(abfunc))
+
+    def __call__ (self, a, b, *args, **kwargs):
+        "Execute the call behavior."
+        m = mask_or(getmask(a), getmask(b))
+        if m is None:
+            # Nothing masked: fill (handles sequences) and call directly.
+            d1 = filled(a, self.fillx)
+            d2 = filled(b, self.filly)
+            result = self.f(d1, d2, *args, **kwargs)
+            if type(result) is ndarray:
+                return masked_array(result)
+            else:
+                return result
+        d1 = filled(a, self.fillx)
+        d2 = filled(b, self.filly)
+        result = self.f(d1, d2, *args, **kwargs)
+        if type(result) is ndarray:
+            if m.shape != result.shape:
+                # Mask didn't broadcast with the data; rebuild from the
+                # full (dense) mask arrays.
+                m = mask_or(getmaskarray(a), getmaskarray(b))
+            return masked_array(result, m)
+        elif m[...]:
+            # Fully masked scalar result.
+            return masked
+        else:
+            return result
+
+    def reduce (self, target, axis=0):
+        """Reduce target along the given axis with this function."""
+        m = getmask(target)
+        t = filled(target, self.filly)
+        if t.shape == ():
+            # Promote 0-d targets to length-1 so ufunc.reduce works.
+            t = t.reshape(1)
+            if m is not None:
+                m = make_mask(m, copy=1)
+                m.shape = (1,)
+        if m is None:
+            return masked_array (self.f.reduce (t, axis))
+        else:
+            t = masked_array (t, m)
+            # Masked entries carry filly, which is a no-op for abfunc.
+            t = self.f.reduce(filled(t, self.filly), axis)
+            # Result element is masked only where every input was masked.
+            m = umath.logical_and.reduce(m, axis)
+            if isinstance(t, ndarray):
+                return masked_array(t, m, get_fill_value(target))
+            elif m:
+                return masked
+            else:
+                return t
+
+    def outer (self, a, b):
+        "Return the function applied to the outer product of a and b."
+        ma = getmask(a)
+        mb = getmask(b)
+        if ma is None and mb is None:
+            m = None
+        else:
+            ma = getmaskarray(a)
+            mb = getmaskarray(b)
+            m = logical_or.outer(ma, mb)
+        d = self.f.outer(filled(a, self.fillx), filled(b, self.filly))
+        return masked_array(d, m)
+
+    def accumulate (self, target, axis=0):
+        """Accumulate target along axis after filling with y fill value."""
+        # NOTE(review): the accumulated result carries no mask -- masked
+        # inputs contribute their fill value; confirm intended.
+        t = filled(target, self.filly)
+        return masked_array (self.f.accumulate (t, axis))
+    def __str__ (self):
+        return "Masked version of " + str(self.f)
+
+# Mask-aware wrappers for the unary ufuncs.  Where given, the second
+# argument is the fill used for masked input and the third is a domain
+# object that additionally masks out-of-domain arguments.
+sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0))
+log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0))
+log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0))
+exp = masked_unary_operation(umath.exp)
+conjugate = masked_unary_operation(umath.conjugate)
+sin = masked_unary_operation(umath.sin)
+cos = masked_unary_operation(umath.cos)
+tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35))
+arcsin = masked_unary_operation(umath.arcsin, 0.0, domain_check_interval(-1.0, 1.0))
+arccos = masked_unary_operation(umath.arccos, 0.0, domain_check_interval(-1.0, 1.0))
+arctan = masked_unary_operation(umath.arctan)
+# Missing from numeric
+arcsinh = masked_unary_operation(umath.arcsinh)
+arccosh = masked_unary_operation(umath.arccosh)
+arctanh = masked_unary_operation(umath.arctanh)
+sinh = masked_unary_operation(umath.sinh)
+cosh = masked_unary_operation(umath.cosh)
+tanh = masked_unary_operation(umath.tanh)
+absolute = masked_unary_operation(umath.absolute)
+fabs = masked_unary_operation(umath.fabs)
+negative = masked_unary_operation(umath.negative)
+nonzero = masked_unary_operation(oldnumeric.nonzero)
+around = masked_unary_operation(function_base.round_)
+floor = masked_unary_operation(umath.floor)
+ceil = masked_unary_operation(umath.ceil)
+sometrue = masked_unary_operation(oldnumeric.sometrue)
+alltrue = masked_unary_operation(oldnumeric.alltrue, 1)
+logical_not = masked_unary_operation(umath.logical_not)
+
+# Mask-aware binary operations.  Division-like operations go through
+# domained_binary_operation so near-zero denominators are masked.
+# .reduce is explicitly disabled (set to None) for operations where a
+# fill-based reduction is not meaningful (comparisons, subtract, arctan2).
+add = masked_binary_operation(umath.add)
+subtract = masked_binary_operation(umath.subtract)
+subtract.reduce = None
+multiply = masked_binary_operation(umath.multiply, 1, 1)
+divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1)
+true_divide = domained_binary_operation(umath.true_divide, domain_safe_divide(), 0, 1)
+floor_divide = domained_binary_operation(umath.floor_divide, domain_safe_divide(), 0, 1)
+remainder = domained_binary_operation(umath.remainder, domain_safe_divide(), 0, 1)
+fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1)
+hypot = masked_binary_operation(umath.hypot)
+arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0)
+arctan2.reduce = None
+equal = masked_binary_operation(umath.equal)
+equal.reduce = None
+not_equal = masked_binary_operation(umath.not_equal)
+not_equal.reduce = None
+less_equal = masked_binary_operation(umath.less_equal)
+less_equal.reduce = None
+greater_equal = masked_binary_operation(umath.greater_equal)
+greater_equal.reduce = None
+less = masked_binary_operation(umath.less)
+less.reduce = None
+greater = masked_binary_operation(umath.greater)
+greater.reduce = None
+logical_and = masked_binary_operation(umath.logical_and)
+logical_or = masked_binary_operation(umath.logical_or)
+logical_xor = masked_binary_operation(umath.logical_xor)
+bitwise_and = masked_binary_operation(umath.bitwise_and)
+bitwise_or = masked_binary_operation(umath.bitwise_or)
+bitwise_xor = masked_binary_operation(umath.bitwise_xor)
+
+def rank (object):
+    "Rank (number of dimensions) of object after filling masked values."
+    return oldnumeric.rank(filled(object))
+
+def shape (object):
+    "Shape of object after filling masked values."
+    return oldnumeric.shape(filled(object))
+
+def size (object, axis=None):
+    "Number of elements of object (along axis) after filling masked values."
+    return oldnumeric.size(filled(object), axis)
+
+class MaskedArray (object):
+ """Arrays with possibly masked values.
+ Masked values of 1 exclude the corresponding element from
+ any computation.
+
+ Construction:
+ x = array(data, dtype=None, copy=True, fortran=False,
+ mask = None, fill_value=None)
+
+ If copy=False, every effort is made not to copy the data:
+ If data is a MaskedArray, and argument mask=None,
+ then the candidate data is data.data and the
+ mask used is data.mask. If data is a numeric array,
+ it is used as the candidate raw data.
+ If dtypechar is not None and
+ is != data.dtypechar then a data copy is required.
+ Otherwise, the candidate is used.
+
+ If a data copy is required, raw data stored is the result of:
+ numeric.array(data, dtype=dtypechar, copy=copy)
+
+ If mask is None there are no masked values. Otherwise mask must
+ be convertible to an array of booleans with the same shape as x.
+
+ fill_value is used to fill in masked values when necessary,
+ such as when printing and in method/function filled().
+ The fill_value is not used for computation within this module.
+ """
+ def __init__(self, data, dtype=None, copy=True, fortran=False,
+ mask=None, fill_value=None):
+ """array(data, dtype=None, copy=True, fortran=False, mask=None, fill_value=None)
+ If data already a numeric array, its dtype becomes the default value of dtype.
+ """
+ tc = dtype
+ need_data_copied = copy
+ if isinstance(data, MaskedArray):
+ c = data.data
+ ctc = c.dtypechar
+ if tc is None:
+ tc = ctc
+ elif dtype2char(tc) != ctc:
+ need_data_copied = True
+ if mask is None:
+ mask = data.mask
+ elif mask is not None: #attempting to change the mask
+ need_data_copied = True
+
+ elif isinstance(data, ndarray):
+ c = data
+ ctc = c.dtypechar
+ if tc is None:
+ tc = ctc
+ elif dtype2char(tc) != ctc:
+ need_data_copied = True
+ else:
+ need_data_copied = False #because I'll do it now
+ c = numeric.array(data, dtype=tc, copy=True, fortran=fortran)
+
+ if need_data_copied:
+ if tc == ctc:
+ self._data = numeric.array(c, dtype=tc, copy=True, fortran=fortran)
+ else:
+ self._data = c.astype(tc)
+ else:
+ self._data = c
+
+ if mask is None:
+ self._mask = None
+ self._shared_mask = 0
+ else:
+ self._mask = make_mask (mask)
+ if self._mask is None:
+ self._shared_mask = 0
+ else:
+ self._shared_mask = (self._mask is mask)
+ nm = size(self._mask)
+ nd = size(self._data)
+ if nm != nd:
+ if nm == 1:
+ self._mask = oldnumeric.resize(self._mask, self._data.shape)
+ self._shared_mask = 0
+ elif nd == 1:
+ self._data = oldnumeric.resize(self._data, self._mask.shape)
+ self._data.shape = self._mask.shape
+ else:
+ raise MAError, "Mask and data not compatible."
+ elif nm == 1 and shape(self._mask) != shape(self._data):
+ self.unshare_mask()
+ self._mask.shape = self._data.shape
+
+ self.set_fill_value(fill_value)
+
+ def __array__ (self, t = None):
+ "Special hook for numeric. Converts to numeric if possible."
+ if self._mask is not None:
+ if oldnumeric.ravel(self._mask).any():
+ raise MAError, \
+ """Cannot automatically convert masked array to numeric because data
+ is masked in one or more locations.
+ """
+ else: # Mask is all false
+ # Optimize to avoid future invocations of this section.
+ self._mask = None
+ self._shared_mask = 0
+ if t:
+ return self._data.astype(t)
+ else:
+ return self._data
+
+ def _get_shape(self):
+ "Return the current shape."
+ return self._data.shape
+
+ def _set_shape (self, newshape):
+ "Set the array's shape."
+ self._data.shape = newshape
+ if self._mask is not None:
+ self._mask = self._mask.copy()
+ self._mask.shape = newshape
+
+ def _get_flat(self):
+ """Calculate the flat value.
+ """
+ if self._mask is None:
+ return masked_array(self._data.ravel(), mask=None,
+ fill_value = self.fill_value())
+ else:
+ return masked_array(self._data.ravel(),
+ mask=self._mask.ravel(),
+ fill_value = self.fill_value())
+
+ def _set_flat (self, value):
+ "x.flat = value"
+ y = self.ravel()
+ y[:] = value
+
+ def _get_real(self):
+ "Get the real part of a complex array."
+ if self._mask is None:
+ return masked_array(self._data.real, mask=None,
+ fill_value = self.fill_value())
+ else:
+ return masked_array(self._data.real, mask=self._mask.ravel(),
+ fill_value = self.fill_value())
+
+ def _set_real (self, value):
+ "x.real = value"
+ y = self.real
+ y[...] = value
+
+ def _get_imaginary(self):
+ "Get the imaginary part of a complex array."
+ if self._mask is None:
+ return masked_array(self._data.imag, mask=None,
+ fill_value = self.fill_value())
+ else:
+ return masked_array(self._data.imag, mask=self._mask.ravel(),
+ fill_value = self.fill_value())
+
+ def _set_imaginary (self, value):
+ "x.imaginary = value"
+ y = self.imaginary
+ y[...] = value
+
+ def __str__(self):
+ """Calculate the str representation, using masked for fill if
+ it is enabled. Otherwise fill with fill value.
+ """
+ if masked_print_option.enabled():
+ f = masked_print_option
+ else:
+ f = self.fill_value()
+ res = self.filled(f)
+ return str(res)
+
+ def __repr__(self):
+ """Calculate the repr representation, using masked for fill if
+ it is enabled. Otherwise fill with fill value.
+ """
+ with_mask = """\
+array(data =
+ %(data)s,
+ mask =
+ %(mask)s,
+ fill_value=%(fill)s)
+"""
+ with_mask1 = """\
+array(data = %(data)s,
+ mask = %(mask)s,
+ fill_value=%(fill)s)
+"""
+ without_mask = """array(
+ %(data)s)"""
+ without_mask1 = """array(%(data)s)"""
+
+ n = len(self.shape)
+ if self._mask is None:
+ if n <=1:
+ return without_mask1 % {'data':str(self.filled())}
+ return without_mask % {'data':str(self.filled())}
+ else:
+ if n <=1:
+ return with_mask % {
+ 'data': str(self.filled()),
+ 'mask': str(self._mask),
+ 'fill': str(self.fill_value())
+ }
+ return with_mask % {
+ 'data': str(self.filled()),
+ 'mask': str(self._mask),
+ 'fill': str(self.fill_value())
+ }
+ without_mask1 = """array(%(data)s)"""
+ if self._mask is None:
+ return without_mask % {'data':str(self.filled())}
+ else:
+ return with_mask % {
+ 'data': str(self.filled()),
+ 'mask': str(self._mask),
+ 'fill': str(self.fill_value())
+ }
+
+ def __float__(self):
+ "Convert self to float."
+ self.unmask()
+ if self._mask is not None:
+ raise MAError, 'Cannot convert masked element to a Python float.'
+ return float(self.data.item())
+
+ def __int__(self):
+ "Convert self to int."
+ self.unmask()
+ if self._mask is not None:
+ raise MAError, 'Cannot convert masked element to a Python int.'
+ return int(self.data.item())
+
+ def __getitem__(self, i):
+ "Get item described by i. Not a copy as in previous versions."
+ self.unshare_mask()
+ m = self._mask
+ dout = self._data[i]
+ if m is None:
+ return dout
+ mi = m[i]
+ if mi.size == 1:
+ if mi:
+ return masked
+ else:
+ return dout
+ else:
+ return masked_array(dout, mi, fill_value=self._fill_value)
+
+ def __getslice__(self, i, j):
+ "Get slice described by i, j"
+ self.unshare_mask()
+ m = self._mask
+ dout = self._data[i:j]
+ if m is None:
+ return masked_array(dout, fill_value=self._fill_value)
+ else:
+ return masked_array(dout, mask = m[i:j], fill_value=self._fill_value)
+
+# --------
+# setitem and setslice notes
+# note that if value is masked, it means to mask those locations.
+# setting a value changes the mask to match the value in those locations.
+
+ def __setitem__(self, index, value):
+ "Set item described by index. If value is masked, mask those locations."
+ d = self._data
+ if self is masked:
+ raise MAError, 'Cannot alter the masked element.'
+ if value is masked:
+ if self._mask is None:
+ self._mask = make_mask_none(d.shape)
+ self._shared_mask = False
+ else:
+ self.unshare_mask()
+ self._mask[index] = True
+ return
+ m = getmask(value)
+ value = filled(value).astype(d.dtype)
+ d[index] = value
+ if m is None:
+ if self._mask is not None:
+ self.unshare_mask()
+ self._mask[index] = False
+ else:
+ if self._mask is None:
+ self._mask = make_mask_none(d.shape)
+ self._shared_mask = True
+ else:
+ self.unshare_mask()
+ self._mask[index] = m
+
+ def __setslice__(self, i, j, value):
+ "Set slice i:j; if value is masked, mask those locations."
+ d = self._data
+ if self is masked:
+ raise MAError, "Cannot alter the 'masked' object."
+ if value is masked:
+ if self._mask is None:
+ self._mask = make_mask_none(d.shape)
+ self._shared_mask = False
+ self._mask[i:j] = True
+ return
+ m = getmask(value)
+ value = filled(value).astype(d.dtype)
+ d[i:j] = value
+ if m is None:
+ if self._mask is not None:
+ self.unshare_mask()
+ self._mask[i:j] = False
+ else:
+ if self._mask is None:
+ self._mask = make_mask_none(self._data.shape)
+ self._shared_mask = False
+ self._mask[i:j] = m
+
+ def __len__ (self):
+ """Return length of first dimension. This is weird but Python's
+ slicing behavior depends on it."""
+ return len(self._data)
+
+ def __and__(self, other):
+ "Return bitwise_and"
+ return bitwise_and(self, other)
+
+ def __or__(self, other):
+ "Return bitwise_or"
+ return bitwise_or(self, other)
+
+ def __xor__(self, other):
+ "Return bitwise_xor"
+ return bitwise_xor(self, other)
+
+ __rand__ = __and__
+ __ror__ = __or__
+ __rxor__ = __xor__
+
+ def __abs__(self):
+ "Return absolute(self)"
+ return absolute(self)
+
+ def __neg__(self):
+ "Return negative(self)"
+ return negative(self)
+
+ def __pos__(self):
+ "Return array(self)"
+ return array(self)
+
+ def __add__(self, other):
+ "Return add(self, other)"
+ return add(self, other)
+
+ __radd__ = __add__
+
+ def __mod__ (self, other):
+ "Return remainder(self, other)"
+ return remainder(self, other)
+
+ def __rmod__ (self, other):
+ "Return remainder(other, self)"
+ return remainder(other, self)
+
+ def __lshift__ (self, n):
+ return left_shift(self, n)
+
+ def __rshift__ (self, n):
+ return right_shift(self, n)
+
+ def __sub__(self, other):
+ "Return subtract(self, other)"
+ return subtract(self, other)
+
+ def __rsub__(self, other):
+ "Return subtract(other, self)"
+ return subtract(other, self)
+
+ def __mul__(self, other):
+ "Return multiply(self, other)"
+ return multiply(self, other)
+
+ __rmul__ = __mul__
+
+ def __div__(self, other):
+ "Return divide(self, other)"
+ return divide(self, other)
+
+ def __rdiv__(self, other):
+ "Return divide(other, self)"
+ return divide(other, self)
+
+ def __truediv__(self, other):
+ "Return divide(self, other)"
+ return true_divide(self, other)
+
+ def __rtruediv__(self, other):
+ "Return divide(other, self)"
+ return true_divide(other, self)
+
+ def __floordiv__(self, other):
+ "Return divide(self, other)"
+ return floor_divide(self, other)
+
+ def __rfloordiv__(self, other):
+ "Return divide(other, self)"
+ return floor_divide(other, self)
+
+ def __pow__(self,other, third=None):
+ "Return power(self, other, third)"
+ return power(self, other, third)
+
+ def __sqrt__(self):
+ "Return sqrt(self)"
+ return sqrt(self)
+
+ def __iadd__(self, other):
+ "Add other to self in place."
+ t = self._data.dtypechar
+ f = filled(other,0)
+ t1 = f.dtypechar
+ if t == t1:
+ pass
+ elif t in typecodes['Integer']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Float']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Complex']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ elif t1 in typecodes['Complex']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+
+ if self._mask is None:
+ self._data += f
+ m = getmask(other)
+ self._mask = m
+ self._shared_mask = m is not None
+ else:
+ result = add(self, masked_array(f, mask=getmask(other)))
+ self._data = result.data
+ self._mask = result.mask
+ self._shared_mask = 1
+ return self
+
+ def __imul__(self, other):
+ "Add other to self in place."
+ t = self._data.dtypechar
+ f = filled(other,0)
+ t1 = f.dtypechar
+ if t == t1:
+ pass
+ elif t in typecodes['Integer']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Float']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Complex']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ elif t1 in typecodes['Complex']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+
+ if self._mask is None:
+ self._data *= f
+ m = getmask(other)
+ self._mask = m
+ self._shared_mask = m is not None
+ else:
+ result = multiply(self, masked_array(f, mask=getmask(other)))
+ self._data = result.data
+ self._mask = result.mask
+ self._shared_mask = 1
+ return self
+
+ def __isub__(self, other):
+ "Subtract other from self in place."
+ t = self._data.dtypechar
+ f = filled(other,0)
+ t1 = f.dtypechar
+ if t == t1:
+ pass
+ elif t in typecodes['Integer']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Float']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Complex']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ elif t1 in typecodes['Complex']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+
+ if self._mask is None:
+ self._data -= f
+ m = getmask(other)
+ self._mask = m
+ self._shared_mask = m is not None
+ else:
+ result = subtract(self, masked_array(f, mask=getmask(other)))
+ self._data = result.data
+ self._mask = result.mask
+ self._shared_mask = 1
+ return self
+
+
+
+ def __idiv__(self, other):
+ "Divide self by other in place."
+ t = self._data.dtypechar
+ f = filled(other,0)
+ t1 = f.dtypechar
+ if t == t1:
+ pass
+ elif t in typecodes['Integer']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Float']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ elif t in typecodes['Complex']:
+ if t1 in typecodes['Integer']:
+ f = f.astype(t)
+ elif t1 in typecodes['Float']:
+ f = f.astype(t)
+ elif t1 in typecodes['Complex']:
+ f = f.astype(t)
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ else:
+ raise TypeError, 'Incorrect type for in-place operation.'
+ mo = getmask(other)
+ result = divide(self, masked_array(f, mask=mo))
+ self._data = result.data
+ dm = result.raw_mask()
+ if dm is not self._mask:
+ self._mask = dm
+ self._shared_mask = 1
+ return self
+
+ def __eq__(self,other):
+ return equal(self,other)
+
+ def __ne__(self,other):
+ return not_equal(self,other)
+
+ def __lt__(self,other):
+ return less(self,other)
+
+ def __le__(self,other):
+ return less_equal(self,other)
+
+ def __gt__(self,other):
+ return greater(self,other)
+
+ def __ge__(self,other):
+ return greater_equal(self,other)
+
+ def astype (self, tc):
+ "return self as array of given type."
+ d = self._data.astype(tc)
+ return array(d, mask=self._mask)
+
+ def byte_swapped(self):
+ """Returns the raw data field, byte_swapped. Included for consistency
+ with numeric but doesn't make sense in this context.
+ """
+ return self._data.byte_swapped()
+
+ def compressed (self):
+ "A 1-D array of all the non-masked data."
+ d = oldnumeric.ravel(self._data)
+ if self._mask is None:
+ return array(d)
+ else:
+ m = 1 - oldnumeric.ravel(self._mask)
+ c = oldnumeric.compress(m, d)
+ return array(c, copy=0)
+
+ def count (self, axis = None):
+ "Count of the non-masked elements in a, or along a certain axis."
+ m = self._mask
+ s = self._data.shape
+ ls = len(s)
+ if m is None:
+ if ls == 0:
+ return 1
+ if ls == 1:
+ return s[0]
+ if axis is None:
+ return reduce(lambda x,y:x*y, s)
+ else:
+ n = s[axis]
+ t = list(s)
+ del t[axis]
+ return ones(t) * n
+ if axis is None:
+ w = oldnumeric.ravel(m).astype(int)
+ n1 = size(w)
+ if n1 == 1:
+ n2 = w[0]
+ else:
+ n2 = umath.add.reduce(w)
+ return n1 - n2
+ else:
+ n1 = size(m, axis)
+ n2 = sum(m.astype(int), axis)
+ return n1 - n2
+
+ def dot (self, other):
+ "s.dot(other) = innerproduct(s, other)"
+ return innerproduct(self, other)
+
+ def fill_value(self):
+ "Get the current fill value."
+ return self._fill_value
+
+ def filled (self, fill_value=None):
+ """A numeric array with masked values filled. If fill_value is None,
+ use self.fill_value().
+
+ If mask is None, copy data only if not contiguous.
+ Result is always a contiguous, numeric array.
+# Is contiguous really necessary now?
+ """
+ d = self._data
+ m = self._mask
+ if m is None:
+ if d.flags['CONTIGUOUS']:
+ return d
+ else:
+ return d.copy()
+ else:
+ if fill_value is None:
+ value = self._fill_value
+ else:
+ value = fill_value
+
+ if self is masked:
+ result = numeric.array(value).reshape(*d.shape)
+ else:
+ try:
+ result = numeric.array(d, dtype=d.dtype, copy=1)
+ result[m] = value
+ except:
+ #ok, can't put that value in here
+ value = numeric.array(value, dtype=object)
+ d = d.astype(object)
+ result = oldnumeric.choose(m, (d, value))
+ return result
+
+ def ids (self):
+ """Return the ids of the data and mask areas"""
+ return (id(self._data), id(self._mask))
+
+ def iscontiguous (self):
+ "Is the data contiguous?"
+ return self._data.flags['CONTIGUOUS']
+
+ def itemsize(self):
+ "Item size of each data item."
+ return self._data.itemsize
+
+
+ def outer(self, other):
+ "s.outer(other) = outerproduct(s, other)"
+ return outerproduct(self, other)
+
+ def put (self, values):
+ """Set the non-masked entries of self to filled(values).
+ No change to mask
+ """
+ iota = numeric.arange(self.size)
+ d = self._data
+ if self._mask is None:
+ ind = iota
+ else:
+ ind = oldnumeric.compress(1 - self._mask, iota)
+ d[ind] = filled(values).astype(d.dtype)
+
+ def putmask (self, values):
+ """Set the masked entries of self to filled(values).
+ Mask changed to None.
+ """
+ d = self._data
+ if self._mask is not None:
+ d[self._mask] = filled(values).astype(d.dtype)
+ self._shared_mask = 0
+ self._mask = None
+
+ def ravel (self):
+ """Return a 1-D view of self."""
+ if self._mask is None:
+ return masked_array(self._data.ravel())
+ else:
+ return masked_array(self._data.ravel(), self._mask.ravel())
+
+ def raw_data (self):
+ """ Obsolete; use data property instead.
+ The raw data; portions may be meaningless.
+ May be noncontiguous. Expert use only."""
+ return self._data
+ data = property(fget=raw_data,
+ doc="The data, but values at masked locations are meaningless.")
+
+ def raw_mask (self):
+ """ Obsolete; use mask property instead.
+ May be noncontiguous. Expert use only.
+ """
+ return self._mask
+ mask = property(fget=raw_mask,
+ doc="The mask, may be None. Values where mask true are meaningless.")
+
+ def reshape (self, *s):
+ """This array reshaped to shape s"""
+ d = self._data.reshape(*s)
+ if self._mask is None:
+ return masked_array(d)
+ else:
+ m = self._mask.reshape(*s)
+ return masked_array(d, m)
+
+ def set_fill_value (self, v=None):
+ "Set the fill value to v. Omit v to restore default."
+ if v is None:
+ v = default_fill_value (self.raw_data())
+ self._fill_value = v
+
+ def _get_size (self):
+ return self._data.size
+ size = property(fget=_get_size, doc="Number of elements in the array.")
+## CHECK THIS: signature of numeric.array.size?
+
+ def _get_dtypechar(self):
+ return self._data.dtypechar
+ dtypechar = property(fget=_get_dtypechar, doc="type character of the array.")
+
+ def _get_dtype(self):
+ return self._data.dtype
+ dtype = property(fget=_get_dtype, doc="type of the array elements.")
+
+ def item(self):
+ "Return Python scalar if possible."
+ if self._mask is not None:
+ m = oldnumeric.ravel(self._mask)
+ try:
+ if m[0]:
+ return masked
+ except IndexError:
+ return masked
+ return self._data.item()
+
+ def tolist(self, fill_value=None):
+ "Convert to list"
+ return self.filled(fill_value).tolist()
+
+ def tostring(self, fill_value=None):
+ "Convert to string"
+ return self.filled(fill_value).tostring()
+
+ def unmask (self):
+ "Replace the mask by None if possible."
+ if self._mask is None: return
+ m = make_mask(self._mask, flag=1)
+ if m is None:
+ self._mask = None
+ self._shared_mask = 0
+
+ def unshare_mask (self):
+ "If currently sharing mask, make a copy."
+ if self._shared_mask:
+ self._mask = make_mask (self._mask, copy=1, flag=0)
+ self._shared_mask = 0
+
+ shape = property(_get_shape, _set_shape,
+ doc = 'tuple giving the shape of the array')
+
+ flat = property(_get_flat, _set_flat,
+ doc = 'Access array in flat form.')
+
+ real = property(_get_real, _set_real,
+ doc = 'Access the real part of the array')
+
+ imaginary = property(_get_imaginary, _set_imaginary,
+ doc = 'Access the imaginary part of the array')
+
+ imag = imaginary
+
+#end class MaskedArray
+
# "array" is the factory spelling, mirroring Numeric's constructor.
array = MaskedArray

def isMaskedArray (x):
    "Is x a masked array, that is, an instance of MaskedArray?"
    return isinstance(x, MaskedArray)

isarray = isMaskedArray
isMA = isMaskedArray #backward compatibility
+
def allclose (a, b, fill_value=1, rtol=1.e-5, atol=1.e-8):
    """ Returns true if all components of a and b are equal
        subject to given tolerances.
        If fill_value is 1, masked values considered equal.
        If fill_value is 0, masked values considered unequal.
        The relative error rtol should be positive and << 1.0
        The absolute error atol comes into play for those elements
        of b that are very small or zero; it says how small a must be also.
    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    # Masked slots are filled with fill_value in x but always 1 in y:
    # fill_value=1 makes them compare equal, fill_value=0 unequal.
    x = filled(array(d1, copy=0, mask=m), fill_value).astype(float)
    y = filled(array(d2, copy=0, mask=m), 1).astype(float)
    d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y))
    return oldnumeric.alltrue(oldnumeric.ravel(d))
+
def allequal (a, b, fill_value=1):
    """
        True if all entries of a and b are equal, using
        fill_value as a truth value where either or both are masked.
    """
    m = mask_or(getmask(a), getmask(b))
    if m is None:
        x = filled(a)
        y = filled(b)
        d = umath.equal(x, y)
        return oldnumeric.alltrue(oldnumeric.ravel(d))
    elif fill_value:
        # Masked positions count as equal: fill the comparison with 1 there.
        x = filled(a)
        y = filled(b)
        d = umath.equal(x, y)
        dm = array(d, mask=m, copy=0)
        return oldnumeric.alltrue(oldnumeric.ravel(filled(dm, 1)))
    else:
        # Any masked position counts as unequal.
        return 0
+
def masked_values (data, value, rtol=1.e-5, atol=1.e-8, copy=1):
    """
       masked_values(data, value, rtol=1.e-5, atol=1.e-8)
       Create a masked array; mask is None if possible.
       If copy==0, and otherwise possible, result
       may share data values with original array.
       Let d = filled(data, value). Returns d
       masked where abs(data-value)<= atol + rtol * abs(value)
       if d is of a floating point type. Otherwise returns
       masked_object(d, value, copy)
    """
    abs = umath.absolute
    d = filled(data, value)
    # NOTE(review): issubclass on d.dtype assumes the old dtype objects were
    # classes -- confirm against the numeric module of this era.
    if issubclass(d.dtype, numeric.floating):
        m = umath.less_equal(abs(d-value), atol+rtol*abs(value))
        m = make_mask(m, flag=1)  # flag=1: collapse an all-false mask to None
        return array(d, mask = m, copy=copy,
                     fill_value=value)
    else:
        return masked_object(d, value, copy=copy)
+
def masked_object (data, value, copy=1):
    "Array masked exactly where data equals value."
    filled_data = filled(data, value)
    hits = umath.equal(filled_data, value)
    return array(filled_data, mask=make_mask(hits, flag=1), copy=copy,
                 fill_value=value)
+
def arrayrange(start, stop=None, step=1, dtype=None):
    """Just like range() except it returns a array whose type can be specified
    by the keyword argument dtype.
    """
    return array(numeric.arrayrange(start, stop, step, dtype))

arange = arrayrange
+
def fromstring (s, t):
    "Construct a masked array from a string. Result will have no mask."
    raw = numeric.fromstring(s, t)
    return masked_array(raw)
+
def left_shift (a, n):
    "Left shift n bits"
    mask = getmask(a)
    if mask is None:
        return masked_array(umath.left_shift(filled(a), n))
    # Fill masked slots with 0 so the shift is well-defined there.
    shifted = umath.left_shift(filled(a, 0), n)
    return masked_array(shifted, mask)
+
def right_shift (a, n):
    "Right shift n bits"
    mask = getmask(a)
    if mask is None:
        return masked_array(umath.right_shift(filled(a), n))
    # Fill masked slots with 0 so the shift is well-defined there.
    shifted = umath.right_shift(filled(a, 0), n)
    return masked_array(shifted, mask)
+
def resize (a, new_shape):
    """resize(a, new_shape) returns a new array with the specified shape.
    The original array's total size can be any size."""
    mask = getmask(a)
    if mask is not None:
        mask = oldnumeric.resize(mask, new_shape)
    resized = array(oldnumeric.resize(filled(a), new_shape), mask=mask)
    resized.set_fill_value(get_fill_value(a))
    return resized
+
def repeat(a, repeats, axis=0):
    """repeat elements of a repeats times along axis
    repeats is a sequence of length a.shape[axis]
    telling how many times to repeat each element.
    """
    filled_a = filled(a)
    if isinstance(repeats, types.IntType):
        # A scalar count is broadcast across the whole axis.
        repeats = tuple([repeats] * (shape(filled_a)[axis]))

    mask = getmask(a)
    if mask is not None:
        mask = oldnumeric.repeat(mask, repeats, axis)
    repeated = oldnumeric.repeat(filled_a, repeats, axis)
    result = masked_array(repeated, mask)
    result.set_fill_value(get_fill_value(a))
    return result
+
def identity(n):
    """identity(n) returns the identity matrix of shape n x n.
    """
    eye = numeric.identity(n)
    return array(eye)
+
def indices (dimensions, dtype=None):
    """indices(dimensions,dtype=None) returns an array representing a grid
    of indices with row-only, and column-only variation.
    """
    grid = numeric.indices(dimensions, dtype)
    return array(grid)
+
def zeros (shape, dtype=int):
    """zeros(n, dtype=int) =
     an array of all zeros of the given length or shape."""
    raw = numeric.zeros(shape, dtype)
    return array(raw)
+
def ones (shape, dtype=int):
    """ones(n, dtype=int) =
     an array of all ones of the given length or shape."""
    raw = numeric.ones(shape, dtype)
    return array(raw)
+
+
def count (a, axis = None):
    "Count of the non-masked elements in a, or along a certain axis."
    return masked_array(a).count(axis)
+
def power (a, b, third=None):
    "a**b"
    if third is not None:
        raise MAError, "3-argument power not supported."
    ma = getmask(a)
    mb = getmask(b)
    m = mask_or(ma, mb)
    fa = filled(a, 1)
    fb = filled(b, 1)
    # Integer exponents are safe for any base; compute directly.
    if fb.dtypechar in typecodes["Integer"]:
        return masked_array(umath.power(fa, fb), m)
    # Non-integer exponent: mask non-positive bases, which would be
    # invalid (or complex) under umath.power.
    md = make_mask(umath.less_equal (fa, 0), flag=1)
    m = mask_or(m, md)
    if m is None:
        return masked_array(umath.power(fa, fb))
    else:
        # Replace the (masked) bad bases with 1 so power doesn't fault.
        fa = numeric.where(m, 1, fa)
        return masked_array(umath.power(fa, fb), m)
+
def masked_array (a, mask=None, fill_value=None):
    """masked_array(a, mask=None) =
       array(a, mask=mask, copy=0, fill_value=fill_value)
    """
    return array(a, copy=0, mask=mask, fill_value=fill_value)
+
# Mask-aware reductions built from the wrapped ufuncs above.
sum = add.reduce
product = multiply.reduce
+
def average (a, axis=0, weights=None, returned = 0):
    """average(a, axis=0, weights=None)
       Computes average along indicated axis.
       If axis is None, average over the entire array
       Inputs can be integer or floating types; result is of type float.

       If weights are given, result is sum(a*weights)/(sum(weights)*1.0)
       weights must have a's shape or be the 1-d with length the size
       of a in the given axis.

       If returned, return a tuple: the result and the sum of the weights
       or count of values. Results will have the same shape.

       masked values in the weights will be set to 0.0
    """
    a = masked_array(a)
    mask = a.mask
    ash = a.shape
    if ash == ():
        ash = (1,)
    # n accumulates the (weighted) sum, d the divisor, for each of the
    # four combinations of {masked, unmasked} x {weighted, unweighted}.
    if axis is None:
        if mask is None:
            if weights is None:
                n = add.reduce(a.raw_data().ravel())
                d = reduce(lambda x, y: x * y, ash, 1.0)
            else:
                w = filled(weights, 0.0).ravel()
                n = umath.add.reduce(a.raw_data().ravel() * w)
                d = umath.add.reduce(w)
                del w
        else:
            if weights is None:
                n = add.reduce(a.ravel())
                # Count only the unmasked entries in the divisor.
                w = oldnumeric.choose(mask, (1.0,0.0)).ravel()
                d = umath.add.reduce(w)
                del w
            else:
                w = array(filled(weights, 0.0), float, mask=mask).ravel()
                n = add.reduce(a.ravel() * w)
                d = add.reduce(w)
                del w
    else:
        if mask is None:
            if weights is None:
                d = ash[axis] * 1.0
                n = umath.add.reduce(a.raw_data(), axis)
            else:
                w = filled(weights, 0.0)
                wsh = w.shape
                if wsh == ():
                    wsh = (1,)
                if wsh == ash:
                    w = numeric.array(w, float, copy=0)
                    n = add.reduce(a*w, axis)
                    d = add.reduce(w, axis)
                    del w
                elif wsh == (ash[axis],):
                    # 1-d weights: broadcast along the chosen axis.
                    ni = ash[axis]
                    r = [newaxis]*len(ash)
                    r[axis] = slice(None,None,1)
                    w = eval ("w["+ repr(tuple(r)) + "] * ones(ash, float)")
                    n = add.reduce(a*w, axis)
                    d = add.reduce(w, axis)
                    del w, r
                else:
                    raise ValueError, 'average: weights wrong shape.'
        else:
            if weights is None:
                n = add.reduce(a, axis)
                w = numeric.choose(mask, (1.0, 0.0))
                d = umath.add.reduce(w, axis)
                del w
            else:
                w = filled(weights, 0.0)
                wsh = w.shape
                if wsh == ():
                    wsh = (1,)
                if wsh == ash:
                    w = array(w, float, mask=mask, copy=0)
                    n = add.reduce(a*w, axis)
                    d = add.reduce(w, axis)
                elif wsh == (ash[axis],):
                    # 1-d weights: broadcast along the chosen axis,
                    # masking them in step with the data.
                    ni = ash[axis]
                    r = [newaxis]*len(ash)
                    r[axis] = slice(None,None,1)
                    w = eval ("w["+ repr(tuple(r)) + "] * masked_array(ones(ash, float), mask)")
                    n = add.reduce(a*w, axis)
                    d = add.reduce(w, axis)
                else:
                    raise ValueError, 'average: weights wrong shape.'
                del w
    #print n, d, repr(mask), repr(weights)
    if n is masked or d is masked: return masked
    result = divide (n, d)
    del n

    if isinstance(result, MaskedArray):
        result.unmask()
        if returned:
            # Make the weight-sum broadcastable to the result's shape.
            if not isinstance(d, MaskedArray):
                d = masked_array(d)
            if not d.shape == result.shape:
                d = ones(result.shape, float) * d
            d.unmask()
    if returned:
        return result, d
    else:
        return result
+
def where (condition, x, y):
    """where(condition, x, y) is x where condition is nonzero, y otherwise.
       condition must be convertible to an integer array.
       Answer is always the shape of condition.
       The type depends on x and y; positions drawn from the value
       "masked" come out masked in the result.
    """
    fc = filled(not_equal(condition,0), 0)
    # Treat the special "masked" singleton as value 0 with mask 1.
    if x is masked:
        xv = 0
        xm = 1
    else:
        xv = filled(x)
        xm = getmask(x)
        if xm is None: xm = 0
    if y is masked:
        yv = 0
        ym = 1
    else:
        yv = filled(y)
        ym = getmask(y)
        if ym is None: ym = 0
    # Select both data and mask with the same condition.
    d = numeric.choose(fc, (yv, xv))
    md = numeric.choose(fc, (ym, xm))
    m = getmask(condition)
    m = make_mask(mask_or(m, md), copy=0, flag=1)
    return masked_array(d, m)
+
def choose (indices, t):
    "Returns array shaped like indices with elements chosen from t"
    # fmask/nmask map each candidate to its data / mask contribution;
    # the special "masked" singleton contributes data 1 and mask 1.
    def fmask (x):
        if x is masked: return 1
        return filled(x)
    def nmask (x):
        if x is masked: return 1
        m = getmask(x)
        if m is None: return 0
        return m
    c = filled(indices,0)
    masks = [nmask(x) for x in t]
    a = [fmask(x) for x in t]
    d = numeric.choose(c, a)
    m = numeric.choose(c, masks)
    m = make_mask(mask_or(m, getmask(indices)), copy=0, flag=1)
    return masked_array(d, m)
+
def masked_where(condition, x, copy=1):
    """Return x as an array masked where condition is true.
    Also masked where x or condition masked.
    """
    combined = mask_or(getmask(x), filled(condition, 1))
    return array(filled(x), copy=copy, mask=combined)
+
def masked_greater(x, value, copy=1):
    "masked_greater(x, value) = x masked where x > value"
    condition = greater(x, value)
    return masked_where(condition, x, copy)
+
def masked_greater_equal(x, value, copy=1):
    "masked_greater_equal(x, value) = x masked where x >= value"
    condition = greater_equal(x, value)
    return masked_where(condition, x, copy)
+
def masked_less(x, value, copy=1):
    "masked_less(x, value) = x masked where x < value"
    condition = less(x, value)
    return masked_where(condition, x, copy)
+
def masked_less_equal(x, value, copy=1):
    "masked_less_equal(x, value) = x masked where x <= value"
    condition = less_equal(x, value)
    return masked_where(condition, x, copy)
+
def masked_not_equal(x, value, copy=1):
    "masked_not_equal(x, value) = x masked where x != value"
    data = filled(x, 0)
    hits = umath.not_equal(data, value)
    return array(data, mask=mask_or(hits, getmask(x)), copy=copy)
+
def masked_equal(x, value, copy=1):
    """masked_equal(x, value) = x masked where x == value
       For floating point consider masked_values(x, value) instead.
    """
    data = filled(x, 0)
    hits = umath.equal(data, value)
    return array(data, mask=mask_or(hits, getmask(x)), copy=copy)
+
+def masked_inside(x, v1, v2, copy=1):
+ """x with mask of all values of x that are inside [v1,v2]
+ v1 and v2 can be given in either order.
+ """
+ if v2 < v1:
+ t = v2
+ v2 = v1
+ v1 = t
+ d=filled(x, 0)
+ c = umath.logical_and(umath.less_equal(d, v2), umath.greater_equal(d, v1))
+ m = mask_or(c, getmask(x))
+ return array(d, mask = m, copy=copy)
+
+def masked_outside(x, v1, v2, copy=1):
+ """x with mask of all values of x that are outside [v1,v2]
+ v1 and v2 can be given in either order.
+ """
+ if v2 < v1:
+ t = v2
+ v2 = v1
+ v1 = t
+ d = filled(x,0)
+ c = umath.logical_or(umath.less(d, v1), umath.greater(d, v2))
+ m = mask_or(c, getmask(x))
+ return array(d, mask = m, copy=copy)
+
+def reshape (a, *newshape):
+ "Copy of a with a new shape."
+ m = getmask(a)
+ d = filled(a).reshape(*newshape)
+ if m is None:
+ return masked_array(d)
+ else:
+ return masked_array(d, mask=numeric.reshape(m, *newshape))
+
+def ravel (a):
+ "a as one-dimensional, may share data and mask"
+ m = getmask(a)
+ d = oldnumeric.ravel(filled(a))
+ if m is None:
+ return masked_array(d)
+ else:
+ return masked_array(d, mask=numeric.ravel(m))
+
+def concatenate (arrays, axis=0):
+ "Concatenate the arrays along the given axis"
+ d = []
+ for x in arrays:
+ d.append(filled(x))
+ d = numeric.concatenate(d, axis)
+ for x in arrays:
+ if getmask(x) is not None: break
+ else:
+ return masked_array(d)
+ dm = []
+ for x in arrays:
+ dm.append(getmaskarray(x))
+ dm = numeric.concatenate(dm, axis)
+ return masked_array(d, mask=dm)
+
+def take (a, indices, axis=0):
+ "take(a, indices, axis=0) returns selection of items from a."
+ m = getmask(a)
+ d = masked_array(a).raw_data()
+ if m is None:
+ return masked_array(numeric.take(d, indices, axis))
+ else:
+ return masked_array(numeric.take(d, indices, axis),
+ mask = numeric.take(m, indices, axis))
+
+def transpose(a, axes=None):
+ "transpose(a, axes=None) reorder dimensions per tuple axes"
+ m = getmask(a)
+ d = filled(a)
+ if m is None:
+ return masked_array(numeric.transpose(d, axes))
+ else:
+ return masked_array(numeric.transpose(d, axes),
+ mask = numeric.transpose(m, axes))
+
+
+def put(a, indices, values):
+ """put(a, indices, values) sets storage-indexed locations to corresponding values.
+
+ Values and indices are filled if necessary.
+
+ """
+ d = a.raw_data()
+ ind = filled(indices)
+ v = filled(values)
+ numeric.put (d, ind, v)
+ m = getmask(a)
+ if m is not None:
+ a.unshare_mask()
+ numeric.put(a.raw_mask(), ind, 0)
+
+def putmask(a, mask, values):
+ "putmask(a, mask, values) sets a where mask is true."
+ if mask is None:
+ return
+ numeric.putmask(a.raw_data(), mask, values)
+ m = getmask(a)
+ if m is None: return
+ a.unshare_mask()
+ numeric.putmask(a.raw_mask(), mask, 0)
+
+def innerproduct(a,b):
+ """innerproduct(a,b) returns the dot product of two arrays, which has
+ shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the
+ product of the elements from the last dimensions of a and b.
+ Masked elements are replaced by zeros.
+ """
+ fa = filled(a, 0)
+ fb = filled(b, 0)
+ if len(fa.shape) == 0: fa.shape = (1,)
+ if len(fb.shape) == 0: fb.shape = (1,)
+ return masked_array(numeric.innerproduct(fa, fb))
+
+def outerproduct(a, b):
+ """outerproduct(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))"""
+ fa = filled(a,0).ravel()
+ fb = filled(b,0).ravel()
+ d = numeric.outerproduct(fa, fb)
+ ma = getmask(a)
+ mb = getmask(b)
+ if ma is None and mb is None:
+ return masked_array(d)
+ ma = getmaskarray(a)
+ mb = getmaskarray(b)
+ m = make_mask(1-numeric.outerproduct(1-ma,1-mb), copy=0)
+ return masked_array(d, m)
+
+def dot(a, b):
+ """dot(a,b) returns matrix-multiplication between a and b. The product-sum
+ is over the last dimension of a and the second-to-last dimension of b.
+ Masked values are replaced by zeros. See also innerproduct.
+ """
+ return innerproduct(filled(a,0), numeric.swapaxes(filled(b,0), -1, -2))
+
+def compress(condition, x, dimension=-1):
+ """Select those parts of x for which condition is true.
+ Masked values in condition are considered false.
+ """
+ c = filled(condition, 0)
+ m = getmask(x)
+ if m is not None:
+ m=numeric.compress(c, m, dimension)
+ d = numeric.compress(c, filled(x), dimension)
+ return masked_array(d, m)
+
+class _minimum_operation:
+ "Object to calculate minima"
+ def __init__ (self):
+ """minimum(a, b) or minimum(a)
+ In one argument case returns the scalar minimum.
+ """
+ pass
+
+ def __call__ (self, a, b=None):
+ "Execute the call behavior."
+ if b is None:
+ m = getmask(a)
+ if m is None:
+ d = amin(filled(a).ravel())
+ return d
+ ac = a.compressed()
+ if len(ac) == 0:
+ return masked
+ else:
+ return amin(ac.raw_data())
+ else:
+ return where(less(a, b), a, b)[...]
+
+ def reduce (self, target, axis=0):
+ """Reduce target along the given axis."""
+ m = getmask(target)
+ if m is None:
+ t = filled(target)
+ return masked_array (umath.minimum.reduce (t, axis))
+ else:
+ t = umath.minimum.reduce(filled(target, minimum_fill_value(target)), axis)
+ m = umath.logical_and.reduce(m, axis)
+ return masked_array(t, m, get_fill_value(target))
+
+ def outer (self, a, b):
+ "Return the function applied to the outer product of a and b."
+ ma = getmask(a)
+ mb = getmask(b)
+ if ma is None and mb is None:
+ m = None
+ else:
+ ma = getmaskarray(a)
+ mb = getmaskarray(b)
+ m = logical_or.outer(ma, mb)
+ d = umath.minimum.outer(filled(a), filled(b))
+ return masked_array(d, m)
+
+minimum = _minimum_operation ()
+
+class _maximum_operation:
+ "Object to calculate maxima"
+ def __init__ (self):
+ """maximum(a, b) or maximum(a)
+ In one argument case returns the scalar maximum.
+ """
+ pass
+
+ def __call__ (self, a, b=None):
+ "Execute the call behavior."
+ if b is None:
+ m = getmask(a)
+ if m is None:
+ d = amax(filled(a).ravel())
+ return d
+ ac = a.compressed()
+ if len(ac) == 0:
+ return masked
+ else:
+ return amax(ac.raw_data())
+ else:
+ return where(greater(a, b), a, b)[...]
+
+ def reduce (self, target, axis=0):
+ """Reduce target along the given axis."""
+ m = getmask(target)
+ if m is None:
+ t = filled(target)
+ return masked_array (umath.maximum.reduce (t, axis))
+ else:
+ t = umath.maximum.reduce(filled(target, maximum_fill_value(target)), axis)
+ m = umath.logical_and.reduce(m, axis)
+ return masked_array(t, m, get_fill_value(target))
+
+ def outer (self, a, b):
+ "Return the function applied to the outer product of a and b."
+ ma = getmask(a)
+ mb = getmask(b)
+ if ma is None and mb is None:
+ m = None
+ else:
+ ma = getmaskarray(a)
+ mb = getmaskarray(b)
+ m = logical_or.outer(ma, mb)
+ d = umath.maximum.outer(filled(a), filled(b))
+ return masked_array(d, m)
+
+maximum = _maximum_operation ()
+
+def sort (x, axis = -1, fill_value=None):
+ """If x does not have a mask, return a masked array formed from the
+ result of numeric.sort(x, axis).
+ Otherwise, fill x with fill_value. Sort it.
+ Set a mask where the result is equal to fill_value.
+ Note that this may have unintended consequences if the data contains the
+ fill value at a non-masked site.
+
+ If fill_value is not given the default fill value for x's type will be
+ used.
+ """
+ if fill_value is None:
+ fill_value = default_fill_value (x)
+ d = filled(x, fill_value)
+ s = oldnumeric.sort(d, axis)
+ if getmask(x) is None:
+ return masked_array(s)
+ return masked_values(s, fill_value, copy=0)
+
+def diagonal(a, k = 0, axis1=0, axis2=1):
+ """diagonal(a,k=0,axis1=0, axis2=1) = the k'th diagonal of a"""
+ d = oldnumeric.diagonal(filled(a), k, axis1, axis2)
+ m = getmask(a)
+ if m is None:
+ return masked_array(d, m)
+ else:
+ return masked_array(d, oldnumeric.diagonal(m, k, axis1, axis2))
+
+def argsort (x, axis = -1, fill_value=None):
+ """Treating masked values as if they have the value fill_value,
+ return sort indices for sorting along given axis.
+ if fill_value is None, use get_fill_value(x)
+ Returns a scipy array.
+ """
+ d = filled(x, fill_value)
+ return oldnumeric.argsort(d, axis)
+
+def argmin (x, axis = -1, fill_value=None):
+ """Treating masked values as if they have the value fill_value,
+ return indices for minimum values along given axis.
+ if fill_value is None, use get_fill_value(x).
+ Returns a scipy array if x has more than one dimension.
+ Otherwise, returns a scalar index.
+ """
+ d = filled(x, fill_value)
+ return oldnumeric.argmin(d, axis)
+
+def argmax (x, axis = -1, fill_value=None):
+ """Treating masked values as if they have the value fill_value,
+ return indices for maximum values along given axis.
+ if fill_value is None, use -get_fill_value(x) if it exists.
+ Returns a scipy array if x has more than one dimension.
+ Otherwise, returns a scalar index.
+ """
+ if fill_value is None:
+ fill_value = default_fill_value (x)
+ try:
+ fill_value = - fill_value
+ except:
+ pass
+ d = filled(x, fill_value)
+ return oldnumeric.argmax(d, axis)
+
+def fromfunction (f, s):
+ """apply f to s to create array as in umath."""
+ return masked_array(numeric.fromfunction(f,s))
+
+def asarray(data, dtype=None):
+ """asarray(data, dtype) = array(data, dtype, copy=0)
+ """
+ if isinstance(data, MaskedArray) and \
+ (dtype is None or dtype == data.dtype):
+ return data
+ return array(data, dtype=dtype, copy=0)
+
+masked = MaskedArray([0], int, mask=[1])[0:0]
+masked = masked[0:0]
diff --git a/numpy/base/machar.py b/numpy/base/machar.py
new file mode 100644
index 000000000..e00d112ef
--- /dev/null
+++ b/numpy/base/machar.py
@@ -0,0 +1,268 @@
+#
+# Machine arithmetics - determine the parameters of the
+# floating-point arithmetic system
+#
+# Author: Pearu Peterson, September 2003
+#
+
+__all__ = ['MachAr']
+
+from numeric import array
+from oldnumeric import any
+
+# Need to speed this up...especially for longfloat
+
+class MachAr(object):
+ """Diagnosing machine parameters.
+
+ The following attributes are available:
+
+ ibeta - radix in which numbers are represented
+ it - number of base-ibeta digits in the floating point mantissa M
+ machep - exponent of the smallest (most negative) power of ibeta that,
+ added to 1.0,
+ gives something different from 1.0
+ eps - floating-point number beta**machep (floating point precision)
+ negep - exponent of the smallest power of ibeta that, subtracted
+ from 1.0, gives something different from 1.0
+ epsneg - floating-point number beta**negep
+ iexp - number of bits in the exponent (including its sign and bias)
+ minexp - smallest (most negative) power of ibeta consistent with there
+ being no leading zeros in the mantissa
+ xmin - floating point number beta**minexp (the smallest (in
+ magnitude) usable floating value)
+ maxexp - smallest (positive) power of ibeta that causes overflow
+ xmax - (1-epsneg)* beta**maxexp (the largest (in magnitude)
+ usable floating value)
+ irnd - in range(6), information on what kind of rounding is done
+ in addition, and on how underflow is handled
+ ngrd - number of 'guard digits' used when truncating the product
+ of two mantissas to fit the representation
+
+ epsilon - same as eps
+ tiny - same as xmin
+ huge - same as xmax
+ precision - int(-log10(eps))
+ resolution - 10**(-precision)
+
+ Reference:
+ Numerical Recipes.
+ """
+ def __init__(self, float_conv=float,int_conv=int,
+ float_to_float=float,
+ float_to_str = lambda v:'%24.16e' % v,
+ title = 'Python floating point number'):
+ """
+ float_conv - convert integer to float (array)
+ int_conv - convert float (array) to integer
+ float_to_float - convert float array to float
+ float_to_str - convert array float to str
+ title - description of used floating point numbers
+ """
+ one = float_conv(1)
+ two = one + one
+ zero = one - one
+
+ # Do we really need to do this? Aren't they 2 and 2.0?
+ # Determine ibeta and beta
+ a = one
+ while 1:
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ b = one
+ while 1:
+ b = b + b
+ temp = a + b
+ itemp = int_conv(temp-a)
+ if any(itemp != 0):
+ break
+ ibeta = itemp
+ beta = float_conv(ibeta)
+
+ # Determine it and irnd
+ it = -1
+ b = one
+ while 1:
+ it = it + 1
+ b = b * beta
+ temp = b + one
+ temp1 = temp - b
+ if any(temp1 - one != zero):
+ break
+
+ betah = beta / two
+ a = one
+ while 1:
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ temp = a + betah
+ irnd = 0
+ if any(temp-a != zero):
+ irnd = 1
+ tempa = a + beta
+ temp = tempa + betah
+ if irnd==0 and any(temp-tempa != zero):
+ irnd = 2
+
+ # Determine negep and epsneg
+ negep = it + 3
+ betain = one / beta
+ a = one
+ for i in range(negep):
+ a = a * betain
+ b = a
+ while 1:
+ temp = one - a
+ if any(temp-one != zero):
+ break
+ a = a * beta
+ negep = negep - 1
+ # Prevent infinite loop on PPC with gcc 4.0:
+ if negep < 0:
+ raise RuntimeError, "could not determine machine tolerance " \
+ "for 'negep'"
+ negep = -negep
+ epsneg = a
+
+ # Determine machep and eps
+ machep = - it - 3
+ a = b
+
+ while 1:
+ temp = one + a
+ if any(temp-one != zero):
+ break
+ a = a * beta
+ machep = machep + 1
+ eps = a
+
+ # Determine ngrd
+ ngrd = 0
+ temp = one + eps
+ if irnd==0 and any(temp*one - one != zero):
+ ngrd = 1
+
+ # Determine iexp
+ i = 0
+ k = 1
+ z = betain
+ t = one + eps
+ nxres = 0
+ while 1:
+ y = z
+ z = y*y
+ a = z*one # Check here for underflow
+ temp = z*t
+ if any(a+a == zero) or any(abs(z)>=y):
+ break
+ temp1 = temp * betain
+ if any(temp1*beta == z):
+ break
+ i = i + 1
+ k = k + k
+ if ibeta != 10:
+ iexp = i + 1
+ mx = k + k
+ else:
+ iexp = 2
+ iz = ibeta
+ while k >= iz:
+ iz = iz * ibeta
+ iexp = iexp + 1
+ mx = iz + iz - 1
+
+ # Determine minexp and xmin
+ while 1:
+ xmin = y
+ y = y * betain
+ a = y * one
+ temp = y * t
+ if any(a+a != zero) and any(abs(y) < xmin):
+ k = k + 1
+ temp1 = temp * betain
+ if any(temp1*beta == y) and any(temp != y):
+ nxres = 3
+ xmin = y
+ break
+ else:
+ break
+ minexp = -k
+
+ # Determine maxexp, xmax
+ if mx <= k + k - 3 and ibeta != 10:
+ mx = mx + mx
+ iexp = iexp + 1
+ maxexp = mx + minexp
+ irnd = irnd + nxres
+ if irnd >= 2:
+ maxexp = maxexp - 2
+ i = maxexp + minexp
+ if ibeta == 2 and not i:
+ maxexp = maxexp - 1
+ if i > 20:
+ maxexp = maxexp - 1
+ if any(a != y):
+ maxexp = maxexp - 2
+ xmax = one - epsneg
+ if any(xmax*one != xmax):
+ xmax = one - beta*epsneg
+ xmax = xmax / (xmin*beta*beta*beta)
+ i = maxexp + minexp + 3
+ for j in range(i):
+ if ibeta==2:
+ xmax = xmax + xmax
+ else:
+ xmax = xmax * beta
+
+ self.ibeta = ibeta
+ self.it = it
+ self.negep = negep
+ self.epsneg = float_to_float(epsneg)
+ self._str_epsneg = float_to_str(epsneg)
+ self.machep = machep
+ self.eps = float_to_float(eps)
+ self._str_eps = float_to_str(eps)
+ self.ngrd = ngrd
+ self.iexp = iexp
+ self.minexp = minexp
+ self.xmin = float_to_float(xmin)
+ self._str_xmin = float_to_str(xmin)
+ self.maxexp = maxexp
+ self.xmax = float_to_float(xmax)
+ self._str_xmax = float_to_str(xmax)
+ self.irnd = irnd
+
+ self.title = title
+ # Commonly used parameters
+ self.epsilon = self.eps
+ self.tiny = self.xmin
+ self.huge = self.xmax
+
+ import math
+ self.precision = int(-math.log10(float_to_float(self.eps)))
+ ten = two + two + two + two + two
+ resolution = ten ** (-self.precision)
+ self.resolution = float_to_float(resolution)
+ self._str_resolution = float_to_str(resolution)
+
+ def __str__(self):
+ return '''\
+Machine parameters for %(title)s
+---------------------------------------------------------------------
+ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s
+machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)
+negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)
+minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)
+maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)
+---------------------------------------------------------------------
+''' % self.__dict__
+
+
+if __name__ == '__main__':
+ print MachAr()
diff --git a/numpy/base/matrix.py b/numpy/base/matrix.py
new file mode 100644
index 000000000..1c7ca7cca
--- /dev/null
+++ b/numpy/base/matrix.py
@@ -0,0 +1,273 @@
+
+__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
+
+import numeric as N
+from numeric import ArrayType, concatenate, integer, multiply, power
+from type_check import isscalar
+from function_base import binary_repr
+import types
+import string as str_
+import sys
+
+# make translation table
+_table = [None]*256
+for k in range(256):
+ _table[k] = chr(k)
+_table = ''.join(_table)
+
+_numchars = str_.digits + ".-+jeEL"
+del str_
+_todelete = []
+for k in _table:
+ if k not in _numchars:
+ _todelete.append(k)
+_todelete = ''.join(_todelete)
+del k
+
+def _eval(astr):
+ return eval(astr.translate(_table,_todelete))
+
+def _convert_from_string(data):
+ rows = data.split(';')
+ newdata = []
+ count = 0
+ for row in rows:
+ trow = row.split(',')
+ newrow = []
+ for col in trow:
+ temp = col.split()
+ newrow.extend(map(_eval,temp))
+ if count == 0:
+ Ncols = len(newrow)
+ elif len(newrow) != Ncols:
+ raise ValueError, "Rows not the same size."
+ count += 1
+ newdata.append(newrow)
+ return newdata
+
+def asmatrix(data, dtype=None):
+ """ Returns 'data' as a matrix. Unlike matrix(), no copy is performed
+ if 'data' is already a matrix or array. Equivalent to:
+ matrix(data, copy=False)
+ """
+ return matrix(data, dtype=dtype, copy=False)
+
+class matrix(N.ndarray):
+ __array_priority__ = 10.0
+ def __new__(subtype, data, dtype=None, copy=True):
+ if isinstance(data, matrix):
+ dtype2 = data.dtype
+ if (dtype is None):
+ dtype = dtype2
+ if (dtype2 is dtype) and (not copy):
+ return data
+ return data.astype(dtype)
+
+ if dtype is None:
+ if isinstance(data, N.ndarray):
+ dtype = data.dtype
+ intype = N.obj2dtype(dtype)
+
+ if isinstance(data, types.StringType):
+ data = _convert_from_string(data)
+
+ # now convert data to an array
+ arr = N.array(data, dtype=intype, copy=copy)
+ ndim = arr.ndim
+ shape = arr.shape
+ if (ndim > 2):
+ raise ValueError, "matrix must be 2-dimensional"
+ elif ndim == 0:
+ shape = (1,1)
+ elif ndim == 1:
+ shape = (1,shape[0])
+
+ fortran = False
+ if (ndim == 2) and arr.flags.fortran:
+ fortran = True
+
+ if not (fortran or arr.flags.contiguous):
+ arr = arr.copy()
+
+ ret = N.ndarray.__new__(subtype, shape, arr.dtypedescr,
+ buffer=arr,
+ fortran=fortran)
+ return ret
+
+ def __array_finalize__(self, obj):
+ ndim = self.ndim
+ if ndim == 0:
+ self.shape = (1,1)
+ elif ndim == 1:
+ self.shape = (1,self.shape[0])
+ return
+
+ def __getitem__(self, index):
+ out = N.ndarray.__getitem__(self, index)
+ # Need to swap if slice is on first index
+ retscal = False
+ try:
+ n = len(index)
+ if (n==2):
+ if isinstance(index[0], types.SliceType):
+ if (isscalar(index[1])):
+ sh = out.shape
+ out.shape = (sh[1], sh[0])
+ else:
+ if (isscalar(index[0])) and (isscalar(index[1])):
+ retscal = True
+ except TypeError:
+ pass
+ if retscal and out.shape == (1,1): # convert scalars
+ return out.A[0,0]
+ return out
+
+ def __mul__(self, other):
+ if isinstance(other, N.ndarray) and other.ndim == 0:
+ return N.multiply(self, other)
+ else:
+ return N.dot(self, other)
+
+ def __rmul__(self, other):
+ if isinstance(other, N.ndarray) and other.ndim == 0:
+ return N.multiply(other, self)
+ else:
+ return N.dot(other, self)
+
+ def __imul__(self, other):
+ self[:] = self * other
+ return self
+
+ def __pow__(self, other):
+ shape = self.shape
+ if len(shape) != 2 or shape[0] != shape[1]:
+ raise TypeError, "matrix is not square"
+ if type(other) in (type(1), type(1L)):
+ if other==0:
+ return matrix(N.identity(shape[0]))
+ if other<0:
+ x = self.I
+ other=-other
+ else:
+ x=self
+ result = x
+ if other <= 3:
+ while(other>1):
+ result=result*x
+ other=other-1
+ return result
+ # binary decomposition to reduce the number of Matrix
+ # Multiplies for other > 3.
+ beta = binary_repr(other)
+ t = len(beta)
+ Z,q = x.copy(),0
+ while beta[t-q-1] == '0':
+ Z *= Z
+ q += 1
+ result = Z.copy()
+ for k in range(q+1,t):
+ Z *= Z
+ if beta[t-k-1] == '1':
+ result *= Z
+ return result
+ else:
+ raise TypeError, "exponent must be an integer"
+
+ def __rpow__(self, other):
+ raise NotImplementedError
+
+ def __repr__(self):
+ return repr(self.__array__()).replace('array','matrix')
+
+ def __str__(self):
+ return str(self.__array__())
+
+ # Needed because tolist method expects a[i]
+ # to have dimension a.ndim-1
+ def tolist(self):
+ return self.__array__().tolist()
+
+ def getA(self):
+ return self.__array__()
+
+ def getT(self):
+ return self.transpose()
+
+ def getH(self):
+ if issubclass(self.dtype, N.complexfloating):
+ return self.transpose().conjugate()
+ else:
+ return self.transpose()
+
+ def getI(self):
+ from scipy.corelinalg import inv
+ return matrix(inv(self))
+
+ A = property(getA, None, doc="base array")
+ T = property(getT, None, doc="transpose")
+ H = property(getH, None, doc="hermitian (conjugate) transpose")
+ I = property(getI, None, doc="inverse")
+
+
+def _from_string(str,gdict,ldict):
+ rows = str.split(';')
+ rowtup = []
+ for row in rows:
+ trow = row.split(',')
+ newrow = []
+ for x in trow:
+ newrow.extend(x.split())
+ trow = newrow
+ coltup = []
+ for col in trow:
+ col = col.strip()
+ try:
+ thismat = ldict[col]
+ except KeyError:
+ try:
+ thismat = gdict[col]
+ except KeyError:
+ raise KeyError, "%s not found" % (col,)
+
+ coltup.append(thismat)
+ rowtup.append(concatenate(coltup,axis=-1))
+ return concatenate(rowtup,axis=0)
+
+
+def bmat(obj,ldict=None, gdict=None):
+ """Build a matrix object from string, nested sequence, or array.
+
+ Ex: F = bmat('A, B; C, D')
+ F = bmat([[A,B],[C,D]])
+ F = bmat(r_[c_[A,B],c_[C,D]])
+
+ all produce the same Matrix Object [ A B ]
+ [ C D ]
+
+ if A, B, C, and D are appropriately shaped 2-d arrays.
+ """
+ if isinstance(obj, types.StringType):
+ if gdict is None:
+ # get previous frame
+ frame = sys._getframe().f_back
+ glob_dict = frame.f_globals
+ loc_dict = frame.f_locals
+ else:
+ glob_dict = gdict
+ loc_dict = ldict
+
+ return matrix(_from_string(obj, glob_dict, loc_dict))
+
+ if isinstance(obj, (types.TupleType, types.ListType)):
+ # [[A,B],[C,D]]
+ arr_rows = []
+ for row in obj:
+ if isinstance(row, ArrayType): # not 2-d
+ return matrix(concatenate(obj,axis=-1))
+ else:
+ arr_rows.append(concatenate(row,axis=-1))
+ return matrix(concatenate(arr_rows,axis=0))
+ if isinstance(obj, ArrayType):
+ return matrix(obj)
+
+mat = matrix
diff --git a/numpy/base/memmap.py b/numpy/base/memmap.py
new file mode 100644
index 000000000..8a791e5aa
--- /dev/null
+++ b/numpy/base/memmap.py
@@ -0,0 +1,88 @@
+__all__ = ['memmap']
+
+import mmap
+from numeric import uint8, ndarray, dtypedescr
+from numerictypes import nbytes
+
+valid_filemodes = ["r", "c", "r+", "w+"]
+writeable_filemodes = ["r+","w+"]
+
+mode_equivalents = {
+ "readonly":"r",
+ "copyonwrite":"c",
+ "readwrite":"r+",
+ "write":"w+"
+ }
+
+
+class memmap(ndarray):
+ def __new__(subtype, name, dtype=uint8, mode='r+', offset=0,
+ shape=None, fortran=0):
+
+ try:
+ mode = mode_equivalents[mode]
+ except KeyError:
+ if mode not in valid_filemodes:
+ raise ValueError("mode must be one of %s" % \
+ (valid_filemodes + mode_equivalents.keys()))
+
+ fid = file(name, (mode == 'c' and 'r' or mode)+'b')
+
+ if (mode == 'w+') and shape is None:
+ raise ValueError, "shape must be given"
+
+ fid.seek(0,2)
+ flen = fid.tell()
+ descr = dtypedescr(dtype)
+ _dbytes = descr.itemsize
+
+ if shape is None:
+ bytes = flen-offset
+ if (bytes % _dbytes):
+ fid.close()
+ raise ValueError, "Size of available data is not a "\
+ "multiple of data-type size."
+ size = bytes // _dbytes
+ shape = (size,)
+ else:
+ if not isinstance(shape, tuple):
+ shape = (shape,)
+ size = 1
+ for k in shape:
+ size *= k
+
+ bytes = offset + size*_dbytes
+
+ if mode == 'w+' or (mode == 'r+' and flen < bytes):
+ fid.seek(bytes-1,0)
+ fid.write(chr(0))
+ fid.flush()
+
+ if mode == 'c':
+ acc = mmap.ACCESS_COPY
+ elif mode == 'r':
+ acc = mmap.ACCESS_READ
+ else:
+ acc = mmap.ACCESS_WRITE
+
+ mm = mmap.mmap(fid.fileno(), bytes, access=acc)
+
+ self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
+ offset=offset, fortran=fortran)
+ self._mmap = mm
+ self._offset = offset
+ self._mode = mode
+ self._size = size
+ self._name = name
+
+ fid.close()
+ return self
+
+ def sync(self):
+ self._mmap.flush()
+
+ def __del__(self):
+ self._mmap.flush()
+ del self._mmap
+
+
diff --git a/numpy/base/mlab.py b/numpy/base/mlab.py
new file mode 100644
index 000000000..749600d9b
--- /dev/null
+++ b/numpy/base/mlab.py
@@ -0,0 +1,14 @@
+# This module is for compatibility only. All functions are defined elsewhere.
+
+from numeric import *
+
+from twodim_base import eye, tri, diag, fliplr, flipud, rot90, tril, triu
+from oldnumeric import amax as max
+from oldnumeric import amin as min
+from function_base import msort, median, trapz, diff, cov, corrcoef, kaiser, blackman, \
+ bartlett, hanning, hamming, sinc, angle
+from oldnumeric import cumsum, ptp, mean, std, prod, cumprod, squeeze
+from polynomial import roots
+
+from scipy.random import rand, randn
+from scipy.corelinalg import eig, svd
diff --git a/numpy/base/numeric.py b/numpy/base/numeric.py
new file mode 100644
index 000000000..03a2e520f
--- /dev/null
+++ b/numpy/base/numeric.py
@@ -0,0 +1,428 @@
+__all__ = ['newaxis', 'ndarray', 'bigndarray', 'flatiter', 'ufunc',
+ 'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtypedescr',
+ 'fromstring', 'fromfile', 'frombuffer','newbuffer','getbuffer',
+ 'where', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+ 'register_dtype', 'set_numeric_ops', 'can_cast',
+ 'asarray', 'asanyarray', 'isfortran', 'zeros_like', 'empty_like',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
+ 'alterdot', 'restoredot', 'cross',
+ 'array2string', 'get_printoptions', 'set_printoptions',
+ 'array_repr', 'array_str', 'set_string_function',
+ 'little_endian',
+ 'indices', 'fromfunction',
+ 'load', 'loads',
+ 'ones', 'identity', 'allclose',
+ 'seterr', 'geterr', 'setbufsize', 'getbufsize',
+ 'seterrcall', 'geterrcall',
+ 'Inf', 'inf', 'infty', 'Infinity',
+ 'nan', 'NaN']
+
+import sys
+import multiarray
+import umath
+from umath import *
+import numerictypes
+from numerictypes import *
+
+def extend_all(module):
+ adict = {}
+ for a in __all__:
+ adict[a] = 1
+ try:
+ mall = getattr(module, '__all__')
+ except AttributeError:
+ mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
+ for a in mall:
+ if a not in adict:
+ __all__.append(a)
+
+extend_all(umath)
+extend_all(numerictypes)
+
+newaxis = None
+
+ndarray = multiarray.ndarray
+bigndarray = multiarray.bigndarray
+flatiter = multiarray.flatiter
+broadcast = multiarray.broadcast
+dtypedescr=multiarray.dtypedescr
+ufunc = type(sin)
+
+arange = multiarray.arange
+array = multiarray.array
+zeros = multiarray.zeros
+empty = multiarray.empty
+fromstring = multiarray.fromstring
+fromfile = multiarray.fromfile
+frombuffer = multiarray.frombuffer
+newbuffer = multiarray.newbuffer
+getbuffer = multiarray.getbuffer
+where = multiarray.where
+concatenate = multiarray.concatenate
+fastCopyAndTranspose = multiarray._fastCopyAndTranspose
+register_dtype = multiarray.register_dtype
+set_numeric_ops = multiarray.set_numeric_ops
+can_cast = multiarray.can_cast
+lexsort = multiarray.lexsort
+
+
+def asarray(a, dtype=None, fortran=False):
+ """returns a as an array. Unlike array(),
+ no copy is performed if a is already an array. Subclasses are converted
+ to base class ndarray.
+ """
+ return array(a, dtype, copy=False, fortran=fortran)
+
+def asanyarray(a, dtype=None, copy=False, fortran=False):
+ """will pass subclasses through...
+ """
+ return array(a, dtype, copy=False, fortran=fortran, subok=1)
+
+def isfortran(a):
+ return a.flags['FNC']
+
+# from Fernando Perez's IPython
+def zeros_like(a):
+ """Return an array of zeros of the shape and typecode of a.
+
+ If you don't explicitly need the array to be zeroed, you should instead
+ use empty_like(), which is faster as it only allocates memory."""
+ a = asanyarray(a)
+ return a.__array_wrap__(zeros(a.shape, a.dtype, a.flags['FNC']))
+
+def empty_like(a):
+ """Return an empty (uninitialized) array of the shape and typecode of a.
+
+ Note that this does NOT initialize the returned array. If you require
+ your array to be initialized, you should use zeros_like().
+
+ """
+ a = asanyarray(a)
+ return a.__array_wrap__(empty(a.shape, a.dtype, a.flags['FNC']))
+
+# end Fernando's utilities
+
+_mode_from_name_dict = {'v': 0,
+ 's' : 1,
+ 'f' : 2}
+
+def _mode_from_name(mode):
+ if isinstance(mode, type("")):
+ return _mode_from_name_dict[mode.lower()[0]]
+ return mode
+
+def correlate(a,v,mode='valid'):
+ mode = _mode_from_name(mode)
+ return multiarray.correlate(a,v,mode)
+
+
+def convolve(a,v,mode='full'):
+ """Returns the discrete, linear convolution of 1-D
+ sequences a and v; mode can be 0 (valid), 1 (same), or 2 (full)
+ to specify size of the resulting sequence.
+ """
+ if (len(v) > len(a)):
+ a, v = v, a
+ mode = _mode_from_name(mode)
+ return multiarray.correlate(a,asarray(v)[::-1],mode)
+
+
+inner = multiarray.inner
+dot = multiarray.dot
+
+def outer(a,b):
+ """outer(a,b) returns the outer product of two vectors.
+ result(i,j) = a(i)*b(j) when a and b are vectors
+ Will accept any arguments that can be made into vectors.
+ """
+ a = asarray(a)
+ b = asarray(b)
+ return a.ravel()[:,newaxis]*b.ravel()[newaxis,:]
+
+def vdot(a, b):
+ """Returns the dot product of 2 vectors (or anything that can be made into
+ a vector). NB: this is not the same as `dot`, as it takes the conjugate
+ of its first argument if complex and always returns a scalar."""
+ return dot(asarray(a).ravel().conj(), asarray(b).ravel())
+
+# try to import blas optimized dot if available
+try:
+ # importing this changes the dot function for basic 4 types
+ # to blas-optimized versions.
+ from _dotblas import dot, vdot, inner, alterdot, restoredot
+except ImportError:
+ def alterdot():
+ pass
+ def restoredot():
+ pass
+
+
+def _move_axis_to_0(a, axis):
+ if axis == 0:
+ return a
+ n = a.ndim
+ if axis < 0:
+ axis += n
+ axes = range(1, axis+1) + [0,] + range(axis+1, n)
+ return a.transpose(axes)
+
+def cross(a, b, axisa=-1, axisb=-1, axisc=-1):
+ """Return the cross product of two (arrays of) vectors.
+
+ The cross product is performed over the last axis of a and b by default,
+ and can handle axes with dimensions 2 and 3. For a dimension of 2,
+ the z-component of the equivalent three-dimensional cross product is
+ returned.
+ """
+ a = _move_axis_to_0(asarray(a), axisa)
+ b = _move_axis_to_0(asarray(b), axisb)
+ msg = "incompatible dimensions for cross product\n"\
+ "(dimension must be 2 or 3)"
+ if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
+ raise ValueError(msg)
+ if a.shape[0] == 2:
+ if (b.shape[0] == 2):
+ cp = a[0]*b[1] - a[1]*b[0]
+ if cp.ndim == 0:
+ return cp
+ else:
+ return cp.swapaxes(0,axisc)
+ else:
+ x = a[1]*b[2]
+ y = -a[0]*b[2]
+ z = a[0]*b[1] - a[1]*b[0]
+ elif a.shape[0] == 3:
+ if (b.shape[0] == 3):
+ x = a[1]*b[2] - a[2]*b[1]
+ y = a[2]*b[0] - a[0]*b[2]
+ z = a[0]*b[1] - a[1]*b[0]
+ else:
+ x = -a[2]*b[1]
+ y = a[2]*b[0]
+ z = a[0]*b[1] - a[1]*b[0]
+ cp = array([x,y,z])
+ if cp.ndim == 1:
+ return cp
+ else:
+ return cp.swapaxes(0,axisc)
+
+
+#Use numarray's printing function
+from arrayprint import array2string, get_printoptions, set_printoptions
+
+_typelessdata = [int_, float_, complex_]
+if issubclass(intc, int):
+ _typelessdata.append(intc)
+
+if issubclass(longlong, int):
+ _typelessdata.append(longlong)
+
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """Return an evaluable string representation of arr.

    Installed as the repr hook via set_string_function below.
    """
    if arr.size > 0 or arr.shape==(0,):
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', "array(")
    else: # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(arr.shape),)
    # Types in _typelessdata can be reconstructed from the data alone,
    # so the dtype= argument can be omitted from the repr.
    typeless = arr.dtype in _typelessdata

    if arr.__class__ is not ndarray:
        # Subclasses print under their own class name.
        cName= arr.__class__.__name__
    else:
        cName = "array"
    if typeless and arr.size:
        return cName + "(%s)" % lst
    else:
        # NOTE(review): assumes the scalar type's __name__ carries an
        # 8-character suffix that is stripped here (same scheme as
        # bitname in numerictypes) -- confirm against scalartypes.
        typename=arr.dtype.__name__[:-8]
        if issubclass(arr.dtype, flexible):
            if typename not in ['unicode','string','void']:
                typename = arr.dtype.__name__
            # Flexible types also report their itemsize.
            typename = "(%s,%d)" % (typename, arr.itemsize)
        return cName + "(%s, dtype=%s)" % (lst, typename)
+
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """Return the str() form of an array: space-separated, no 'array(' wrapper.

    Installed as the str hook via set_string_function below.
    """
    return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+set_string_function = multiarray.set_string_function
+set_string_function(array_str, 0)
+set_string_function(array_repr, 1)
+
+
+little_endian = (sys.byteorder == 'little')
+
def indices(dimensions, dtype=int_):
    """indices(dimensions,dtype=int_) returns an array representing a grid
    of indices with row-only and column-only variation.
    """
    # add.accumulate over axis i of an all-ones array yields 1..n along
    # that axis; subtracting 1 gives the 0-based index grid for axis i.
    tmp = ones(dimensions, dtype)
    lst = []
    for i in range(len(dimensions)):
        lst.append( add.accumulate(tmp, i, )-1 )
    return array(lst)
+
def fromfunction(function, dimensions, **kwargs):
    """fromfunction(function, dimensions) returns an array constructed by
    calling function on a tuple of number grids. The function should
    accept as many arguments as there are dimensions which is a list of
    numbers indicating the length of the desired output for each axis.

    The function can also accept keyword arguments which will be
    passed in as well.
    """
    # Each element of args is one axis's index grid (see indices()); the
    # function is applied elementwise over those grids.
    args = indices(dimensions)
    return function(*args,**kwargs)
+
+
+from cPickle import load, loads
+_cload = load
+_file = file
+
def load(file):
    """Unpickle an object from the given open file object, or from the
    file named by a string argument (opened in binary mode)."""
    if isinstance(file, type("")):
        # A string was given: treat it as a filename.
        file = _file(file,"rb")
    return _cload(file)
+
+# These are all essentially abbreviations
+# These might wind up in a special abbreviations module
+
def ones(shape, dtype=int_, fortran=False):
    """ones(shape, dtype=int_) returns an array of the given
    dimensions which is initialized to all ones.
    """
    # zeros + in-place increment was measured faster than empty + fill.
    # This appears to be slower...
    #a = empty(shape, dtype, fortran)
    #a.fill(1)
    a = zeros(shape, dtype, fortran)
    a+=1
    return a
+
def identity(n,dtype=int_):
    """identity(n) returns the identity matrix of shape n x n.
    """
    # The pattern [1, 0, ..., 0] has length n+1; assigning it to the n*n
    # flat iterator repeats it, which places the 1s exactly on the diagonal.
    a = array([1]+n*[0],dtype=dtype)
    b = empty((n,n),dtype=dtype)
    b.flat = a
    return b
+
def allclose (a, b, rtol=1.e-5, atol=1.e-8):
    """ allclose(a,b,rtol=1.e-5,atol=1.e-8)
    Returns true if all components of a and b are equal
    subject to given tolerances.
    The relative error rtol must be positive and << 1.0
    The absolute error atol comes into play for those elements
    of y that are very small or zero; it says how small x must be also.
    """
    x = array(a, copy=False)
    y = array(b, copy=False)
    # Elementwise test |x - y| < atol + rtol*|y| (strict inequality).
    d = less(absolute(x-y), atol + rtol * absolute(y))
    return d.ravel().all()
+
def _setpyvals(lst, frame, where=0):
    """Install a ufunc control list [bufsize, errmask, callback] into the
    local, global or builtin scope of the given frame.

    where may be 0 or a string starting with 'l' (locals), 1 or 'g'
    (globals), 2 or 'b' (builtins).
    """
    if not isinstance(lst, list) or len(lst) != 3:
        raise ValueError, "Invalid pyvalues (length 3 list needed)."

    try:
        # Accept string forms like 'local' / 'global' / 'builtin'.
        wh = where.lower()[0]
    except (AttributeError, TypeError, IndexError):
        wh = None

    if where==0 or wh == 'l':
        frame.f_locals[UFUNC_PYVALS_NAME] = lst
    elif where == 1 or wh == 'g':
        frame.f_globals[UFUNC_PYVALS_NAME] = lst
    elif where == 2 or wh == 'b':
        frame.f_builtins[UFUNC_PYVALS_NAME] = lst

    # Let the ufunc machinery re-check whether defaults are in effect.
    umath.update_use_defaults()
    return
+
def _getpyvals(frame):
    """Return the ufunc control list active for frame, searching locals,
    then globals, then builtins, and falling back to the defaults."""
    try:
        return frame.f_locals[UFUNC_PYVALS_NAME]
    except KeyError:
        try:
            return frame.f_globals[UFUNC_PYVALS_NAME]
        except KeyError:
            try:
                return frame.f_builtins[UFUNC_PYVALS_NAME]
            except KeyError:
                # Nothing installed anywhere: defaults.
                return [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
+
+_errdict = {"ignore":ERR_IGNORE,
+ "warn":ERR_WARN,
+ "raise":ERR_RAISE,
+ "call":ERR_CALL}
+
+_errdict_rev = {}
+for key in _errdict.keys():
+ _errdict_rev[_errdict[key]] = key
+del key
+
def seterr(divide="ignore", over="ignore", under="ignore",
           invalid="ignore", where=0):
    """Set the handling mode ('ignore', 'warn', 'raise' or 'call') for
    each floating-point error condition in the caller's scope."""
    # Pack the four modes into one mask, two bits per condition.
    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
                 (_errdict[over] << SHIFT_OVERFLOW ) +
                 (_errdict[under] << SHIFT_UNDERFLOW) +
                 (_errdict[invalid] << SHIFT_INVALID))

    # Operate on the caller's frame so the setting is scoped to the caller.
    frame = sys._getframe().f_back
    pyvals = _getpyvals(frame)
    pyvals[1] = maskvalue
    _setpyvals(pyvals, frame, where)
+
def geterr():
    """Return a dict giving the current handling mode for 'divide',
    'over', 'under' and 'invalid' in the caller's scope."""
    frame = sys._getframe().f_back
    maskvalue = _getpyvals(frame)[1]

    mask = 3  # each condition occupies two bits of the mask
    res = {}
    val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
    res['divide'] = _errdict_rev[val]
    val = (maskvalue >> SHIFT_OVERFLOW) & mask
    res['over'] = _errdict_rev[val]
    val = (maskvalue >> SHIFT_UNDERFLOW) & mask
    res['under'] = _errdict_rev[val]
    val = (maskvalue >> SHIFT_INVALID) & mask
    res['invalid'] = _errdict_rev[val]
    return res
+
def setbufsize(size, where=0):
    """Set the ufunc buffer size for the caller's scope."""
    if size > 10e6:
        raise ValueError, "Very big buffers.. %s" % size

    frame = sys._getframe().f_back
    pyvals = _getpyvals(frame)
    pyvals[0] = size
    _setpyvals(pyvals, frame, where)

def getbufsize():
    """Return the ufunc buffer size active in the caller's scope."""
    frame = sys._getframe().f_back
    return _getpyvals(frame)[0]

def seterrcall(func, where=0):
    """Register func as the callback invoked for the 'call' error mode."""
    if not callable(func):
        raise ValueError, "Only callable can be used as callback"
    frame = sys._getframe().f_back
    pyvals = _getpyvals(frame)
    pyvals[2] = func
    _setpyvals(pyvals, frame, where)

def geterrcall():
    """Return the error callback active in the caller's scope (or None)."""
    frame = sys._getframe().f_back
    return _getpyvals(frame)[2]
+
def _setdef():
    """Install the default ufunc control values into this module's globals
    and into builtins, so _getpyvals lookups always find a value."""
    frame = sys._getframe()
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
    frame.f_globals[UFUNC_PYVALS_NAME] = defval
    frame.f_builtins[UFUNC_PYVALS_NAME] = defval
    umath.update_use_defaults()
+
+# set the default values
+_setdef()
+
+Inf = inf = infty = Infinity = PINF
+nan = NaN = NAN
+
+import oldnumeric
+from oldnumeric import *
+extend_all(oldnumeric)
diff --git a/numpy/base/numerictypes.py b/numpy/base/numerictypes.py
new file mode 100644
index 000000000..81d80c73f
--- /dev/null
+++ b/numpy/base/numerictypes.py
@@ -0,0 +1,382 @@
+# Borrowed and adapted from numarray
+
+"""numerictypes: Define the numeric type objects
+
+This module is designed so 'from numerictypes import *' is safe.
+Exported symbols include:
+
+ Dictionary with all registered number types (including aliases):
+ typeDict
+
+ Type objects (not all will be available, depends on platform):
+ see variable arraytypes for which ones you have
+
+ Bit-width names
+
+ int8 int16 int32 int64 int128
+ uint8 uint16 uint32 uint64 uint128
+ float16 float32 float64 float96 float128 float256
+ complex32 complex64 complex128 complex192 complex256 complex512
+
+ c-based names
+
+ bool_
+
+ object_
+
+ void, str_, unicode_
+
+ byte, ubyte,
+ short, ushort
+ intc, uintc,
+ intp, uintp,
+ int_, uint,
+ longlong, ulonglong,
+
+ single, csingle,
+ float_, complex_,
+ longfloat, clongfloat,
+
+ As part of the type-hierarchy: xx -- is bit-width
+
+ generic
+ bool_
+ numeric
+ integer
+ signedinteger (intxx)
+ byte
+ short
+ intc
+ intp int0
+ int_
+ longlong
+ unsignedinteger (uintxx)
+ ubyte
+ ushort
+ uintc
+ uintp uint0
+ uint_
+ ulonglong
+ floating (floatxx)
+ single
+ float_ (double)
+ longfloat
+ complexfloating (complexxx)
+ csingle
+ complex_ (cfloat, cdouble)
+ clongfloat
+
+ flexible
+ character
+ str_ (string)
+ unicode_
+ void
+
+ object_
+
+$Id: numerictypes.py,v 1.17 2005/09/09 22:20:06 teoliphant Exp $
+"""
+
+# we add more at the bottom
+__all__ = ['typeDict', 'typeNA', 'arraytypes', 'ScalarType', 'obj2dtype', 'cast', 'nbytes', 'dtype2char']
+
+from multiarray import typeinfo, ndarray, array, empty
+import types as _types
+
+# we don't export these for import *, but we do want them accessible
+# as numerictypes.bool, etc.
+from __builtin__ import bool, int, long, float, complex, object, unicode, str
+
+typeDict = {} # Contains all leaf-node numeric types with aliases
+typeNA = {} # Contains all leaf-node types -> numarray type equivalences
+allTypes = {} # Collect the types we will add to the module here
+
+def _evalname(name):
+ k = 0
+ for ch in name:
+ if ch in '0123456789':
+ break
+ k += 1
+ try:
+ bits = int(name[k:])
+ except ValueError:
+ bits = 0
+ base = name[:k]
+ return base, bits
+
def bitname(obj):
    """Return a bit-width name for a given type object

    Returns (base, bits, char): the generic kind name (e.g. 'int'), the
    width in bits (0 when not applicable), and a size-qualified type
    character (e.g. 'i4'), or '' when none applies.
    """
    # NOTE(review): assumes the scalar type's __name__ carries an
    # 8-character suffix that is stripped here -- confirm against the
    # scalartypes naming scheme.
    name = obj.__name__[:-8]
    base = ''
    char = ''
    try:
        info = typeinfo[name.upper()]
        assert(info[-1] == obj) # sanity check
        bits = info[2]

    except KeyError: # bit-width name
        base, bits = _evalname(name)
        char = base[0]

    # Special-case the non-numeric kinds and fix up their type characters.
    if name == 'bool':
        char = 'b'
        base = 'bool'
    elif name=='string':
        char = 'S'
        base = 'string'
    elif name=='unicode':
        char = 'U'
        base = 'unicode'
    elif name=='void':
        char = 'V'
        base = 'void'
    elif name=='object':
        char = 'O'
        base = 'object'
        bits = 0

    bytes = bits / 8

    if char != '' and bytes != 0:
        # Qualify the type character with the size in bytes (e.g. 'i4').
        char = "%s%d" % (char, bytes)

    return base, bits, char
+
+revdict = {}
+
def _add_types():
    """Populate allTypes/typeDict/typeNA/revdict from multiarray's
    typeinfo table.

    Each concrete entry registers the type object under its C name, its
    type number, its type character, its bit-width name (e.g. 'int32')
    and its numarray-style capitalized name (e.g. 'Int32').  Generic
    (abstract) entries are recorded in allTypes only.
    """
    for a in typeinfo.keys():
        name = a.lower()
        if isinstance(typeinfo[a], type(())):
            typeobj = typeinfo[a][-1]

            # define C-name and insert typenum and typechar references also
            allTypes[name] = typeobj
            typeDict[name] = typeobj
            typeDict[typeinfo[a][0]] = typeobj
            typeDict[typeinfo[a][1]] = typeobj

            # insert bit-width version for this class (if relevant)
            base, bit, char = bitname(typeobj)
            revdict[typeobj] = (typeinfo[a][:-1], (base, bit, char), a)
            if base != '':
                allTypes["%s%d" % (base, bit)] = typeobj
                typeDict["%s%d" % (base, bit)] = typeobj
                if base == 'uint':
                    tmpstr = 'UInt%d' % bit
                    typeDict[tmpstr] = typeobj
                    na_name = tmpstr
                elif base == 'complex':
                    # BUG FIX: this was assigned to 'na_num', leaving
                    # 'na_name' stale (or undefined) for complex types.
                    # numarray's complex names use half the total bit
                    # width (Complex64 == 128-bit complex).
                    # NOTE(review): unlike the sibling branches this one
                    # does not register na_name in typeDict -- confirm
                    # whether that omission is intentional.
                    na_name = '%s%d' % (base.capitalize(), bit/2)
                elif base == 'bool':
                    na_name = base.capitalize()
                    typeDict[na_name] = typeobj
                else:
                    na_name = "%s%d" % (base.capitalize(), bit)
                    typeDict[na_name] = typeobj
                typeNA[na_name] = typeobj
                typeNA[typeobj] = na_name
                typeNA[typeinfo[a][0]] = na_name
                if char != '':
                    typeDict[char] = typeobj
                    typeNA[char] = na_name
        else:  # generic class
            allTypes[name] = typeinfo[a]
+
+
+# We use these later
+void = allTypes['void']
+generic = allTypes['generic']
+
+#
+# Rework the Python names (so that float and complex and int are consistent
+# with Python usage)
+#
def _set_up_aliases():
    """Create the Python-style alias names (float_, int_, complex_, ...)
    and drop entries that would shadow the builtin Python types."""
    type_pairs = [('complex_', 'cdouble'),
                  ('int0', 'intp'),
                  ('uint0', 'uintp'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('unicode_', 'unicode'),
                  ('str_', 'string'),
                  ('object_', 'object')]
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
    # Remove aliases overriding python types
    for t in ['ulong', 'object', 'unicode', 'int', 'long', 'float',
              'complex', 'bool']:
        try:
            del allTypes[t]
        except KeyError:
            # Not present on this platform; nothing to remove.
            pass
+
+# Now, construct dictionary to lookup character codes from types
_dtype2char_dict = {}  # scalar type object -> single type character
def _construct_char_code_lookup():
    """Map each concrete scalar type object to its type character."""
    for name in typeinfo.keys():
        tup = typeinfo[name]
        if isinstance(tup, tuple):
            # 'p'/'P' are pointer-sized duplicates of other integer
            # codes; skip them so the canonical character wins.
            if tup[0] not in ['p','P']:
                _dtype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
+
+
# Kind name -> list of available sized types, ordered narrow to wide.
arraytypes = {'int': [],
              'uint':[],
              'float':[],
              'complex':[],
              'others':[bool,object,str,unicode,void]}

def _add_array_type(typename, bits):
    # Register the sized type (e.g. 'int32') under its kind, if this
    # platform actually provides it.
    try:
        t = allTypes['%s%d' % (typename, bits)]
    except KeyError:
        pass
    else:
        arraytypes[typename].append(t)

def _set_array_types():
    """Fill arraytypes with the sized types present on this platform."""
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8*bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8*bytes
        _add_array_type('float', bits)
        _add_array_type('complex', bits)
_set_array_types()
+
+genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
+ 'int32', 'uint32', 'int64', 'uint64', 'int128',
+ 'uint128', 'float16',
+ 'float32', 'float64', 'float80', 'float96', 'float128',
+ 'float256',
+ 'complex32', 'complex64', 'complex128', 'complex160',
+ 'complex192', 'complex256', 'complex512', 'object']
+
def maximum_dtype(t):
    """returns the type of highest precision of the same general kind as 't'"""
    g = obj2dtype(t)
    if g is None:
        # Not resolvable to a scalar type; return the input unchanged.
        return t
    t = g
    name = t.__name__[:-8]
    base, bits = _evalname(name)
    if bits == 0:
        # Not a sized type (e.g. object); there is nothing wider.
        return t
    else:
        # arraytypes lists are ordered narrow-to-wide; take the widest.
        return arraytypes[base][-1]
+
# Builtin Python (and buffer) types -> scalar-type alias names.
_python_types = {int : 'int_',
                 float: 'float_',
                 complex: 'complex_',
                 bool: 'bool_',
                 str: 'string',
                 unicode: 'unicode_',
                 _types.BufferType: 'void',
                 }
def _python_type(t):
    """returns the type corresponding to a certain Python type"""
    if not isinstance(t, _types.TypeType):
        # An instance was given; classify by its type.
        t = type(t)
    # Unknown Python types map to object_.
    return allTypes[_python_types.get(t, 'object_')]
+
def isdtype(rep):
    """Determines whether the given object represents
    a numeric array type."""
    # dtype2char raises for anything not resolvable to a known type.
    try:
        char = dtype2char(rep)
        return True
    except (KeyError, ValueError):
        return False
+
def obj2dtype(rep, default=None):
    """Coerce rep (a scalar type, Python type, ndarray, or any registered
    alias) to a scalar type object; return default if unresolvable."""
    try:
        if issubclass(rep, generic):
            # Already a scalar type object.
            return rep
    except TypeError:
        # rep was not a class at all; fall through to the other checks.
        pass
    if isinstance(rep, type):
        return _python_type(rep)
    if isinstance(rep, ndarray):
        return rep.dtype
    # Finally, try the alias dictionary (names, characters, numbers).
    res = typeDict.get(rep, default)
    return res
+
+
+# This dictionary allows look up based on any alias for a type
class _typedict(dict):
    """Dictionary keyed by scalar type that accepts any alias on lookup."""
    def __getitem__(self, obj):
        # Normalize the key through obj2dtype so any alias resolves.
        return dict.__getitem__(self, obj2dtype(obj))
+
nbytes = _typedict()      # scalar type -> size in bytes
_alignment = _typedict()  # scalar type -> required alignment
_maxvals = _typedict()    # scalar type -> maximum value (None if n/a)
_minvals = _typedict()    # scalar type -> minimum value (None if n/a)
def _construct_lookups():
    """Populate nbytes/_alignment/_maxvals/_minvals from typeinfo."""
    for name, val in typeinfo.iteritems():
        if not isinstance(val, tuple):
            # Skip abstract (generic) entries.
            continue
        obj = val[-1]
        nbytes[obj] = val[2] / 8  # val[2] is the size in bits
        _alignment[obj] = val[3]
        if (len(val) > 5):
            _maxvals[obj] = val[4]
            _minvals[obj] = val[5]
        else:
            # No range information available for this type.
            _maxvals[obj] = None
            _minvals[obj] = None

_construct_lookups()
+
def dtype2char(dtype):
    """Return the single type character for dtype (any alias accepted).

    Raises ValueError for unrecognized input, KeyError for types with
    no registered character.
    """
    dtype = obj2dtype(dtype)
    if dtype is None:
        raise ValueError, "unrecognized type"
    return _dtype2char_dict[dtype]
+
+# Create dictionary of casting functions that wrap sequences
+# indexed by type or type character
+
+
+cast = _typedict()
+ScalarType = [_types.IntType, _types.FloatType,
+ _types.ComplexType, _types.LongType, _types.BooleanType,
+ _types.StringType, _types.UnicodeType, _types.BufferType]
+ScalarType.extend(_dtype2char_dict.keys())
+ScalarType = tuple(ScalarType)
+for key in _dtype2char_dict.keys():
+ cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
+
+
+_unicodesize = array('u','U').itemsize
+
+# Create the typestring lookup dictionary
+_typestr = _typedict()
+for key in _dtype2char_dict.keys():
+ if issubclass(key, allTypes['flexible']):
+ _typestr[key] = _dtype2char_dict[key]
+ else:
+ _typestr[key] = empty((1,),key).dtypestr[1:]
+
+# Now add the types we've determined to this module
+for key in allTypes:
+ globals()[key] = allTypes[key]
+ __all__.append(key)
+
+del key
+
diff --git a/numpy/base/oldnumeric.py b/numpy/base/oldnumeric.py
new file mode 100644
index 000000000..9cf87218e
--- /dev/null
+++ b/numpy/base/oldnumeric.py
@@ -0,0 +1,432 @@
+# Compatibility module containing deprecated names
+
+__all__ = ['asarray', 'array', 'concatenate',
+ 'NewAxis',
+ 'UFuncType', 'UfuncType', 'ArrayType', 'arraytype',
+ 'LittleEndian', 'Bool',
+ 'Character', 'UnsignedInt8', 'UnsignedInt16', 'UnsignedInt',
+ 'UInt8','UInt16','UInt32',
+ # UnsignedInt64 and Unsigned128 added below if possible
+ # same for Int64 and Int128, Float128, and Complex128
+ 'Int8', 'Int16', 'Int32',
+ 'Int0', 'Int', 'Float0', 'Float', 'Complex0', 'Complex',
+ 'PyObject', 'Float32', 'Float64',
+ 'Complex32', 'Complex64',
+ 'typecodes', 'sarray', 'arrayrange', 'cross_correlate',
+ 'matrixmultiply', 'outerproduct', 'innerproduct',
+ # from cPickle
+ 'dump', 'dumps',
+ # functions that are now methods
+ 'take', 'reshape', 'choose', 'repeat', 'put', 'putmask',
+ 'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
+ 'searchsorted', 'alen',
+ 'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
+ 'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
+ 'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
+ 'rank', 'size', 'around', 'mean', 'std', 'var', 'squeeze', 'amax', 'amin'
+ ]
+
+import multiarray as mu
+import umath as um
+import numerictypes as nt
+from numeric import asarray, array, correlate, outer, concatenate
+import sys
+_dt_ = nt.dtype2char
+
+#Use this to add a new axis to an array
+#compatibility only
+NewAxis = None
+
+#deprecated
+UFuncType = type(um.sin)
+UfuncType = type(um.sin)
+ArrayType = mu.ndarray
+arraytype = mu.ndarray
+
+LittleEndian = (sys.byteorder == 'little')
+
+# backward compatible names from old Precision.py
+
+Character = 'S1'
+UnsignedInt8 = _dt_(nt.uint8)
+UInt8 = UnsignedInt8
+UnsignedInt16 = _dt_(nt.uint16)
+UInt16 = UnsignedInt16
+UnsignedInt32 = _dt_(nt.uint32)
+UInt32 = UnsignedInt32
+UnsignedInt = _dt_(nt.uint)
+
+try:
+ UnsignedInt64 = _dt_(nt.uint64)
+except AttributeError:
+ pass
+else:
+ UInt64 = UnsignedInt64
+ __all__ += ['UnsignedInt64', 'UInt64']
+try:
+ UnsignedInt128 = _dt_(nt.uint128)
+except AttributeError:
+ pass
+else:
+ UInt128 = UnsignedInt128
+ __all__ += ['UnsignedInt128','UInt128']
+
+Int8 = _dt_(nt.int8)
+Int16 = _dt_(nt.int16)
+Int32 = _dt_(nt.int32)
+
+try:
+ Int64 = _dt_(nt.int64)
+except AttributeError:
+ pass
+else:
+ __all__ += ['Int64']
+
+try:
+ Int128 = _dt_(nt.int128)
+except AttributeError:
+ pass
+else:
+ __all__ += ['Int128']
+
+Bool = _dt_(bool)
+Int0 = _dt_(int)
+Int = _dt_(int)
+Float0 = _dt_(float)
+Float = _dt_(float)
+Complex0 = _dt_(complex)
+Complex = _dt_(complex)
+PyObject = _dt_(nt.object_)
+Float32 = _dt_(nt.float32)
+Float64 = _dt_(nt.float64)
+
+try:
+ Float128 = _dt_(nt.float128)
+except AttributeError:
+ pass
+else:
+ __all__ += ['Float128']
+
+Complex32 = _dt_(nt.complex64)
+Complex64 = _dt_(nt.complex128)
+
+try:
+ Complex128 = _dt_(nt.complex256)
+except AttributeError:
+ pass
+else:
+ __all__ += ['Complex128']
+
+typecodes = {'Character':'S1',
+ 'Integer':'bhilqp',
+ 'UnsignedInteger':'BHILQP',
+ 'Float':'fdg',
+ 'Complex':'FDG',
+ 'AllInteger':'bBhHiIlLqQpP',
+ 'AllFloat':'fdgFDG',
+ 'All':'?bhilqpBHILQPfdgFDGSUVO'}
+
def sarray(a, dtype=None, copy=False):
    """Like array(), but with copying disabled by default."""
    return array(a, dtype, copy)
+
+# backward compatibility
+arrayrange = mu.arange
+cross_correlate = correlate
+
+# deprecated names
+matrixmultiply = mu.dot
+outerproduct = outer
+innerproduct = mu.inner
+
+from cPickle import dump, dumps
+
+# functions that are now methods
+
def take(a, indices, axis=0):
    """Take elements of a along the given axis at the given indices."""
    a = asarray(a)
    return a.take(indices, axis)

def reshape(a, newshape):
    """Change the shape of a to newshape. Return a new view object.
    """
    return asarray(a).reshape(newshape)

def choose(a, choices):
    """Use a as an index array to select elements from choices."""
    a = asarray(a)
    return a.choose(choices)

def repeat(a, repeats, axis=0):
    """repeat elements of a repeats times along axis
    repeats is a sequence of length a.shape[axis]
    telling how many times to repeat each element.
    If repeats is an integer, it is interpreted as
    a tuple of length a.shape[axis] containing repeats.
    The argument a can be anything array(a) will accept.
    """
    a = array(a, copy=False)
    return a.repeat(repeats, axis)

def put (a, ind, v):
    """put(a, ind, v) results in a[n] = v[n] for all n in ind
    If v is shorter than ind it will be repeated as necessary.
    In particular v can be a scalar or length 1 array.
    The routine put is the equivalent of the following (although the loop
    is in C for speed):

        ind = array(indices, copy=False)
        v = array(values, copy=False).astype(a, a.dtype)
        for i in ind: a.flat[i] = v[i]
    a must be a contiguous Numeric array.
    """
    # NOTE(review): the method takes (values, indices) -- argument order
    # is swapped relative to this function's signature; confirm against
    # the ndarray.put implementation.
    return a.put(v,ind)

def putmask (a, mask, v):
    """putmask(a, mask, v) results in a = v for all places mask is true.
    If v is shorter than mask it will be repeated as necessary.
    In particular v can be a scalar or length 1 array.
    """
    return a.putmask(v, mask)

def swapaxes(a, axis1, axis2):
    """swapaxes(a, axis1, axis2) returns array a with axis1 and axis2
    interchanged.
    """
    a = array(a, copy=False)
    return a.swapaxes(axis1, axis2)
+
def transpose(a, axes=None):
    """transpose(a, axes=None) returns array with dimensions permuted
    according to axes. If axes is None (default) returns array with
    dimensions reversed.
    """
    a = array(a,copy=False)
    return a.transpose(axes)

def sort(a, axis=-1):
    """sort(a,axis=-1) returns array with elements sorted along given axis.
    """
    # Sort a copy so the caller's array is left untouched (ndarray.sort
    # works in place).
    a = array(a, copy=True)
    a.sort(axis)
    return a

def argsort(a, axis=-1):
    """argsort(a,axis=-1) return the indices into a of the sorted array
    along the given axis, so that take(a,result,axis) is the sorted array.
    """
    a = array(a, copy=False)
    return a.argsort(axis)

def argmax(a, axis=-1):
    """argmax(a,axis=-1) returns the indices to the maximum value of the
    1-D arrays along the given axis.
    """
    a = array(a, copy=False)
    return a.argmax(axis)

def argmin(a, axis=-1):
    """argmin(a,axis=-1) returns the indices to the minimum value of the
    1-D arrays along the given axis.
    """
    a = array(a,copy=False)
    return a.argmin(axis)

def searchsorted(a, v):
    """searchsorted(a, v) returns the indices at which the values v could
    be inserted into the (assumed sorted) array a to keep it sorted.
    """
    a = array(a,copy=False)
    return a.searchsorted(v)
+
def resize(a, new_shape):
    """resize(a,new_shape) returns a new array with the specified shape.
    The original array's total size can be any size. It
    fills the new array with repeated copies of a.

    Note that a.resize(new_shape) will fill array with 0's
    beyond current definition of a.
    """

    a = ravel(a)
    Na = len(a)
    if not Na: return mu.zeros(new_shape, a.dtypechar)
    total_size = um.multiply.reduce(new_shape)
    n_copies = int(total_size / Na)
    extra = total_size % Na

    if extra != 0:
        # A partial copy is needed: round up, then trim the surplus below.
        n_copies = n_copies+1
        extra = Na-extra

    a = concatenate( (a,)*n_copies)
    if extra > 0:
        a = a[:-extra]

    return reshape(a, new_shape)
+
def squeeze(a):
    "Returns a with any ones from the shape of a removed"
    return asarray(a).squeeze()

def diagonal(a, offset=0, axis1=0, axis2=1):
    """diagonal(a, offset=0, axis1=0, axis2=1) returns the given diagonals
    defined by the last two dimensions of the array.
    """
    return asarray(a).diagonal(offset, axis1, axis2)

def trace(a, offset=0, axis1=0, axis2=1, dtype=None):
    """trace(a,offset=0, axis1=0, axis2=1) returns the sum along diagonals
    (defined by the last two dimensions) of the array.
    """
    return asarray(a).trace(offset, axis1, axis2, dtype)

def ravel(m):
    """ravel(m) returns a 1d array corresponding to all the elements of its
    argument.
    """
    return asarray(m).ravel()

def nonzero(a):
    """nonzero(a) returns the indices of the elements of a which are not zero,
    a must be 1d
    """
    return asarray(a).nonzero()

def shape(a):
    """shape(a) returns the shape of a (as a function call which
    also works on nested sequences).
    """
    return asarray(a).shape

def compress(condition, m, axis=-1):
    """compress(condition, x, axis=-1) = those elements of x corresponding
    to those elements of condition that are "true". condition must be the
    same size as the given dimension of x."""
    return asarray(m).compress(condition, axis)

def clip(m, m_min, m_max):
    """clip(m, m_min, m_max) = every entry in m that is less than m_min is
    replaced by m_min, and every entry greater than m_max is replaced by
    m_max.
    """
    return asarray(m).clip(m_min, m_max)
+
def sum(x, axis=0, dtype=None):
    """Sum the array over the given axis. The optional dtype argument
    is the data type for intermediate calculations.

    The default is to upcast (promote) smaller integer types to the
    platform-dependent Int. For example, on 32-bit platforms:

        x.dtype                         default sum() dtype
        ---------------------------------------------------
        bool, Int8, Int16, Int32        Int32

    Examples:
    >>> sum([0.5, 1.5])
    2.0
    >>> sum([0.5, 1.5], dtype=Int32)
    1
    >>> sum([[0, 1], [0, 5]])
    array([0, 6])
    >>> sum([[0, 1], [0, 5]], axis=1)
    array([1, 5])
    """
    return asarray(x).sum(axis, dtype)

def product (x, axis=0, dtype=None):
    """Product of the array elements over the given axis."""
    return asarray(x).prod(axis, dtype)

def sometrue (x, axis=0):
    """Perform a logical_or over the given axis."""
    return asarray(x).any(axis)

def alltrue (x, axis=0):
    """Perform a logical_and over the given axis."""
    return asarray(x).all(axis)

def any(x,axis=None):
    """Return true if any elements of x are true: sometrue(ravel(x))
    """
    return ravel(x).any(axis)

def all(x,axis=None):
    """Return true if all elements of x are true: alltrue(ravel(x))
    """
    return ravel(x).all(axis)

def cumsum (x, axis=0, dtype=None):
    """Cumulative sum of the array along the given axis."""
    return asarray(x).cumsum(axis, dtype)

def cumproduct (x, axis=0, dtype=None):
    """Cumulative product of the array along the given axis."""
    return asarray(x).cumprod(axis, dtype)
+
def ptp(a, axis=0):
    """Return maximum - minimum along the given dimension
    """
    return asarray(a).ptp(axis)

def amax(a, axis=0):
    """Return the maximum of 'a' along dimension axis.
    """
    return asarray(a).max(axis)

def amin(a, axis=0):
    """Return the minimum of a along dimension axis.
    """
    return asarray(a).min(axis)

def alen(a):
    """Return the length of a Python object interpreted as an array
    """
    return len(asarray(a))

def prod(a, axis=0):
    """Return the product of the elements along the given axis
    """
    return asarray(a).prod(axis)

def cumprod(a, axis=0):
    """Return the cumulative product of the elements along the given axis
    """
    return asarray(a).cumprod(axis)
+
def ndim(a):
    """Return the number of dimensions of a."""
    try:
        return a.ndim
    except AttributeError:
        # Not an ndarray; convert first.
        return asarray(a).ndim

def rank (a):
    """Get the rank of sequence a (the number of dimensions, not a matrix rank)
    The rank of a scalar is zero.
    """
    try:
        return a.ndim
    except AttributeError:
        return asarray(a).ndim

def size (a, axis=None):
    "Get the number of elements in sequence a, or along a certain axis."
    if axis is None:
        try:
            return a.size
        except AttributeError:
            return asarray(a).size
    else:
        try:
            return a.shape[axis]
        except AttributeError:
            return asarray(a).shape[axis]
+
+from function_base import round_ as around
+
def mean(a, axis=0, dtype=None):
    """Return the arithmetic mean along the given axis."""
    return asarray(a).mean(axis, dtype)

def std(a, axis=0, dtype=None):
    """Return the standard deviation along the given axis."""
    return asarray(a).std(axis, dtype)

def var(a, axis=0, dtype=None):
    """Return the variance along the given axis."""
    return asarray(a).var(axis, dtype)
diff --git a/numpy/base/polynomial.py b/numpy/base/polynomial.py
new file mode 100644
index 000000000..df7013bab
--- /dev/null
+++ b/numpy/base/polynomial.py
@@ -0,0 +1,554 @@
+"""
+Functions to operate on polynomials.
+"""
+
+__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
+ 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
+ 'polyfit']
+
+import re
+import numeric as NX
+
+from type_check import isscalar
+from twodim_base import diag, vander
+from shape_base import hstack, atleast_1d
+from function_base import trim_zeros, sort_complex
+eigvals = None
+lstsq = None
+
+def get_linalg_funcs():
+ "Look for linear algebra functions in scipy"
+ global eigvals, lstsq
+ from scipy.corelinalg import eigvals, lstsq
+ return
+
+def _eigvals(arg):
+ "Return the eigenvalues of the argument"
+ try:
+ return eigvals(arg)
+ except TypeError:
+ get_linalg_funcs()
+ return eigvals(arg)
+
+def _lstsq(X, y):
+ "Do least squares on the arguments"
+ try:
+ return lstsq(X, y)
+ except TypeError:
+ get_linalg_funcs()
+ return lstsq(X, y)
+
+def poly(seq_of_zeros):
+ """ Return a sequence representing a polynomial given a sequence of roots.
+
+ If the input is a matrix, return the characteristic polynomial.
+
+ Example:
+
+ >>> b = roots([1,3,1,5,6])
+ >>> poly(b)
+ array([1., 3., 1., 5., 6.])
+ """
+ seq_of_zeros = atleast_1d(seq_of_zeros)
+ sh = seq_of_zeros.shape
+ if len(sh) == 2 and sh[0] == sh[1]:
+ seq_of_zeros = _eigvals(seq_of_zeros)
+ elif len(sh) ==1:
+ pass
+ else:
+ raise ValueError, "input must be 1d or square 2d array."
+
+ if len(seq_of_zeros) == 0:
+ return 1.0
+
+ a = [1]
+ for k in range(len(seq_of_zeros)):
+ a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
+
+ if issubclass(a.dtype, NX.complexfloating):
+ # if complex roots are all complex conjugates, the roots are real.
+ roots = NX.asarray(seq_of_zeros, complex)
+ pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
+ neg_roots = NX.conjugate(sort_complex(
+ NX.compress(roots.imag < 0,roots)))
+ if (len(pos_roots) == len(neg_roots) and
+ NX.alltrue(neg_roots == pos_roots)):
+ a = a.real.copy()
+
+ return a
+
+def roots(p):
+ """ Return the roots of the polynomial coefficients in p.
+
+ The values in the rank-1 array p are coefficients of a polynomial.
+ If the length of p is n+1 then the polynomial is
+ p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
+ """
+ # If input is scalar, this makes it an array
+ p = atleast_1d(p)
+ if len(p.shape) != 1:
+ raise ValueError,"Input must be a rank-1 array."
+
+ # find non-zero array entries
+ non_zero = NX.nonzero(NX.ravel(p))
+
+ # find the number of trailing zeros -- this is the number of roots at 0.
+ trailing_zeros = len(p) - non_zero[-1] - 1
+
+ # strip leading and trailing zeros
+ p = p[int(non_zero[0]):int(non_zero[-1])+1]
+
+ # casting: if incoming array isn't floating point, make it floating point.
+ if not issubclass(p.dtype, (NX.floating, NX.complexfloating)):
+ p = p.astype(float)
+
+ N = len(p)
+ if N > 1:
+ # build companion matrix and find its eigenvalues (the roots)
+ A = diag(NX.ones((N-2,), p.dtype), -1)
+ A[0, :] = -p[1:] / p[0]
+ roots = _eigvals(A)
+ else:
+ return NX.array([])
+
+ # tack any zeros onto the back of the array
+ roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
+ return roots
+
+def polyint(p, m=1, k=None):
+ """Return the mth analytical integral of the polynomial p.
+
+ If k is None, then zero-valued constants of integration are used.
+ otherwise, k should be a list of length m (or a scalar if m=1) to
+ represent the constants of integration to use for each integration
+ (starting with k[0])
+ """
+ m = int(m)
+ if m < 0:
+ raise ValueError, "Order of integral must be positive (see polyder)"
+ if k is None:
+ k = NX.zeros(m, float)
+ k = atleast_1d(k)
+ if len(k) == 1 and m > 1:
+ k = k[0]*NX.ones(m, float)
+ if len(k) < m:
+ raise ValueError, \
+ "k must be a scalar or a rank-1 array of length 1 or >m."
+ if m == 0:
+ return p
+ else:
+ truepoly = isinstance(p, poly1d)
+ p = NX.asarray(p)
+ y = NX.zeros(len(p)+1, float)
+ y[:-1] = p*1.0/NX.arange(len(p), 0, -1)
+ y[-1] = k[0]
+ val = polyint(y, m-1, k=k[1:])
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+def polyder(p, m=1):
+ """Return the mth derivative of the polynomial p.
+ """
+ m = int(m)
+ truepoly = isinstance(p, poly1d)
+ p = NX.asarray(p)
+ n = len(p)-1
+ y = p[:-1] * NX.arange(n, 0, -1)
+ if m < 0:
+ raise ValueError, "Order of derivative must be positive (see polyint)"
+ if m == 0:
+ return p
+ else:
+ val = polyder(y, m-1)
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+def polyfit(x, y, N):
+    """
+
+    Do a best fit polynomial of order N of y to x.  Return value is a
+    vector of polynomial coefficients [pk ... p1 p0].  Eg, for N=2
+
+      p2*x0^2 +  p1*x0 + p0 = y0
+      p2*x1^2 +  p1*x1 + p0 = y1
+      p2*x2^2 +  p1*x2 + p0 = y2
+      .....
+      p2*xk^2 +  p1*xk + p0 = yk
+
+
+    Method: if X is a the Vandermonde Matrix computed from x (see
+    http://mathworld.wolfram.com/VandermondeMatrix.html), then the
+    polynomial least squares solution is given by the 'p' in
+
+      X*p = y
+
+    where X is a len(x) x N+1 matrix, p is a N+1 length vector, and y
+    is a len(x) x 1 vector
+
+    This equation can be solved as
+
+      p = (XT*X)^-1 * XT * y
+
+    where XT is the transpose of X and -1 denotes the inverse.
+
+    For more info, see
+    http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html,
+    but note that the k's and n's in the superscripts and subscripts
+    are swapped on that page relative to the usage here.  The linear
+    algebra is correct, however.
+
+    See also polyval
+
+    """
+    x = NX.asarray(x)+0.
+    y = NX.asarray(y)+0.
+    y = NX.reshape(y, (len(y), 1))
+    X = vander(x, N+1)
+    c, resids, rank, s = _lstsq(X, y)
+    c.shape = (N+1,)
+    return c
+
+
+
+def polyval(p, x):
+ """Evaluate the polynomial p at x. If x is a polynomial then composition.
+
+ Description:
+
+ If p is of length N, this function returns the value:
+ p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1]
+
+ x can be a sequence and p(x) will be returned for all elements of x.
+ or x can be another polynomial and the composite polynomial p(x) will be
+ returned.
+
+ Notice: This can produce inaccurate results for polynomials with
+ significant variability. Use carefully.
+ """
+ p = NX.asarray(p)
+ if isinstance(x, poly1d):
+ y = 0
+ else:
+ x = NX.asarray(x)
+ y = NX.zeros_like(x)
+ for i in range(len(p)):
+ y = x * y + p[i]
+ return y
+
+def polyadd(a1, a2):
+ """Adds two polynomials represented as sequences
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1 = atleast_1d(a1)
+ a2 = atleast_1d(a2)
+ diff = len(a2) - len(a1)
+ if diff == 0:
+ return a1 + a2
+ elif diff > 0:
+ zr = NX.zeros(diff, a1.dtype)
+ val = NX.concatenate((zr, a1)) + a2
+ else:
+ zr = NX.zeros(abs(diff), a2.dtype)
+ val = a1 + NX.concatenate((zr, a2))
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+def polysub(a1, a2):
+ """Subtracts two polynomials represented as sequences
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1 = atleast_1d(a1)
+ a2 = atleast_1d(a2)
+ diff = len(a2) - len(a1)
+ if diff == 0:
+ return a1 - a2
+ elif diff > 0:
+ zr = NX.zeros(diff, a1)
+ val = NX.concatenate((zr, a1)) - a2
+ else:
+ zr = NX.zeros(abs(diff), a2)
+ val = a1 - NX.concatenate((zr, a2))
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+def polymul(a1, a2):
+ """Multiplies two polynomials represented as sequences.
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ val = NX.convolve(a1, a2)
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+def deconvolve(signal, divisor):
+ """Deconvolves divisor out of signal. Requires scipy.signal library
+ """
+ import scipy.signal
+ num = atleast_1d(signal)
+ den = atleast_1d(divisor)
+ N = len(num)
+ D = len(den)
+ if D > N:
+ quot = [];
+ rem = num;
+ else:
+ input = NX.ones(N-D+1, float)
+ input[1:] = 0
+ quot = scipy.signal.lfilter(num, den, input)
+ rem = num - NX.convolve(den, quot, mode='full')
+ return quot, rem
+
+def polydiv(u, v):
+ """Computes q and r polynomials so that u(s) = q(s)*v(s) + r(s)
+ and deg r < deg v.
+ """
+ truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d))
+ u = atleast_1d(u)
+ v = atleast_1d(v)
+ m = len(u) - 1
+ n = len(v) - 1
+ scale = 1. / v[0]
+ q = NX.zeros((m-n+1,), float)
+ r = u.copy()
+ for k in range(0, m-n+1):
+ d = scale * r[k]
+ q[k] = d
+ r[k:k+n+1] -= d*v
+ while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
+ r = r[1:]
+ if truepoly:
+ q = poly1d(q)
+ r = poly1d(r)
+ return q, r
+
+_poly_mat = re.compile(r"[*][*]([0-9]*)")
+def _raise_power(astr, wrap=70):
+ n = 0
+ line1 = ''
+ line2 = ''
+ output = ' '
+ while 1:
+ mat = _poly_mat.search(astr, n)
+ if mat is None:
+ break
+ span = mat.span()
+ power = mat.groups()[0]
+ partstr = astr[n:span[0]]
+ n = span[1]
+ toadd2 = partstr + ' '*(len(power)-1)
+ toadd1 = ' '*(len(partstr)-1) + power
+ if ((len(line2)+len(toadd2) > wrap) or \
+ (len(line1)+len(toadd1) > wrap)):
+ output += line1 + "\n" + line2 + "\n "
+ line1 = toadd1
+ line2 = toadd2
+ else:
+ line2 += partstr + ' '*(len(power)-1)
+ line1 += ' '*(len(partstr)-1) + power
+ output += line1 + "\n" + line2
+ return output + astr[n:]
+
+
+class poly1d(object):
+ """A one-dimensional polynomial class.
+
+ p = poly1d([1,2,3]) constructs the polynomial x**2 + 2 x + 3
+
+ p(0.5) evaluates the polynomial at the location
+ p.r is a list of roots
+ p.c is the coefficient array [1,2,3]
+ p.order is the polynomial order (after leading zeros in p.c are removed)
+ p[k] is the coefficient on the kth power of x (backwards from
+ sequencing the coefficient array.
+
+ polynomials can be added, substracted, multplied and divided (returns
+ quotient and remainder).
+ asarray(p) will also give the coefficient array, so polynomials can
+ be used in all functions that accept arrays.
+
+ p = poly1d([1,2,3], variable='lambda') will use lambda in the
+ string representation of p.
+ """
+ def __init__(self, c_or_r, r=0, variable=None):
+ if isinstance(c_or_r, poly1d):
+ for key in c_or_r.__dict__.keys():
+ self.__dict__[key] = c_or_r.__dict__[key]
+ if variable is not None:
+ self.__dict__['variable'] = variable
+ return
+ if r:
+ c_or_r = poly(c_or_r)
+ c_or_r = atleast_1d(c_or_r)
+ if len(c_or_r.shape) > 1:
+ raise ValueError, "Polynomial must be 1d only."
+ c_or_r = trim_zeros(c_or_r, trim='f')
+ if len(c_or_r) == 0:
+ c_or_r = NX.array([0.])
+ self.__dict__['coeffs'] = c_or_r
+ self.__dict__['order'] = len(c_or_r) - 1
+ if variable is None:
+ variable = 'x'
+ self.__dict__['variable'] = variable
+
+ def __array__(self, t=None):
+ if t:
+ return NX.asarray(self.coeffs, t)
+ else:
+ return NX.asarray(self.coeffs)
+
+ def __repr__(self):
+ vals = repr(self.coeffs)
+ vals = vals[6:-1]
+ return "poly1d(%s)" % vals
+
+ def __len__(self):
+ return self.order
+
+ def __str__(self):
+ N = self.order
+ thestr = "0"
+ var = self.variable
+ for k in range(len(self.coeffs)):
+ coefstr ='%.4g' % abs(self.coeffs[k])
+ if coefstr[-4:] == '0000':
+ coefstr = coefstr[:-5]
+ power = (N-k)
+ if power == 0:
+ if coefstr != '0':
+ newstr = '%s' % (coefstr,)
+ else:
+ if k == 0:
+ newstr = '0'
+ else:
+ newstr = ''
+ elif power == 1:
+ if coefstr == '0':
+ newstr = ''
+ elif coefstr == 'b':
+ newstr = var
+ else:
+ newstr = '%s %s' % (coefstr, var)
+ else:
+ if coefstr == '0':
+ newstr = ''
+ elif coefstr == 'b':
+ newstr = '%s**%d' % (var, power,)
+ else:
+ newstr = '%s %s**%d' % (coefstr, var, power)
+
+ if k > 0:
+ if newstr != '':
+ if self.coeffs[k] < 0:
+ thestr = "%s - %s" % (thestr, newstr)
+ else:
+ thestr = "%s + %s" % (thestr, newstr)
+ elif (k == 0) and (newstr != '') and (self.coeffs[k] < 0):
+ thestr = "-%s" % (newstr,)
+ else:
+ thestr = newstr
+ return _raise_power(thestr)
+
+
+ def __call__(self, val):
+ return polyval(self.coeffs, val)
+
+ def __mul__(self, other):
+ if isscalar(other):
+ return poly1d(self.coeffs * other)
+ else:
+ other = poly1d(other)
+ return poly1d(polymul(self.coeffs, other.coeffs))
+
+ def __rmul__(self, other):
+ if isscalar(other):
+ return poly1d(other * self.coeffs)
+ else:
+ other = poly1d(other)
+ return poly1d(polymul(self.coeffs, other.coeffs))
+
+ def __add__(self, other):
+ other = poly1d(other)
+ return poly1d(polyadd(self.coeffs, other.coeffs))
+
+ def __radd__(self, other):
+ other = poly1d(other)
+ return poly1d(polyadd(self.coeffs, other.coeffs))
+
+ def __pow__(self, val):
+ if not isscalar(val) or int(val) != val or val < 0:
+ raise ValueError, "Power to non-negative integers only."
+ res = [1]
+ for k in range(val):
+ res = polymul(self.coeffs, res)
+ return poly1d(res)
+
+ def __sub__(self, other):
+ other = poly1d(other)
+ return poly1d(polysub(self.coeffs, other.coeffs))
+
+ def __rsub__(self, other):
+ other = poly1d(other)
+ return poly1d(polysub(other.coeffs, self.coeffs))
+
+ def __div__(self, other):
+ if isscalar(other):
+ return poly1d(self.coeffs/other)
+ else:
+ other = poly1d(other)
+ return polydiv(self, other)
+
+ def __rdiv__(self, other):
+ if isscalar(other):
+ return poly1d(other/self.coeffs)
+ else:
+ other = poly1d(other)
+ return polydiv(other, self)
+
+ def __setattr__(self, key, val):
+ raise ValueError, "Attributes cannot be changed this way."
+
+ def __getattr__(self, key):
+ if key in ['r', 'roots']:
+ return roots(self.coeffs)
+ elif key in ['c','coef','coefficients']:
+ return self.coeffs
+ elif key in ['o']:
+ return self.order
+ else:
+ return self.__dict__[key]
+
+ def __getitem__(self, val):
+ ind = self.order - val
+ if val > self.order:
+ return 0
+ if val < 0:
+ return 0
+ return self.coeffs[ind]
+
+ def __setitem__(self, key, val):
+ ind = self.order - key
+ if key < 0:
+ raise ValueError, "Does not support negative powers."
+ if key > self.order:
+ zr = NX.zeros(key-self.order, self.coeffs.dtype)
+ self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
+ self.__dict__['order'] = key
+ ind = 0
+ self.__dict__['coeffs'][ind] = val
+ return
+
+ def integ(self, m=1, k=0):
+ """Return the mth analytical integral of this polynomial.
+ See the documentation for polyint.
+ """
+ return poly1d(polyint(self.coeffs, m=m, k=k))
+
+ def deriv(self, m=1):
+ """Return the mth derivative of this polynomial.
+ """
+ return poly1d(polyder(self.coeffs, m=m))
diff --git a/numpy/base/records.py b/numpy/base/records.py
new file mode 100644
index 000000000..ee697cff4
--- /dev/null
+++ b/numpy/base/records.py
@@ -0,0 +1,402 @@
+__all__ = ['record', 'recarray','format_parser']
+
+import numeric as sb
+import numerictypes as nt
+import sys
+import types
+import stat, os
+import _internal
+
+_byteorderconv = {'b':'>',
+ 'l':'<',
+ 'n':'=',
+ 'B':'>',
+ 'L':'<',
+ 'N':'=',
+ 'S':'s',
+ 's':'s',
+ '>':'>',
+ '<':'<',
+ '=':'=',
+ '|':'|',
+ 'I':'|',
+ 'i':'|'}
+
+# formats regular expression
+# allows multidimension spec with a tuple syntax in front
+# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
+# are equally allowed
+
+numfmt = nt.typeDict
+_typestr = nt._typestr
+
+def find_duplicate(list):
+ """Find duplication in a list, return a list of duplicated elements"""
+ dup = []
+ for i in range(len(list)):
+ if (list[i] in list[i+1:]):
+ if (list[i] not in dup):
+ dup.append(list[i])
+ return dup
+
+
+class format_parser:
+ def __init__(self, formats, names, titles, aligned=False):
+ self._parseFormats(formats, aligned)
+ self._setfieldnames(names, titles)
+ self._createdescr()
+
+ def _parseFormats(self, formats, aligned=0):
+ """ Parse the field formats """
+
+ dtypedescr = sb.dtypedescr(formats, aligned)
+ fields = dtypedescr.fields
+ keys = fields[-1]
+ self._f_formats = [fields[key][0] for key in keys]
+ self._offsets = [fields[key][1] for key in keys]
+ self._nfields = len(keys)
+
+ def _setfieldnames(self, names, titles):
+ """convert input field names into a list and assign to the _names
+ attribute """
+
+ if (names):
+ if (type(names) in [types.ListType, types.TupleType]):
+ pass
+ elif (type(names) == types.StringType):
+ names = names.split(',')
+ else:
+ raise NameError, "illegal input names %s" % `names`
+
+ self._names = map(lambda n:n.strip(), names)[:self._nfields]
+ else:
+ self._names = []
+
+ # if the names are not specified, they will be assigned as "f1, f2,..."
+ # if not enough names are specified, they will be assigned as "f[n+1],
+ # f[n+2],..." etc. where n is the number of specified names..."
+ self._names += map(lambda i:
+ 'f'+`i`, range(len(self._names)+1,self._nfields+1))
+
+ # check for redundant names
+ _dup = find_duplicate(self._names)
+ if _dup:
+ raise ValueError, "Duplicate field names: %s" % _dup
+
+ if (titles):
+ self._titles = [n.strip() for n in titles][:self._nfields]
+ else:
+ self._titles = []
+ titles = []
+
+ if (self._nfields > len(titles)):
+ self._titles += [None]*(self._nfields-len(titles))
+
+ def _createdescr(self):
+ self._descr = sb.dtypedescr({'names':self._names,
+ 'formats':self._f_formats,
+ 'offsets':self._offsets,
+ 'titles':self._titles})
+
+class record(nt.void):
+ def __repr__(self):
+ return self.__str__()
+
+ def __str__(self):
+ return str(self.item())
+
+ def __getattribute__(self, attr):
+ if attr in ['setfield', 'getfield', 'dtypedescr']:
+ return nt.void.__getattribute__(self, attr)
+ fielddict = nt.void.__getattribute__(self, 'dtypedescr').fields
+ res = fielddict.get(attr,None)
+ if res:
+ return self.getfield(*res[:2])
+ return nt.void.__getattribute__(self, attr)
+
+ def __setattr__(self, attr, val):
+ if attr in ['setfield', 'getfield', 'dtypedescr']:
+ raise AttributeError, "Cannot set '%s' attribute" % attr;
+ fielddict = nt.void.__getattribute__(self,'dtypedescr').fields
+ res = fielddict.get(attr,None)
+ if res:
+ return self.setfield(val,*res[:2])
+
+ return nt.void.__setattr__(self,attr,val)
+
+ def __getitem__(self, obj):
+ return self.getfield(*(self.dtypedescr.fields[obj][:2]))
+
+ def __setitem__(self, obj, val):
+ return self.setfield(val, *(self.dtypedescr.fields[obj][:2]))
+
+
+# The recarray is almost identical to a standard array (which supports
+# named fields already) The biggest difference is that it can use
+# attribute-lookup to find the fields and it returns a record item.
+
+# If byteorder is given it forces a particular byteorder on all
+# the fields (and any subfields)
+
+class recarray(sb.ndarray):
+ def __new__(subtype, shape, formats, names=None, titles=None,
+ buf=None, offset=0, strides=None, byteorder=None,
+ aligned=0):
+
+ if isinstance(formats, sb.dtypedescr):
+ descr = formats
+ else:
+ parsed = format_parser(formats, names, titles, aligned)
+ descr = parsed._descr
+
+ if (byteorder is not None):
+ byteorder = _byteorderconv[byteorder[0]]
+ descr = descr.newbyteorder(byteorder)
+
+ if buf is None:
+ self = sb.ndarray.__new__(subtype, shape, (record, descr))
+ else:
+ self = sb.ndarray.__new__(subtype, shape, (record, descr),
+ buffer=buf, offset=offset,
+ strides=strides)
+ return self
+
+ def __getattribute__(self, attr):
+ fielddict = sb.ndarray.__getattribute__(self,'dtypedescr').fields
+ try:
+ res = fielddict[attr][:2]
+ except:
+ return sb.ndarray.__getattribute__(self,attr)
+
+ return self.getfield(*res)
+
+ def __setattr__(self, attr, val):
+ fielddict = sb.ndarray.__getattribute__(self,'dtypedescr').fields
+ try:
+ res = fielddict[attr][:2]
+ except:
+ return sb.ndarray.__setattr__(self,attr,val)
+
+ return self.setfield(val,*res)
+
+
+def fromarrays(arrayList, formats=None, names=None, titles=None, shape=None,
+ aligned=0):
+ """ create a record array from a (flat) list of arrays
+
+ >>> x1=array([1,2,3,4])
+ >>> x2=array(['a','dd','xyz','12'])
+ >>> x3=array([1.1,2,3,4])
+ >>> r=fromarrays([x1,x2,x3],names='a,b,c')
+ >>> print r[1]
+ (2, 'dd', 2.0)
+ >>> x1[1]=34
+ >>> r.a
+ recarray([1, 2, 3, 4])
+ """
+
+ if shape is None or shape == 0:
+ shape = arrayList[0].shape
+
+ if isinstance(shape, int):
+ shape = (shape,)
+
+ if formats is None:
+ # go through each object in the list to see if it is an ndarray
+ # and determine the formats.
+ formats = ''
+ for obj in arrayList:
+ if not isinstance(obj, sb.ndarray):
+ raise ValueError, "item in the array list must be an ndarray."
+ if obj.ndim == 1:
+ _repeat = ''
+ elif len(obj._shape) >= 2:
+ _repeat = `obj._shape[1:]`
+ formats += _repeat + _typestr[obj.dtype]
+ if issubclass(obj.dtype, nt.flexible):
+ formats += `obj.itemsize`
+ formats += ','
+ formats=formats[:-1]
+
+ for obj in arrayList:
+ if obj.shape != shape:
+ raise ValueError, "array has different shape"
+
+ parsed = format_parser(formats, names, titles, aligned)
+ _names = parsed._names
+ _array = recarray(shape, parsed._descr)
+
+ # populate the record array (makes a copy)
+ for i in range(len(arrayList)):
+ _array[_names[i]] = arrayList[i]
+
+ return _array
+
+# shape must be 1-d
+def fromrecords(recList, formats=None, names=None, titles=None, shape=None,
+ aligned=0):
+ """ create a Record Array from a list of records in text form
+
+ The data in the same field can be heterogeneous, they will be promoted
+ to the highest data type. This method is intended for creating
+ smaller record arrays. If used to create large array e.g.
+
+ r=fromrecords([[2,3.,'abc']]*100000)
+
+ it is slow.
+
+ >>> r=fromrecords([[456,'dbe',1.2],[2,'de',1.3]],names='col1,col2,col3')
+ >>> print r[0]
+ (456, 'dbe', 1.2)
+ >>> r.col1
+ recarray([456, 2])
+ >>> r.col2
+ recarray(['dbe', 'de'])
+ >>> import cPickle
+ >>> print cPickle.loads(cPickle.dumps(r))
+ recarray[
+ (456, 'dbe', 1.2),
+ (2, 'de', 1.3)
+ ]
+ """
+
+ if (shape is None or shape == 0):
+ shape = len(recList)
+
+ if isinstance(shape, (int, long)):
+ shape = (shape,)
+
+ if len(shape) > 1:
+ raise ValueError, "Can only deal with 1-d list of records"
+
+ nfields = len(recList[0])
+ if formats is None: # slower
+ obj = sb.array(recList,dtype=object)
+ arrlist = [sb.array(obj[:,i].tolist()) for i in xrange(nfields)]
+ return fromarrays(arrlist, formats=formats, shape=shape, names=names,
+ titles=titles, aligned=aligned)
+
+ parsed = format_parser(formats, names, titles, aligned)
+ _array = recarray(shape, parsed._descr)
+
+ for k in xrange(_array.size):
+ _array[k] = tuple(recList[k])
+
+ return _array
+
+def fromstring(datastring, formats, shape=None, names=None, titles=None,
+ byteorder=None, aligned=0, offset=0):
+ """ create a (read-only) record array from binary data contained in
+ a string"""
+
+ parsed = format_parser(formats, names, titles, aligned)
+ itemsize = parsed._descr.itemsize
+ if (shape is None or shape == 0 or shape == -1):
+ shape = (len(datastring)-offset) / itemsize
+
+ _array = recarray(shape, parsed._descr, names=names,
+ titles=titles, buf=datastring, offset=offset,
+ byteorder=byteorder)
+ return _array
+
+def fromfile(fd, formats, shape=None, names=None, titles=None,
+ byteorder=None, aligned=0, offset=0):
+ """Create an array from binary file data
+
+ If file is a string then that file is opened, else it is assumed
+ to be a file object. No options at the moment, all file positioning
+ must be done prior to this function call with a file object
+
+ >>> import testdata, sys
+ >>> fd=open(testdata.filename)
+ >>> fd.seek(2880*2)
+ >>> r=fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+ >>> print r[0]
+ (5.1000000000000005, 61, 'abcde')
+ >>> r._shape
+ (3,)
+ """
+
+ if (shape is None or shape == 0):
+ shape = (-1,)
+ elif isinstance(shape, (int, long)):
+ shape = (shape,)
+
+ name = 0
+ if isinstance(fd, str):
+ name = 1
+ fd = open(fd, 'rb')
+ if (offset > 0):
+ fd.seek(offset, 1)
+ try:
+ size = os.fstat(fd.fileno())[stat.ST_SIZE] - fd.tell()
+ except:
+ size = os.path.getsize(fd.name) - fd.tell()
+
+ parsed = format_parser(formats, names, titles, aligned)
+ itemsize = parsed._descr.itemsize
+
+ shapeprod = sb.array(shape).prod()
+ shapesize = shapeprod*itemsize
+ if shapesize < 0:
+ shape = list(shape)
+ shape[ shape.index(-1) ] = size / -shapesize
+ shape = tuple(shape)
+ shapeprod = sb.array(shape).prod()
+
+ nbytes = shapeprod*itemsize
+
+ if nbytes > size:
+ raise ValueError(
+ "Not enough bytes left in file for specified shape and type")
+
+ # create the array
+ _array = recarray(shape, parsed._descr, byteorder=byteorder)
+ nbytesread = fd.readinto(_array.data)
+ if nbytesread != nbytes:
+ raise IOError("Didn't read as many bytes as expected")
+ if name:
+ fd.close()
+
+ return _array
+
+
+def array(obj, formats=None, names=None, titles=None, shape=None,
+ byteorder=None, aligned=0, offset=0, strides=None):
+
+ if isinstance(obj, (type(None), str, file)) and (formats is None):
+ raise ValueError("Must define formats if object is "\
+ "None, string, or a file pointer")
+
+ elif obj is None:
+ if shape is None:
+ raise ValueError("Must define a shape if obj is None")
+ return recarray(shape, formats, names=names, titles=titles,
+ buf=obj, offset=offset, strides=strides,
+ byteorder=byteorder, aligned=aligned)
+ elif isinstance(obj, str):
+ return fromstring(obj, formats, names=names, titles=titles,
+ shape=shape, byteorder=byteorder, aligned=aligned,
+ offset=offset)
+ elif isinstance(obj, (list, tuple)):
+ if isinstance(obj[0], sb.ndarray):
+ return fromarrays(obj, formats=formats, names=names, titles=titles,
+ shape=shape, aligned=aligned)
+ else:
+ return fromrecords(obj, formats=formats, names=names, titles=titles,
+ shape=shape, aligned=aligned)
+ elif isinstance(obj, recarray):
+ new = obj.copy()
+ parsed = format_parser(formats, names, titles, aligned)
+ new.dtypedescr = parsed._descr
+ return new
+ elif isinstance(obj, file):
+ return fromfile(obj, formats=formats, names=names, titles=titles,
+ shape=shape, byteorder=byteorder, aligned=aligned,
+ offset=offset)
+ else:
+ raise ValueError("Unknown input type")
+
+
+
+
diff --git a/numpy/base/scimath.py b/numpy/base/scimath.py
new file mode 100644
index 000000000..4202fa640
--- /dev/null
+++ b/numpy/base/scimath.py
@@ -0,0 +1,77 @@
+"""
+Wrapper functions to more user-friendly calling of certain math functions
+whose output data-type is different than the input data-type in certain domains of the input.
+"""
+
+__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
+ 'arcsin', 'arctanh']
+
+import numeric as nx
+from numeric import *
+
+from type_check import isreal, asscalar
+
+__all__.extend([key for key in dir(nx.umath)
+ if key[0] != '_' and key not in __all__])
+
+_ln2 = log(2.0)
+
+def _tocomplex(arr):
+ if isinstance(arr.dtype, (nx.single, nx.byte, nx.short, nx.ubyte,
+ nx.ushort)):
+ return arr.astype(nx.csingle)
+ else:
+ return arr.astype(nx.cdouble)
+
+def _fix_real_lt_zero(x):
+ x = asarray(x)
+ if any(isreal(x) & (x<0)):
+ x = _tocomplex(x)
+ return asscalar(x)
+
+def _fix_real_abs_gt_1(x):
+ x = asarray(x)
+ if any(isreal(x) & (abs(x)>1)):
+ x = _tocomplex(x)
+ return x
+
+def sqrt(x):
+ x = _fix_real_lt_zero(x)
+ return nx.sqrt(x)
+
+def log(x):
+ x = _fix_real_lt_zero(x)
+ return nx.log(x)
+
+def log10(x):
+ x = _fix_real_lt_zero(x)
+ return nx.log10(x)
+
+def logn(n, x):
+ """ Take log base n of x.
+ """
+ x = _fix_real_lt_zero(x)
+ n = _fix_real_lt_zero(n)
+ return log(x)/log(n)
+
+def log2(x):
+ """ Take log base 2 of x.
+ """
+ x = _fix_real_lt_zero(x)
+ return log(x)/_ln2
+
+def power(x, p):
+ x = _fix_real_lt_zero(x)
+ return nx.power(x, p)
+
+def arccos(x):
+ x = _fix_real_abs_gt_1(x)
+ return arccos(x)
+
+def arcsin(x):
+ x = _fix_real_abs_gt_1(x)
+ return arcsin(x)
+
+def arctanh(x):
+ x = _fix_real_abs_gt_1(x)
+ return arctanh(x)
diff --git a/numpy/base/setup.py b/numpy/base/setup.py
new file mode 100644
index 000000000..5f0e0fa94
--- /dev/null
+++ b/numpy/base/setup.py
@@ -0,0 +1,284 @@
+
+import imp
+import os
+from os.path import join
+from glob import glob
+from distutils.dep_util import newer,newer_group
+
+def configuration(parent_package='',top_path=None):
+ from scipy.distutils.misc_util import Configuration,dot_join
+ from scipy.distutils.system_info import get_info
+
+ config = Configuration('base',parent_package,top_path)
+ local_dir = config.local_path
+ codegen_dir = join(local_dir,'code_generators')
+
+ generate_umath_py = join(codegen_dir,'generate_umath.py')
+ n = dot_join(config.name,'generate_umath')
+ generate_umath = imp.load_module('_'.join(n.split('.')),
+ open(generate_umath_py,'U'),generate_umath_py,
+ ('.py','U',1))
+
+ header_dir = join(*(config.name.split('.')+['include','scipy']))
+
+ def generate_config_h(ext, build_dir):
+ target = join(build_dir,'config.h')
+ if newer(__file__,target):
+ config_cmd = config.get_config_cmd()
+ print 'Generating',target
+ #
+ tc = generate_testcode(target)
+ from distutils import sysconfig
+ python_include = sysconfig.get_python_inc()
+ result = config_cmd.try_run(tc,include_dirs=[python_include])
+ if not result:
+ raise "ERROR: Failed to test configuration"
+ moredefs = []
+
+ #
+ mathlibs = []
+ tc = testcode_mathlib()
+ mathlibs_choices = [[],['m'],['cpml']]
+ mathlib = os.environ.get('MATHLIB')
+ if mathlib:
+ mathlibs_choices.insert(0,mathlib.split(','))
+ for libs in mathlibs_choices:
+ if config_cmd.try_run(tc,libraries=libs):
+ mathlibs = libs
+ break
+ else:
+ raise "math library missing; rerun setup.py after setting the MATHLIB env variable"
+ ext.libraries.extend(mathlibs)
+ moredefs.append(('MATHLIB',','.join(mathlibs)))
+
+ libs = mathlibs
+ kws_args = {'libraries':libs,'decl':0,'headers':['math.h']}
+ if config_cmd.check_func('expl', **kws_args):
+ moredefs.append('HAVE_LONGDOUBLE_FUNCS')
+ if config_cmd.check_func('expf', **kws_args):
+ moredefs.append('HAVE_FLOAT_FUNCS')
+ if config_cmd.check_func('asinh', **kws_args):
+ moredefs.append('HAVE_INVERSE_HYPERBOLIC')
+ if config_cmd.check_func('atanhf', **kws_args):
+ moredefs.append('HAVE_INVERSE_HYPERBOLIC_FLOAT')
+ if config_cmd.check_func('atanhl', **kws_args):
+ moredefs.append('HAVE_INVERSE_HYPERBOLIC_LONGDOUBLE')
+ if config_cmd.check_func('isnan', **kws_args):
+ moredefs.append('HAVE_ISNAN')
+ if config_cmd.check_func('isinf', **kws_args):
+ moredefs.append('HAVE_ISINF')
+
+ if moredefs:
+ target_f = open(target,'a')
+ for d in moredefs:
+ if isinstance(d,str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0],d[1]))
+ target_f.close()
+ else:
+ mathlibs = []
+ target_f = open(target)
+ for line in target_f.readlines():
+ s = '#define MATHLIB'
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
+ target_f.close()
+
+ ext.libraries.extend(mathlibs)
+
+ incl_dir = os.path.dirname(target)
+ if incl_dir not in config.scipy_include_dirs:
+ config.scipy_include_dirs.append(incl_dir)
+
+ config.add_data_files((header_dir,target))
+ return target
+
+ def generate_api_func(header_file, module_name):
+ def generate_api(ext,build_dir):
+ target = join(build_dir, header_file)
+ script = join(codegen_dir, module_name + '.py')
+ if newer(script, target):
+ sys.path.insert(0, codegen_dir)
+ try:
+ m = __import__(module_name)
+ print 'executing',script
+ m.generate_api(build_dir)
+ finally:
+ del sys.path[0]
+ config.add_data_files((header_dir,target))
+ return target
+ return generate_api
+
+ generate_array_api = generate_api_func('__multiarray_api.h',
+ 'generate_array_api')
+ generate_ufunc_api = generate_api_func('__ufunc_api.h',
+ 'generate_ufunc_api')
+
+ def generate_umath_c(ext,build_dir):
+ target = join(build_dir,'__umath_generated.c')
+ script = generate_umath_py
+ if newer(script,target):
+ f = open(target,'w')
+ f.write(generate_umath.make_code(generate_umath.defdict,
+ generate_umath.__file__))
+ f.close()
+ return []
+
+ config.add_data_files(join('include','scipy','*.h'))
+ config.add_include_dirs('src')
+
+ config.scipy_include_dirs.extend(config.paths('include'))
+
+ deps = [join('src','arrayobject.c'),
+ join('src','arraymethods.c'),
+ join('src','scalartypes.inc.src'),
+ join('src','arraytypes.inc.src'),
+ join('include','scipy','*object.h'),
+ join(codegen_dir,'genapi.py'),
+ join(codegen_dir,'*.txt')
+ ]
+
+ config.add_extension('multiarray',
+ sources = [join('src','multiarraymodule.c'),
+ generate_config_h,
+ generate_array_api,
+ join('src','scalartypes.inc.src'),
+ join('src','arraytypes.inc.src'),
+ join(codegen_dir,'generate_array_api.py'),
+ join('*.py')
+ ],
+ depends = deps,
+ )
+
+ config.add_extension('umath',
+ sources = [generate_config_h,
+ join('src','umathmodule.c.src'),
+ generate_umath_c,
+ generate_ufunc_api,
+ join('src','scalartypes.inc.src'),
+ join('src','arraytypes.inc.src'),
+ ],
+ depends = [join('src','ufuncobject.c'),
+ generate_umath_py,
+ join(codegen_dir,'generate_ufunc_api.py'),
+ ]+deps,
+ )
+
+ config.add_extension('_compiled_base',
+ sources=[join('src','_compiled_base.c'),
+ generate_config_h,
+ generate_array_api,
+ ],
+ )
+
+ config.add_extension('_sort',
+ sources=[join('src','_sortmodule.c.src'),
+ generate_config_h,
+ generate_array_api,
+ ],
+ )
+
+ # Configure blasdot
+ blas_info = get_info('blas_opt',0)
+ #blas_info = {}
+ def get_dotblas_sources(ext, build_dir):
+ if blas_info:
+ return ext.depends[:1]
+ return None # no extension module will be built
+
+ config.add_extension('_dotblas',
+ sources = [get_dotblas_sources],
+ depends=[join('blasdot','_dotblas.c'),
+ join('blasdot','cblas.h'),
+ ],
+ include_dirs = ['blasdot'],
+ extra_info = blas_info
+ )
+
+
+ config.add_data_dir('tests')
+ config.make_svn_version_py()
+
+ return config
+
+def testcode_mathlib():
+ return """\
+/* check whether libm is broken */
+#include <math.h>
+int main(int argc, char *argv[])
+{
+ return exp(-720.) > 1.0; /* typically an IEEE denormal */
+}
+"""
+
+import sys
+def generate_testcode(target):
+ if sys.platform == 'win32':
+ target = target.replace('\\','\\\\')
+ testcode = [r'''
+#include <Python.h>
+#include <limits.h>
+#include <stdio.h>
+
+int main(int argc, char **argv)
+{
+
+ FILE *fp;
+
+ fp = fopen("'''+target+'''","w");
+ ''']
+
+ c_size_test = r'''
+#ifndef %(sz)s
+ fprintf(fp,"#define %(sz)s %%d\n", sizeof(%(type)s));
+#else
+ fprintf(fp,"/* #define %(sz)s %%d */\n", %(sz)s);
+#endif
+'''
+ for sz, t in [('SIZEOF_SHORT', 'short'),
+ ('SIZEOF_INT', 'int'),
+ ('SIZEOF_LONG', 'long'),
+ ('SIZEOF_FLOAT', 'float'),
+ ('SIZEOF_DOUBLE', 'double'),
+ ('SIZEOF_LONG_DOUBLE', 'long double'),
+ ('SIZEOF_PY_INTPTR_T', 'Py_intptr_t'),
+ ]:
+ testcode.append(c_size_test % {'sz' : sz, 'type' : t})
+
+ testcode.append('#ifdef PY_LONG_LONG')
+ testcode.append(c_size_test % {'sz' : 'SIZEOF_LONG_LONG',
+ 'type' : 'PY_LONG_LONG'})
+ testcode.append(c_size_test % {'sz' : 'SIZEOF_PY_LONG_LONG',
+ 'type' : 'PY_LONG_LONG'})
+
+
+ testcode.append(r'''
+#else
+ fprintf(fp, "/* PY_LONG_LONG not defined */\n");
+#endif
+#ifndef CHAR_BIT
+ {
+ unsigned char var = 2;
+ int i=0;
+ while (var >= 2) {
+ var = var << 1;
+ i++;
+ }
+ fprintf(fp,"#define CHAR_BIT %d\n", i+1);
+ }
+#else
+ fprintf(fp, "/* #define CHAR_BIT %d */\n", CHAR_BIT);
+#endif
+ fclose(fp);
+ return 0;
+}
+''')
+ testcode = '\n'.join(testcode)
+ return testcode
+
+if __name__=='__main__':
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/numpy/base/shape_base.py b/numpy/base/shape_base.py
new file mode 100644
index 000000000..8d66b41d1
--- /dev/null
+++ b/numpy/base/shape_base.py
@@ -0,0 +1,539 @@
+__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack',
+ 'column_stack','dstack','array_split','split','hsplit',
+ 'vsplit','dsplit','apply_over_axes','expand_dims',
+ 'apply_along_axis']
+
+import numeric as _nx
+from numeric import *
+from oldnumeric import product
+
+from type_check import isscalar
+
def apply_along_axis(func1d,axis,arr,*args):
    """ Execute func1d(arr[i],*args) where func1d takes 1-D arrays
        and arr is an N-d array.  i varies so as to apply the function
        along the given axis for each 1-d subarray in arr.
    """
    arr = asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis,nd))
    # `ind` is an odometer over the non-target axes; `i` is an object
    # array used as a reusable fancy-index template with a full slice
    # at the target axis.
    ind = [0]*(nd-1)
    i = zeros(nd,'O')
    indlist = range(nd)   # NOTE(review): used as a mutable list below -- Python 2 only
    indlist.remove(axis)
    i[axis] = slice(None,None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(ind, indlist)
    res = func1d(arr[tuple(i.tolist())],*args)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = zeros(outshape,asarray(res).dtypechar)
        outarr[ind] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # ripple the carry through the odometer digits
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(ind,indlist)
            res = func1d(arr[tuple(i.tolist())],*args)
            outarr[ind] = res
            k += 1
        return outarr
    else:
        # res is an array: output keeps arr's rank, but the target axis
        # takes len(res)
        Ntot = product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = zeros(outshape,asarray(res).dtypechar)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(ind, indlist)
            res = func1d(arr[tuple(i.tolist())],*args)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
+
+
def apply_over_axes(func, a, axes):
    """Apply a function repeatedly over multiple axes, keeping the same
    shape for the resulting array.

    func is called as res = func(a, axis).  The result is assumed to be
    either the same shape as a or to have one less dimension; in the
    latter case the reduced axis is re-inserted (length 1) so the rank
    is preserved.  This call is repeated for each axis in the axes
    sequence.

    Arguments:
      func -- callable accepting (array, axis).
      a    -- array-like input.
      axes -- int or sequence of ints; negative values count from the
              end.
    Returns:
      array with the same number of dimensions as a.
    Raises:
      ValueError -- if func returns an array of incompatible rank.
    """
    val = asarray(a)
    # BUG FIX: use val.ndim, not a.ndim -- a may be any array-like
    # (e.g. a nested list) without an .ndim attribute.
    N = val.ndim
    if array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis = N + axis
        res = func(val, axis)
        if res.ndim == val.ndim:
            val = res
        else:
            res = expand_dims(res, axis)
            if res.ndim == val.ndim:
                val = res
            else:
                # raise E(msg) works on both Python 2 and 3; the
                # original "raise E, msg" form is Python-2 only.
                raise ValueError("function is not returning"
                                 " an array of correct shape")
    return val
+
def expand_dims(a, axis):
    """Insert a length-one axis into a's shape at position axis.

    Negative axis values are interpreted relative to the expanded rank,
    so axis=-1 appends a trailing length-one dimension.
    """
    a = asarray(a)
    old_shape = a.shape
    if axis < 0:
        axis += len(old_shape) + 1
    new_shape = old_shape[:axis] + (1,) + old_shape[axis:]
    return a.reshape(new_shape)
+
+
def atleast_1d(*arys):
    """View each input as an array with at least one dimension.

    0-d inputs are reshaped to length-1 1-d arrays; all other inputs
    are passed through unchanged.  With a single argument the lone
    converted array is returned; with several, a list of them.
    """
    converted = []
    for item in arys:
        arr = asarray(item)
        converted.append(arr.reshape(1) if len(arr.shape) == 0 else arr)
    return converted[0] if len(converted) == 1 else converted
+
def atleast_2d(*arys):
    """View each input as an array with at least two dimensions.

    0-d inputs become 1x1 arrays and 1-d inputs become single-row
    (1xN) arrays; higher-dimensional inputs pass through unchanged.
    With a single argument the lone converted array is returned; with
    several, a list of them.
    """
    out = []
    for item in arys:
        arr = asarray(item)
        rank = len(arr.shape)
        if rank == 0:
            out.append(arr.reshape(1, 1))
        elif rank == 1:
            out.append(arr[newaxis, :])
        else:
            out.append(arr)
    return out[0] if len(out) == 1 else out
+
def atleast_3d(*arys):
    """View each input as an array with at least three dimensions.

    0-d inputs become 1x1x1 arrays, 1-d inputs of length N become
    1xNx1 arrays, and 2-d MxN inputs become MxNx1 arrays; anything of
    higher rank passes through unchanged.  With a single argument the
    lone converted array is returned; with several, a list of them.
    """
    out = []
    for item in arys:
        arr = asarray(item)
        rank = len(arr.shape)
        if rank == 0:
            expanded = arr.reshape(1, 1, 1)
        elif rank == 1:
            expanded = arr[newaxis, :, newaxis]
        elif rank == 2:
            expanded = arr[:, :, newaxis]
        else:
            expanded = arr
        out.append(expanded)
    return out[0] if len(out) == 1 else out
+
+
def vstack(tup):
    """Stack arrays in sequence vertically (row wise).

    Each element of tup is promoted with atleast_2d and the results are
    concatenated along the first axis, so all inputs must agree on
    every axis but the first.  vstack will rebuild arrays divided by
    vsplit.

    Arguments:
      tup -- sequence of array-likes with compatible shapes.
    Returns:
      the stacked array.
    Examples:
      >>> vstack(((1,2,3),(2,3,4))).tolist()
      [[1, 2, 3], [2, 3, 4]]
    """
    # BUG FIX: map() is a lazy iterator on Python 3 and concatenate
    # needs a sequence; a list comprehension is identical on Python 2.
    return _nx.concatenate([atleast_2d(a) for a in tup], 0)
+
def hstack(tup):
    """Stack arrays in sequence horizontally (column wise).

    Each element of tup is promoted with atleast_1d and the results are
    concatenated: 1-d inputs are joined end to end, higher-dimensional
    inputs are joined along their second axis (matching the documented
    examples).  hstack will rebuild arrays divided by hsplit.

    Arguments:
      tup -- sequence of array-likes with compatible shapes.
    Returns:
      the stacked array.
    Examples:
      >>> hstack(((1,2,3),(2,3,4))).tolist()
      [1, 2, 3, 2, 3, 4]
    """
    # BUG FIX: map() is lazy on Python 3; materialize the promoted
    # arrays.  Also dispatch on rank: concatenating 1-d arrays along
    # axis 1 is invalid, yet the docstring examples show end-to-end
    # joining for 1-d inputs.
    arrs = [atleast_1d(a) for a in tup]
    axis = 0 if arrs and arrs[0].ndim == 1 else 1
    return _nx.concatenate(arrs, axis)
+
def column_stack(tup):
    """Stack 1D arrays as columns into a 2D array.

    Each 1-d input of length N becomes one column of the result, so all
    inputs must have the same length.

    Arguments:
      tup -- sequence of 1D array-likes of equal length.
    Returns:
      2D array with one column per input.
    Examples:
      >>> column_stack(((1,2,3),(2,3,4))).tolist()
      [[1, 2], [2, 3], [3, 4]]
    """
    # BUG FIX: map() is a lazy iterator on Python 3; build a real list
    # (identical behavior on Python 2, where map returns a list).
    arrays = [_nx.transpose(atleast_2d(a)) for a in tup]
    return _nx.concatenate(arrays, 1)
+
def dstack(tup):
    """Stack arrays in sequence depth wise (along the third dimension).

    Each element of tup is promoted with atleast_3d and the results are
    concatenated along the third axis; all inputs must agree on every
    axis but the third.  This is a simple way to stack 2D arrays
    (images) into a single 3D array for processing.  dstack will
    rebuild arrays divided by dsplit.

    Arguments:
      tup -- sequence of array-likes with compatible shapes.
    Returns:
      the stacked array (at least 3-d).
    Examples:
      >>> dstack(((1,2,3),(2,3,4))).tolist()
      [[[1, 2], [2, 3], [3, 4]]]
    """
    # BUG FIX: map() is a lazy iterator on Python 3 and concatenate
    # needs a sequence; a list comprehension is identical on Python 2.
    return _nx.concatenate([atleast_3d(a) for a in tup], 2)
+
def _replace_zero_by_x_arrays(sub_arys):
    # Replace, in place, any sub-array that is 0-d or has a zero-length
    # dimension with a fresh empty 1-d array.  Works around slicing
    # producing degenerate (e.g. 0x10) arrays in array_split.
    for i in range(len(sub_arys)):
        if len(_nx.shape(sub_arys[i])) == 0:
            sub_arys[i] = _nx.array([])
        elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)):
            # NOTE(review): sometrue is the legacy spelling of any()
            sub_arys[i] = _nx.array([])
    return sub_arys
+
def array_split(ary, indices_or_sections, axis=0):
    """Divide an array into a list of sub-arrays.

    Divide ary into sub-arrays along the given axis.  If
    indices_or_sections is an integer, ary is divided into that many
    (close to) equally sized arrays; when an equal split is impossible,
    each of the leading sub-arrays gets one extra element.  If it is a
    sequence of sorted integers, its entries define the indexes where
    ary is split (an empty sequence yields a single sub-array equal to
    the original).

    Arguments:
      ary -- N-D array to be divided.
      indices_or_sections -- integer section count or 1D sequence of
          split indices.
      axis -- integer, default 0; axis along which to split.
    Returns:
      list of sub-arrays.
    Raises:
      ValueError -- if a non-positive section count is given.
    """
    try:
        Ntotal = ary.shape[axis]
    except AttributeError:
        Ntotal = len(ary)
    try:  # handle scalar case.
        Nsections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [Ntotal]
    except TypeError:  # indices_or_sections is a scalar, not an array.
        Nsections = int(indices_or_sections)
        if Nsections <= 0:
            # raise E(msg) works on both Python 2 and 3; the original
            # "raise E, msg" form is Python-2 only.
            raise ValueError('number sections must be larger than 0.')
        Neach_section, extras = divmod(Ntotal, Nsections)
        section_sizes = ([0]
                         + extras * [Neach_section + 1]
                         + (Nsections - extras) * [Neach_section])
        div_points = _nx.array(section_sizes).cumsum()

    sub_arys = []
    # swap the split axis to the front so plain slicing divides it
    sary = _nx.swapaxes(ary, axis, 0)
    for i in range(Nsections):
        st = div_points[i]
        end = div_points[i + 1]
        sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))

    # there is a weird issue with array slicing that allows
    # 0x10 arrays and other such things.  The following kluge is
    # needed to get around this issue.
    sub_arys = _replace_zero_by_x_arrays(sub_arys)
    # end kluge.

    return sub_arys
+
def split(ary, indices_or_sections, axis=0):
    """Divide an array into a list of equal sub-arrays.

    Identical to array_split() except that when indices_or_sections is
    an integer that does not evenly divide the axis length, a
    ValueError is raised instead of producing unequal pieces.

    Arguments:
      ary -- N-D array to be divided.
      indices_or_sections -- integer section count or 1D sequence of
          split indices.
      axis -- integer, default 0; axis along which to split.
    Returns:
      list of sub-arrays.
    Raises:
      ValueError -- if an integer section count does not evenly divide
          the axis.
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # scalar: enforce an exact division before delegating
        sections = indices_or_sections
        N = ary.shape[axis]
        if N % sections:
            # raise E(msg) works on both Python 2 and 3; the original
            # "raise E, msg" form is Python-2 only.
            raise ValueError(
                'array split does not result in an equal division')
    return array_split(ary, indices_or_sections, axis)
+
+def hsplit(ary,indices_or_sections):
+ """ Split ary into multiple columns of sub-arrays
+
+ Description:
+ Split a single array into multiple sub arrays. The array is
+ divided into groups of columns. If indices_or_sections is
+ an integer, ary is divided into that many equally sized sub arrays.
+ If it is impossible to make the sub-arrays equally sized, the
+ operation throws a ValueError exception. See array_split and
+ split for other options on indices_or_sections.
+ Arguments:
+ ary -- N-D array.
+ Array to be divided into sub-arrays.
+ indices_or_sections -- integer or 1D array.
+ If integer, defines the number of (close to) equal sized
+ sub-arrays. If it is a 1D array of sorted indices, it
+ defines the indexes at which ary is divided. Any empty
+ list results in a single sub-array equal to the original
+ array.
+ Returns:
+ sequence of sub-arrays. The returned arrays have the same
+ number of dimensions as the input array.
+ Related:
+ hstack, split, array_split, vsplit, dsplit.
+ Examples:
+ >>> import scipy
+ >>> a= array((1,2,3,4))
+ >>> scipy.hsplit(a,2)
+ [array([1, 2]), array([3, 4])]
+ >>> a = array([[1,2,3,4],[1,2,3,4]])
+ [array([[1, 2],
+ [1, 2]]), array([[3, 4],
+ [3, 4]])]
+
+ """
+ if len(_nx.shape(ary)) == 0:
+ raise ValueError, 'hsplit only works on arrays of 1 or more dimensions'
+ if len(ary.shape) > 1:
+ return split(ary,indices_or_sections,1)
+ else:
+ return split(ary,indices_or_sections,0)
+
+def vsplit(ary,indices_or_sections):
+ """ Split ary into multiple rows of sub-arrays
+
+ Description:
+ Split a single array into multiple sub arrays. The array is
+ divided into groups of rows. If indices_or_sections is
+ an integer, ary is divided into that many equally sized sub arrays.
+ If it is impossible to make the sub-arrays equally sized, the
+ operation throws a ValueError exception. See array_split and
+ split for other options on indices_or_sections.
+ Arguments:
+ ary -- N-D array.
+ Array to be divided into sub-arrays.
+ indices_or_sections -- integer or 1D array.
+ If integer, defines the number of (close to) equal sized
+ sub-arrays. If it is a 1D array of sorted indices, it
+ defines the indexes at which ary is divided. Any empty
+ list results in a single sub-array equal to the original
+ array.
+ Returns:
+ sequence of sub-arrays. The returned arrays have the same
+ number of dimensions as the input array.
+ Caveats:
+ How should we handle 1D arrays here? I am currently raising
+ an error when I encounter them. Any better approach?
+
+ Should we reduce the returned array to their minium dimensions
+ by getting rid of any dimensions that are 1?
+ Related:
+ vstack, split, array_split, hsplit, dsplit.
+ Examples:
+ import scipy
+ >>> a = array([[1,2,3,4],
+ ... [1,2,3,4]])
+ >>> scipy.vsplit(a)
+ [array([ [1, 2, 3, 4]]), array([ [1, 2, 3, 4]])]
+
+ """
+ if len(_nx.shape(ary)) < 2:
+ raise ValueError, 'vsplit only works on arrays of 2 or more dimensions'
+ return split(ary,indices_or_sections,0)
+
+def dsplit(ary,indices_or_sections):
+ """ Split ary into multiple sub-arrays along the 3rd axis (depth)
+
+ Description:
+ Split a single array into multiple sub arrays. The array is
+ divided into groups along the 3rd axis. If indices_or_sections is
+ an integer, ary is divided into that many equally sized sub arrays.
+ If it is impossible to make the sub-arrays equally sized, the
+ operation throws a ValueError exception. See array_split and
+ split for other options on indices_or_sections.
+ Arguments:
+ ary -- N-D array.
+ Array to be divided into sub-arrays.
+ indices_or_sections -- integer or 1D array.
+ If integer, defines the number of (close to) equal sized
+ sub-arrays. If it is a 1D array of sorted indices, it
+ defines the indexes at which ary is divided. Any empty
+ list results in a single sub-array equal to the original
+ array.
+ Returns:
+ sequence of sub-arrays. The returned arrays have the same
+ number of dimensions as the input array.
+ Caveats:
+ See vsplit caveats.
+ Related:
+ dstack, split, array_split, hsplit, vsplit.
+ Examples:
+ >>> a = array([[[1,2,3,4],[1,2,3,4]]])
+ [array([ [[1, 2],
+ [1, 2]]]), array([ [[3, 4],
+ [3, 4]]])]
+
+ """
+ if len(_nx.shape(ary)) < 3:
+ raise ValueError, 'vsplit only works on arrays of 3 or more dimensions'
+ return split(ary,indices_or_sections,2)
+
diff --git a/numpy/base/src/_compiled_base.c b/numpy/base/src/_compiled_base.c
new file mode 100644
index 000000000..3ce3743d7
--- /dev/null
+++ b/numpy/base/src/_compiled_base.c
@@ -0,0 +1,453 @@
+#include "Python.h"
+#include "structmember.h"
+#include "scipy/arrayobject.h"
+
+static PyObject *ErrorObject;
+#define Py_Try(BOOLEAN) {if (!(BOOLEAN)) goto fail;}
+#define Py_Assert(BOOLEAN,MESS) {if (!(BOOLEAN)) { \
+ PyErr_SetString(ErrorObject, (MESS)); \
+ goto fail;} \
+ }
+
+static intp
+incr_slot_ (double x, double *bins, intp lbins)
+{
+ intp i ;
+ for ( i = 0 ; i < lbins ; i ++ )
+ if ( x < bins [i] )
+ return i ;
+ return lbins ;
+}
+
+static intp
+decr_slot_ (double x, double * bins, intp lbins)
+{
+ intp i ;
+ for ( i = lbins - 1 ; i >= 0; i -- )
+ if (x < bins [i])
+ return i + 1 ;
+ return 0 ;
+}
+
/* Classify the ordering of the lena doubles in a: 1 if nondecreasing,
 * -1 if nonincreasing, 0 otherwise.  The candidate direction is
 * decided by the first pair of elements. */
static int
monotonic_ (double * a, int lena)
{
    int k;
    if (a[0] <= a[1]) {
        /* candidate: monotonically increasing */
        for (k = 1; k + 1 < lena; k++) {
            if (a[k] > a[k + 1]) {
                return 0;
            }
        }
        return 1;
    }
    /* candidate: monotonically decreasing */
    for (k = 1; k + 1 < lena; k++) {
        if (a[k] < a[k + 1]) {
            return 0;
        }
    }
    return -1;
}
+
+
+
+static intp
+mxx (intp *i , intp len)
+{
+ /* find the index of the maximum element of an integer array */
+ intp mx = 0, max = i[0] ;
+ intp j ;
+ for ( j = 1 ; j < len; j ++ )
+ if ( i [j] > max )
+ {max = i [j] ;
+ mx = j ;}
+ return mx;
+}
+
+static intp
+mnx (intp *i , intp len)
+{
+ /* find the index of the minimum element of an integer array */
+ intp mn = 0, min = i [0] ;
+ intp j ;
+ for ( j = 1 ; j < len; j ++ )
+ if ( i [j] < min )
+ {min = i [j] ;
+ mn = j ;}
+ return mn;
+}
+
+
+static PyObject *
+arr_bincount(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ /* histogram accepts one or two arguments. The first is an array
+ * of non-negative integers and the second, if present, is an
+ * array of weights, which must be promotable to double.
+ * Call these arguments list and weight. Both must be one-
+ * dimensional. len (weight) == len(list)
+ * If weight is not present:
+ * histogram (list) [i] is the number of occurrences of i in list.
+ * If weight is present:
+ * histogram (list, weight) [i] is the sum of all weight [j]
+ * where list [j] == i. */
+ /* self is not used */
+ PyArray_Descr *type;
+ PyObject *list = NULL, *weight=Py_None ;
+ PyObject *lst=NULL, *ans=NULL, *wts=NULL;
+ intp *numbers, *ians, len , mxi, mni, ans_size;
+ int i;
+ double *weights , *dans;
+ static char *kwlist[] = {"list", "weights", NULL};
+
+
+ Py_Try(PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist,
+ &list, &weight));
+ Py_Try(lst = PyArray_ContiguousFromAny(list, PyArray_INTP, 1, 1));
+ len = PyArray_SIZE(lst);
+ numbers = (intp *) PyArray_DATA(lst);
+ mxi = mxx (numbers, len) ;
+ mni = mnx (numbers, len) ;
+ Py_Assert(numbers[mni] >= 0,
+ "irst argument of bincount must be non-negative");
+ ans_size = numbers [mxi] + 1 ;
+ type = PyArray_DescrFromType(PyArray_INTP);
+ if (weight == Py_None) {
+ Py_Try(ans = PyArray_Zeros(1, &ans_size, type, 0));
+ ians = (intp *)(PyArray_DATA(ans));
+ for (i = 0 ; i < len ; i++)
+ ians [numbers [i]] += 1 ;
+ Py_DECREF(lst);
+ }
+ else {
+ Py_Try(wts = PyArray_ContiguousFromAny(weight,
+ PyArray_DOUBLE, 1, 1));
+ weights = (double *)PyArray_DATA (wts);
+ Py_Assert(PyArray_SIZE(wts) == len, "bincount: length of weights " \
+ "does not match that of list");
+ type = PyArray_DescrFromType(PyArray_DOUBLE);
+ Py_Try(ans = PyArray_Zeros(1, &ans_size, type, 0));
+ dans = (double *)PyArray_DATA (ans);
+ for (i = 0 ; i < len ; i++) {
+ dans[numbers[i]] += weights[i];
+ }
+ Py_DECREF(lst);
+ Py_DECREF(wts);
+ }
+ return ans;
+
+ fail:
+ Py_XDECREF(lst);
+ Py_XDECREF(wts);
+ Py_XDECREF(ans);
+ return NULL;
+}
+
+
+static PyObject *
+arr_digitize(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ /* digitize (x, bins) returns an array of python integers the same
+ length of x. The values i returned are such that
+ bins [i - 1] <= x < bins [i] if bins is monotonically increasing,
+ or bins [i - 1] > x >= bins [i] if bins is monotonically decreasing.
+ Beyond the bounds of bins, returns either i = 0 or i = len (bins)
+ as appropriate. */
+ /* self is not used */
+ PyObject *ox, *obins ;
+ PyObject *ax=NULL, *abins=NULL, *aret=NULL;
+ double *dx, *dbins ;
+ intp lbins, lx ; /* lengths */
+ intp *iret;
+ int m, i ;
+ static char *kwlist[] = {"x", "bins", NULL};
+ PyArray_Descr *type;
+
+ Py_Try(PyArg_ParseTupleAndKeywords(args, kwds, "OO", kwlist,
+ &ox, &obins));
+
+ type = PyArray_DescrFromType(PyArray_DOUBLE);
+ Py_Try(ax=PyArray_FromAny(ox, type, 1, 1, CARRAY_FLAGS));
+ Py_Try(abins = PyArray_FromAny(obins, type, 1, 1, CARRAY_FLAGS));
+
+ lx = PyArray_SIZE(ax);
+ dx = (double *)PyArray_DATA(ax);
+ lbins = PyArray_SIZE(abins);
+ dbins = (double *)PyArray_DATA(abins);
+ Py_Try(aret = PyArray_SimpleNew(1, &lx, PyArray_INTP));
+ iret = (intp *)PyArray_DATA(aret);
+
+ Py_Assert(lx > 0 && lbins > 0,
+ "x and bins both must have non-zero length");
+
+ if (lbins == 1) {
+ for (i=0 ; i<lx ; i++)
+ if (dx [i] >= dbins[0])
+ iret[i] = 1;
+ else
+ iret[i] = 0;
+ }
+ else {
+ m = monotonic_ (dbins, lbins) ;
+ if ( m == -1 ) {
+ for ( i = 0 ; i < lx ; i ++ )
+ iret [i] = decr_slot_ (dx [i], dbins, lbins) ;
+ }
+ else if ( m == 1 ) {
+ for ( i = 0 ; i < lx ; i ++ )
+ iret [i] = incr_slot_ ((float)dx [i], dbins, lbins) ;
+ }
+ else Py_Assert(0, "bins must be montonically increasing or decreasing");
+ }
+
+ Py_DECREF(ax);
+ Py_DECREF(abins);
+ return aret;
+
+ fail:
+ Py_XDECREF(ax);
+ Py_XDECREF(abins);
+ Py_XDECREF(aret);
+ return NULL;
+}
+
+
+
+static char arr_insert__doc__[] = "Insert vals sequentially into equivalent 1-d positions indicated by mask.";
+
static PyObject *
arr_insert(PyObject *self, PyObject *args, PyObject *kwdict)
{
    /* Returns input array with values inserted sequentially into places
       indicated by the mask
     */
    PyObject *mask=NULL, *vals=NULL;
    PyArrayObject *ainput=NULL, *amask=NULL, *avals=NULL,
        *tmp=NULL;
    int numvals, totmask, sameshape;
    char *input_data, *mptr, *vptr, *zero=NULL;
    int melsize, delsize, copied, nd;
    intp *instrides, *inshape;
    int mindx, rem_indx, indx, i, k, objarray;

    static char *kwlist[] = {"input","mask","vals",NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwdict, "O&OO", kwlist,
                                     PyArray_Converter, &ainput,
                                     &mask, &vals))
        goto fail;

    amask = (PyArrayObject *) PyArray_FROM_OF(mask, CARRAY_FLAGS);
    if (amask == NULL) goto fail;
    /* Cast an object array */
    if (amask->descr->type_num == PyArray_OBJECT) {
        tmp = (PyArrayObject *)PyArray_Cast(amask, PyArray_INTP);
        if (tmp == NULL) goto fail;
        Py_DECREF(amask);
        amask = tmp;
    }

    /* The mask must either match the input's shape exactly or be a
       1-d array with the same total number of elements. */
    sameshape = 1;
    if (amask->nd == ainput->nd) {
        for (k=0; k < amask->nd; k++)
            if (amask->dimensions[k] != ainput->dimensions[k])
                sameshape = 0;
    }
    else { /* Test to see if amask is 1d */
        if (amask->nd != 1) sameshape = 0;
        else if ((PyArray_SIZE(ainput)) != PyArray_SIZE(amask)) sameshape = 0;
    }
    if (!sameshape) {
        PyErr_SetString(PyExc_TypeError,
                        "mask array must be 1-d or same shape as input array");
        goto fail;
    }

    avals = (PyArrayObject *)PyArray_FromObject(vals, ainput->descr->type_num, 0, 1);
    if (avals == NULL) goto fail;

    numvals = PyArray_SIZE(avals);
    nd = ainput->nd;
    input_data = ainput->data;
    mptr = amask->data;
    melsize = amask->descr->elsize;
    vptr = avals->data;
    delsize = avals->descr->elsize;
    /* zero element used to test mask entries for truth via memcmp */
    zero = PyArray_Zero(amask);
    if (zero == NULL)
        goto fail;
    objarray = (ainput->descr->type_num == PyArray_OBJECT);

    /* Handle zero-dimensional case separately */
    if (nd == 0) {
        if (memcmp(mptr,zero,melsize) != 0) {
            /* Copy value element over to input array */
            memcpy(input_data,vptr,delsize);
            /* object arrays store pointers: keep the refcount right */
            if (objarray) Py_INCREF(*((PyObject **)vptr));
        }
        Py_DECREF(amask);
        Py_DECREF(avals);
        PyDataMem_FREE(zero);
        Py_INCREF(Py_None);
        return Py_None;
    }

    /* Walk through mask array, when non-zero is encountered
       copy next value in the vals array to the input array.
       If we get through the value array, repeat it as necessary.
    */
    totmask = (int) PyArray_SIZE(amask);
    copied = 0;
    instrides = ainput->strides;
    inshape = ainput->dimensions;
    for (mindx = 0; mindx < totmask; mindx++) {
        if (memcmp(mptr,zero,melsize) != 0) {
            /* compute indx into input array: convert the flat mask
               index into a byte offset using the input's strides,
               so non-contiguous inputs are handled correctly
            */
            rem_indx = mindx;
            indx = 0;
            for(i=nd-1; i > 0; --i) {
                indx += (rem_indx % inshape[i]) * instrides[i];
                rem_indx /= inshape[i];
            }
            indx += rem_indx * instrides[0];
            /* fprintf(stderr, "mindx = %d, indx=%d\n", mindx, indx); */
            /* Copy value element over to input array */
            memcpy(input_data+indx,vptr,delsize);
            if (objarray) Py_INCREF(*((PyObject **)vptr));
            vptr += delsize;
            copied += 1;
            /* If we move past value data.  Reset */
            if (copied >= numvals) vptr = avals->data;
        }
        mptr += melsize;
    }

    Py_DECREF(amask);
    Py_DECREF(avals);
    PyDataMem_FREE(zero);
    Py_DECREF(ainput);
    Py_INCREF(Py_None);
    return Py_None;

 fail:
    /* zero is NULL until allocated, so freeing here is safe */
    PyDataMem_FREE(zero);
    Py_XDECREF(ainput);
    Py_XDECREF(amask);
    Py_XDECREF(avals);
    return NULL;
}
+
+
/* Concrete descriptor types discovered at runtime by define_types();
   NULL until that initialization runs. */
static PyTypeObject *PyMemberDescr_TypePtr=NULL;
static PyTypeObject *PyGetSetDescr_TypePtr=NULL;

/* Can only be called if doc is currently NULL
*/
static PyObject *
arr_add_docstring(PyObject *dummy, PyObject *args)
{
    PyObject *obj;
    PyObject *str;
    char *docstr;
    static char *msg = "already has a docstring";

    if (!PyArg_ParseTuple(args, "OO!", &obj, &PyString_Type, &str))
        return NULL;

    /* NOTE(review): the object keeps a borrowed pointer into str's
       buffer; the Py_INCREF(str) at the end keeps it alive. */
    docstr = PyString_AS_STRING(str);

/* _TESTDOC1/2 match obj's exact type; _ADDDOC installs docstr into the
   type-specific doc slot, erroring if one is already set. */
#define _TESTDOC1(typebase) (obj->ob_type == &Py##typebase##_Type)
#define _TESTDOC2(typebase) (obj->ob_type == Py##typebase##_TypePtr)
#define _ADDDOC(typebase, doc, name) { \
        Py##typebase##Object *new = (Py##typebase##Object *)obj; \
        if (!(doc)) { \
            doc = docstr; \
        } \
        else { \
            PyErr_Format(PyExc_RuntimeError, \
                         "%s method %s",name, msg); \
            return NULL; \
        } \
    }

    if _TESTDOC1(CFunction)
        _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name)
    else if _TESTDOC1(Type)
        _ADDDOC(Type, new->tp_doc, new->tp_name)
    else if _TESTDOC2(MemberDescr)
        _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name)
    else if _TESTDOC2(GetSetDescr)
        _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name)
    else {
        PyErr_SetString(PyExc_TypeError,
                        "Cannot set a docstring for that object");
        return NULL;
    }

#undef _TESTDOC1
#undef _TESTDOC2
#undef _ADDDOC

    Py_INCREF(str);
    Py_INCREF(Py_None);
    return Py_None;
}
+
/* Module method table mapping Python-visible names to the C
   implementations above. */
static struct PyMethodDef methods[] = {
    {"_insert", (PyCFunction)arr_insert, METH_VARARGS | METH_KEYWORDS,
     arr_insert__doc__},
    {"bincount", (PyCFunction)arr_bincount,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"digitize", (PyCFunction)arr_digitize, METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"add_docstring", (PyCFunction)arr_add_docstring, METH_VARARGS,
     NULL},
    {NULL, NULL}    /* sentinel */
};
+
+static void
+define_types(void)
+{
+ PyObject *tp_dict;
+ PyObject *myobj;
+
+ tp_dict = PyArrayDescr_Type.tp_dict;
+ /* Get "subdescr" */
+ myobj = PyDict_GetItemString(tp_dict, "fields");
+ if (myobj == NULL) return;
+ PyGetSetDescr_TypePtr = myobj->ob_type;
+ myobj = PyDict_GetItemString(tp_dict, "alignment");
+ if (myobj == NULL) return;
+ PyMemberDescr_TypePtr = myobj->ob_type;
+ return;
+}
+
+/* Initialization function for the module (*must* be called initArray) */
+
DL_EXPORT(void) init_compiled_base(void) {
    PyObject *m, *d, *s;

    /* Create the module and add the functions */
    m = Py_InitModule("scipy.base._compiled_base", methods);

    /* Import the array and ufunc objects */
    import_array();

    /* Add some symbolic constants to the module */
    d = PyModule_GetDict(m);

    s = PyString_FromString("0.5");
    PyDict_SetItemString(d, "__version__", s);
    Py_DECREF(s);

    /* NOTE(review): ErrorObject is a plain string, not an exception
       class, and after the DECREF the global keeps only a reference
       borrowed from the module dict. */
    ErrorObject = PyString_FromString("scipy.base._compiled_base.error");
    PyDict_SetItemString(d, "error", ErrorObject);
    Py_DECREF(ErrorObject);


    /* define PyGetSetDescr_Type and PyMemberDescr_Type */
    define_types();

    /* Check for errors */
    if (PyErr_Occurred())
        Py_FatalError("can't initialize module _compiled_base");
}
diff --git a/numpy/base/src/_isnan.c b/numpy/base/src/_isnan.c
new file mode 100644
index 000000000..f5965fbc7
--- /dev/null
+++ b/numpy/base/src/_isnan.c
@@ -0,0 +1,47 @@
+/* Adapted from cephes */
+
/* Adapted from cephes */

/* Return nonzero iff x is an IEEE-754 NaN: exponent bits all set and
   a nonzero mantissa.  The union lets the double's bit pattern be
   examined as ints or shorts, with the word holding the sign/exponent
   selected by SIZEOF_INT and WORDS_BIGENDIAN. */
static int
isnan(double x)
{
    union
    {
        double d;
        unsigned short s[4];
        unsigned int i[2];
    } u;

    u.d = x;

#if SIZEOF_INT == 4

#ifdef WORDS_BIGENDIAN /* defined in pyconfig.h */
    /* high word first: 0x7ff00000 masks the 11 exponent bits */
    if( ((u.i[0] & 0x7ff00000) == 0x7ff00000)
        && (((u.i[0] & 0x000fffff) != 0) || (u.i[1] != 0)))
        return 1;
#else
    if( ((u.i[1] & 0x7ff00000) == 0x7ff00000)
        && (((u.i[1] & 0x000fffff) != 0) || (u.i[0] != 0)))
        return 1;
#endif

#else  /* SIZEOF_INT != 4 */

#ifdef WORDS_BIGENDIAN
    /* 16-bit path: 0x7ff0 masks the exponent in the top short */
    if( (u.s[0] & 0x7ff0) == 0x7ff0)
    {
        if( ((u.s[0] & 0x000f) | u.s[1] | u.s[2] | u.s[3]) != 0 )
            return 1;
    }
#else
    if( (u.s[3] & 0x7ff0) == 0x7ff0)
    {
        if( ((u.s[3] & 0x000f) | u.s[2] | u.s[1] | u.s[0]) != 0 )
            return 1;
    }
#endif

#endif  /* SIZEOF_INT */

    return 0;
}
+
diff --git a/numpy/base/src/_signbit.c b/numpy/base/src/_signbit.c
new file mode 100644
index 000000000..d128cb1fb
--- /dev/null
+++ b/numpy/base/src/_signbit.c
@@ -0,0 +1,32 @@
+/* Adapted from cephes */
+
/* Adapted from cephes */

/* Return nonzero iff the IEEE-754 sign bit of x is set (true for
   negative numbers, -0.0, and negative NaN/Inf).  The sign bit is the
   top bit of the high word, so interpreting that word as a signed
   int/short and testing < 0 reads it directly; the word is selected
   by SIZEOF_INT and WORDS_BIGENDIAN. */
static int
signbit(double x)
{
    union
    {
        double d;
        short s[4];
        int i[2];
    } u;

    u.d = x;

#if SIZEOF_INT == 4

#ifdef WORDS_BIGENDIAN /* defined in pyconfig.h */
    return u.i[0] < 0;
#else
    return u.i[1] < 0;
#endif

#else  /* SIZEOF_INT != 4 */

#ifdef WORDS_BIGENDIAN
    return u.s[0] < 0;
#else
    return u.s[3] < 0;
#endif

#endif  /* SIZEOF_INT */
}
diff --git a/numpy/base/src/_sortmodule.c.src b/numpy/base/src/_sortmodule.c.src
new file mode 100644
index 000000000..47c7520c1
--- /dev/null
+++ b/numpy/base/src/_sortmodule.c.src
@@ -0,0 +1,482 @@
+/* The purpose of this module is to add faster sort functions
+ that are type-specific. This is done by altering the
+ function table for the builtin descriptors.
+
+ These sorting functions are copied almost directly from numarray
+ with a few modifications (complex comparisons compare the imaginary
+ part if the real parts are equal, for example), and the names
+ are changed.
+
+ The original sorting code is due to Charles R. Harris who wrote
+ it for numarray.
+*/
+
+/* Quick sort is usually the fastest, but the worst case scenario can
+ be slower than the merge and heap sorts. The merge sort requires
+ extra memory and so for large arrays may not be useful.
+
+ The merge sort is *stable*, meaning that equal elements keep
+ their original relative order, so it can be used to
+ implement lexicographic sorting on multiple keys.
+
+ The heap sort is included for completeness.
+*/
+
+
+#include "Python.h"
+#include "scipy/arrayobject.h"
+
+#define PYA_QS_STACK 100
+#define SMALL_QUICKSORT 15
+#define SMALL_MERGESORT 20
+#define STDC_LT(a,b) ((a) < (b))
+#define STDC_LE(a,b) ((a) <= (b))
+#define STDC_EQ(a,b) ((a) == (b))
+#define SWAP(a,b) {SWAP_temp = (b); (b)=(a); (a) = SWAP_temp;}
+#define NUMC_LT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag): ((p).real < (q).real)))
+#define NUMC_LE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag): ((p).real <= (q).real)))
+#define NUMC_EQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag))
+
+/**begin repeat
+#TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE#
+#type=Bool,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble#
+#lessthan=STDC_LT*14,NUMC_LT*3#
+#lessequal=STDC_LE*14,NUMC_LE*3#
+ **/
+/* In-place iterative quicksort over contiguous @type@ data.
+   Median-of-three pivot; the larger partition is pushed on an explicit
+   stack (PYA_QS_STACK entries) so stack depth stays O(log n); runs of
+   <= SMALL_QUICKSORT elements are finished with insertion sort. */
+static int
+@TYPE@_quicksort(@type@ *start, intp num, void *unused)
+{
+ @type@ *pl = start;
+ @type@ *pr = start + num - 1;
+ @type@ vp, SWAP_temp;
+ @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pt;
+
+ for(;;) {
+ while ((pr - pl) > SMALL_QUICKSORT) {
+ /* quicksort partition */
+ pm = pl + ((pr - pl) >> 1);
+ if (@lessthan@(*pm,*pl)) SWAP(*pm,*pl);
+ if (@lessthan@(*pr,*pm)) SWAP(*pr,*pm);
+ if (@lessthan@(*pm,*pl)) SWAP(*pm,*pl);
+ vp = *pm;
+ pi = pl;
+ pj = pr - 1;
+ SWAP(*pm,*pj);
+ for(;;) {
+ do ++pi; while (@lessthan@(*pi,vp));
+ do --pj; while (@lessthan@(vp,*pj));
+ if (pi >= pj) break;
+ SWAP(*pi,*pj);
+ }
+ SWAP(*pi,*(pr-1));
+ /* push largest partition on stack */
+ if (pi - pl < pr - pi) {
+ *sptr++ = pi + 1;
+ *sptr++ = pr;
+ pr = pi - 1;
+ }else{
+ *sptr++ = pl;
+ *sptr++ = pi - 1;
+ pl = pi + 1;
+ }
+ }
+ /* insertion sort */
+ for(pi = pl + 1; pi <= pr; ++pi) {
+ vp = *pi;
+ for(pj = pi, pt = pi - 1; \
+ pj > pl && @lessthan@(vp, *pt);) {
+ *pj-- = *pt--;
+ }
+ *pj = vp;
+ }
+ if (sptr == stack) break;
+ pr = *(--sptr);
+ pl = *(--sptr);
+ }
+ return 0;
+}
+
+/* Indirect (argsort) quicksort: permutes the index array tosort so
+   that v[tosort[0..num-1]] is ascending; v itself is not moved.
+   Same structure as @TYPE@_quicksort. */
+static int
+@TYPE@_aquicksort(@type@ *v, intp* tosort, intp num, void *unused)
+{
+ @type@ vp;
+ intp *pl, *pr, SWAP_temp;
+ intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pt, vi;
+
+ pl = tosort;
+ pr = tosort + num - 1;
+
+ for(;;) {
+ while ((pr - pl) > SMALL_QUICKSORT) {
+ /* quicksort partition */
+ pm = pl + ((pr - pl) >> 1);
+ if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl);
+ if (@lessthan@(v[*pr],v[*pm])) SWAP(*pr,*pm);
+ if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl);
+ vp = v[*pm];
+ pi = pl;
+ pj = pr - 1;
+ SWAP(*pm,*pj);
+ for(;;) {
+ do ++pi; while (@lessthan@(v[*pi],vp));
+ do --pj; while (@lessthan@(vp,v[*pj]));
+ if (pi >= pj) break;
+ SWAP(*pi,*pj);
+ }
+ SWAP(*pi,*(pr-1));
+ /* push largest partition on stack */
+ if (pi - pl < pr - pi) {
+ *sptr++ = pi + 1;
+ *sptr++ = pr;
+ pr = pi - 1;
+ }else{
+ *sptr++ = pl;
+ *sptr++ = pi - 1;
+ pl = pi + 1;
+ }
+ }
+ /* insertion sort */
+ for(pi = pl + 1; pi <= pr; ++pi) {
+ vi = *pi;
+ vp = v[vi];
+ for(pj = pi, pt = pi - 1; \
+ pj > pl && @lessthan@(vp, v[*pt]);)
+ {
+ *pj-- = *pt--;
+ }
+ *pj = vi;
+ }
+ if (sptr == stack) break;
+ pr = *(--sptr);
+ pl = *(--sptr);
+ }
+ return 0;
+}
+
+
+/* In-place heapsort.  The data pointer is offset by one so the heap
+   can use the classic 1-based parent/child indexing (children of i at
+   2i and 2i+1).  First loop builds a max-heap; second repeatedly pops
+   the root to the end. */
+static int
+@TYPE@_heapsort(@type@ *start, intp n, void *unused)
+{
+
+ @type@ tmp, *a;
+ intp i,j,l;
+
+ /* The array needs to be offset by one for heapsort indexing */
+ a = start - 1;
+
+ for (l = n>>1; l > 0; --l) {
+ tmp = a[l];
+ for (i = l, j = l<<1; j <= n;) {
+ if (j < n && @lessthan@(a[j], a[j+1]))
+ j += 1;
+ if (@lessthan@(tmp, a[j])) {
+ a[i] = a[j];
+ i = j;
+ j += j;
+ }else
+ break;
+ }
+ a[i] = tmp;
+ }
+
+ for (; n > 1;) {
+ tmp = a[n];
+ a[n] = a[1];
+ n -= 1;
+ for (i = 1, j = 2; j <= n;) {
+ if (j < n && @lessthan@(a[j], a[j+1]))
+ j++;
+ if (@lessthan@(tmp, a[j])) {
+ a[i] = a[j];
+ i = j;
+ j += j;
+ }else
+ break;
+ }
+ a[i] = tmp;
+ }
+ return 0;
+}
+
+/* Indirect (argsort) heapsort: permutes tosort so v[tosort[...]] is
+   ascending.  Same 1-based heap layout as @TYPE@_heapsort, but the
+   comparisons go through v while only the indices move. */
+static int
+@TYPE@_aheapsort(@type@ *v, intp *tosort, intp n, void *unused)
+{
+ intp *a, i,j,l, tmp;
+ /* The arrays need to be offset by one for heapsort indexing */
+ a = tosort - 1;
+
+ for (l = n>>1; l > 0; --l) {
+ tmp = a[l];
+ for (i = l, j = l<<1; j <= n;) {
+ if (j < n && @lessthan@(v[a[j]], v[a[j+1]]))
+ j += 1;
+ if (@lessthan@(v[tmp], v[a[j]])) {
+ a[i] = a[j];
+ i = j;
+ j += j;
+ }else
+ break;
+ }
+ a[i] = tmp;
+ }
+
+ for (; n > 1;) {
+ tmp = a[n];
+ a[n] = a[1];
+ n -= 1;
+ for (i = 1, j = 2; j <= n;) {
+ if (j < n && @lessthan@(v[a[j]], v[a[j+1]]))
+ j++;
+ if (@lessthan@(v[tmp], v[a[j]])) {
+ a[i] = a[j];
+ i = j;
+ j += j;
+ }else
+ break;
+ }
+ a[i] = tmp;
+ }
+
+ return 0;
+}
+
+/* Recursive worker for @TYPE@_mergesort: sort the inclusive range
+   [pl, pr].  pw is caller-provided scratch large enough to hold the
+   left half; small ranges fall through to insertion sort.  Stability
+   comes from @lessequal@ preferring the left run on ties. */
+static void
+@TYPE@_mergesort0(@type@ *pl, @type@ *pr, @type@ *pw)
+{
+ @type@ vp, *pi, *pj, *pk, *pm;
+
+ if (pr - pl > SMALL_MERGESORT) {
+ /* merge sort */
+ pm = pl + ((pr - pl + 1)>>1);
+ @TYPE@_mergesort0(pl,pm-1,pw);
+ @TYPE@_mergesort0(pm,pr,pw);
+ for(pi = pw, pj = pl; pj < pm; ++pi, ++pj) {
+ *pi = *pj;
+ }
+ for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) {
+ if (@lessequal@(*pk,*pj)) {
+ *pm = *pk;
+ ++pk;
+ }else{
+ *pm = *pj;
+ ++pj;
+ }
+ }
+ for(; pk < pi; ++pm, ++pk) {
+ *pm = *pk;
+ }
+ }else{
+ /* insertion sort */
+ for(pi = pl + 1; pi <= pr; ++pi) {
+ vp = *pi;
+ for(pj = pi, pk = pi - 1;\
+ pj > pl && @lessthan@(vp, *pk); --pj, --pk) {
+ *pj = *pk;
+ }
+ *pj = vp;
+ }
+ }
+}
+
+/* Stable in-place mergesort entry point: allocates the half-size
+   scratch buffer and delegates to @TYPE@_mergesort0.
+   Returns 0 on success, -1 (with MemoryError set) on failure. */
+static int
+@TYPE@_mergesort(@type@ *start, intp num, void *unused)
+{
+ @type@ *pl, *pr, *pw;
+
+ pl = start; pr = pl + num - 1;
+ pw = (@type@ *) PyDataMem_NEW(((1+num/2))*sizeof(@type@));
+
+ if (!pw) {
+ PyErr_NoMemory();
+ return -1;
+ }
+
+ @TYPE@_mergesort0(pl, pr, pw);
+ PyDataMem_FREE(pw);
+ return 0;
+}
+
+/* Recursive worker for the indirect (argsort) mergesort: sorts the
+   index range [pl, pr] so v[...] is ascending; pw is scratch for the
+   left half of the indices. */
+static void
+@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw)
+{
+ @type@ vp;
+ intp vi, *pi, *pj, *pk, *pm;
+
+ if (pr - pl > SMALL_MERGESORT) {
+ /* merge sort */
+ pm = pl + ((pr - pl + 1)>>1);
+ @TYPE@_amergesort0(pl,pm-1,v,pw);
+ @TYPE@_amergesort0(pm,pr,v,pw);
+ for(pi = pw, pj = pl; pj < pm; ++pi, ++pj) {
+ *pi = *pj;
+ }
+ for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) {
+ if (@lessequal@(v[*pk],v[*pj])) {
+ *pm = *pk;
+ ++pk;
+ }else{
+ *pm = *pj;
+ ++pj;
+ }
+ }
+ for(; pk < pi; ++pm, ++pk) {
+ *pm = *pk;
+ }
+ }else{
+ /* insertion sort */
+ for(pi = pl + 1; pi <= pr; ++pi) {
+ vi = *pi;
+ vp = v[vi];
+ for(pj = pi, pk = pi - 1; \
+ pj > pl && @lessthan@(vp, v[*pk]); --pj, --pk) {
+ *pj = *pk;
+ }
+ *pj = vi;
+ }
+ }
+}
+
+/* Indirect (argsort) mergesort entry point: allocate half-size index
+   scratch and delegate to @TYPE@_amergesort0.
+   Returns 0 on success, -1 (with MemoryError set) on failure. */
+static int
+@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, void *unused)
+{
+ intp *pl, *pr, *pw;
+
+ pl = tosort; pr = pl + num - 1;
+ pw = PyDimMem_NEW((1+num/2));
+
+ if (!pw) {
+ PyErr_NoMemory();
+ return -1;
+ }
+
+ @TYPE@_amergesort0(pl, pr, v, pw);
+ PyDimMem_FREE(pw);
+ return 0;
+}
+/**end repeat**/
+
+/* Lexicographic comparison of the first len characters of two
+   Py_UNICODE buffers, analogous to strncmp: returns <0, 0, or >0. */
+static int
+unincmp(Py_UNICODE *s1, Py_UNICODE *s2, register int len)
+{
+ register Py_UNICODE c1, c2;
+ while(len-- > 0) {
+ c1 = *s1++;
+ c2 = *s2++;
+ if (c1 != c2)
+ return (c1 < c2) ? -1 : 1;
+ }
+ return 0;
+}
+
+/**begin repeat
+#TYPE=STRING,UNICODE#
+#comp=strncmp,unincmp#
+#type=char *, Py_UNICODE *#
+*/
+/* Indirect mergesort worker for fixed-width STRING/UNICODE elements.
+   Elements are compared with @comp@ over elsize units; otherwise the
+   structure matches the numeric @TYPE@_amergesort0 above.
+   NOTE(review): the insertion-sort guard uses @comp@(...) <= 0 where
+   the numeric version uses strict less-than -- equal elements are
+   shifted past each other here; confirm stability is still intended. */
+static void
+@TYPE@_amergesort0(intp *pl, intp *pr, @type@*v, intp *pw, int elsize)
+{
+ @type@ vp;
+ intp vi, *pi, *pj, *pk, *pm;
+
+ if (pr - pl > SMALL_MERGESORT) {
+ /* merge sort */
+ pm = pl + ((pr - pl + 1)>>1);
+ @TYPE@_amergesort0(pl,pm-1,v,pw,elsize);
+ @TYPE@_amergesort0(pm,pr,v,pw,elsize);
+ for(pi = pw, pj = pl; pj < pm; ++pi, ++pj) {
+ *pi = *pj;
+ }
+ for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) {
+ if (@comp@(v[*pk],v[*pj],elsize)<=0) {
+ *pm = *pk;
+ ++pk;
+ }else{
+ *pm = *pj;
+ ++pj;
+ }
+ }
+ for(; pk < pi; ++pm, ++pk) {
+ *pm = *pk;
+ }
+ }else{
+ /* insertion sort */
+ for(pi = pl + 1; pi <= pr; ++pi) {
+ vi = *pi;
+ vp = v[vi];
+ for(pj = pi, pk = pi - 1; \
+ pj > pl && (@comp@(vp, v[*pk],elsize)<=0); \
+ --pj, --pk) {
+ *pj = *pk;
+ }
+ *pj = vi;
+ }
+ }
+}
+
+/* Indirect mergesort entry point for STRING/UNICODE arrays.  Unlike
+   the numeric variants this receives the array object so the element
+   size can be read from its descriptor.
+   Returns 0 on success, -1 (with MemoryError set) on failure. */
+static int
+@TYPE@_amergesort(@type@*v, intp *tosort, intp num, PyArrayObject *arr)
+{
+ intp *pl, *pr, *pw;
+ int elsize;
+
+ elsize = arr->descr->elsize;
+
+ pl = tosort; pr = pl + num - 1;
+ pw = PyDimMem_NEW((1+num/2));
+
+ if (!pw) {
+ PyErr_NoMemory();
+ return -1;
+ }
+
+ @TYPE@_amergesort0(pl, pr, v, pw, elsize);
+ PyDimMem_FREE(pw);
+ return 0;
+}
+/**end repeat**/
+
+/* Install the type-specific sort/argsort implementations above into
+   the builtin descriptors' function tables, replacing the defaults.
+   STRING/UNICODE only gain a mergesort-based argsort. */
+static void
+add_sortfuncs(void)
+{
+ PyArray_Descr *descr;
+
+/**begin repeat
+#TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE#
+**/
+ descr = PyArray_DescrFromType(PyArray_@TYPE@);
+ descr->f->sort[PyArray_QUICKSORT] = \
+ (PyArray_SortFunc *)@TYPE@_quicksort;
+ descr->f->argsort[PyArray_QUICKSORT] = \
+ (PyArray_ArgSortFunc *)@TYPE@_aquicksort;
+ descr->f->sort[PyArray_HEAPSORT] = \
+ (PyArray_SortFunc *)@TYPE@_heapsort;
+ descr->f->argsort[PyArray_HEAPSORT] = \
+ (PyArray_ArgSortFunc *)@TYPE@_aheapsort;
+ descr->f->sort[PyArray_MERGESORT] = \
+ (PyArray_SortFunc *)@TYPE@_mergesort;
+ descr->f->argsort[PyArray_MERGESORT] = \
+ (PyArray_ArgSortFunc *)@TYPE@_amergesort;
+/**end repeat**/
+
+ descr = PyArray_DescrFromType(PyArray_STRING);
+ descr->f->argsort[PyArray_MERGESORT] = \
+ (PyArray_ArgSortFunc *)STRING_amergesort;
+ descr = PyArray_DescrFromType(PyArray_UNICODE);
+ descr->f->argsort[PyArray_MERGESORT] = \
+ (PyArray_ArgSortFunc *)UNICODE_amergesort;
+}
+
+/* The module exports no Python-callable methods; its only job is the
+   descriptor-table patching done at import time. */
+static struct PyMethodDef methods[] = {
+ {NULL, NULL, 0}
+};
+
+/* Module init: register the sort functions after the array API is
+   imported.  NOTE(review): import_array() is conventionally a macro,
+   not an expression -- confirm this header's version is usable inside
+   an if-condition. */
+PyMODINIT_FUNC
+init_sort(void) {
+ PyObject *m;
+
+ m = Py_InitModule("_sort", methods);
+
+ if (import_array() < 0) return;
+ add_sortfuncs();
+}
diff --git a/numpy/base/src/arraymethods.c b/numpy/base/src/arraymethods.c
new file mode 100644
index 000000000..2b7a53042
--- /dev/null
+++ b/numpy/base/src/arraymethods.c
@@ -0,0 +1,1647 @@
+
+/* Should only be used if x is known to be an nd-array */
+#define _ARET(x) PyArray_Return((PyArrayObject *)(x))
+
+static char doc_take[] = "a.take(indices, axis=None). Selects the elements "\
+ "in indices from array a along the given axis.";
+
+/* Method wrapper: parse (indices, axis) and delegate to PyArray_Take.
+   NOTE(review): dimension is reset to 0 before parsing, so the
+   effective default axis is 0 even though the docstring says None
+   (MAX_DIMS) -- confirm which default is intended. */
+static PyObject *
+array_take(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int dimension=MAX_DIMS;
+ PyObject *indices;
+ static char *kwlist[] = {"indices", "axis", NULL};
+
+ dimension=0;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist,
+ &indices, PyArray_AxisConverter,
+ &dimension))
+ return NULL;
+
+ return _ARET(PyArray_Take(self, indices, dimension));
+}
+
+static char doc_fill[] = "a.fill(value) places the scalar value at every"\
+ "position in the array.";
+
+/* Method wrapper: delegate to PyArray_FillWithScalar; returns None. */
+static PyObject *
+array_fill(PyArrayObject *self, PyObject *args)
+{
+ PyObject *obj;
+ if (!PyArg_ParseTuple(args, "O", &obj))
+ return NULL;
+ if (PyArray_FillWithScalar(self, obj) < 0) return NULL;
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static char doc_put[] = "a.put(values, indices) sets a.flat[n] = v[n] "\
+ "for each n in indices. v can be scalar or shorter than indices, "\
+ "will repeat.";
+
+/* Method wrapper: delegate to PyArray_Put(values, indices). */
+static PyObject *
+array_put(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *indices, *values;
+ static char *kwlist[] = {"values", "indices", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO", kwlist,
+ &values, &indices))
+ return NULL;
+ return PyArray_Put(self, values, indices);
+}
+
+static char doc_putmask[] = "a.putmask(values, mask) sets a.flat[n] = v[n] "\
+ "for each n where mask.flat[n] is TRUE. v can be scalar.";
+
+/* Method wrapper: delegate to PyArray_PutMask(values, mask). */
+static PyObject *
+array_putmask(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *mask, *values;
+
+ static char *kwlist[] = {"values", "mask", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO", kwlist,
+ &values, &mask))
+ return NULL;
+ return PyArray_PutMask(self, values, mask);
+}
+
+/* Used to reshape a Fortran Array */
+/* Reverse the dimension list in newshape in place (first<->last, etc).
+   array_reshape uses this so a Fortran-order reshape can be done as
+   transpose -> C-order reshape with reversed dims -> transpose. */
+static void
+_reverse_shape(PyArray_Dims *newshape)
+{
+ int i, n = newshape->len;
+ intp *ptr = newshape->ptr;
+ intp *eptr;
+ intp tmp;
+ int len = n >> 1;
+
+ eptr = ptr+n-1;
+ for(i=0; i<len; i++) {
+ tmp = *eptr;
+ *eptr-- = *ptr;
+ *ptr++ = tmp;
+ }
+}
+
+static char doc_reshape[] = \
+ "self.reshape(d1, d2, ..., dn) Return a new array from this one. \n" \
+ "\n The new array must have the same number of elements as self. "\
+ "Also\n a copy of the data only occurs if necessary.";
+
+/* Method wrapper for reshape.  Accepts either a single shape sequence
+   or the dimensions as separate arguments.  Contiguous arrays reshape
+   directly; Fortran-order arrays go through the transpose/reverse/
+   transpose dance (see _reverse_shape); anything else is copied first. */
+static PyObject *
+array_reshape(PyArrayObject *self, PyObject *args)
+{
+ PyArray_Dims newshape;
+ PyObject *ret, *tmp;
+ int n;
+
+ n = PyTuple_Size(args);
+ if (n <= 1) {
+ if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter,
+ &newshape)) return NULL;
+ }
+ else {
+ /* dimensions given as separate arguments */
+ if (!PyArray_IntpConverter(args, &newshape)) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "invalid shape");
+ }
+ goto fail;
+ }
+ }
+
+ /* 1-d target: equivalent to ravel */
+ if (newshape.len == 1) {
+ PyDimMem_FREE(newshape.ptr);
+ return PyArray_Ravel(self, 0);
+ }
+
+ if ((newshape.len == 0) || PyArray_ISCONTIGUOUS(self)) {
+ ret = PyArray_Newshape(self, &newshape);
+ }
+ else if PyArray_ISFORTRAN(self) {
+ tmp = PyArray_Transpose(self, NULL);
+ if (tmp == NULL) goto fail;
+ _reverse_shape(&newshape);
+ ret = PyArray_Newshape((PyArrayObject *)tmp, &newshape);
+ Py_DECREF(tmp);
+ if (ret == NULL) goto fail;
+ tmp = PyArray_Transpose((PyArrayObject *)ret, NULL);
+ Py_DECREF(ret);
+ if (tmp == NULL) goto fail;
+ ret = tmp;
+ }
+ else {
+ /* non-contiguous, non-Fortran: reshape a fresh copy */
+ tmp = PyArray_Copy(self);
+ if (tmp==NULL) goto fail;
+ ret = PyArray_Newshape((PyArrayObject *)tmp, &newshape);
+ Py_DECREF(tmp);
+ }
+ PyDimMem_FREE(newshape.ptr);
+ return _ARET(ret);
+
+ fail:
+ PyDimMem_FREE(newshape.ptr);
+ return NULL;
+}
+
+static char doc_squeeze[] = "m.squeeze() eliminate all length-1 dimensions";
+
+/* Method wrapper: no arguments; delegate to PyArray_Squeeze. */
+static PyObject *
+array_squeeze(PyArrayObject *self, PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+ return _ARET(PyArray_Squeeze(self));
+}
+
+
+
+static char doc_view[] = "a.view(<dtype>) return a new view of array with same data.";
+
+/* Method wrapper: optional dtype argument; delegate to PyArray_View. */
+static PyObject *
+array_view(PyArrayObject *self, PyObject *args)
+{
+ PyArray_Descr *type=NULL;
+ if (!PyArg_ParseTuple(args, "|O&",
+ PyArray_DescrConverter, &type))
+ return NULL;
+
+ return _ARET(PyArray_View(self, type));
+}
+
+static char doc_argmax[] = "a.argmax(axis=None)";
+
+/* Method wrapper: optional axis (default MAX_DIMS = flattened);
+   delegate to PyArray_ArgMax. */
+static PyObject *
+array_argmax(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return _ARET(PyArray_ArgMax(self, axis));
+}
+
+static char doc_argmin[] = "a.argmin(axis=None)";
+
+/* Method wrapper: optional axis (default MAX_DIMS = flattened);
+   delegate to PyArray_ArgMin. */
+static PyObject *
+array_argmin(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return _ARET(PyArray_ArgMin(self, axis));
+}
+
+static char doc_max[] = "a.max(axis=None)";
+
+/* Method wrapper: optional axis; delegate to PyArray_Max. */
+static PyObject *
+array_max(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return PyArray_Max(self, axis);
+}
+
+static char doc_ptp[] = "a.ptp(axis=None) a.max(axis)-a.min(axis)";
+
+/* Method wrapper: optional axis; delegate to PyArray_Ptp. */
+static PyObject *
+array_ptp(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return PyArray_Ptp(self, axis);
+}
+
+
+static char doc_min[] = "a.min(axis=None)";
+
+/* Method wrapper: optional axis; delegate to PyArray_Min. */
+static PyObject *
+array_min(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return PyArray_Min(self, axis);
+}
+
+
+static char doc_swapaxes[] = "a.swapaxes(axis1, axis2) returns new view with axes swapped.";
+
+/* Method wrapper: two int axes; delegate to PyArray_SwapAxes. */
+static PyObject *
+array_swapaxes(PyArrayObject *self, PyObject *args)
+{
+ int axis1, axis2;
+
+ if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) return NULL;
+
+ return PyArray_SwapAxes(self, axis1, axis2);
+}
+
+static char doc_getfield[] = "m.getfield(dtype, offset) returns a field "\
+ " of the given array as a certain type. A field is a view of "\
+ " the array's data with each itemsize determined by the given type"\
+ " and the offset into the current array.";
+
+/* steals typed reference */
+/*OBJECT_API
+ Get a subset of bytes from each element of the array
+*/
+/* Return a view interpreting bytes [offset, offset+typed->elsize) of
+   each element as typed.  Validates that the window fits inside the
+   element; the view keeps self alive via its base pointer.  Steals the
+   reference to typed even on failure. */
+static PyObject *
+PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
+{
+ PyObject *ret=NULL;
+
+ if (offset < 0 || (offset + typed->elsize) > self->descr->elsize) {
+ PyErr_Format(PyExc_ValueError,
+ "Need 0 <= offset <= %d for requested type " \
+ "but received offset = %d",
+ self->descr->elsize-typed->elsize, offset);
+ Py_DECREF(typed);
+ return NULL;
+ }
+ ret = PyArray_NewFromDescr(self->ob_type,
+ typed,
+ self->nd, self->dimensions,
+ self->strides,
+ self->data + offset,
+ self->flags, (PyObject *)self);
+ if (ret == NULL) return NULL;
+ Py_INCREF(self);
+ ((PyArrayObject *)ret)->base = (PyObject *)self;
+
+ PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL_FLAGS);
+ return ret;
+}
+
+/* Method wrapper: parse (dtype, offset=0) and delegate to
+   PyArray_GetField (which consumes the dtype reference). */
+static PyObject *
+array_getfield(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+
+ PyArray_Descr *dtype;
+ int offset = 0;
+ static char *kwlist[] = {"dtype", "offset", 0};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|i", kwlist,
+ PyArray_DescrConverter,
+ &dtype, &offset)) return NULL;
+
+ return _ARET(PyArray_GetField(self, dtype, offset));
+}
+
+
+static char doc_setfield[] = "m.setfield(value, dtype, offset) places val "\
+ "into field of the given array defined by the data type and offset.";
+
+/*OBJECT_API
+ Set a subset of bytes from each element of the array
+*/
+/* Assign val into the byte window [offset, offset+dtype->elsize) of
+   each element, viewed as dtype.  Builds the same kind of field view
+   as PyArray_GetField and copies val into it.  Steals the dtype
+   reference even on failure.  Returns 0 on success, -1 on error. */
+static int
+PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype,
+ int offset, PyObject *val)
+{
+ PyObject *ret=NULL;
+ int retval = 0;
+
+ if (offset < 0 || (offset + dtype->elsize) > self->descr->elsize) {
+ PyErr_Format(PyExc_ValueError,
+ "Need 0 <= offset <= %d for requested type " \
+ "but received offset = %d",
+ self->descr->elsize-dtype->elsize, offset);
+ Py_DECREF(dtype);
+ return -1;
+ }
+ ret = PyArray_NewFromDescr(self->ob_type,
+ dtype, self->nd, self->dimensions,
+ self->strides, self->data + offset,
+ self->flags, (PyObject *)self);
+ if (ret == NULL) return -1;
+ Py_INCREF(self);
+ ((PyArrayObject *)ret)->base = (PyObject *)self;
+
+ PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL_FLAGS);
+ retval = PyArray_CopyObject((PyArrayObject *)ret, val);
+ Py_DECREF(ret);
+ return retval;
+}
+
+/* Method wrapper: parse (value, dtype, offset=0), delegate to
+   PyArray_SetField; returns None on success. */
+static PyObject *
+array_setfield(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ PyArray_Descr *dtype;
+ int offset = 0;
+ PyObject *value;
+ static char *kwlist[] = {"value", "dtype", "offset", 0};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist,
+ &value, PyArray_DescrConverter,
+ &dtype, &offset)) return NULL;
+
+ if (PyArray_SetField(self, dtype, offset, value) < 0)
+ return NULL;
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+/* This doesn't change the descriptor just the actual data...
+ */
+
+/*OBJECT_API*/
+/* Swap the bytes of every element.  inplace=TRUE swaps self's own
+   buffer (one-segment arrays in one call, otherwise element by element
+   through an iterator) and returns a new reference to self; otherwise
+   a byteswapped copy is returned and self is untouched.  The
+   descriptor (including its byteorder flag) is not modified. */
+static PyObject *
+PyArray_Byteswap(PyArrayObject *self, Bool inplace)
+{
+ PyArrayObject *ret;
+ intp size;
+ PyArray_CopySwapNFunc *copyswapn;
+ PyArray_CopySwapFunc *copyswap;
+ PyArrayIterObject *it;
+
+ if (inplace) {
+ copyswapn = self->descr->f->copyswapn;
+
+ size = PyArray_SIZE(self);
+ if (PyArray_ISONESEGMENT(self)) {
+ copyswapn(self->data, NULL, size, 1,
+ self->descr->elsize);
+ }
+ else { /* Use iterator */
+
+ it = (PyArrayIterObject *)\
+ PyArray_IterNew((PyObject *)self);
+ copyswap = self->descr->f->copyswap;
+ while (it->index < it->size) {
+ copyswap(it->dataptr, NULL, 1,
+ self->descr->elsize);
+ PyArray_ITER_NEXT(it);
+ }
+ Py_DECREF(it);
+ }
+
+ Py_INCREF(self);
+ return (PyObject *)self;
+ }
+ else {
+ if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL)
+ return NULL;
+
+ size = PyArray_SIZE(self);
+
+ /* now ret has the same dtypedescr as self (including
+ byteorder)
+ */
+
+ ret->descr->f->copyswapn(ret->data, NULL, size, 1,
+ ret->descr->elsize);
+
+ return (PyObject *)ret;
+ }
+}
+
+static char doc_byteswap[] = "m.byteswap(False) Swap the bytes in"\
+ " the array. Return the byteswapped array. If the first argument"\
+ " is TRUE, byteswap in-place and return a reference to self.";
+
+/* Method wrapper: optional bool inplace flag (default FALSE);
+   delegate to PyArray_Byteswap. */
+static PyObject *
+array_byteswap(PyArrayObject *self, PyObject *args)
+{
+ Bool inplace=FALSE;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace))
+ return NULL;
+
+ return PyArray_Byteswap(self, inplace);
+}
+
+static char doc_tolist[] = "m.tolist(). Copy the data portion of the array"\
+ " to a hierarchical python list and return that list.";
+
+/* Method wrapper: reject 0-d arrays, then delegate to PyArray_ToList. */
+static PyObject *
+array_tolist(PyArrayObject *self, PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+ if (self->nd <= 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "can't convert a 0-d array to a list");
+ return NULL;
+ }
+
+ return PyArray_ToList(self);
+}
+
+static char doc_tostring[] = "m.tostring() Construct a Python string "\
+ "containing the raw bytes in the array";
+
+/* Method wrapper: no arguments; delegate to PyArray_ToString. */
+static PyObject *
+array_tostring(PyArrayObject *self, PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+ return PyArray_ToString(self);
+}
+
+static char doc_tofile[] = "m.tofile(fid, sep=\"\") write the data to a file.";
+
+/* Write the array's data to a file given either as a filename string
+   or an already-open file object.  An empty sep selects raw binary
+   output (mode "wb"); a non-empty sep selects text output (mode "w")
+   with sep between values.  Delegates to PyArray_ToFile and returns
+   None on success. */
+static PyObject *
+array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int ret;
+ PyObject *file;
+ FILE *fd;
+ char *sep="";
+ char *format="";
+ char *mode="";
+ static char *kwlist[] = {"file", "sep", "format", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|ss", kwlist,
+ &file, &sep, &format)) return NULL;
+
+ if (PyString_Check(file)) {
+ /* compare string contents, not pointers: sep == "" compared
+ addresses and was always false, so text mode was chosen
+ even for the default empty separator */
+ if (*sep == '\0') mode="wb";
+ else mode="w";
+ file = PyFile_FromString(PyString_AS_STRING(file), mode);
+ if (file==NULL) return NULL;
+ }
+ else {
+ Py_INCREF(file);
+ }
+ fd = PyFile_AsFile(file);
+ if (fd == NULL) {
+ PyErr_SetString(PyExc_IOError, "first argument must be a " \
+ "string or open file");
+ Py_DECREF(file);
+ return NULL;
+ }
+ ret = PyArray_ToFile(self, fd, sep, format);
+ Py_DECREF(file);
+ if (ret < 0) return NULL;
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static char doc_toscalar[] = "m.item(). Copy the first data point of "\
+ "the array to a standard Python scalar and return it.";
+
+/* Method wrapper: for 0-d or size-1 arrays return the single element
+   via the descriptor's getitem; otherwise raise ValueError. */
+static PyObject *
+array_toscalar(PyArrayObject *self, PyObject *args) {
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+ if (self->nd == 0 || PyArray_SIZE(self) == 1)
+ return self->descr->f->getitem(self->data, self);
+ else {
+ PyErr_SetString(PyExc_ValueError, "can only convert an" \
+ " array of size 1 to Python scalar.");
+ return NULL;
+ }
+}
+
+static char doc_cast[] = "m.astype(t). Cast array m to type t. \n\n"\
+ "t can be either a string representing a typecode, or a python type"\
+ " object of type int, float, or complex.";
+
+/* Method wrapper for astype: identical descriptor (pointer compare on
+   the shared descr object) yields a plain copy; otherwise cast via
+   PyArray_CastToType (which consumes the descr reference). */
+static PyObject *
+array_cast(PyArrayObject *self, PyObject *args)
+{
+ PyArray_Descr *descr=NULL;
+ PyObject *obj;
+
+ if (!PyArg_ParseTuple(args, "O&", PyArray_DescrConverter,
+ &descr)) return NULL;
+
+ if (descr == self->descr) {
+ obj = _ARET(PyArray_NewCopy(self,0));
+ Py_XDECREF(descr);
+ return obj;
+ }
+ return _ARET(PyArray_CastToType(self, descr, 0));
+}
+
+/* default sub-type implementation */
+
+static char doc_wraparray[] = "m.__array_wrap__(obj) returns an object of "\
+ "type m from the ndarray object obj";
+
+/* Default __array_wrap__: build a view of the given ndarray's data as
+   an instance of self's (sub)type.  The new object keeps the source
+   array alive via its base pointer. */
+static PyObject *
+array_wraparray(PyArrayObject *self, PyObject *args)
+{
+ PyObject *arr;
+ PyObject *ret;
+
+ if (PyTuple_Size(args) < 1) {
+ PyErr_SetString(PyExc_TypeError,
+ "only accepts 1 argument");
+ return NULL;
+ }
+ arr = PyTuple_GET_ITEM(args, 0);
+ if (!PyArray_Check(arr)) {
+ PyErr_SetString(PyExc_TypeError,
+ "can only be called with ndarray object");
+ return NULL;
+ }
+
+ Py_INCREF(PyArray_DESCR(arr));
+ ret = PyArray_NewFromDescr(self->ob_type,
+ PyArray_DESCR(arr),
+ PyArray_NDIM(arr),
+ PyArray_DIMS(arr),
+ PyArray_STRIDES(arr), PyArray_DATA(arr),
+ PyArray_FLAGS(arr), (PyObject *)self);
+ if (ret == NULL) return NULL;
+ Py_INCREF(arr);
+ PyArray_BASE(ret) = arr;
+ return ret;
+}
+
+/* NO-OP --- just so all subclasses will have one by default. */
+/* Default __array_finalize__: does nothing, returns None. */
+static PyObject *
+array_finalize(PyArrayObject *self, PyObject *args)
+{
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+
+static char doc_array_getarray[] = "m.__array__(|dtype) just returns either a new reference to self if dtype is not given or a new array of provided data type if dtype is different from the current dtype of the array.";
+
+/* __array__(|dtype): return self (or a base-class view when self is a
+   subclass instance), cast to dtype when one is given that differs
+   from the current descriptor. */
+static PyObject *
+array_getarray(PyArrayObject *self, PyObject *args)
+{
+ PyArray_Descr *newtype=NULL;
+ PyObject *ret;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_DescrConverter,
+ &newtype)) return NULL;
+
+ /* convert to PyArray_Type or PyBigArray_Type */
+ /* was ||, which is a tautology (no object is exactly both types)
+ and wrapped even exact ndarrays in a needless view; only
+ subclass instances need the base-class conversion */
+ if (!PyArray_CheckExact(self) && !PyBigArray_CheckExact(self)) {
+ PyObject *new;
+ PyTypeObject *subtype = &PyArray_Type;
+
+ if (!PyType_IsSubtype(self->ob_type, &PyArray_Type)) {
+ subtype = &PyBigArray_Type;
+ }
+
+ Py_INCREF(PyArray_DESCR(self));
+ new = PyArray_NewFromDescr(subtype,
+ PyArray_DESCR(self),
+ PyArray_NDIM(self),
+ PyArray_DIMS(self),
+ PyArray_STRIDES(self),
+ PyArray_DATA(self),
+ PyArray_FLAGS(self), NULL);
+ if (new == NULL) return NULL;
+ Py_INCREF(self);
+ PyArray_BASE(new) = (PyObject *)self;
+ self = (PyArrayObject *)new;
+ }
+ else {
+ Py_INCREF(self);
+ }
+
+ if ((newtype == NULL) || \
+ PyArray_EquivTypes(self->descr, newtype)) {
+ return (PyObject *)self;
+ }
+ else {
+ ret = PyArray_CastToType(self, newtype, 0);
+ Py_DECREF(self);
+ return ret;
+ }
+}
+
+static char doc_copy[] = "m.copy(|fortran). Return a copy of the array.\n"\
+ "If fortran == 0 then the result is contiguous (default). \n"\
+ "If fortran > 0 then the result has fortran data order. \n"\
+ "If fortran < 0 then the result has fortran data order only if m\n"
+ " is already in fortran order.";
+
+/* Method wrapper: optional int fortran flag; delegate to
+   PyArray_NewCopy. */
+static PyObject *
+array_copy(PyArrayObject *self, PyObject *args)
+{
+ int fortran=0;
+ if (!PyArg_ParseTuple(args, "|i", &fortran)) return NULL;
+
+ return _ARET(PyArray_NewCopy(self, fortran));
+}
+
+static char doc_resize[] = "self.resize(new_shape). "\
+ "Change size and shape of self inplace.\n"\
+ "\n Array must own its own memory and not be referenced by other " \
+ "arrays\n Returns None.";
+
+/* Method wrapper for in-place resize.  Accepts either one shape
+   sequence or dimensions as separate arguments; delegates to
+   PyArray_Resize and discards its return, yielding None. */
+static PyObject *
+array_resize(PyArrayObject *self, PyObject *args)
+{
+ PyArray_Dims newshape;
+ PyObject *ret;
+ int n;
+
+ n = PyTuple_Size(args);
+ if (n <= 1) {
+ if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter,
+ &newshape)) return NULL;
+ }
+ else {
+ /* dimensions given as separate arguments */
+ if (!PyArray_IntpConverter(args, &newshape)) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "invalid shape");
+ }
+ return NULL;
+ }
+ }
+ ret = PyArray_Resize(self, &newshape);
+ PyDimMem_FREE(newshape.ptr);
+ if (ret == NULL) return NULL;
+ Py_DECREF(ret);
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static char doc_repeat[] = "a.repeat(repeats=, axis=None)\n"\
+ "\n"\
+ " Copy elements of a, repeats times. The repeats argument must\n"\
+ " be a sequence of length a.shape[axis] or a scalar.";
+
+/* Method wrapper: parse (repeats, axis) and delegate to
+   PyArray_Repeat. */
+static PyObject *
+array_repeat(PyArrayObject *self, PyObject *args, PyObject *kwds) {
+ PyObject *repeats;
+ int axis=MAX_DIMS;
+ static char *kwlist[] = {"repeats", "axis", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist,
+ &repeats, PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return _ARET(PyArray_Repeat(self, repeats, axis));
+}
+
+static char doc_choose[] = "a.choose(b0, b1, ..., bn)\n"\
+ "\n"\
+ "Return an array with elements chosen from 'a' at the positions\n"\
+ "of the given arrays b_i. The array 'a' should be an integer array\n"\
+ "with entries from 0 to n+1, and the b_i arrays should have the same\n"\
+ "shape as 'a'.";
+
+/* Method wrapper: the choices can be given either as one sequence or
+   as separate arguments (the args tuple itself is then used);
+   delegate to PyArray_Choose. */
+static PyObject *
+array_choose(PyArrayObject *self, PyObject *args)
+{
+ PyObject *choices;
+ int n;
+
+ n = PyTuple_Size(args);
+ if (n <= 1) {
+ if (!PyArg_ParseTuple(args, "O", &choices))
+ return NULL;
+ }
+ else {
+ choices = args;
+ }
+
+ return _ARET(PyArray_Choose(self, choices));
+}
+
+static char doc_sort[] = "a.sort(axis=-1,kind='quicksort') sorts in place along axis. Return is None and kind can be 'quicksort', 'mergesort', or 'heapsort'";
+
+/* Method wrapper: parse (axis, kind) and delegate to PyArray_Sort;
+   in-place, returns None. */
+static PyObject *
+array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=-1;
+ int val;
+ PyArray_SORTKIND which=PyArray_QUICKSORT;
+ static char *kwlist[] = {"axis", "kind", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&", kwlist, &axis,
+ PyArray_SortkindConverter, &which))
+ return NULL;
+
+ val = PyArray_Sort(self, axis, which);
+ if (val < 0) return NULL;
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static char doc_argsort[] = "a.argsort(axis=-1,kind='quicksort')\n"\
+ " Return the indexes into a that would sort it along the"\
+ " given axis; kind can be 'quicksort', 'mergesort', or 'heapsort'";
+
+/* Method wrapper: parse (axis, kind) and delegate to PyArray_ArgSort. */
+static PyObject *
+array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=-1;
+ PyArray_SORTKIND which=PyArray_QUICKSORT;
+ static char *kwlist[] = {"axis", "kind", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&", kwlist, &axis,
+ PyArray_SortkindConverter, &which))
+ return NULL;
+
+ return _ARET(PyArray_ArgSort(self, axis, which));
+}
+
+static char doc_searchsorted[] = "a.searchsorted(v)\n"\
+ " Assuming that a is a 1-D array, in ascending order and\n"\
+ " represents bin boundaries, then a.searchsorted(values) gives an\n"\
+ " array of bin numbers, giving the bin into which each value would\n"\
+ " be placed. This method is helpful for histograming. \n"\
+ " Note: No warning is given if the boundaries, in a, are not \n"\
+ " in ascending order.";
+
+/* Method wrapper: delegate to PyArray_SearchSorted.  (A stray ';'
+   after the docstring initializer -- an empty file-scope declaration,
+   invalid in C89 -- has been removed.) */
+static PyObject *
+array_searchsorted(PyArrayObject *self, PyObject *args)
+{
+ PyObject *values;
+
+ if (!PyArg_ParseTuple(args, "O", &values)) return NULL;
+
+ return _ARET(PyArray_SearchSorted(self, values));
+}
+
+static char doc_deepcopy[] = "Used if copy.deepcopy is called on an array.";
+
+/* __deepcopy__(memo): return a copy of the array.  For object arrays
+   every element is additionally deep-copied through copy.deepcopy
+   (sharing the caller's memo dict) so no objects are shared with the
+   original. */
+static PyObject *
+array_deepcopy(PyArrayObject *self, PyObject *args)
+{
+ PyObject* visit;
+ PyObject **optr;
+ PyArrayIterObject *it;
+ PyObject *copy, *ret, *deepcopy, *temp, *res;
+
+ if (!PyArg_ParseTuple(args, "O", &visit)) return NULL;
+ ret = PyArray_Copy(self);
+ /* was missing: PyArray_Copy can fail, and ret was dereferenced
+ below without a check (and leaked on the error paths) */
+ if (ret == NULL) return NULL;
+ if (PyArray_ISOBJECT(self)) {
+ copy = PyImport_ImportModule("copy");
+ if (copy == NULL) {Py_DECREF(ret); return NULL;}
+ deepcopy = PyObject_GetAttrString(copy, "deepcopy");
+ Py_DECREF(copy);
+ if (deepcopy == NULL) {Py_DECREF(ret); return NULL;}
+ it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (it == NULL) {Py_DECREF(deepcopy); Py_DECREF(ret); return NULL;}
+ optr = (PyObject **)PyArray_DATA(ret);
+ while(it->index < it->size) {
+ temp = *((PyObject **)it->dataptr);
+ Py_INCREF(temp);
+ /* call deepcopy on this argument */
+ res = PyObject_CallFunctionObjArgs(deepcopy,
+ temp, visit, NULL);
+ Py_DECREF(temp);
+ Py_DECREF(*optr);
+ *optr++ = res;
+ PyArray_ITER_NEXT(it);
+ }
+ Py_DECREF(deepcopy);
+ Py_DECREF(it);
+ }
+ return _ARET(ret);
+}
+
+/* Convert Object Array to flat list and pickle the flat list string */
+/* Pickling helper: gather the PyObject* pointers of an object array
+   into a flat Python list (size PyArray_SIZE(self), C order via the
+   iterator).  Returns a new list, or NULL on error. */
+static PyObject *
+_getobject_pkl(PyArrayObject *self)
+{
+ PyObject *theobject;
+ PyArrayIterObject *iter=NULL;
+ PyObject *list;
+
+
+ iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (iter == NULL) return NULL;
+ list = PyList_New(iter->size);
+ if (list == NULL) {Py_DECREF(iter); return NULL;}
+ while (iter->index < iter->size) {
+ theobject = *((PyObject **)iter->dataptr);
+ Py_INCREF(theobject);
+ PyList_SET_ITEM(list, (int) iter->index, theobject);
+ PyArray_ITER_NEXT(iter);
+ }
+ Py_DECREF(iter);
+ return list;
+}
+
+/* Fill an object array's data buffer from the flat list produced by
+   _getobject_pkl during pickling.  Stores a new reference to each list
+   item at the corresponding position.  Returns 0, or -1 on error.
+   Assumes the data buffer holds no live PyObject* values (freshly
+   allocated by array_setstate), so nothing is DECREF'd before storing. */
+static int
+_setobject_pkl(PyArrayObject *self, PyObject *list)
+{
+    PyObject *theobject;
+    PyArrayIterObject *iter=NULL;
+
+    /* (removed unused local `size = self->descr->elsize`) */
+    iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+    if (iter == NULL) return -1;
+    while(iter->index < iter->size) {
+        theobject = PyList_GET_ITEM(list, (int) iter->index);
+        Py_INCREF(theobject);
+        *((PyObject **)iter->dataptr) = theobject;
+        PyArray_ITER_NEXT(iter);
+    }
+    Py_DECREF(iter);        /* iter is known non-NULL here */
+    return 0;
+}
+
+
+static char doc_reduce[] = "a.__reduce__() for pickling.";
+
+/* Build the 3-tuple (reconstructor, args, state) used by pickle.
+   The reconstructor is scipy.base._internal._reconstruct; all real data
+   travels in the state tuple so __setstate__ can reuse the pickle string
+   as array memory without copying. */
+static PyObject *
+array_reduce(PyArrayObject *self, PyObject *args)
+{
+ PyObject *ret=NULL, *state=NULL, *obj=NULL, *mod=NULL;
+ PyObject *mybool, *thestr=NULL;
+ PyArray_Descr *descr;
+
+ /* Return a tuple of (callable object, arguments, object's state) */
+ /* We will put everything in the object's state, so that on UnPickle
+ it can use the string object as memory without a copy */
+
+ ret = PyTuple_New(3);
+ if (ret == NULL) return NULL;
+ mod = PyImport_ImportModule("scipy.base._internal");
+ if (mod == NULL) {Py_DECREF(ret); return NULL;}
+ obj = PyObject_GetAttrString(mod, "_reconstruct");
+ Py_DECREF(mod);
+ /* NOTE(review): obj may be NULL here if the attribute lookup failed;
+    PyTuple_SET_ITEM would then store NULL -- confirm upstream handling. */
+ PyTuple_SET_ITEM(ret, 0, obj);
+ PyTuple_SET_ITEM(ret, 1,
+ Py_BuildValue("ONN",
+ (PyObject *)self->ob_type,
+ Py_BuildValue("(N)",
+ PyInt_FromLong(0)),
+ PyObject_GetAttrString((PyObject *)self,
+ "dtypechar")));
+
+ /* Now fill in object's state. This is a tuple with
+ 4 arguments
+
+ 1) a Tuple giving the shape
+ 2) a PyArray_Descr Object (with correct byteorder set)
+ 3) a Bool stating if Fortran or not
+ 4) a binary string with the data (or a list for Object arrays)
+
+ Notice because Python does not describe a mechanism to write
+ raw data to the pickle, this performs a copy to a string first
+ */
+
+ state = PyTuple_New(4);
+ if (state == NULL) {
+ Py_DECREF(ret); return NULL;
+ }
+ PyTuple_SET_ITEM(state, 0, PyObject_GetAttrString((PyObject *)self,
+ "shape"))
+ /* state[1] owns a new reference to the descr */;
+ descr = self->descr;
+ Py_INCREF(descr);
+ PyTuple_SET_ITEM(state, 1, (PyObject *)descr);
+ mybool = (PyArray_ISFORTRAN(self) ? Py_True : Py_False);
+ Py_INCREF(mybool);
+ PyTuple_SET_ITEM(state, 2, mybool);
+ if (PyArray_ISOBJECT(self)) {
+ thestr = _getobject_pkl(self);
+ }
+ else {
+ thestr = PyArray_ToString(self);
+ }
+ if (thestr == NULL) {
+ Py_DECREF(ret);
+ Py_DECREF(state);
+ return NULL;
+ }
+ PyTuple_SET_ITEM(state, 3, thestr);
+ PyTuple_SET_ITEM(ret, 2, state);
+ return ret;
+}
+
+static char doc_setstate[] = "a.__setstate__(tuple) for unpickling.";
+
+/*
+ 1) a Tuple giving the shape
+ 2) a PyArray_Descr Object
+ 3) a Bool stating if Fortran or not
+ 4) a binary string with the data (or a list if Object array)
+*/
+
+static intp _array_fill_strides(intp *, intp *, int, intp, int, int *);
+
+static int _IsAligned(PyArrayObject *);
+
+static PyArray_Descr * _array_typedescr_fromstr(char *);
+
+/* __setstate__: rebuild the array in place from the state tuple produced
+   by array_reduce.  For non-object arrays the pickle string itself is
+   used as the (writeable) data buffer when alignment allows, avoiding a
+   copy; object arrays get a fresh buffer filled from the list.
+   The statement order below (free old data, then rebuild dimensions,
+   strides and flags) is load-bearing -- do not reorder. */
+static PyObject *
+array_setstate(PyArrayObject *self, PyObject *args)
+{
+ PyObject *shape;
+ PyArray_Descr *typecode;
+ long fortran;
+ PyObject *rawdata;
+ char *datastr;
+ int len;
+ intp dimensions[MAX_DIMS];
+ int nd;
+
+ /* This will free any memory associated with a and
+ use the string in setstate as the (writeable) memory.
+ */
+ if (!PyArg_ParseTuple(args, "(O!O!iO)", &PyTuple_Type,
+ &shape, &PyArrayDescr_Type, &typecode,
+ &fortran, &rawdata))
+ return NULL;
+
+ /* replace descr with the unpickled one (typecode is borrowed from
+    the args tuple, so INCREF before keeping it) */
+ Py_XDECREF(self->descr);
+ self->descr = typecode;
+ Py_INCREF(typecode);
+ nd = PyArray_IntpFromSequence(shape, dimensions, MAX_DIMS);
+ if (typecode->type_num == PyArray_OBJECT) {
+ if (!PyList_Check(rawdata)) {
+ PyErr_SetString(PyExc_TypeError,
+ "object pickle not returning list");
+ return NULL;
+ }
+ }
+ else {
+ if (!PyString_Check(rawdata)) {
+ PyErr_SetString(PyExc_TypeError,
+ "pickle not returning string");
+ return NULL;
+ }
+
+ if (PyString_AsStringAndSize(rawdata, &datastr, &len))
+ return NULL;
+
+ /* sanity-check the payload length against shape * itemsize */
+ if ((len != (self->descr->elsize * \
+ (int) PyArray_MultiplyList(dimensions, nd)))) {
+ PyErr_SetString(PyExc_ValueError,
+ "buffer size does not" \
+ " match array size");
+ return NULL;
+ }
+ }
+
+ /* release any memory the array currently owns */
+ if ((self->flags & OWN_DATA)) {
+ if (self->data != NULL)
+ PyDataMem_FREE(self->data);
+ self->flags &= ~OWN_DATA;
+ }
+ Py_XDECREF(self->base);
+
+ self->flags &= ~UPDATEIFCOPY;
+
+ if (self->dimensions != NULL) {
+ PyDimMem_FREE(self->dimensions);
+ self->dimensions = NULL;
+ }
+
+ self->flags = DEFAULT_FLAGS;
+
+ self->nd = nd;
+
+ if (nd > 0) {
+ /* single allocation holds both dimensions and strides */
+ self->dimensions = PyDimMem_NEW(nd * 2);
+ self->strides = self->dimensions + nd;
+ memcpy(self->dimensions, dimensions, sizeof(intp)*nd);
+ (void) _array_fill_strides(self->strides, dimensions, nd,
+ self->descr->elsize, fortran,
+ &(self->flags));
+ }
+
+ if (typecode->type_num != PyArray_OBJECT) {
+ /* adopt the pickle string's buffer directly ... */
+ self->data = datastr;
+ if (!_IsAligned(self)) {
+ /* ... unless misaligned, in which case copy it */
+ intp num = PyArray_NBYTES(self);
+ self->data = PyDataMem_NEW(num);
+ if (self->data == NULL) {
+ self->nd = 0;
+ PyDimMem_FREE(self->dimensions);
+ return PyErr_NoMemory();
+ }
+ memcpy(self->data, datastr, num);
+ self->flags |= OWN_DATA;
+ self->base = NULL;
+ }
+ else {
+ /* keep the string alive as the memory owner */
+ self->base = rawdata;
+ Py_INCREF(self->base);
+ }
+ }
+ else {
+ self->data = PyDataMem_NEW(PyArray_NBYTES(self));
+ if (self->data == NULL) {
+ self->nd = 0;
+ /* fall back to a minimal buffer so the object stays valid */
+ self->data = PyDataMem_NEW(self->descr->elsize);
+ if (self->dimensions) PyDimMem_FREE(self->dimensions);
+ return PyErr_NoMemory();
+ }
+ self->flags |= OWN_DATA;
+ self->base = NULL;
+ if (_setobject_pkl(self, rawdata) < 0)
+ return NULL;
+ }
+
+ PyArray_UpdateFlags(self, UPDATE_ALL_FLAGS);
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+/*OBJECT_API*/
+/* Pickle self to `file` (an open file object, or a file name string that
+   is opened "wb") via cPickle.dump.  protocol < 0 selects protocol 2.
+   Returns 0 on success, -1 with an exception set on failure. */
+static int
+PyArray_Dump(PyObject *self, PyObject *file, int protocol)
+{
+    PyObject *cpick=NULL;
+    PyObject *ret;
+    if (protocol < 0) protocol = 2;
+
+    cpick = PyImport_ImportModule("cPickle");
+    if (cpick==NULL) return -1;
+
+    if (PyString_Check(file)) {
+        file = PyFile_FromString(PyString_AS_STRING(file), "wb");
+        if (file==NULL) {
+            Py_DECREF(cpick);    /* BUG FIX: was leaked on this path */
+            return -1;
+        }
+    }
+    else Py_INCREF(file);
+    ret = PyObject_CallMethod(cpick, "dump", "OOi", self,
+                              file, protocol);
+    Py_XDECREF(ret);
+    Py_DECREF(file);
+    Py_DECREF(cpick);
+    if (PyErr_Occurred()) return -1;
+    return 0;
+}
+
+/*OBJECT_API*/
+/* Return the cPickle.dumps() string for self (protocol 2 when
+   protocol < 0), or NULL with an exception set. */
+static PyObject *
+PyArray_Dumps(PyObject *self, int protocol)
+{
+ PyObject *cpick=NULL;
+ PyObject *ret;
+ if (protocol < 0) protocol = 2;
+
+ cpick = PyImport_ImportModule("cPickle");
+ if (cpick==NULL) return NULL;
+ ret = PyObject_CallMethod(cpick, "dumps", "Oi", self, protocol);
+ Py_DECREF(cpick);
+ return ret;
+}
+
+
+static char doc_dump[] = "m.dump(file)";
+
+/* Method wrapper: pickle self to a file (name or object) at protocol 2. */
+static PyObject *
+array_dump(PyArrayObject *self, PyObject *args)
+{
+ PyObject *file=NULL;
+ int ret;
+
+ if (!PyArg_ParseTuple(args, "O", &file))
+ return NULL;
+ ret = PyArray_Dump((PyObject *)self, file, 2);
+ if (ret < 0) return NULL;
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static char doc_dumps[] = "m.dumps()";
+
+/* Method wrapper: return the protocol-2 pickle string for self. */
+static PyObject *
+array_dumps(PyArrayObject *self, PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, ""))
+ return NULL;
+ return PyArray_Dumps((PyObject *)self, 2);
+}
+
+
+static char doc_transpose[] = "m.transpose(<None>)";
+
+/* a.transpose(), a.transpose(axes-sequence) or a.transpose(a0, a1, ...):
+   with no (or None) argument, reverse the axes; otherwise permute them.
+   Multiple positional arguments are treated as the permutation tuple. */
+static PyObject *
+array_transpose(PyArrayObject *self, PyObject *args)
+{
+ PyObject *shape=Py_None;
+ int n;
+ PyArray_Dims permute;
+ PyObject *ret;
+
+ n = PyTuple_Size(args);
+ if (n > 1) shape = args;
+ else if (n == 1) shape = PyTuple_GET_ITEM(args, 0);
+
+ if (shape == Py_None)
+ ret = PyArray_Transpose(self, NULL);
+ else {
+ if (!PyArray_IntpConverter(shape, &permute)) return NULL;
+ ret = PyArray_Transpose(self, &permute);
+ PyDimMem_FREE(permute.ptr);
+ }
+
+ return _ARET(ret);
+}
+
+static char doc_mean[] = "a.mean(axis=None, dtype=None)\n\n"\
+ "Average the array over the given axis. If the axis is None, average\n"\
+ "over all dimensions of the array.\n"\
+ "\n"\
+ "If an integer axis is given, this equals:\n"\
+ " a.sum(axis, dtype) * 1.0 / len(a)\n"\
+ "\n"\
+ "If axis is None, this equals:\n"\
+ " a.sum(axis, dtype) * 1.0 / product(a.shape)\n"\
+ "\n"\
+ "The optional dtype argument is the data type for intermediate\n"\
+ "calculations in the sum.";
+
+/* Map an optional PyArray_Descr* to its type number, or PyArray_NOTYPE
+   when no dtype was given. */
+#define _CHKTYPENUM(typ) ((typ) ? (typ)->type_num : PyArray_NOTYPE)
+
+/* a.mean(axis=None, dtype=None): wrapper around PyArray_Mean. */
+static PyObject *
+array_mean(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_Mean(self, axis, _CHKTYPENUM(dtype));
+}
+
+static char doc_sum[] = "a.sum(axis=None, dtype=None)\n\n"\
+ "Sum the array over the given axis. If the axis is None, sum over all\n"\
+ "dimensions of the array.\n"\
+ "\n"\
+ "The optional dtype argument is the data type for the returned value\n"\
+ "and intermediate calculations. The default is to upcast (promote)\n"\
+ "smaller integer types to the platform-dependent int. For example, on\n"\
+ "32-bit platforms:\n"\
+ "\n"\
+ " a.dtype default sum() dtype\n"\
+ " ---------------------------------------------------\n"\
+ " bool, int8, int16, int32 int32\n"\
+ "\n"\
+ "Examples:\n"\
+ "\n"\
+ ">>> array([0.5, 1.5]).sum()\n"\
+ "2.0\n"\
+ ">>> array([0.5, 1.5]).sum(dtype=int32)\n"\
+ "1\n"\
+ ">>> array([[0, 1], [0, 5]]).sum()\n"\
+ "array([0, 6])\n"\
+ ">>> array([[0, 1], [0, 5]]).sum(axis=1)\n"\
+ "array([1, 5])";
+
+/* a.sum(axis=None, dtype=None): wrapper around PyArray_Sum. */
+static PyObject *
+array_sum(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_Sum(self, axis, _CHKTYPENUM(dtype));
+}
+
+
+static char doc_cumsum[] = "a.cumsum(axis=None, dtype=None)";
+
+/* a.cumsum(axis=None, dtype=None): wrapper around PyArray_CumSum. */
+static PyObject *
+array_cumsum(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_CumSum(self, axis, _CHKTYPENUM(dtype));
+}
+
+static char doc_prod[] = "a.prod(axis=None, dtype=None)";
+
+/* a.prod(axis=None, dtype=None): wrapper around PyArray_Prod. */
+static PyObject *
+array_prod(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_Prod(self, axis, _CHKTYPENUM(dtype));
+}
+
+
+static char doc_cumprod[] = "a.cumprod(axis=None, dtype=None)";
+
+/* a.cumprod(axis=None, dtype=None): wrapper around PyArray_CumProd. */
+static PyObject *
+array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_CumProd(self, axis, _CHKTYPENUM(dtype));
+}
+
+
+static char doc_any[] = "a.any(axis=None)";
+
+/* a.any(axis=None): wrapper around PyArray_Any. */
+static PyObject *
+array_any(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return PyArray_Any(self, axis);
+}
+
+static char doc_all[] = "a.all(axis=None)";
+
+/* a.all(axis=None): wrapper around PyArray_All. */
+static PyObject *
+array_all(PyArrayObject *self, PyObject *args)
+{
+ int axis=MAX_DIMS;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return PyArray_All(self, axis);
+}
+
+static char doc_stddev[] = "a.std(axis=None, dtype=None)";
+
+/* a.std(axis=None, dtype=None): PyArray_Std with variance flag 0. */
+static PyObject *
+array_stddev(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_Std(self, axis, _CHKTYPENUM(dtype), 0);
+}
+
+static char doc_variance[] = "a.var(axis=None, dtype=None)";
+
+/* a.var(axis=None, dtype=None): PyArray_Std with variance flag 1. */
+static PyObject *
+array_variance(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"axis", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist,
+ PyArray_AxisConverter,
+ &axis, PyArray_DescrConverter2,
+ &dtype)) return NULL;
+
+ return PyArray_Std(self, axis, _CHKTYPENUM(dtype), 1);
+}
+
+static char doc_compress[] = "a.compress(condition=, axis=None)";
+
+/* a.compress(condition, axis=None): wrapper around PyArray_Compress. */
+static PyObject *
+array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=MAX_DIMS;
+ PyObject *condition;
+ static char *kwlist[] = {"condition", "axis", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist,
+ &condition, PyArray_AxisConverter,
+ &axis)) return NULL;
+
+ return _ARET(PyArray_Compress(self, condition, axis));
+}
+
+static char doc_nonzero[] = "a.nonzero() return a tuple of indices referencing"\
+ "the elements of a that are nonzero.";
+
+/* a.nonzero(): wrapper around PyArray_Nonzero. */
+static PyObject *
+array_nonzero(PyArrayObject *self, PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+
+ return _ARET(PyArray_Nonzero(self));
+}
+
+
+static char doc_trace[] = "a.trace(offset=0, axis1=0, axis2=1, dtype=None) \n"\
+ "return the sum along the offset diagonal of the arrays indicated\n" \
+ "axis1 and axis2.";
+
+/* a.trace(offset, axis1, axis2, dtype): wrapper around PyArray_Trace. */
+static PyObject *
+array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis1=0, axis2=1, offset=0;
+ PyArray_Descr *dtype=NULL;
+ static char *kwlist[] = {"offset", "axis1", "axis2", "dtype", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiO&", kwlist,
+ &offset, &axis1, &axis2,
+ PyArray_DescrConverter2, &dtype))
+ return NULL;
+
+ return _ARET(PyArray_Trace(self, offset, axis1, axis2,
+ _CHKTYPENUM(dtype)));
+}
+
+#undef _CHKTYPENUM
+
+
+static char doc_clip[] = "a.clip(min=, max=)";
+
+/* a.clip(min, max): wrapper around PyArray_Clip; both bounds required. */
+static PyObject *
+array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *min, *max;
+ static char *kwlist[] = {"min", "max", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO", kwlist,
+ &min, &max))
+ return NULL;
+
+ return _ARET(PyArray_Clip(self, min, max));
+}
+
+static char doc_conj[] = "a.conj()";
+
+static char doc_conjugate[] = "a.conjugate()";
+
+/* Shared implementation for both a.conj() and a.conjugate(). */
+static PyObject *
+array_conjugate(PyArrayObject *self, PyObject *args)
+{
+
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+
+ return PyArray_Conjugate(self);
+}
+
+
+static char doc_diagonal[] = "a.diagonal(offset=0, axis1=0, axis2=1)";
+
+/* a.diagonal(offset, axis1, axis2): wrapper around PyArray_Diagonal. */
+static PyObject *
+array_diagonal(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis1=0, axis2=1, offset=0;
+ static char *kwlist[] = {"offset", "axis1", "axis2", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwlist,
+ &offset, &axis1, &axis2))
+ return NULL;
+
+ return _ARET(PyArray_Diagonal(self, offset, axis1, axis2));
+}
+
+static char doc_flatten[] = "a.flatten([fortran]) return a 1-d array (always copy)";
+
+/* a.flatten([fortran]): always-copy 1-d flattening via PyArray_Flatten. */
+static PyObject *
+array_flatten(PyArrayObject *self, PyObject *args)
+{
+ int fortran=0;
+
+ if (!PyArg_ParseTuple(args, "|i", &fortran)) return NULL;
+
+ return PyArray_Flatten(self, (int) fortran);
+}
+
+static char doc_ravel[] = "a.ravel([fortran]) return a 1-d array (copy only if needed)";
+
+/* a.ravel([fortran]): copy-if-needed 1-d flattening via PyArray_Ravel. */
+static PyObject *
+array_ravel(PyArrayObject *self, PyObject *args)
+{
+ int fortran=0;
+
+ if (!PyArg_ParseTuple(args, "|i", &fortran)) return NULL;
+
+ return PyArray_Ravel(self, fortran);
+}
+
+
+
+static char doc_setflags[] = "a.setflags(write=None, align=None, uic=None)";
+
+static int _IsAligned(PyArrayObject *);
+static Bool _IsWriteable(PyArrayObject *);
+
+/* a.setflags(write=None, align=None, uic=None): toggle the WRITEABLE,
+   ALIGNED, and UPDATEIFCOPY flags.  Py_None (the default) means "leave
+   unchanged".  On any error the flags are restored to their entry value
+   where the original code did so. */
+static PyObject *
+array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+    static char *kwlist[] = {"write", "align", "uic", NULL};
+    PyObject *write=Py_None;
+    PyObject *align=Py_None;
+    PyObject *uic=Py_None;
+    int flagback = self->flags;   /* saved for rollback on error */
+
+    /* BUG FIX: format string said "|OOOO" (four object converters) while
+       only three pointers were supplied, so PyArg_ParseTupleAndKeywords
+       would write a fourth argument through a garbage pointer. */
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist,
+                                     &write, &align, &uic))
+        return NULL;
+
+    if (align != Py_None) {
+        if (PyObject_Not(align)) self->flags &= ~ALIGNED;
+        else if (_IsAligned(self)) self->flags |= ALIGNED;
+        else {
+            PyErr_SetString(PyExc_ValueError,
+                            "cannot set aligned flag of mis-"
+                            "aligned array to True");
+            return NULL;
+        }
+    }
+
+    if (uic != Py_None) {
+        if (PyObject_IsTrue(uic)) {
+            self->flags = flagback;
+            /* BUG FIX: missing space made the message read
+               "...UPDATEIFCOPYflag to True". */
+            PyErr_SetString(PyExc_ValueError,
+                            "cannot set UPDATEIFCOPY "
+                            "flag to True");
+            return NULL;
+        }
+        else {
+            self->flags &= ~UPDATEIFCOPY;
+            Py_XDECREF(self->base);   /* base may already be NULL */
+            self->base = NULL;
+        }
+    }
+
+    if (write != Py_None) {
+        /* braces added: the original unbraced if/else nesting relied on
+           dangling-else matching and carried a stray '\' after the
+           "array"); statement. */
+        if (PyObject_IsTrue(write)) {
+            if (_IsWriteable(self)) {
+                self->flags |= WRITEABLE;
+            }
+            else {
+                self->flags = flagback;
+                PyErr_SetString(PyExc_ValueError,
+                                "cannot set WRITEABLE "
+                                "flag to True of this "
+                                "array");
+                return NULL;
+            }
+        }
+        else {
+            self->flags &= ~WRITEABLE;
+        }
+    }
+
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+/* NOTE(review): "newbytorder" in the docstring below looks like a typo
+   for "newbyteorder" -- confirm against the dtypedescr method name
+   before changing the user-visible string. */
+static char doc_newbyteorder[] = "a.newbyteorder(<byteorder>) is equivalent\n" \
+ " to a.view(a.dtypedescr.newbytorder(<byteorder>))\n";
+
+/* a.newbyteorder([byteorder]): view self with a byte-swapped descr;
+   default (no argument) is PyArray_SWAP, i.e. toggle the current order. */
+static PyObject *
+array_newbyteorder(PyArrayObject *self, PyObject *args)
+{
+ char endian = PyArray_SWAP;
+ PyArray_Descr *new;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter,
+ &endian)) return NULL;
+
+ new = PyArray_DescrNewByteorder(self->descr, endian);
+ if (!new) return NULL;
+ return _ARET(PyArray_View(self, new));
+
+}
+
+/* Method table for the array type.  Entries with flag 1 use
+   METH_VARARGS (1 is its numeric value in CPython). */
+static PyMethodDef array_methods[] = {
+ {"tolist", (PyCFunction)array_tolist, 1, doc_tolist},
+ {"item", (PyCFunction)array_toscalar, METH_VARARGS, doc_toscalar},
+ {"tofile", (PyCFunction)array_tofile,
+ METH_VARARGS | METH_KEYWORDS, doc_tofile},
+ {"tostring", (PyCFunction)array_tostring, METH_VARARGS, doc_tostring},
+ {"byteswap", (PyCFunction)array_byteswap, 1, doc_byteswap},
+ {"astype", (PyCFunction)array_cast, 1, doc_cast},
+ {"getfield", (PyCFunction)array_getfield,
+ METH_VARARGS | METH_KEYWORDS, doc_getfield},
+ {"setfield", (PyCFunction)array_setfield,
+ METH_VARARGS | METH_KEYWORDS, doc_setfield},
+ {"copy", (PyCFunction)array_copy, 1, doc_copy},
+ {"resize", (PyCFunction)array_resize, 1, doc_resize},
+
+ /* for subtypes */
+ {"__array__", (PyCFunction)array_getarray, 1, doc_array_getarray},
+ {"__array_wrap__", (PyCFunction)array_wraparray, 1, doc_wraparray},
+ /* default version so it is found... -- only used for subclasses */
+ {"__array_finalize__", (PyCFunction)array_finalize, 1, NULL},
+
+
+ /* for the copy module */
+ {"__copy__", (PyCFunction)array_copy, 1, doc_copy},
+ {"__deepcopy__", (PyCFunction)array_deepcopy, 1, doc_deepcopy},
+
+ /* for Pickling */
+ {"__reduce__", (PyCFunction) array_reduce, 1, doc_reduce},
+ {"__setstate__", (PyCFunction) array_setstate, 1, doc_setstate},
+ {"dumps", (PyCFunction) array_dumps, 1, doc_dumps},
+ {"dump", (PyCFunction) array_dump, 1, doc_dump},
+
+ /* Extended methods added 2005 */
+ {"fill", (PyCFunction)array_fill,
+ METH_VARARGS, doc_fill},
+ {"transpose", (PyCFunction)array_transpose,
+ METH_VARARGS, doc_transpose},
+ {"take", (PyCFunction)array_take,
+ METH_VARARGS|METH_KEYWORDS, doc_take},
+ {"put", (PyCFunction)array_put,
+ METH_VARARGS|METH_KEYWORDS, doc_put},
+ {"putmask", (PyCFunction)array_putmask,
+ METH_VARARGS|METH_KEYWORDS, doc_putmask},
+ {"repeat", (PyCFunction)array_repeat,
+ METH_VARARGS|METH_KEYWORDS, doc_repeat},
+ {"choose", (PyCFunction)array_choose,
+ METH_VARARGS, doc_choose},
+ {"sort", (PyCFunction)array_sort,
+ METH_VARARGS|METH_KEYWORDS, doc_sort},
+ {"argsort", (PyCFunction)array_argsort,
+ METH_VARARGS|METH_KEYWORDS, doc_argsort},
+ {"searchsorted", (PyCFunction)array_searchsorted,
+ METH_VARARGS, doc_searchsorted},
+ {"argmax", (PyCFunction)array_argmax,
+ METH_VARARGS, doc_argmax},
+ {"argmin", (PyCFunction)array_argmin,
+ METH_VARARGS, doc_argmin},
+ {"reshape", (PyCFunction)array_reshape,
+ METH_VARARGS, doc_reshape},
+ {"squeeze", (PyCFunction)array_squeeze,
+ METH_VARARGS, doc_squeeze},
+ {"view", (PyCFunction)array_view,
+ METH_VARARGS, doc_view},
+ {"swapaxes", (PyCFunction)array_swapaxes,
+ METH_VARARGS, doc_swapaxes},
+ {"max", (PyCFunction)array_max,
+ METH_VARARGS, doc_max},
+ {"min", (PyCFunction)array_min,
+ METH_VARARGS, doc_min},
+ {"ptp", (PyCFunction)array_ptp,
+ METH_VARARGS, doc_ptp},
+ {"mean", (PyCFunction)array_mean,
+ METH_VARARGS|METH_KEYWORDS, doc_mean},
+ {"trace", (PyCFunction)array_trace,
+ METH_VARARGS|METH_KEYWORDS, doc_trace},
+ {"diagonal", (PyCFunction)array_diagonal,
+ METH_VARARGS|METH_KEYWORDS, doc_diagonal},
+ {"clip", (PyCFunction)array_clip,
+ METH_VARARGS|METH_KEYWORDS, doc_clip},
+ {"conj", (PyCFunction)array_conjugate,
+ METH_VARARGS, doc_conj},
+ {"conjugate", (PyCFunction)array_conjugate,
+ METH_VARARGS, doc_conjugate},
+ {"nonzero", (PyCFunction)array_nonzero,
+ METH_VARARGS, doc_nonzero},
+ {"std", (PyCFunction)array_stddev,
+ METH_VARARGS|METH_KEYWORDS, doc_stddev},
+ {"var", (PyCFunction)array_variance,
+ METH_VARARGS|METH_KEYWORDS, doc_variance},
+ {"sum", (PyCFunction)array_sum,
+ METH_VARARGS|METH_KEYWORDS, doc_sum},
+ {"cumsum", (PyCFunction)array_cumsum,
+ METH_VARARGS|METH_KEYWORDS, doc_cumsum},
+ {"prod", (PyCFunction)array_prod,
+ METH_VARARGS|METH_KEYWORDS, doc_prod},
+ {"cumprod", (PyCFunction)array_cumprod,
+ METH_VARARGS|METH_KEYWORDS, doc_cumprod},
+ {"all", (PyCFunction)array_all,
+ METH_VARARGS, doc_all},
+ {"any", (PyCFunction)array_any,
+ METH_VARARGS, doc_any},
+ {"compress", (PyCFunction)array_compress,
+ METH_VARARGS|METH_KEYWORDS, doc_compress},
+ {"flatten", (PyCFunction)array_flatten,
+ METH_VARARGS, doc_flatten},
+ {"ravel", (PyCFunction)array_ravel,
+ METH_VARARGS, doc_ravel},
+ {"setflags", (PyCFunction)array_setflags,
+ METH_VARARGS|METH_KEYWORDS, doc_setflags},
+ {"newbyteorder", (PyCFunction)array_newbyteorder,
+ METH_VARARGS, doc_newbyteorder},
+ {NULL, NULL} /* sentinel */
+};
+
+#undef _ARET
+
+
diff --git a/numpy/base/src/arrayobject.c b/numpy/base/src/arrayobject.c
new file mode 100644
index 000000000..72db76373
--- /dev/null
+++ b/numpy/base/src/arrayobject.c
@@ -0,0 +1,8472 @@
+/*
+ Provide multidimensional arrays as a basic object type in python.
+
+Based on Original Numeric implementation
+Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu
+
+with contributions from many Numeric Python developers 1995-2004
+
+Heavily modified in 2005 with inspiration from Numarray
+
+by
+
+Travis Oliphant
+Assistant Professor at
+Brigham Young University
+
+maintainer email: oliphant.travis@ieee.org
+
+Numarray design (which provided guidance) by
+Space Telescope Science Institute
+ (J. Todd Miller, Perry Greenfield, Rick White)
+
+*/
+
+/* Helper functions */
+
+#define error_converting(x) (((x) == -1) && PyErr_Occurred())
+
+/*OBJECT_API*/
+/* Convert an arbitrary Python object (0-d integer array, array scalar,
+   int, long, or anything with nb_long/nb_int) to a C intp.
+   Returns -1 with an exception set on failure; callers must use
+   error_converting() to distinguish a real -1 value from an error. */
+static intp
+PyArray_PyIntAsIntp(PyObject *o)
+{
+    longlong long_value = -1;
+    PyObject *obj;
+    static char *msg = "an integer is required";
+    PyObject *arr=NULL;
+    PyArray_Descr *descr;
+    intp ret;
+
+    if (!o) {
+        PyErr_SetString(PyExc_TypeError, msg);
+        return -1;
+    }
+    descr = PyArray_DescrFromType(PyArray_INTP);
+    if (PyArray_Check(o)) {
+        if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) {
+            PyErr_SetString(PyExc_TypeError, msg);
+            Py_DECREF(descr);
+            return -1;
+        }
+        arr = PyArray_CastToType((PyArrayObject *)o, descr, 0);
+    }
+    else if (PyArray_IsScalar(o, Integer)) {
+        arr = PyArray_FromScalar(o, descr);
+    }
+    else {
+        /* BUG FIX: descr is only consumed by the two calls above (both
+           steal the reference); it was leaked on this fallthrough path. */
+        Py_DECREF(descr);
+    }
+    if (arr != NULL) {
+        ret = *((intp *)PyArray_DATA(arr));
+        Py_DECREF(arr);
+        return ret;
+    }
+    if (PyInt_Check(o)) {
+        long_value = (longlong) PyInt_AS_LONG(o);
+    } else if (PyLong_Check(o)) {
+        long_value = (longlong) PyLong_AsLongLong(o);
+    } else if (o->ob_type->tp_as_number != NULL && \
+               o->ob_type->tp_as_number->nb_long != NULL) {
+        obj = o->ob_type->tp_as_number->nb_long(o);
+        if (obj != NULL) {
+            long_value = (longlong) PyLong_AsLongLong(obj);
+            Py_DECREF(obj);
+        }
+    } else if (o->ob_type->tp_as_number != NULL && \
+               o->ob_type->tp_as_number->nb_int != NULL) {
+        obj = o->ob_type->tp_as_number->nb_int(o);
+        if (obj != NULL) {
+            long_value = (longlong) PyLong_AsLongLong(obj);
+            Py_DECREF(obj);
+        }
+    } else {
+        PyErr_SetString(PyExc_NotImplementedError,"");
+    }
+
+    if error_converting(long_value) {
+        PyErr_SetString(PyExc_TypeError, msg);
+        return -1;
+    }
+
+#if (SIZEOF_LONGLONG != SIZEOF_PY_INTPTR_T)
+    /* range-check before narrowing longlong -> intp */
+    if ((long_value < MIN_INTP) || (long_value > MAX_INTP)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "integer won't fit into a C intp");
+        return -1;
+    }
+#endif
+    return (intp) long_value;
+}
+
+
+static PyObject *array_int(PyArrayObject *v);
+
+/*OBJECT_API*/
+/* Convert an arbitrary Python object to a C int; mirrors
+   PyArray_PyIntAsIntp but narrows to int.  Returns -1 with an exception
+   set on failure (use error_converting() to disambiguate). */
+static int
+PyArray_PyIntAsInt(PyObject *o)
+{
+    long long_value = -1;
+    PyObject *obj;
+    static char *msg = "an integer is required";
+    PyObject *arr=NULL;
+    PyArray_Descr *descr;
+    int ret;
+
+
+    if (!o) {
+        PyErr_SetString(PyExc_TypeError, msg);
+        return -1;
+    }
+    descr = PyArray_DescrFromType(PyArray_INT);
+    if (PyArray_Check(o)) {
+        if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) {
+            PyErr_SetString(PyExc_TypeError, msg);
+            Py_DECREF(descr);
+            return -1;
+        }
+        arr = PyArray_CastToType((PyArrayObject *)o, descr, 0);
+    }
+    /* was a plain `if`; made `else if` for consistency with
+       PyArray_PyIntAsIntp (o cannot be both an array and a scalar) */
+    else if (PyArray_IsScalar(o, Integer)) {
+        arr = PyArray_FromScalar(o, descr);
+    }
+    else {
+        /* BUG FIX: descr was leaked when o was neither an array nor an
+           array scalar (only the two calls above consume it). */
+        Py_DECREF(descr);
+    }
+    if (arr != NULL) {
+        ret = *((int *)PyArray_DATA(arr));
+        Py_DECREF(arr);
+        return ret;
+    }
+    if (PyInt_Check(o)) {
+        long_value = (long) PyInt_AS_LONG(o);
+    } else if (PyLong_Check(o)) {
+        long_value = (long) PyLong_AsLong(o);
+    } else if (o->ob_type->tp_as_number != NULL && \
+               o->ob_type->tp_as_number->nb_long != NULL) {
+        obj = o->ob_type->tp_as_number->nb_long(o);
+        if (obj == NULL) return -1;
+        long_value = (long) PyLong_AsLong(obj);
+        Py_DECREF(obj);
+    } else if (o->ob_type->tp_as_number != NULL && \
+               o->ob_type->tp_as_number->nb_int != NULL) {
+        obj = o->ob_type->tp_as_number->nb_int(o);
+        if (obj == NULL) return -1;
+        long_value = (long) PyLong_AsLong(obj);
+        Py_DECREF(obj);
+    } else {
+        PyErr_SetString(PyExc_NotImplementedError,"");
+    }
+    if error_converting(long_value) {
+        PyErr_SetString(PyExc_TypeError, msg);
+        return -1;
+    }
+
+#if (SIZEOF_LONG != SIZEOF_INT)
+    /* range-check before narrowing long -> int */
+    if ((long_value < INT_MIN) || (long_value > INT_MAX)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "integer won't fit into a C int");
+        return -1;
+    }
+#endif
+    return (int) long_value;
+}
+
+
+/*OBJECT_API
+ Get Priority from object
+*/
+/* Returns obj's __array_priority__ as a double; exact ndarray /
+   bigndarray instances short-circuit to their fixed priorities, and any
+   lookup or float-conversion failure falls back to default_. */
+static double
+PyArray_GetPriority(PyObject *obj, double default_)
+{
+ PyObject *ret;
+ double priority=PyArray_PRIORITY;
+
+ if (PyArray_CheckExact(obj))
+ return priority;
+ if (PyBigArray_CheckExact(obj))
+ return PyArray_BIG_PRIORITY;
+
+ ret = PyObject_GetAttrString(obj, "__array_priority__");
+ if (ret != NULL) priority = PyFloat_AsDouble(ret);
+ /* any pending error (missing attribute or bad float) selects default_ */
+ if (PyErr_Occurred()) {
+ PyErr_Clear();
+ priority = default_;
+ }
+ Py_XDECREF(ret);
+ return priority;
+}
+
+/* Backward compatibility only */
+/* In both Zero and One
+
+ ***You must free the memory once you are done with it
+ using PyDataMem_FREE(ptr) or you create a memory leak***
+
+ If arr is an Object array you are getting a
+ BORROWED reference to Zero or One.
+ Do not DECREF.
+ Please INCREF if you will be hanging on to it.
+
+ The memory for the ptr still must be freed in any case;
+*/
+
+
+/*OBJECT_API
+ Get pointer to zero of correct type for array.
+*/
+/* Allocates elsize bytes holding the value 0 in arr's dtype; the caller
+   must PyDataMem_FREE the result.  For object arrays the buffer holds a
+   borrowed PyObject* (see the ownership comment block above). */
+static char *
+PyArray_Zero(PyArrayObject *arr)
+{
+ char *zeroval;
+ int ret, storeflags;
+ PyObject *obj;
+
+ zeroval = PyDataMem_NEW(arr->descr->elsize);
+ if (zeroval == NULL) {
+ PyErr_SetNone(PyExc_MemoryError);
+ return NULL;
+ }
+
+ obj=PyInt_FromLong((long) 0);
+ if (PyArray_ISOBJECT(arr)) {
+ /* store the pointer itself; reference stays borrowed */
+ memcpy(zeroval, &obj, sizeof(PyObject *));
+ Py_DECREF(obj);
+ return zeroval;
+ }
+ /* temporarily mark the array well-behaved so setitem accepts it */
+ storeflags = arr->flags;
+ arr->flags |= BEHAVED_FLAGS;
+ ret = arr->descr->f->setitem(obj, zeroval, arr);
+ arr->flags = storeflags;
+ Py_DECREF(obj);
+ if (ret < 0) {
+ PyDataMem_FREE(zeroval);
+ return NULL;
+ }
+ return zeroval;
+}
+
+/*OBJECT_API
+ Get pointer to one of correct type for array
+*/
+/* Identical to PyArray_Zero but fills the buffer with the value 1. */
+static char *
+PyArray_One(PyArrayObject *arr)
+{
+ char *oneval;
+ int ret, storeflags;
+ PyObject *obj;
+
+ oneval = PyDataMem_NEW(arr->descr->elsize);
+ if (oneval == NULL) {
+ PyErr_SetNone(PyExc_MemoryError);
+ return NULL;
+ }
+
+ obj = PyInt_FromLong((long) 1);
+ if (PyArray_ISOBJECT(arr)) {
+ /* store the pointer itself; reference stays borrowed */
+ memcpy(oneval, &obj, sizeof(PyObject *));
+ Py_DECREF(obj);
+ return oneval;
+ }
+
+ /* temporarily mark the array well-behaved so setitem accepts it */
+ storeflags = arr->flags;
+ arr->flags |= BEHAVED_FLAGS;
+ ret = arr->descr->f->setitem(obj, oneval, arr);
+ arr->flags = storeflags;
+ Py_DECREF(obj);
+ if (ret < 0) {
+ PyDataMem_FREE(oneval);
+ return NULL;
+ }
+ return oneval;
+}
+
+/* End deprecated */
+
+
+/* Recursively copy src into dest, honoring per-dimension strides.
+   `copies` repeats each source element (used after optimize_slices folds
+   contiguous trailing dimensions); dest_nd may exceed src_nd, in which
+   case the source is broadcast over the leading dest dimensions.
+   Returns 0 on success, -1 (with exception) on a shape mismatch. */
+static int
+do_sliced_copy(char *dest, intp *dest_strides, intp *dest_dimensions,
+ int dest_nd, char *src, intp *src_strides,
+ intp *src_dimensions, int src_nd, int elsize,
+ int copies) {
+ intp i, j;
+
+ if (src_nd == 0 && dest_nd == 0) {
+ /* base case: both fully reduced; emit `copies` element copies */
+ for(j=0; j<copies; j++) {
+ memmove(dest, src, elsize);
+ dest += elsize;
+ }
+ return 0;
+ }
+
+ if (dest_nd > src_nd) {
+ /* broadcast: iterate dest's leading dimension, reuse src */
+ for(i=0; i<*dest_dimensions; i++, dest += *dest_strides) {
+ if (do_sliced_copy(dest, dest_strides+1,
+ dest_dimensions+1, dest_nd-1,
+ src, src_strides,
+ src_dimensions, src_nd,
+ elsize, copies) == -1)
+ return -1;
+ }
+ return 0;
+ }
+
+ if (dest_nd == 1) {
+ if (*dest_dimensions != *src_dimensions) {
+ PyErr_SetString(PyExc_ValueError,
+ "matrices are not aligned for copy");
+ return -1;
+ }
+ for(i=0; i<*dest_dimensions; i++, src += *src_strides) {
+ for(j=0; j<copies; j++) {
+ memmove(dest, src, elsize);
+ dest += *dest_strides;
+ }
+ }
+ } else {
+ /* walk both arrays' leading dimension in lockstep */
+ for(i=0; i<*dest_dimensions; i++, dest += *dest_strides,
+ src += *src_strides) {
+ if (do_sliced_copy(dest, dest_strides+1,
+ dest_dimensions+1, dest_nd-1,
+ src, src_strides+1,
+ src_dimensions+1, src_nd-1,
+ elsize, copies) == -1)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* This function reduces a source and destination array until a
+ discontiguous segment is found in either the source or
+ destination. Thus, an N dimensional array where the last dimension
+ is contiguous and has size n while the items are of size elsize,
+ will be reduced to an N-1 dimensional array with items of size n *
+ elsize.
+
+ This process is repeated until a discontiguous section is found.
+ Thus, a contiguous array will be reduced to a 0-dimensional array
+ with items of size elsize * sizeof(N-dimensional array).
+
+ Finally, if a source array has been reduced to a 0-dimensional
+ array with large element sizes, the contiguous destination array is
+ reduced as well.
+
+ The only thing this function changes is the element size, the
+ number of copies, and the source and destination number of
+ dimensions. The strides and dimensions are not changed.
+*/
+
+/* Fold trailing dimensions that are contiguous in BOTH source and
+   destination into a larger element size, and -- once the source is
+   fully reduced -- fold remaining contiguous destination dimensions into
+   the `copies` count.  Only elsize, copies and the nd counters change;
+   strides/dimensions pointers are untouched (see comment block above).
+   Returns 0, or -1 on a trailing-dimension mismatch. */
+static int
+optimize_slices(intp **dest_strides, intp **dest_dimensions,
+ int *dest_nd, intp **src_strides,
+ intp **src_dimensions, int *src_nd,
+ int *elsize, int *copies)
+{
+ while (*src_nd > 0) {
+ if (((*dest_strides)[*dest_nd-1] == *elsize) &&
+ ((*src_strides)[*src_nd-1] == *elsize)) {
+ if ((*dest_dimensions)[*dest_nd-1] !=
+ (*src_dimensions)[*src_nd-1]) {
+ PyErr_SetString(PyExc_ValueError,
+ "matrices are not aligned");
+ return -1;
+ }
+ /* both contiguous here: merge dimension into elsize */
+ *elsize *= (*dest_dimensions)[*dest_nd-1];
+ *dest_nd-=1; *src_nd-=1;
+ } else {
+ break;
+ }
+ }
+ if (*src_nd == 0) {
+ /* source exhausted: remaining contiguous dest dims become copies */
+ while (*dest_nd > 0) {
+ if (((*dest_strides)[*dest_nd-1] == *elsize)) {
+ *copies *= (*dest_dimensions)[*dest_nd-1];
+ *dest_nd-=1;
+ } else {
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Return a freshly allocated C-contiguous copy of src's data; the caller
+   owns it and must release it with _pya_free.  Returns NULL with an
+   exception set on failure. */
+static char *
+contiguous_data(PyArrayObject *src)
+{
+    intp dest_strides[MAX_DIMS], *dest_strides_ptr;
+    intp *dest_dimensions=src->dimensions;
+    int dest_nd=src->nd;
+    intp *src_strides = src->strides;
+    intp *src_dimensions=src->dimensions;
+    int src_nd=src->nd;
+    int elsize=src->descr->elsize;
+    int copies=1;
+    int ret, i;
+    intp stride=elsize;
+    char *new_data;
+
+    /* build C-contiguous strides for the destination; `stride` ends up
+       as the total byte size of the copy */
+    for(i=dest_nd-1; i>=0; i--) {
+        dest_strides[i] = stride;
+        stride *= dest_dimensions[i];
+    }
+
+    dest_strides_ptr = dest_strides;
+
+    if (optimize_slices(&dest_strides_ptr, &dest_dimensions, &dest_nd,
+                        &src_strides, &src_dimensions, &src_nd,
+                        &elsize, &copies) == -1)
+        return NULL;
+
+    new_data = (char *)_pya_malloc(stride);
+    /* BUG FIX: allocation failure was unchecked and NULL flowed into
+       do_sliced_copy/memmove. */
+    if (new_data == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    ret = do_sliced_copy(new_data, dest_strides_ptr, dest_dimensions,
+                         dest_nd, src->data, src_strides,
+                         src_dimensions, src_nd, elsize, copies);
+
+    if (ret != -1) { return new_data; }
+    else { _pya_free(new_data); return NULL; }
+}
+
+/* end Helper functions */
+
+
+static PyObject *PyArray_New(PyTypeObject *, int nd, intp *,
+ int, intp *, void *, int, int, PyObject *);
+
+/* C-API functions */
+
+/* Used for arrays of python objects to increment the reference count of */
+/* every python object in the array. */
+/*OBJECT_API
+ For object arrays, increment all internal references.
+*/
+static int
+PyArray_INCREF(PyArrayObject *mp)
+{
+ intp i, n;
+
+ PyObject **data, **data2;
+
+ if (mp->descr->type_num != PyArray_OBJECT) return 0;
+
+ if (PyArray_ISONESEGMENT(mp)) {
+ data = (PyObject **)mp->data;
+ } else {
+ if ((data = (PyObject **)contiguous_data(mp)) == NULL)
+ return -1;
+ }
+
+ n = PyArray_SIZE(mp);
+ data2 = data;
+ for(i=0; i<n; i++, data++) Py_XINCREF(*data);
+
+ if (!PyArray_ISONESEGMENT(mp)) _pya_free(data2);
+
+ return 0;
+}
+
+/*OBJECT_API
+ Decrement all internal references for object arrays.
+*/
+static int
+PyArray_XDECREF(PyArrayObject *mp)
+{
+ intp i, n;
+ PyObject **data, **data2;
+
+ if (mp->descr->type_num != PyArray_OBJECT) return 0;
+
+ if (PyArray_ISONESEGMENT(mp)) {
+ data = (PyObject **)mp->data;
+ } else {
+ if ((data = (PyObject **)contiguous_data(mp)) == NULL)
+ return -1;
+ }
+
+ n = PyArray_SIZE(mp);
+ data2 = data;
+ for(i=0; i<n; i++, data++) Py_XDECREF(*data);
+
+ if (!PyArray_ISONESEGMENT(mp)) _pya_free(data2);
+
+ return 0;
+}
+
/* Byte-swap 'n' items of 'size' bytes each, in place.  Loops for the
   common 2-, 4- and 8-byte item sizes are unrolled; any other size
   falls through to a generic in-place reversal. */
static void
byte_swap_vector(void *p, int n, int size) {
    char *a, *b, c=0;
    int j,m;

    switch(size) {
    case 1: /* no byteswap necessary */
        break;
    case 2:
        for (a = (char*)p ; n > 0; n--, a += 1) {
            b = a + 1;
            c = *a; *a++ = *b; *b = c;
        }
        break;
    case 4:
        for (a = (char*)p ; n > 0; n--, a += 2) {
            b = a + 3;
            c = *a; *a++ = *b; *b-- = c;
            c = *a; *a++ = *b; *b = c;
        }
        break;
    case 8:
        for (a = (char*)p ; n > 0; n--, a += 4) {
            b = a + 7;
            c = *a; *a++ = *b; *b-- = c;
            c = *a; *a++ = *b; *b-- = c;
            c = *a; *a++ = *b; *b-- = c;
            c = *a; *a++ = *b; *b = c;
        }
        break;
    default:
        /* bug fix: the inner loop body was missing braces, so only
           'c = *a;' repeated and just one byte pair was swapped per
           item; the outer increment must then be size-m, because the
           braced inner loop advances 'a' by m itself. */
        m = size / 2;
        for (a = (char *)p ; n > 0; n--, a += size - m) {
            b = a + (size - 1);
            for (j = 0; j < m; j++) {
                c = *a; *a++ = *b; *b-- = c;
            }
        }
        break;
    }
}
+
+
+/* If numitems > 1, then dst must be contiguous */
+static void
+copy_and_swap(void *dst, void *src, int itemsize, intp numitems,
+ intp srcstrides, int swap)
+{
+ int i;
+ char *s1 = (char *)src;
+ char *d1 = (char *)dst;
+
+
+ if ((numitems == 1) || (itemsize == srcstrides))
+ memcpy(d1, s1, itemsize*numitems);
+ else {
+ for (i = 0; i < numitems; i++) {
+ memcpy(d1, s1, itemsize);
+ d1 += itemsize;
+ s1 += srcstrides;
+ }
+ }
+
+ if (swap)
+ byte_swap_vector(d1, numitems, itemsize);
+}
+
/* Table of user-registered descriptors (see PyArray_RegisterDataType);
   grown with realloc and indexed by (type_num - PyArray_USERDEF). */
static PyArray_Descr **userdescrs=NULL;
/* Computer-generated arraytype and scalartype code */
#include "scalartypes.inc"
#include "arraytypes.inc"
+
+static char *
+index2ptr(PyArrayObject *mp, intp i)
+{
+ if(mp->nd == 0) {
+ PyErr_SetString(PyExc_IndexError,
+ "0-d arrays can't be indexed");
+ return NULL;
+ }
+ if (i==0 && mp->dimensions[0] > 0)
+ return mp->data;
+
+ if (mp->nd>0 && i>0 && i < mp->dimensions[0]) {
+ return mp->data+i*mp->strides[0];
+ }
+ PyErr_SetString(PyExc_IndexError,"index out of bounds");
+ return NULL;
+}
+
+/*OBJECT_API
+ Compute the size of an array (in number of items)
+*/
+static intp
+PyArray_Size(PyObject *op)
+{
+ if (PyArray_Check(op)) {
+ return PyArray_SIZE((PyArrayObject *)op);
+ }
+ else {
+ return 0;
+ }
+}
+
/* If destination is not the right type, then src
   will be cast to destination.
*/

/* Does a flat iterator-based copy.

   The arrays are assumed to have the same number of elements
   They can be different sizes and have different types however.
*/

/*OBJECT_API
 Copy an Array into another array.
*/
static int
PyArray_CopyInto(PyArrayObject *dest, PyArrayObject *src)
{
    intp dsize, ssize, sbytes, ncopies;
    int elsize, index;
    PyArrayIterObject *dit=NULL;
    PyArrayIterObject *sit=NULL;
    char *dptr;
    int swap;
    PyArray_CopySwapFunc *copyswap;
    PyArray_CopySwapNFunc *copyswapn;

    if (!PyArray_ISWRITEABLE(dest)) {
        PyErr_SetString(PyExc_RuntimeError,
                        "cannot write to array");
        return -1;
    }

    /* Mismatched types: hand off to the casting machinery. */
    if (!PyArray_EquivArrTypes(dest, src)) {
        return PyArray_CastTo(dest, src);
    }

    /* The destination size must be a whole multiple of the source
       size; the source is then repeated 'ncopies' times. */
    dsize = PyArray_SIZE(dest);
    ssize = PyArray_SIZE(src);
    if (ssize == 0) return 0;
    if (dsize % ssize != 0) {
        PyErr_SetString(PyExc_ValueError,
                        "number of elements in destination must be "\
                        "integer multiple of number of "\
                        "elements in source");
        return -1;
    }
    ncopies = (dsize / ssize);

    /* Swap bytes when exactly one of the arrays is non-native order. */
    swap = PyArray_ISNOTSWAPPED(dest) != PyArray_ISNOTSWAPPED(src);
    copyswap = dest->descr->f->copyswap;
    copyswapn = dest->descr->f->copyswapn;

    elsize = dest->descr->elsize;

    /* Fast path: both arrays one-segment in the same memory order --
       bulk memmove, then a single in-place swap pass if needed.
       XDECREF/INCREF rebalance object references across the raw copy. */
    if ((PyArray_ISCONTIGUOUS(dest) && PyArray_ISCONTIGUOUS(src)) \
        || (PyArray_ISFORTRAN(dest) && PyArray_ISFORTRAN(src))) {

        PyArray_XDECREF(dest);
        dptr = dest->data;
        sbytes = ssize * src->descr->elsize;
        while(ncopies--) {
            memmove(dptr, src->data, sbytes);
            dptr += sbytes;
        }
        if (swap)
            copyswapn(dest->data, NULL, dsize, 1, elsize);
        PyArray_INCREF(dest);
        return 0;
    }

    /* General path: element-wise copy through flat iterators; the
       source iterator is reset for each broadcast repetition. */
    dit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)dest);
    sit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)src);

    if ((dit == NULL) || (sit == NULL)) {
        Py_XDECREF(dit);
        Py_XDECREF(sit);
        return -1;
    }

    PyArray_XDECREF(dest);
    while(ncopies--) {
        index = ssize;
        while(index--) {
            memmove(dit->dataptr, sit->dataptr, elsize);
            if (swap)
                copyswap(dit->dataptr, NULL, 1, elsize);
            PyArray_ITER_NEXT(dit);
            PyArray_ITER_NEXT(sit);
        }
        PyArray_ITER_RESET(sit);
    }
    PyArray_INCREF(dest);
    Py_DECREF(dit);
    Py_DECREF(sit);
    return 0;
}
+
+
+static int
+PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
+{
+ PyArrayObject *src;
+ int ret;
+
+ Py_INCREF(dest->descr);
+ src = (PyArrayObject *)PyArray_FromAny(src_object,
+ dest->descr, 0,
+ dest->nd, FORTRAN_IF(dest));
+ if (src == NULL) return -1;
+
+ ret = PyArray_CopyInto(dest, src);
+ Py_DECREF(src);
+ return ret;
+}
+
+
/* These are also old calls (should use PyArray_New) */

/* They all zero-out the memory as previously done */

/* steals reference to descr -- and enforces native byteorder on it.*/
/*OBJECT_API
 Like FromDimsAndData but uses the Descr structure instead of typecode
 as input.
*/
static PyObject *
PyArray_FromDimsAndDataAndDescr(int nd, int *d,
                                PyArray_Descr *descr,
                                char *data)
{
    PyObject *ret;
#if SIZEOF_INTP != SIZEOF_INT
    int i;
    intp newd[MAX_DIMS];
#endif

    /* Legacy interface: force native byte order on the descriptor. */
    if (!PyArray_ISNBO(descr->byteorder))
        descr->byteorder = '=';

#if SIZEOF_INTP != SIZEOF_INT
    /* 'd' is an int array; widen each dimension to intp first. */
    for (i=0; i<nd; i++) newd[i] = (intp) d[i];
    ret = PyArray_NewFromDescr(&PyArray_Type, descr,
                               nd, newd,
                               NULL, data,
                               (data ? CARRAY_FLAGS : 0), NULL);
#else
    /* int and intp are the same size: reuse 'd' directly. */
    ret = PyArray_NewFromDescr(&PyArray_Type, descr,
                               nd, (intp *)d,
                               NULL, data,
                               (data ? CARRAY_FLAGS : 0), NULL);
#endif
    return ret;
}
+
+/*OBJECT_API
+ Construct an empty array from dimensions and typenum
+*/
+static PyObject *
+PyArray_FromDims(int nd, int *d, int type)
+{
+ PyObject *ret;
+ ret = PyArray_FromDimsAndDataAndDescr(nd, d,
+ PyArray_DescrFromType(type),
+ NULL);
+ /* Old FromDims set memory to zero --- some algorithms
+ relied on that. Better keep it the same. If
+ Object type, then it's already been set to zero, though.
+ */
+ if (ret && (PyArray_DESCR(ret)->type_num != PyArray_OBJECT)) {
+ memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
+ }
+ return ret;
+}
+
+/* end old calls */
+
+/*OBJECT_API
+ Copy an array.
+*/
+static PyObject *
+PyArray_NewCopy(PyArrayObject *m1, int fortran)
+{
+ PyArrayObject *ret;
+ if (fortran < 0) fortran = PyArray_ISFORTRAN(m1);
+
+ Py_INCREF(m1->descr);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(m1->ob_type,
+ m1->descr,
+ m1->nd,
+ m1->dimensions,
+ NULL, NULL,
+ fortran,
+ (PyObject *)m1);
+ if (ret == NULL) return NULL;
+ if (PyArray_CopyInto(ret, m1) == -1) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+
+ return (PyObject *)ret;
+}
+
+static PyObject *array_big_item(PyArrayObject *, intp);
+
/* Does nothing with descr (cannot be NULL) */
/*OBJECT_API
 Get scalar-equivalent to a region of memory described by a descriptor.
*/
static PyObject *
PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
{
    PyTypeObject *type;
    PyObject *obj;
    void *destptr;
    PyArray_CopySwapFunc *copyswap;
    int type_num;
    int itemsize;
    int swap;

    type_num = descr->type_num;
    itemsize = descr->elsize;
    type = descr->typeobj;
    copyswap = descr->f->copyswap;
    /* Non-native byte order in the descriptor means the scalar's
       bytes must be swapped while copying. */
    swap = !PyArray_ISNBO(descr->byteorder);
    if (type->tp_itemsize != 0) /* String type */
        obj = type->tp_alloc(type, itemsize);
    else
        obj = type->tp_alloc(type, 0);
    if (obj == NULL) return NULL;
    /* Flexible types need per-type setup of where the item bytes go. */
    if PyTypeNum_ISEXTENDED(type_num) {
        if (type_num == PyArray_STRING) {
            destptr = PyString_AS_STRING(obj);
            /* tp_alloc doesn't initialize the string caches. */
            ((PyStringObject *)obj)->ob_shash = -1;
            ((PyStringObject *)obj)->ob_sstate = \
                SSTATE_NOT_INTERNED;
        }
        else if (type_num == PyArray_UNICODE) {
            PyUnicodeObject *uni = (PyUnicodeObject*)obj;
            int length = itemsize / sizeof(Py_UNICODE);
            /* Need an extra slot and need to use
               Python memory manager */
            uni->str = NULL;
            destptr = PyMem_NEW(Py_UNICODE, length+1);
            if (destptr == NULL) {
                Py_DECREF(obj);
                return PyErr_NoMemory();
            }
            uni->str = (Py_UNICODE *)destptr;
            uni->str[0] = 0;
            uni->str[length] = 0;
            uni->length = length;
            uni->hash = -1;
            uni->defenc = NULL;
        }
        else {
            /* VOID scalar: either a view into 'base' (for records
               with fields) or an owned copy of the item bytes. */
            PyVoidScalarObject *vobj = (PyVoidScalarObject *)obj;
            vobj->base = NULL;
            vobj->descr = descr;
            Py_INCREF(descr);
            vobj->obval = NULL;
            vobj->ob_size = itemsize;
            vobj->flags = BEHAVED_FLAGS | OWNDATA;
            swap = 0;
            if (descr->fields) {
                if (base) {
                    /* Record scalar referencing the array's memory:
                       no copy is made; return early. */
                    Py_INCREF(base);
                    vobj->base = base;
                    vobj->flags = PyArray_FLAGS(base);
                    vobj->flags &= ~OWNDATA;
                    vobj->obval = data;
                    return obj;
                }
            }
            destptr = PyDataMem_NEW(itemsize);
            if (destptr == NULL) {
                Py_DECREF(obj);
                return PyErr_NoMemory();
            }
            vobj->obval = destptr;
        }
    }
    else {
        /* Fixed-size scalar: item storage lives inside the object. */
        destptr = _SOFFSET_(obj, type_num);
    }
    /* copyswap for OBJECT increments the reference count */
    copyswap(destptr, data, swap, itemsize);
    return obj;
}
+
/* returns an Array-Scalar Object of the type of arr
   from the given pointer to memory -- main Scalar creation function
   default new method calls this.
*/

/* Ideally, here the descriptor would contain all the information needed.
   So, that we simply need the data and the descriptor, and perhaps
   a flag
*/

/*OBJECT_API
 Get scalar-equivalent to 0-d array
*/
static PyObject *
PyArray_ToScalar(void *data, PyArrayObject *arr)
{
    /* Thin wrapper: the array supplies the descriptor and acts as the
       base object (kept alive by VOID record scalars). */
    return PyArray_Scalar(data, arr->descr, (PyObject *)arr);
}
+
+
+/* Return Python scalar if 0-d array object is encountered */
+
+/*OBJECT_API
+ Return either an array or the appropriate Python object if the array
+ is 0d and matches a Python type.
+*/
+static PyObject *
+PyArray_Return(PyArrayObject *mp)
+{
+
+
+ if (mp == NULL) return NULL;
+
+ if (PyErr_Occurred()) {
+ Py_XDECREF(mp);
+ return NULL;
+ }
+
+ if (mp->nd == 0) {
+ PyObject *ret;
+ ret = PyArray_ToScalar(mp->data, mp);
+ Py_DECREF(mp);
+ return ret;
+ }
+ else {
+ return (PyObject *)mp;
+ }
+}
+
+/*
+ returns typenum to associate with this type >=PyArray_USERDEF.
+ Also creates a copy of the VOID_DESCR table inserting it's typeobject in
+ and it's typenum in the appropriate place.
+
+ needs the userdecrs table and PyArray_NUMUSER variables
+ defined in arratypes.inc
+*/
+/*OBJECT_API
+ Register Data type
+*/
+static int
+PyArray_RegisterDataType(PyTypeObject *type)
+{
+ PyArray_Descr *descr;
+ PyObject *obj;
+ int typenum;
+ int i;
+
+ if ((type == &PyVoidArrType_Type) || \
+ !PyType_IsSubtype(type, &PyVoidArrType_Type)) {
+ PyErr_SetString(PyExc_ValueError,
+ "can only register void subtypes");
+ return -1;
+ }
+ /* See if this type is already registered */
+ for (i=0; i<PyArray_NUMUSERTYPES; i++) {
+ descr = userdescrs[i];
+ if (descr->typeobj == type)
+ return descr->type_num;
+ }
+ descr = PyArray_DescrNewFromType(PyArray_VOID);
+ typenum = PyArray_USERDEF + PyArray_NUMUSERTYPES;
+ descr->type_num = typenum;
+ descr->typeobj = type;
+ obj = PyObject_GetAttrString((PyObject *)type,"itemsize");
+ if (obj) {
+ i = PyInt_AsLong(obj);
+ if ((i < 0) && (PyErr_Occurred())) PyErr_Clear();
+ else descr->elsize = i;
+ Py_DECREF(obj);
+ }
+ Py_INCREF(type);
+ userdescrs = realloc(userdescrs,
+ (PyArray_NUMUSERTYPES+1)*sizeof(void *));
+ if (userdescrs == NULL) {
+ PyErr_SetString(PyExc_MemoryError, "RegisterDataType");
+ Py_DECREF(descr);
+ return -1;
+ }
+ userdescrs[PyArray_NUMUSERTYPES++] = descr;
+ return typenum;
+}
+
+
/*
  copyies over from the old descr table for anything
  NULL or zero in what is given.
  DECREF's the Descr already there.
  places a pointer to the new one into the slot.
*/

/* steals a reference to descr */
/*OBJECT_API
 Insert Descr Table
*/
static int
PyArray_RegisterDescrForType(int typenum, PyArray_Descr *descr)
{
    PyArray_Descr *old;

    if (!PyTypeNum_ISUSERDEF(typenum)) {
        PyErr_SetString(PyExc_TypeError,
                        "data type not registered");
        /* reference was stolen: release it even on failure */
        Py_DECREF(descr);
        return -1;
    }
    old = userdescrs[typenum-PyArray_USERDEF];
    /* The type object and type number always come from the existing
       registration; only the remaining fields may be overridden. */
    descr->typeobj = old->typeobj;
    descr->type_num = typenum;

    /* Fill any unset pointer members from the old descriptor. */
    if (descr->f == NULL) descr->f = old->f;
    if (descr->fields == NULL) {
        descr->fields = old->fields;
        Py_XINCREF(descr->fields);
    }
    if (descr->subarray == NULL && old->subarray) {
        /* Shallow-copy the subarray struct; its shape and base are
           Python objects and need their own references. */
        descr->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr));
        memcpy(descr->subarray, old->subarray,
               sizeof(PyArray_ArrayDescr));
        Py_INCREF(descr->subarray->shape);
        Py_INCREF(descr->subarray->base);
    }
    Py_XINCREF(descr->typeobj);

    /* Fill any zero-valued scalar members from the old descriptor. */
#define _ZERO_CHECK(member) \
    if (descr->member == 0) descr->member = old->member

    _ZERO_CHECK(kind);
    _ZERO_CHECK(type);
    _ZERO_CHECK(byteorder);
    _ZERO_CHECK(elsize);
    _ZERO_CHECK(alignment);
#undef _ZERO_CHECK

    /* Replace the table entry, dropping the old registration. */
    Py_DECREF(old);
    userdescrs[typenum-PyArray_USERDEF] = descr;
    return 0;
}
+
+
/*OBJECT_API
 To File
*/
static int
PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
{
    intp size;
    intp n, n2;
    int n3, n4;
    PyArrayIterObject *it;
    PyObject *obj, *strobj, *tupobj;

    /* An empty (or NULL) separator selects raw binary output;
       otherwise items are written as text joined by 'sep'. */
    n3 = (sep ? strlen((const char *)sep) : 0);
    if (n3 == 0) { /* binary data */
        if (PyArray_ISOBJECT(self)) {
            PyErr_SetString(PyExc_ValueError, "cannot write "\
                            "object arrays to a file in " \
                            "binary mode");
            return -1;
        }

        if (PyArray_ISCONTIGUOUS(self)) {
            /* One-shot bulk write. */
            size = PyArray_SIZE(self);
            if ((n=fwrite((const void *)self->data,
                          (size_t) self->descr->elsize,
                          (size_t) size, fp)) < size) {
                PyErr_Format(PyExc_ValueError,
                             "%ld requested and %ld written",
                             (long) size, (long) n);
                return -1;
            }
        }
        else {
            /* Strided array: write one element per iteration. */
            it=(PyArrayIterObject *) \
                PyArray_IterNew((PyObject *)self);
            while(it->index < it->size) {
                if (fwrite((const void *)it->dataptr,
                           (size_t) self->descr->elsize,
                           1, fp) < 1) {
                    PyErr_Format(PyExc_IOError,
                                 "problem writing element"\
                                 " %d to file",
                                 (int)it->index);
                    Py_DECREF(it);
                    return -1;
                }
                PyArray_ITER_NEXT(it);
            }
            Py_DECREF(it);
        }
    }
    else { /* text data */
        it=(PyArrayIterObject *) \
            PyArray_IterNew((PyObject *)self);
        /* A non-empty 'format' is applied to each item via the
           %-operator; otherwise str() of the item is written. */
        n4 = (format ? strlen((const char *)format) : 0);
        while(it->index < it->size) {
            obj = self->descr->f->getitem(it->dataptr, self);
            if (obj == NULL) {Py_DECREF(it); return -1;}
            if (n4 == 0) { /* standard writing */
                strobj = PyObject_Str(obj);
                Py_DECREF(obj);
                if (strobj == NULL) {Py_DECREF(it); return -1;}
            }
            else { /* use format string */
                tupobj = PyTuple_New(1);
                if (tupobj == NULL) {Py_DECREF(it); return -1;}
                PyTuple_SET_ITEM(tupobj,0,obj);
                obj = PyString_FromString((const char *)format);
                if (obj == NULL) {Py_DECREF(tupobj);
                    Py_DECREF(it); return -1;}
                strobj = PyString_Format(obj, tupobj);
                Py_DECREF(obj);
                Py_DECREF(tupobj);
                if (strobj == NULL) {Py_DECREF(it); return -1;}
            }
            if ((n=fwrite(PyString_AS_STRING(strobj),
                          1, n2=PyString_GET_SIZE(strobj),
                          fp)) < n2) {
                PyErr_Format(PyExc_IOError,
                             "problem writing element %d"\
                             " to file",
                             (int) it->index);
                Py_DECREF(strobj);
                Py_DECREF(it);
                return -1;
            }
            /* write separator for all but last one */
            if (it->index != it->size-1)
                fwrite(sep, 1, n3, fp);
            Py_DECREF(strobj);
            PyArray_ITER_NEXT(it);
        }
        Py_DECREF(it);
    }
    return 0;
}
+
+/*OBJECT_API
+ To List
+*/
+static PyObject *
+PyArray_ToList(PyArrayObject *self)
+{
+ PyObject *lp;
+ PyArrayObject *v;
+ intp sz, i;
+
+ if (!PyArray_Check(self)) return (PyObject *)self;
+
+ if (self->nd == 0)
+ return self->descr->f->getitem(self->data,self);
+
+ sz = self->dimensions[0];
+ lp = PyList_New(sz);
+
+ for (i=0; i<sz; i++) {
+ v=(PyArrayObject *)array_big_item(self, i);
+ if (v->nd >= self->nd) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "array_item not returning smaller-" \
+ "dimensional array");
+ Py_DECREF(v);
+ Py_DECREF(lp);
+ return NULL;
+ }
+ PyList_SetItem(lp, i, PyArray_ToList(v));
+ Py_DECREF(v);
+ }
+
+ return lp;
+}
+
+static PyObject *
+PyArray_ToString(PyArrayObject *self)
+{
+ intp numbytes;
+ intp index;
+ char *dptr;
+ int elsize;
+ PyObject *ret;
+ PyArrayIterObject *it;
+
+ /* if (PyArray_TYPE(self) == PyArray_OBJECT) {
+ PyErr_SetString(PyExc_ValueError, "a string for the data" \
+ "in an object array is not appropriate");
+ return NULL;
+ }
+ */
+
+ numbytes = PyArray_NBYTES(self);
+ if (PyArray_ISONESEGMENT(self)) {
+ ret = PyString_FromStringAndSize(self->data, (int) numbytes);
+ }
+ else {
+ it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (it==NULL) return NULL;
+ ret = PyString_FromStringAndSize(NULL, (int) numbytes);
+ if (ret == NULL) {Py_DECREF(it); return NULL;}
+ dptr = PyString_AS_STRING(ret);
+ index = it->size;
+ elsize = self->descr->elsize;
+ while(index--) {
+ memcpy(dptr, it->dataptr, elsize);
+ dptr += elsize;
+ PyArray_ITER_NEXT(it);
+ }
+ Py_DECREF(it);
+ }
+ return ret;
+}
+
+
/*********************** end C-API functions **********************/


/* array object functions */

static void
array_dealloc(PyArrayObject *self) {

    if (self->weakreflist != NULL)
        PyObject_ClearWeakRefs((PyObject *)self);

    if(self->base) {
        /* UPDATEIFCOPY means that base points to an
           array that should be updated with the contents
           of this array upon destruction.
           self->base->flags must have been WRITEABLE
           (checked previously) and it was locked here
           thus, unlock it.
        */
        if (self->flags & UPDATEIFCOPY) {
            ((PyArrayObject *)self->base)->flags |= WRITEABLE;
            Py_INCREF(self); /* hold on to self in next call */
            PyArray_CopyInto((PyArrayObject *)self->base, self);
            /* Don't need to DECREF -- because we are deleting
               self already... */
        }
        /* In any case base is pointing to something that we need
           to DECREF -- either a view or a buffer object */
        Py_DECREF(self->base);
    }

    if ((self->flags & OWN_DATA) && self->data) {
        /* Free internal references if an Object array */
        if (PyArray_ISOBJECT(self))
            PyArray_XDECREF(self);
        PyDataMem_FREE(self->data);
    }

    /* dimensions and strides share one allocation (dimensions first) */
    PyDimMem_FREE(self->dimensions);

    Py_DECREF(self->descr);

    self->ob_type->tp_free((PyObject *)self);
}
+
+/*************************************************************************
+ **************** Implement Mapping Protocol ***************************
+ *************************************************************************/
+
+static int
+array_length(PyArrayObject *self)
+{
+ if (self->nd != 0) {
+ return self->dimensions[0];
+ } else {
+ PyErr_SetString(PyExc_TypeError, "len() of unsized object");
+ return -1;
+ }
+}
+
/* Return a view of row 'i' along the first axis: an (nd-1)-dimensional
   array sharing self's data, with self held as its base.
   'i' must already be non-negative; index2ptr range-checks it. */
static PyObject *
array_big_item(PyArrayObject *self, intp i)
{
    char *item;
    PyArrayObject *r;

    if(self->nd == 0) {
        PyErr_SetString(PyExc_IndexError,
                        "0-d arrays can't be indexed");
        return NULL;
    }
    if ((item = index2ptr(self, i)) == NULL) return NULL;

    Py_INCREF(self->descr);   /* NewFromDescr steals this reference */
    r = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type,
                                              self->descr,
                                              self->nd-1,
                                              self->dimensions+1,
                                              self->strides+1, item,
                                              self->flags,
                                              (PyObject *)self);
    if (r == NULL) return NULL;
    /* The view keeps self alive through its base pointer. */
    Py_INCREF(self);
    r->base = (PyObject *)self;
    /* Dropping an axis may change contiguity; recompute those flags. */
    PyArray_UpdateFlags(r, CONTIGUOUS | FORTRAN);
    return (PyObject *)r;
}
+
+static PyObject *
+array_item_nice(PyArrayObject *self, int i)
+{
+ return PyArray_Return((PyArrayObject *)array_big_item(self, (intp) i));
+}
+
+
/* Assign 'v' to self[i] along the first axis.  Multidimensional rows
   are filled via PyArray_CopyObject on a view; 1-d arrays store the
   single element directly.  Returns 0 on success, -1 on error. */
static int
array_ass_big_item(PyArrayObject *self, intp i, PyObject *v)
{
    PyArrayObject *tmp;
    char *item;
    int ret;

    if (v == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "can't delete array elements");
        return -1;
    }
    if (!PyArray_ISWRITEABLE(self)) {
        PyErr_SetString(PyExc_RuntimeError,
                        "array is not writeable");
        return -1;
    }
    if (self->nd == 0) {
        PyErr_SetString(PyExc_IndexError,
                        "0-d arrays can't be indexed.");
        return -1;
    }

    /* Normalize a negative index; index2ptr rejects anything still
       out of range. */
    if (i < 0) i = i+self->dimensions[0];

    if (self->nd > 1) {
        /* Assign into a view of the row. */
        if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL)
            return -1;
        ret = PyArray_CopyObject(tmp, v);
        Py_DECREF(tmp);
        return ret;
    }

    /* 1-d: store the scalar via the type's setitem. */
    if ((item = index2ptr(self, i)) == NULL) return -1;
    if (self->descr->f->setitem(v, item, self) == -1) return -1;
    return 0;
}
+
/* When C int and intp are the same width the big-item handler can
   serve directly as the sq_ass_item slot; otherwise wrap it to widen
   the index. */
#if SIZEOF_INT == SIZEOF_INTP
#define array_ass_item array_ass_big_item
#else
static int
array_ass_item(PyArrayObject *self, int i, PyObject *v)
{
    return array_ass_big_item(self, (intp) i, v);
}
#endif
+
+
+/* -------------------------------------------------------------- */
+static int
+slice_coerce_index(PyObject *o, intp *v)
+{
+ *v = PyArray_PyIntAsIntp(o);
+ if (error_converting(*v)) {
+ PyErr_Clear();
+ return 0;
+ }
+ return 1;
+}
+
+
+/* This is basically PySlice_GetIndicesEx, but with our coercion
+ * of indices to integers (plus, that function is new in Python 2.3) */
+static int
+slice_GetIndices(PySliceObject *r, intp length,
+ intp *start, intp *stop, intp *step,
+ intp *slicelength)
+{
+ intp defstart, defstop;
+
+ if (r->step == Py_None) {
+ *step = 1;
+ } else {
+ if (!slice_coerce_index(r->step, step)) return -1;
+ if (*step == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "slice step cannot be zero");
+ return -1;
+ }
+ }
+
+ defstart = *step < 0 ? length - 1 : 0;
+ defstop = *step < 0 ? -1 : length;
+
+ if (r->start == Py_None) {
+ *start = *step < 0 ? length-1 : 0;
+ } else {
+ if (!slice_coerce_index(r->start, start)) return -1;
+ if (*start < 0) *start += length;
+ if (*start < 0) *start = (*step < 0) ? -1 : 0;
+ if (*start >= length) {
+ *start = (*step < 0) ? length - 1 : length;
+ }
+ }
+
+ if (r->stop == Py_None) {
+ *stop = defstop;
+ } else {
+ if (!slice_coerce_index(r->stop, stop)) return -1;
+ if (*stop < 0) *stop += length;
+ if (*stop < 0) *stop = -1;
+ if (*stop > length) *stop = length;
+ }
+
+ if ((*step < 0 && *stop >= *start) || \
+ (*step > 0 && *start >= *stop)) {
+ *slicelength = 0;
+ } else if (*step < 0) {
+ *slicelength = (*stop - *start + 1) / (*step) + 1;
+ } else {
+ *slicelength = (*stop - *start - 1) / (*step) + 1;
+ }
+
+ return 0;
+}
+
/* Sentinel values stored in *n_steps to tell parse_index what kind of
   subindex was seen (all are < 0, so they can't collide with a real
   slice length). */
#define PseudoIndex -1    /* None / newaxis */
#define RubberIndex -2    /* Ellipsis */
#define SingleIndex -3    /* plain integer */

/* Parse one component of an index tuple.  For a slice, returns its
   start and fills *step_size / *n_steps; for an integer, returns the
   (normalized, bounds-checked) index with *n_steps == SingleIndex.
   Returns -1 on error with an exception set. */
static intp
parse_subindex(PyObject *op, intp *step_size, intp *n_steps, intp max)
{
    intp index;

    if (op == Py_None) {
        *n_steps = PseudoIndex;
        index = 0;
    } else if (op == Py_Ellipsis) {
        *n_steps = RubberIndex;
        index = 0;
    } else if (PySlice_Check(op)) {
        intp stop;
        if (slice_GetIndices((PySliceObject *)op, max,
                             &index, &stop, step_size, n_steps) < 0) {
            if (!PyErr_Occurred()) {
                PyErr_SetString(PyExc_IndexError,
                                "invalid slice");
            }
            goto fail;
        }
        /* Canonicalize empty slices. */
        if (*n_steps <= 0) {
            *n_steps = 0;
            *step_size = 1;
            index = 0;
        }
    } else {
        index = PyArray_PyIntAsIntp(op);
        if (error_converting(index)) {
            PyErr_SetString(PyExc_IndexError,
                            "each subindex must be either a "\
                            "slice, an integer, Ellipsis, or "\
                            "newaxis");
            goto fail;
        }
        *n_steps = SingleIndex;
        *step_size = 0;
        /* Negative integers count from the end of the axis. */
        if (index < 0) index += max;
        if (index >= max || index < 0) {
            PyErr_SetString(PyExc_IndexError, "invalid index");
            goto fail;
        }
    }
    return index;
 fail:
    return -1;
}
+
+
/* Translate an index expression 'op' (a slice/int/None/Ellipsis, or a
   sequence of them) into view parameters for 'self': output shape in
   'dimensions', byte strides in 'strides', and the byte offset of the
   first element in *offset_ptr.  Returns the number of result
   dimensions, or -1 on error. */
static int
parse_index(PyArrayObject *self, PyObject *op,
            intp *dimensions, intp *strides, intp *offset_ptr)
{
    int i, j, n;
    int nd_old, nd_new, n_add, n_pseudo;
    intp n_steps, start, offset, step_size;
    PyObject *op1=NULL;
    int is_slice;


    if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) {
        /* Single subindex: treat as a 1-element tuple. */
        n = 1;
        op1 = op;
        Py_INCREF(op);
        /* this relies on the fact that n==1 for loop below */
        is_slice = 1;
    }
    else {
        if (!PySequence_Check(op)) {
            PyErr_SetString(PyExc_IndexError,
                            "index must be either an int "\
                            "or a sequence");
            return -1;
        }
        n = PySequence_Length(op);
        is_slice = 0;
    }

    /* nd_old walks self's axes; nd_new walks the result's axes. */
    nd_old = nd_new = 0;

    offset = 0;
    for(i=0; i<n; i++) {
        if (!is_slice) {
            if (!(op1=PySequence_GetItem(op, i))) {
                PyErr_SetString(PyExc_IndexError,
                                "invalid index");
                return -1;
            }
        }

        start = parse_subindex(op1, &step_size, &n_steps,
                               nd_old < self->nd ? \
                               self->dimensions[nd_old] : 0);
        Py_DECREF(op1);
        if (start == -1) break;

        if (n_steps == PseudoIndex) {
            /* newaxis: insert a length-1, stride-0 dimension. */
            dimensions[nd_new] = 1; strides[nd_new] = 0; nd_new++;
        } else {
            if (n_steps == RubberIndex) {
                /* Ellipsis: pass through however many axes are
                   needed so the rest of the index fits; count the
                   remaining newaxis entries first. */
                for(j=i+1, n_pseudo=0; j<n; j++) {
                    op1 = PySequence_GetItem(op, j);
                    if (op1 == Py_None) n_pseudo++;
                    Py_DECREF(op1);
                }
                n_add = self->nd-(n-i-n_pseudo-1+nd_old);
                if (n_add < 0) {
                    PyErr_SetString(PyExc_IndexError,
                                    "too many indices");
                    return -1;
                }
                for(j=0; j<n_add; j++) {
                    dimensions[nd_new] = \
                        self->dimensions[nd_old];
                    strides[nd_new] = \
                        self->strides[nd_old];
                    nd_new++; nd_old++;
                }
            } else {
                if (nd_old >= self->nd) {
                    PyErr_SetString(PyExc_IndexError,
                                    "too many indices");
                    return -1;
                }
                /* Integer or slice: advance the data offset; a slice
                   additionally contributes a result axis. */
                offset += self->strides[nd_old]*start;
                nd_old++;
                if (n_steps != SingleIndex) {
                    dimensions[nd_new] = n_steps;
                    strides[nd_new] = step_size * \
                        self->strides[nd_old-1];
                    nd_new++;
                }
            }
        }
    }
    if (i < n) return -1;
    /* Unindexed trailing axes pass through unchanged. */
    n_add = self->nd-nd_old;
    for(j=0; j<n_add; j++) {
        dimensions[nd_new] = self->dimensions[nd_old];
        strides[nd_new] = self->strides[nd_old];
        nd_new++; nd_old++;
    }
    *offset_ptr = offset;
    return nd_new;
}
+
/* Transpose *ret in place (replacing the reference) so that the axes
   produced by the broadcast index arrays move back to the position
   the index expression occupied, when fancy and slice indices were
   mixed non-consecutively. */
static void
_swap_axes(PyArrayMapIterObject *mit, PyArrayObject **ret)
{
    PyObject *new;
    int n1, n2, n3, val;
    int i;
    PyArray_Dims permute;
    intp d[MAX_DIMS];

    permute.ptr = d;
    permute.len = mit->nd;

    /* tuple for transpose is
       (n1,..,n1+n2-1,0,..,n1-1,n1+n2,...,n3-1)
       n1 is the number of dimensions of
       the broadcasted index array
       n2 is the number of dimensions skipped at the
       start
       n3 is the number of dimensions of the
       result
    */
    n1 = mit->iters[0]->nd_m1 + 1;
    n2 = mit->iteraxes[0];
    n3 = mit->nd;
    val = n1;
    i = 0;
    while(val < n1+n2)
        permute.ptr[i++] = val++;
    val = 0;
    while(val < n1)
        permute.ptr[i++] = val++;
    val = n1+n2;
    while(val < n3)
        permute.ptr[i++] = val++;

    /* On failure Transpose returns NULL with an error set; *ret then
       becomes NULL and the caller propagates the error. */
    new = PyArray_Transpose(*ret, &permute);
    Py_DECREF(*ret);
    *ret = (PyArrayObject *)new;
}
+
+/* Prototypes for Mapping calls --- not part of the C-API
+ because only useful as part of a getitem call.
+*/
+
+static void PyArray_MapIterReset(PyArrayMapIterObject *);
+static void PyArray_MapIterNext(PyArrayMapIterObject *);
+static void PyArray_MapIterBind(PyArrayMapIterObject *, PyArrayObject *);
+static PyObject* PyArray_MapIterNew(PyObject *, int);
+
/* Extract the elements selected by a bound map iterator into a new
   array (fancy-indexing read path).  Returns the new array, or NULL
   on error. */
static PyObject *
PyArray_GetMap(PyArrayMapIterObject *mit)
{

    PyArrayObject *ret, *temp;
    PyArrayIterObject *it;
    int index;
    int swap;
    PyArray_CopySwapFunc *copyswap;

    /* Unbound map iterator --- Bind should have been called */
    if (mit->ait == NULL) return NULL;

    /* This relies on the map iterator object telling us the shape
       of the new array in nd and dimensions.
    */
    temp = mit->ait->ao;
    Py_INCREF(temp->descr);   /* NewFromDescr steals this reference */
    ret = (PyArrayObject *)\
        PyArray_NewFromDescr(temp->ob_type,
                             temp->descr,
                             mit->nd, mit->dimensions,
                             NULL, NULL,
                             PyArray_ISFORTRAN(temp),
                             (PyObject *)temp);
    if (ret == NULL) return NULL;

    /* Now just iterate through the new array filling it in
       with the next object from the original array as
       defined by the mapping iterator */

    if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret))
        == NULL) {
        Py_DECREF(ret);
        return NULL;
    }
    index = it->size;
    swap = (PyArray_ISNOTSWAPPED(temp) != PyArray_ISNOTSWAPPED(ret));
    copyswap = ret->descr->f->copyswap;
    PyArray_MapIterReset(mit);
    while (index--) {
        copyswap(it->dataptr, mit->dataptr, swap, ret->descr->elsize);
        PyArray_MapIterNext(mit);
        PyArray_ITER_NEXT(it);
    }
    Py_DECREF(it);

    /* check for consecutive axes */
    if ((mit->subspace != NULL) && (mit->consec)) {
        if (mit->iteraxes[0] > 0) { /* then we need to swap */
            _swap_axes(mit, &ret);
        }
    }
    return (PyObject *)ret;
}
+
/* Assign 'op' to the elements selected by a bound map iterator
   (fancy-indexing write path).  'op' is broadcast (by cycling its
   iterator) to the number of selected elements.  Returns 0 on
   success, -1 on error. */
static int
PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op)
{
    PyObject *arr=NULL;
    PyArrayIterObject *it;
    int index;
    int swap;
    PyArray_CopySwapFunc *copyswap;
    PyArray_Descr *descr;

    /* Unbound Map Iterator */
    if (mit->ait == NULL) return -1;

    /* Convert op to the target array's type (FromAny steals descr). */
    descr = mit->ait->ao->descr;
    Py_INCREF(descr);
    arr = PyArray_FromAny(op, descr, 0, 0, FORCECAST);
    if (arr == NULL) return -1;

    if ((mit->subspace != NULL) && (mit->consec)) {
        if (mit->iteraxes[0] > 0) { /* then we need to swap */
            _swap_axes(mit, (PyArrayObject **)&arr);
        }
    }

    if ((it = (PyArrayIterObject *)PyArray_IterNew(arr))==NULL) {
        Py_DECREF(arr);
        return -1;
    }

    index = mit->size;
    swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != \
            (PyArray_ISNOTSWAPPED(arr)));

    copyswap = PyArray_DESCR(arr)->f->copyswap;
    PyArray_MapIterReset(mit);
    /* Need to decref OBJECT arrays */
    if (PyTypeNum_ISOBJECT(descr->type_num)) {
        while (index--) {
            /* Release the overwritten reference, own the new one. */
            Py_XDECREF(*((PyObject **)mit->dataptr));
            Py_INCREF(*((PyObject **)it->dataptr));
            memmove(mit->dataptr, it->dataptr, sizeof(PyObject *));
            PyArray_MapIterNext(mit);
            PyArray_ITER_NEXT(it);
            /* broadcast: cycle the source when it runs out */
            if (it->index == it->size)
                PyArray_ITER_RESET(it);
        }
        Py_DECREF(arr);
        Py_DECREF(it);
        return 0;
    }
    while(index--) {
        memmove(mit->dataptr, it->dataptr, PyArray_ITEMSIZE(arr));
        copyswap(mit->dataptr, NULL, swap, PyArray_ITEMSIZE(arr));
        PyArray_MapIterNext(mit);
        PyArray_ITER_NEXT(it);
        /* broadcast: cycle the source when it runs out */
        if (it->index == it->size)
            PyArray_ITER_RESET(it);
    }
    Py_DECREF(arr);
    Py_DECREF(it);
    return 0;
}
+
+/* Called when treating array object like a mapping -- called first from
+ Python when using a[object] unless object is a standard slice object
+ (not an extended one).
+
+*/
+
+/* There are two situations:
+
+ 1 - the subscript is a standard view and a reference to the
+ array can be returned
+
+ 2 - the subscript uses Boolean masks or integer indexing and
+ therefore a new array is created and returned.
+
+*/
+
+/* Always returns arrays */
+
+static PyObject *iter_subscript(PyArrayIterObject *, PyObject *);
+
+/* tp_subscript workhorse: implements a[op] and always returns an array
+   (the 0-d -> scalar conversion happens in array_subscript_nice).
+
+   Dispatch order, as visible below:
+     1. string/unicode op  -> record-field lookup in descr->fields
+     2. 0-d self           -> IndexError
+     3. integer-like op    -> array_big_item (negative index wraps once)
+     4. otherwise          -> wrap op in a mapiter; fancy indexing builds
+        a new array, a plain view falls through to parse_index and a
+        PyArray_NewFromDescr view sharing self's data.
+*/
+static PyObject *
+array_subscript(PyArrayObject *self, PyObject *op)
+{
+ intp dimensions[MAX_DIMS], strides[MAX_DIMS];
+ intp offset;
+ int nd, oned;
+ intp i;
+ PyArrayObject *other;
+ PyArrayMapIterObject *mit;
+
+ if (PyString_Check(op) || PyUnicode_Check(op)) {
+ if (self->descr->fields) {
+ PyObject *obj;
+ /* borrowed reference; fields maps name -> (descr, offset[, title]) */
+ obj = PyDict_GetItem(self->descr->fields, op);
+ if (obj != NULL) {
+ PyArray_Descr *descr;
+ int offset;
+ PyObject *title;
+
+ if (PyArg_ParseTuple(obj, "Oi|O",
+ &descr, &offset, &title)) {
+ Py_INCREF(descr);
+ return PyArray_GetField(self, descr,
+ offset);
+ }
+ }
+ }
+
+ PyErr_Format(PyExc_ValueError,
+ "field named %s not found.",
+ PyString_AsString(op));
+ return NULL;
+ }
+ if (self->nd == 0) {
+ PyErr_SetString(PyExc_IndexError,
+ "0-d arrays can't be indexed.");
+ return NULL;
+ }
+ if (PyArray_IsScalar(op, Integer) || PyInt_Check(op) || \
+ PyLong_Check(op)) {
+ intp value;
+ value = PyArray_PyIntAsIntp(op);
+ if (PyErr_Occurred())
+ PyErr_Clear(); /* fall through to generic handling below */
+ else if (value >= 0) {
+ return array_big_item(self, value);
+ }
+ else /* (value < 0) */ {
+ value += self->dimensions[0];
+ return array_big_item(self, value);
+ }
+ }
+
+ /* 1-d special case unless op is a multi-item tuple */
+ oned = ((self->nd == 1) && !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1));
+
+ /* wrap arguments into a mapiter object */
+ mit = (PyArrayMapIterObject *)PyArray_MapIterNew(op, oned);
+ if (mit == NULL) return NULL;
+ if (!mit->view) { /* fancy indexing */
+ if (oned) {
+ /* 1-d fancy indexing is delegated to the iterator protocol */
+ PyArrayIterObject *it;
+ PyObject *rval;
+ it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (it == NULL) {Py_DECREF(mit); return NULL;}
+ rval = iter_subscript(it, mit->indexobj);
+ Py_DECREF(it);
+ Py_DECREF(mit);
+ return rval;
+ }
+ PyArray_MapIterBind(mit, self);
+ other = (PyArrayObject *)PyArray_GetMap(mit);
+ Py_DECREF(mit);
+ return (PyObject *)other;
+ }
+ Py_DECREF(mit);
+
+ /* mit said "view": retry plain integer indexing (op may have been
+ an integer-like object not caught above) */
+ i = PyArray_PyIntAsIntp(op);
+ if (!error_converting(i)) {
+ if (i < 0 && self->nd > 0) i = i+self->dimensions[0];
+ return array_big_item(self, i);
+ }
+ PyErr_Clear();
+
+ /* Standard (view-based) Indexing */
+ if ((nd = parse_index(self, op, dimensions, strides, &offset))
+ == -1)
+ return NULL;
+
+ /* This will only work if new array will be a view */
+ Py_INCREF(self->descr);
+ if ((other = (PyArrayObject *) \
+ PyArray_NewFromDescr(self->ob_type, self->descr,
+ nd, dimensions,
+ strides, self->data+offset,
+ self->flags,
+ (PyObject *)self)) == NULL)
+ return NULL;
+
+
+ /* the view keeps self alive through its base pointer */
+ other->base = (PyObject *)self;
+ Py_INCREF(self);
+
+ PyArray_UpdateFlags(other, UPDATE_ALL_FLAGS);
+
+ return (PyObject *)other;
+}
+
+
+/* Another assignment hacked by using CopyObject. */
+
+/* This only works if subscript returns a standard view. */
+
+/* Again there are two cases. In the first case, PyArray_CopyObject
+ can be used. In the second case, a new indexing function has to be
+ used.
+*/
+
+static int iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *);
+
+/* tp_ass_subscript: implements a[index] = op.
+
+   Mirrors array_subscript's dispatch: integer item assignment, record
+   field assignment, fancy-index assignment through a mapiter, and
+   finally view-based assignment via array_subscript + PyArray_CopyObject.
+   Returns 0 on success, -1 (with an exception set) on failure.
+*/
+static int
+array_ass_sub(PyArrayObject *self, PyObject *index, PyObject *op)
+{
+ int ret, oned;
+ intp i;
+ PyArrayObject *tmp;
+ PyArrayMapIterObject *mit;
+
+ if (op == NULL) {
+ /* del a[index] is not supported */
+ PyErr_SetString(PyExc_ValueError,
+ "cannot delete array elements");
+ return -1;
+ }
+ if (!PyArray_ISWRITEABLE(self)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "array is not writeable");
+ return -1;
+ }
+
+ if (PyArray_IsScalar(index, Integer) || PyInt_Check(index) || \
+ PyLong_Check(index)) {
+ intp value;
+ value = PyArray_PyIntAsIntp(index);
+ if (PyErr_Occurred())
+ PyErr_Clear(); /* fall through to generic handling below */
+ else
+ return array_ass_big_item(self, value, op);
+ }
+
+ if (PyString_Check(index) || PyUnicode_Check(index)) {
+ if (self->descr->fields) {
+ PyObject *obj;
+ /* borrowed; fields maps name -> (descr, offset[, title]) */
+ obj = PyDict_GetItem(self->descr->fields, index);
+ if (obj != NULL) {
+ PyArray_Descr *descr;
+ int offset;
+ PyObject *title;
+
+ if (PyArg_ParseTuple(obj, "Oi|O",
+ &descr, &offset, &title)) {
+ Py_INCREF(descr);
+ return PyArray_SetField(self, descr,
+ offset, op);
+ }
+ }
+ }
+
+ PyErr_Format(PyExc_ValueError,
+ "field named %s not found.",
+ PyString_AsString(index));
+ return -1;
+ }
+
+ if (self->nd == 0) {
+ PyErr_SetString(PyExc_IndexError,
+ "0-d arrays can't be indexed.");
+ return -1;
+ }
+
+ /* NOTE(review): this tests op (the value), not index — looks
+ intentional per array_subscript's use of oned, but confirm. */
+ oned = ((self->nd == 1) && !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1));
+
+ mit = (PyArrayMapIterObject *)PyArray_MapIterNew(index, oned);
+ if (mit == NULL) return -1;
+ if (!mit->view) {
+ /* fancy-index assignment */
+ if (oned) {
+ PyArrayIterObject *it;
+ int rval;
+ it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (it == NULL) {Py_DECREF(mit); return -1;}
+ rval = iter_ass_subscript(it, mit->indexobj, op);
+ Py_DECREF(it);
+ Py_DECREF(mit);
+ return rval;
+ }
+ PyArray_MapIterBind(mit, self);
+ ret = PyArray_SetMap(mit, op);
+ Py_DECREF(mit);
+ return ret;
+ }
+ Py_DECREF(mit);
+
+ i = PyArray_PyIntAsIntp(index);
+ if (!error_converting(i)) {
+ return array_ass_big_item(self, i, op);
+ }
+ PyErr_Clear();
+
+ /* Rest of standard (view-based) indexing */
+
+ if ((tmp = (PyArrayObject *)array_subscript(self, index)) == NULL)
+ return -1;
+ if (PyArray_ISOBJECT(self) && (tmp->nd == 0)) {
+ /* single object element: set it directly */
+ ret = tmp->descr->f->setitem(op, tmp->data, tmp);
+ }
+ else {
+ ret = PyArray_CopyObject(tmp, op);
+ }
+ Py_DECREF(tmp);
+ return ret;
+}
+
+/* Python-facing subscript wrapper.  array_subscript itself must always
+   produce a PyArrayObject for internal callers; this wrapper converts a
+   resulting 0-d array into the corresponding Python scalar before it is
+   handed back to Python code. */
+
+static PyObject *
+array_subscript_nice(PyArrayObject *self, PyObject *op)
+{
+    PyObject *res = array_subscript(self, op);
+    return PyArray_Return((PyArrayObject *)res);
+}
+
+
+/* Mapping protocol table: len(a), a[op], a[op] = v. */
+static PyMappingMethods array_as_mapping = {
+ (inquiry)array_length, /*mp_length*/
+ (binaryfunc)array_subscript_nice, /*mp_subscript*/
+ (objobjargproc)array_ass_sub, /*mp_ass_subscript*/
+};
+
+/****************** End of Mapping Protocol ******************************/
+
+
+/*************************************************************************
+ **************** Implement Buffer Protocol ****************************
+ *************************************************************************/
+
+/* removed multiple segment interface */
+
+/* bf_getsegcount: a one-segment array exposes one buffer segment of
+   PyArray_NBYTES bytes; anything else exposes none. */
+static int
+array_getsegcount(PyArrayObject *self, int *lenp)
+{
+    if (PyArray_ISONESEGMENT(self)) {
+        if (lenp != NULL)
+            *lenp = PyArray_NBYTES(self);
+        return 1;
+    }
+    if (lenp != NULL)
+        *lenp = 0;
+    return 0;
+}
+
+/* bf_getreadbuffer: hand out a read pointer to the (single) data
+   segment.  Returns the segment length in bytes, or -1 on error. */
+static int
+array_getreadbuf(PyArrayObject *self, int segment, void **ptrptr)
+{
+    if (segment != 0) {
+        PyErr_SetString(PyExc_ValueError,
+                        "accessing non-existing array segment");
+        return -1;
+    }
+    if (!PyArray_ISONESEGMENT(self)) {
+        PyErr_SetString(PyExc_ValueError, "array is not a single segment");
+        *ptrptr = NULL;
+        return -1;
+    }
+    *ptrptr = self->data;
+    return PyArray_NBYTES(self);
+}
+
+
+/* bf_getwritebuffer: same as the read buffer, but only for writeable
+   arrays. */
+static int
+array_getwritebuf(PyArrayObject *self, int segment, void **ptrptr)
+{
+    if (!PyArray_CHKFLAGS(self, WRITEABLE)) {
+        PyErr_SetString(PyExc_ValueError, "array cannot be "\
+                        "accessed as a writeable buffer");
+        return -1;
+    }
+    return array_getreadbuf(self, segment, (void **) ptrptr);
+}
+
+/* bf_getcharbuffer: only string/unicode arrays may be viewed as a
+   character buffer. */
+static int
+array_getcharbuf(PyArrayObject *self, int segment, const char **ptrptr)
+{
+    int tnum = self->descr->type_num;
+
+    if (tnum != PyArray_STRING && tnum != PyArray_UNICODE) {
+        PyErr_SetString(PyExc_TypeError,
+                        "non-character array cannot be interpreted "\
+                        "as character buffer");
+        return -1;
+    }
+    return array_getreadbuf(self, segment, (void **) ptrptr);
+}
+
+/* Old-style (Python 2) buffer protocol table. */
+static PyBufferProcs array_as_buffer = {
+ (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/
+ (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/
+ (getsegcountproc)array_getsegcount, /*bf_getsegcount*/
+ (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/
+};
+
+/****************** End of Buffer Protocol *******************************/
+
+
+/*************************************************************************
+ **************** Implement Number Protocol ****************************
+ *************************************************************************/
+
+
+/* Cache of the Python callables (set via PyArray_SetNumericOps, read
+   everywhere below) that implement each numeric/comparison operation
+   for arrays.  A NULL slot means "no operation registered"; the
+   callers then return Py_NotImplemented. */
+typedef struct {
+ PyObject *add,
+ *subtract,
+ *multiply,
+ *divide,
+ *remainder,
+ *power,
+ *sqrt,
+ *negative,
+ *absolute,
+ *invert,
+ *left_shift,
+ *right_shift,
+ *bitwise_and,
+ *bitwise_xor,
+ *bitwise_or,
+ *less,
+ *less_equal,
+ *equal,
+ *not_equal,
+ *greater,
+ *greater_equal,
+ *floor_divide,
+ *true_divide,
+ *logical_or,
+ *logical_and,
+ *floor,
+ *ceil,
+ *maximum,
+ *minimum;
+
+} NumericOps;
+
+/* All slots start out unregistered. */
+static NumericOps n_ops = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL};
+
+/* Dictionary can contain any of the numeric operations, by name.
+ Those not present will not be changed
+ */
+
+/* Look up #op in dict and, if present and callable, install it as
+   n_ops.op, releasing any previously installed callable.
+   PyDict_GetItemString returns a BORROWED reference, so it must be
+   Py_INCREF'd before being stored in the long-lived n_ops struct;
+   the original macro stored the borrowed pointer directly, which can
+   leave n_ops.op dangling once the dict entry is released. */
+#define SET(op) temp=PyDict_GetItemString(dict, #op); \
+        if (temp != NULL) { \
+                if (!(PyCallable_Check(temp))) return -1; \
+                Py_INCREF(temp); \
+                Py_XDECREF(n_ops.op); \
+                n_ops.op = temp; \
+        }
+
+
+/*OBJECT_API
+ Set internal structure with number functions that all arrays will use
+*/
+/* For every recognized operation name present in dict, replace the
+   cached callable in n_ops (via the SET macro above).  Names that are
+   absent from dict are left untouched.  Returns 0 on success, -1 if a
+   supplied entry is not callable. */
+int
+PyArray_SetNumericOps(PyObject *dict)
+{
+ PyObject *temp = NULL;
+ SET(add);
+ SET(subtract);
+ SET(multiply);
+ SET(divide);
+ SET(remainder);
+ SET(power);
+ SET(sqrt);
+ SET(negative);
+ SET(absolute);
+ SET(invert);
+ SET(left_shift);
+ SET(right_shift);
+ SET(bitwise_and);
+ SET(bitwise_or);
+ SET(bitwise_xor);
+ SET(less);
+ SET(less_equal);
+ SET(equal);
+ SET(not_equal);
+ SET(greater);
+ SET(greater_equal);
+ SET(floor_divide);
+ SET(true_divide);
+ SET(logical_or);
+ SET(logical_and);
+ SET(floor);
+ SET(ceil);
+ SET(maximum);
+ SET(minimum);
+ return 0;
+}
+
+/* Copy n_ops.op (if registered) into dict under the name #op;
+   jump to fail on a dict-insertion error. */
+#define GET(op) if (n_ops.op && \
+ (PyDict_SetItemString(dict, #op, n_ops.op)==-1)) \
+ goto fail;
+
+/*OBJECT_API
+ Get dictionary showing number functions that all arrays will use
+*/
+/* Build and return a new dict mapping operation names to the currently
+   registered callables in n_ops (unregistered slots are omitted).
+   Returns NULL with an exception set on failure. */
+static PyObject *
+PyArray_GetNumericOps(void)
+{
+ PyObject *dict;
+ if ((dict = PyDict_New())==NULL)
+ return NULL;
+ GET(add);
+ GET(subtract);
+ GET(multiply);
+ GET(divide);
+ GET(remainder);
+ GET(power);
+ GET(sqrt);
+ GET(negative);
+ GET(absolute);
+ GET(invert);
+ GET(left_shift);
+ GET(right_shift);
+ GET(bitwise_and);
+ GET(bitwise_or);
+ GET(bitwise_xor);
+ GET(less);
+ GET(less_equal);
+ GET(equal);
+ GET(not_equal);
+ GET(greater);
+ GET(greater_equal);
+ GET(floor_divide);
+ GET(true_divide);
+ GET(logical_or);
+ GET(logical_and);
+ GET(floor);
+ GET(ceil);
+ GET(maximum);
+ GET(minimum);
+ return dict;
+
+ fail:
+ Py_DECREF(dict);
+ return NULL;
+}
+
+/* Call op.reduce(m1, axis[, typechar]) and return the result.
+   rtype selects an optional reduction dtype; PyArray_NOTYPE means
+   "use the default".  Returns Py_NotImplemented when op is NULL.
+
+   Fixes vs. original: Py_BuildValue's result is now checked, and meth
+   is released with Py_XDECREF — the original called Py_DECREF(meth)
+   even when PyObject_GetAttrString had failed and returned NULL,
+   which would crash. */
+static PyObject *
+PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis,
+                              int rtype)
+{
+    PyObject *args, *ret=NULL, *meth;
+    if (op == NULL) {
+        Py_INCREF(Py_NotImplemented);
+        return Py_NotImplemented;
+    }
+    if (rtype == PyArray_NOTYPE)
+        args = Py_BuildValue("(Oi)", m1, axis);
+    else {
+        PyArray_Descr *descr;
+        descr = PyArray_DescrFromType(rtype);
+        args = Py_BuildValue("(Oic)", m1, axis, descr->type);
+        Py_DECREF(descr);
+    }
+    if (args == NULL) return NULL;
+    meth = PyObject_GetAttrString(op, "reduce");
+    if (meth && PyCallable_Check(meth)) {
+        ret = PyObject_Call(meth, args, NULL);
+    }
+    Py_DECREF(args);
+    Py_XDECREF(meth);   /* meth may be NULL if attribute lookup failed */
+    return ret;
+}
+
+
+/* Call op.accumulate(m1, axis[, typechar]) and return the result.
+   rtype selects an optional accumulation dtype; PyArray_NOTYPE means
+   "use the default".  Returns Py_NotImplemented when op is NULL.
+
+   Fixes vs. original: Py_BuildValue's result is now checked, and meth
+   is released with Py_XDECREF instead of Py_DECREF (the original
+   crashed when the "accumulate" attribute lookup failed). */
+static PyObject *
+PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis,
+                                  int rtype)
+{
+    PyObject *args, *ret=NULL, *meth;
+    if (op == NULL) {
+        Py_INCREF(Py_NotImplemented);
+        return Py_NotImplemented;
+    }
+    if (rtype == PyArray_NOTYPE)
+        args = Py_BuildValue("(Oi)", m1, axis);
+    else {
+        PyArray_Descr *descr;
+        descr = PyArray_DescrFromType(rtype);
+        args = Py_BuildValue("(Oic)", m1, axis, descr->type);
+        Py_DECREF(descr);
+    }
+    if (args == NULL) return NULL;
+    meth = PyObject_GetAttrString(op, "accumulate");
+    if (meth && PyCallable_Check(meth)) {
+        ret = PyObject_Call(meth, args, NULL);
+    }
+    Py_DECREF(args);
+    Py_XDECREF(meth);   /* meth may be NULL if attribute lookup failed */
+    return ret;
+}
+
+
+/* Invoke a registered binary operation as op(m1, m2); defer with
+   Py_NotImplemented when nothing is registered. */
+static PyObject *
+PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op)
+{
+    if (op != NULL)
+        return PyObject_CallFunction(op, "OO", m1, m2);
+    Py_INCREF(Py_NotImplemented);
+    return Py_NotImplemented;
+}
+
+/* Invoke a registered unary operation as op(m1); defer with
+   Py_NotImplemented when nothing is registered. */
+static PyObject *
+PyArray_GenericUnaryFunction(PyArrayObject *m1, PyObject *op)
+{
+    if (op != NULL)
+        return PyObject_CallFunction(op, "(O)", m1);
+    Py_INCREF(Py_NotImplemented);
+    return Py_NotImplemented;
+}
+
+/* Invoke a registered binary operation in-place: op(m1, m2, out=m1),
+   passing m1 a second time as the output argument.  Defers with
+   Py_NotImplemented when nothing is registered. */
+static PyObject *
+PyArray_GenericInplaceBinaryFunction(PyArrayObject *m1,
+                                     PyObject *m2, PyObject *op)
+{
+    if (op != NULL)
+        return PyObject_CallFunction(op, "OOO", m1, m2, m1);
+    Py_INCREF(Py_NotImplemented);
+    return Py_NotImplemented;
+}
+
+/* Thin nb_* slot wrappers: each forwards to the corresponding cached
+   callable in n_ops through the generic helpers above. */
+static PyObject *
+array_add(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.add);
+}
+
+static PyObject *
+array_subtract(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.subtract);
+}
+
+static PyObject *
+array_multiply(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.multiply);
+}
+
+static PyObject *
+array_divide(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.divide);
+}
+
+static PyObject *
+array_remainder(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.remainder);
+}
+
+static PyObject *
+array_power(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.power);
+}
+
+static PyObject *
+array_negative(PyArrayObject *m1)
+{
+ return PyArray_GenericUnaryFunction(m1, n_ops.negative);
+}
+
+static PyObject *
+array_absolute(PyArrayObject *m1)
+{
+ return PyArray_GenericUnaryFunction(m1, n_ops.absolute);
+}
+
+static PyObject *
+array_invert(PyArrayObject *m1)
+{
+ return PyArray_GenericUnaryFunction(m1, n_ops.invert);
+}
+
+static PyObject *
+array_left_shift(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.left_shift);
+}
+
+static PyObject *
+array_right_shift(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.right_shift);
+}
+
+static PyObject *
+array_bitwise_and(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_and);
+}
+
+static PyObject *
+array_bitwise_or(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_or);
+}
+
+static PyObject *
+array_bitwise_xor(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_xor);
+}
+
+/* In-place (augmented assignment) nb_inplace_* slot wrappers; each
+   forwards to the same n_ops callable with m1 reused as output. */
+static PyObject *
+array_inplace_add(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.add);
+}
+
+static PyObject *
+array_inplace_subtract(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.subtract);
+}
+
+static PyObject *
+array_inplace_multiply(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply);
+}
+
+static PyObject *
+array_inplace_divide(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.divide);
+}
+
+static PyObject *
+array_inplace_remainder(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.remainder);
+}
+
+static PyObject *
+array_inplace_power(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.power);
+}
+
+static PyObject *
+array_inplace_left_shift(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.left_shift);
+}
+
+static PyObject *
+array_inplace_right_shift(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.right_shift);
+}
+
+static PyObject *
+array_inplace_bitwise_and(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_and);
+}
+
+static PyObject *
+array_inplace_bitwise_or(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_or);
+}
+
+static PyObject *
+array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_xor);
+}
+
+/* Floor/true division slot wrappers (plain and in-place). */
+static PyObject *
+array_floor_divide(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.floor_divide);
+}
+
+static PyObject *
+array_true_divide(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.true_divide);
+}
+
+static PyObject *
+array_inplace_floor_divide(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2,
+ n_ops.floor_divide);
+}
+
+static PyObject *
+array_inplace_true_divide(PyArrayObject *m1, PyObject *m2)
+{
+ return PyArray_GenericInplaceBinaryFunction(m1, m2,
+ n_ops.true_divide);
+}
+
+/* Array evaluates as "TRUE" if any of the elements are non-zero.
+   Iterates element-by-element and stops at the first hit.  Returns a
+   Bool (FALSE also on iterator-creation failure). */
+static int
+array_any_nonzero(PyArrayObject *mp)
+{
+    PyArrayIterObject *it;
+    intp remaining;
+    Bool found = FALSE;
+
+    it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
+    if (it == NULL)
+        return found;
+    for (remaining = it->size; remaining > 0 && !found; remaining--) {
+        if (mp->descr->f->nonzero(it->dataptr, mp))
+            found = TRUE;
+        else
+            PyArray_ITER_NEXT(it);
+    }
+    Py_DECREF(it);
+    return found;
+}
+
+/* nb_nonzero: truth value of an array.  Defined only for empty
+   (False) and single-element arrays; ambiguous otherwise. */
+static int
+_array_nonzero(PyArrayObject *mp)
+{
+    intp size = PyArray_SIZE(mp);
+
+    if (size == 0)
+        return 0;
+    if (size == 1)
+        return mp->descr->f->nonzero(mp->data, mp);
+    PyErr_SetString(PyExc_ValueError,
+                    "The truth value of an array " \
+                    "with more than one element is ambiguous. " \
+                    "Use a.any() or a.all()");
+    return -1;
+}
+
+
+
+/* nb_divmod: return the tuple (op1 // op2, op1 % op2). */
+static PyObject *
+array_divmod(PyArrayObject *op1, PyObject *op2)
+{
+    PyObject *quot, *rem, *pair = NULL;
+
+    quot = array_floor_divide(op1, op2);
+    if (quot == NULL)
+        return NULL;
+    rem = array_remainder(op1, op2);
+    if (rem != NULL) {
+        pair = Py_BuildValue("OO", quot, rem);
+        Py_DECREF(rem);
+    }
+    Py_DECREF(quot);
+    return pair;
+}
+
+
+/* nb_int: convert a length-1 array to a Python int by extracting the
+   single element and delegating to its own nb_int slot. */
+static PyObject *
+array_int(PyArrayObject *v)
+{
+ PyObject *pv, *pv2;
+ if (PyArray_SIZE(v) != 1) {
+ PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be"\
+ " converted to Python scalars");
+ return NULL;
+ }
+ pv = v->descr->f->getitem(v->data, v);
+ if (pv == NULL) return NULL;
+ if (pv->ob_type->tp_as_number == 0) {
+ PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
+ "scalar object is not a number");
+ Py_DECREF(pv);
+ return NULL;
+ }
+ if (pv->ob_type->tp_as_number->nb_int == 0) {
+ PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
+ "scalar number to int");
+ Py_DECREF(pv);
+ return NULL;
+ }
+
+ pv2 = pv->ob_type->tp_as_number->nb_int(pv);
+ Py_DECREF(pv);
+ return pv2;
+}
+
+/* nb_float: convert a length-1 array to a Python float by extracting
+   the single element and delegating to its own nb_float slot.
+   Fix vs. original: the first error message was copy-pasted from
+   array_int and claimed a failed conversion "to an int". */
+static PyObject *
+array_float(PyArrayObject *v)
+{
+    PyObject *pv, *pv2;
+    if (PyArray_SIZE(v) != 1) {
+        PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
+                        "be converted to Python scalars");
+        return NULL;
+    }
+    pv = v->descr->f->getitem(v->data, v);
+    if (pv == NULL) return NULL;
+    if (pv->ob_type->tp_as_number == 0) {
+        PyErr_SetString(PyExc_TypeError, "cannot convert to a "\
+                        "float; scalar object is not a number");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    if (pv->ob_type->tp_as_number->nb_float == 0) {
+        PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
+                        "scalar number to float");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    pv2 = pv->ob_type->tp_as_number->nb_float(pv);
+    Py_DECREF(pv);
+    return pv2;
+}
+
+/* nb_long: convert a length-1 array to a Python long by extracting the
+   single element and delegating to its own nb_long slot.
+   Fixes vs. original (brings it in line with array_int): the getitem
+   result was dereferenced without a NULL check, and pv leaked on both
+   error paths. */
+static PyObject *
+array_long(PyArrayObject *v)
+{
+    PyObject *pv, *pv2;
+    if (PyArray_SIZE(v) != 1) {
+        PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
+                        "be converted to Python scalars");
+        return NULL;
+    }
+    pv = v->descr->f->getitem(v->data, v);
+    if (pv == NULL) return NULL;
+    if (pv->ob_type->tp_as_number == 0) {
+        PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
+                        "scalar object is not a number");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    if (pv->ob_type->tp_as_number->nb_long == 0) {
+        PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
+                        "scalar number to long");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    pv2 = pv->ob_type->tp_as_number->nb_long(pv);
+    Py_DECREF(pv);
+    return pv2;
+}
+
+/* nb_oct: octal string of a length-1 array via the element's nb_oct.
+   Fixes vs. original (same pattern as array_int): NULL check on the
+   getitem result, and pv no longer leaks on the error paths. */
+static PyObject *
+array_oct(PyArrayObject *v)
+{
+    PyObject *pv, *pv2;
+    if (PyArray_SIZE(v) != 1) {
+        PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
+                        "be converted to Python scalars");
+        return NULL;
+    }
+    pv = v->descr->f->getitem(v->data, v);
+    if (pv == NULL) return NULL;
+    if (pv->ob_type->tp_as_number == 0) {
+        PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
+                        "scalar object is not a number");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    if (pv->ob_type->tp_as_number->nb_oct == 0) {
+        PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
+                        "scalar number to oct");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    pv2 = pv->ob_type->tp_as_number->nb_oct(pv);
+    Py_DECREF(pv);
+    return pv2;
+}
+
+/* nb_hex: hex string of a length-1 array via the element's nb_hex.
+   Fixes vs. original (same pattern as array_int): NULL check on the
+   getitem result, and pv no longer leaks on the error paths. */
+static PyObject *
+array_hex(PyArrayObject *v)
+{
+    PyObject *pv, *pv2;
+    if (PyArray_SIZE(v) != 1) {
+        PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
+                        "be converted to Python scalars");
+        return NULL;
+    }
+    pv = v->descr->f->getitem(v->data, v);
+    if (pv == NULL) return NULL;
+    if (pv->ob_type->tp_as_number == 0) {
+        PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\
+                        "scalar object is not a number");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    if (pv->ob_type->tp_as_number->nb_hex == 0) {
+        PyErr_SetString(PyExc_TypeError, "don't know how to convert "\
+                        "scalar number to hex");
+        Py_DECREF(pv);
+        return NULL;
+    }
+    pv2 = pv->ob_type->tp_as_number->nb_hex(pv);
+    Py_DECREF(pv);
+    return pv2;
+}
+
+/* nb_positive: +a returns a copy (0-d collapsed to a scalar). */
+static PyObject *
+_array_copy_nice(PyArrayObject *self)
+{
+    PyObject *copy = PyArray_Copy(self);
+    return PyArray_Return((PyArrayObject *)copy);
+}
+
+/* Number protocol table (Python 2 layout, including nb_divide and
+   nb_oct/nb_hex slots). */
+static PyNumberMethods array_as_number = {
+ (binaryfunc)array_add, /*nb_add*/
+ (binaryfunc)array_subtract, /*nb_subtract*/
+ (binaryfunc)array_multiply, /*nb_multiply*/
+ (binaryfunc)array_divide, /*nb_divide*/
+ (binaryfunc)array_remainder, /*nb_remainder*/
+ (binaryfunc)array_divmod, /*nb_divmod*/
+ /* NOTE(review): array_power takes two args but is installed in a
+ ternary slot (the modulus argument is silently dropped by the
+ cast) — confirm this is intended. */
+ (ternaryfunc)array_power, /*nb_power*/
+ (unaryfunc)array_negative, /*nb_neg*/
+ (unaryfunc)_array_copy_nice, /*nb_pos*/
+ (unaryfunc)array_absolute, /*(unaryfunc)array_abs,*/
+ (inquiry)_array_nonzero, /*nb_nonzero*/
+ (unaryfunc)array_invert, /*nb_invert*/
+ (binaryfunc)array_left_shift, /*nb_lshift*/
+ (binaryfunc)array_right_shift, /*nb_rshift*/
+ (binaryfunc)array_bitwise_and, /*nb_and*/
+ (binaryfunc)array_bitwise_xor, /*nb_xor*/
+ (binaryfunc)array_bitwise_or, /*nb_or*/
+ 0, /*nb_coerce*/
+ (unaryfunc)array_int, /*nb_int*/
+ (unaryfunc)array_long, /*nb_long*/
+ (unaryfunc)array_float, /*nb_float*/
+ (unaryfunc)array_oct, /*nb_oct*/
+ (unaryfunc)array_hex, /*nb_hex*/
+
+ /*This code adds augmented assignment functionality*/
+ /*that was made available in Python 2.0*/
+ (binaryfunc)array_inplace_add, /*inplace_add*/
+ (binaryfunc)array_inplace_subtract, /*inplace_subtract*/
+ (binaryfunc)array_inplace_multiply, /*inplace_multiply*/
+ (binaryfunc)array_inplace_divide, /*inplace_divide*/
+ (binaryfunc)array_inplace_remainder, /*inplace_remainder*/
+ (ternaryfunc)array_inplace_power, /*inplace_power*/
+ (binaryfunc)array_inplace_left_shift, /*inplace_lshift*/
+ (binaryfunc)array_inplace_right_shift, /*inplace_rshift*/
+ (binaryfunc)array_inplace_bitwise_and, /*inplace_and*/
+ (binaryfunc)array_inplace_bitwise_xor, /*inplace_xor*/
+ (binaryfunc)array_inplace_bitwise_or, /*inplace_or*/
+
+ (binaryfunc)array_floor_divide, /*nb_floor_divide*/
+ (binaryfunc)array_true_divide, /*nb_true_divide*/
+ (binaryfunc)array_inplace_floor_divide, /*nb_inplace_floor_divide*/
+ (binaryfunc)array_inplace_true_divide, /*nb_inplace_true_divide*/
+
+};
+
+/****************** End of Number Protocol *******************************/
+
+
+/*************************************************************************
+ **************** Implement Sequence Protocol **************************
+ *************************************************************************/
+
+/* Some of this is repeated in the array_as_mapping protocol. But
+ we fill it in here so that PySequence_XXXX calls work as expected
+*/
+
+
+/* sq_slice: a[ilow:ihigh] as a view sharing self's data.
+   Fix vs. original: the result of PyArray_NewFromDescr was
+   dereferenced (r->base) without checking for NULL. */
+static PyObject *
+array_slice(PyArrayObject *self, int ilow, int ihigh)
+{
+    PyArrayObject *r;
+    int l;
+    char *data;
+
+    if (self->nd == 0) {
+        PyErr_SetString(PyExc_ValueError, "cannot slice a scalar");
+        return NULL;
+    }
+
+    /* Translate negative indices once, then clip both to [0, l] and
+       force an empty slice when the bounds cross. */
+    l=self->dimensions[0];
+    if (ihigh < 0) ihigh += l;
+    if (ilow < 0) ilow += l;
+    if (ilow < 0) ilow = 0;
+    else if (ilow > l) ilow = l;
+    if (ihigh < 0) ihigh = 0;
+    else if (ihigh > l) ihigh = l;
+    if (ihigh < ilow) ihigh = ilow;
+
+    if (ihigh != ilow) {
+        data = index2ptr(self, ilow);
+        if (data == NULL) return NULL;
+    } else {
+        data = self->data;
+    }
+
+    /* Temporarily shrink dimension 0 so the new view is created with
+       the sliced length; restore it immediately afterwards. */
+    self->dimensions[0] = ihigh-ilow;
+    Py_INCREF(self->descr);
+    r = (PyArrayObject *) \
+        PyArray_NewFromDescr(self->ob_type, self->descr,
+                             self->nd, self->dimensions,
+                             self->strides, data,
+                             self->flags, (PyObject *)self);
+    self->dimensions[0] = l;
+    if (r == NULL) return NULL;   /* was dereferenced unchecked */
+
+    r->base = (PyObject *)self;
+    Py_INCREF(self);
+    PyArray_UpdateFlags(r, UPDATE_ALL_FLAGS);
+    return (PyObject *)r;
+}
+
+
+/* sq_ass_slice: a[ilow:ihigh] = v, implemented by taking the slice as
+   a view and copying v into it.  Returns 0 on success, -1 on error. */
+static int
+array_ass_slice(PyArrayObject *self, int ilow, int ihigh, PyObject *v) {
+    PyArrayObject *view;
+    int status;
+
+    if (v == NULL) {
+        PyErr_SetString(PyExc_ValueError,
+                        "cannot delete array elements");
+        return -1;
+    }
+    if (!PyArray_ISWRITEABLE(self)) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "array is not writeable");
+        return -1;
+    }
+    view = (PyArrayObject *)array_slice(self, ilow, ihigh);
+    if (view == NULL)
+        return -1;
+    status = PyArray_CopyObject(view, v);
+    Py_DECREF(view);
+    return status;
+}
+
+/* sq_contains: el in self, computed as (self == el).any(). */
+static int
+array_contains(PyArrayObject *self, PyObject *el)
+{
+    PyObject *eq;
+    int found;
+
+    eq = PyObject_RichCompare((PyObject *)self, el, Py_EQ);
+    eq = PyArray_EnsureArray(eq);
+    if (eq == NULL)
+        return -1;
+    found = array_any_nonzero((PyArrayObject *)eq);
+    Py_DECREF(eq);
+    return found;
+}
+
+
+/* Sequence protocol table.  Concat/repeat are deliberately NULL:
+   + and * are handled through the number protocol instead. */
+static PySequenceMethods array_as_sequence = {
+ (inquiry)array_length, /*sq_length*/
+ (binaryfunc)NULL, /* sq_concat is handled by nb_add*/
+ (intargfunc)NULL, /* sq_repeat is handled nb_multiply*/
+ (intargfunc)array_item_nice, /*sq_item*/
+ (intintargfunc)array_slice, /*sq_slice*/
+ (intobjargproc)array_ass_item, /*sq_ass_item*/
+ (intintobjargproc)array_ass_slice, /*sq_ass_slice*/
+ (objobjproc) array_contains, /* sq_contains */
+ (binaryfunc) NULL, /* sq_inplace_concat */
+ (intargfunc) NULL /* sq_inplace_repeat */
+};
+
+
+/****************** End of Sequence Protocol ****************************/
+
+
+/* Recursively render the array's data into *string as a nested
+   "[a, b, ...]" text, repr-ing each element via descr->f->getitem.
+   *n is the number of bytes written so far, *max_n the current buffer
+   capacity; CHECK_MEMORY doubles the buffer when fewer than 16 spare
+   bytes remain.  Returns 0 on success, -1 on error.
+   NOTE(review): the _pya_realloc result is not checked for NULL —
+   an allocation failure here would be dereferenced. */
+static int
+dump_data(char **string, int *n, int *max_n, char *data, int nd,
+ intp *dimensions, intp *strides, PyArrayObject* self)
+{
+ PyArray_Descr *descr=self->descr;
+ PyObject *op, *sp;
+ char *ostring;
+ int i, N;
+
+#define CHECK_MEMORY if (*n >= *max_n-16) { *max_n *= 2; \
+ *string = (char *)_pya_realloc(*string, *max_n); }
+
+ if (nd == 0) {
+ /* leaf: append repr(element) */
+ if ((op = descr->f->getitem(data, self)) == NULL) return -1;
+ sp = PyObject_Repr(op);
+ if (sp == NULL) {Py_DECREF(op); return -1;}
+ ostring = PyString_AsString(sp);
+ N = PyString_Size(sp)*sizeof(char);
+ *n += N;
+ CHECK_MEMORY
+ memmove(*string+(*n-N), ostring, N);
+ Py_DECREF(sp);
+ Py_DECREF(op);
+ return 0;
+ } else {
+ /* recurse over the leading axis, wrapping in brackets */
+ CHECK_MEMORY
+ (*string)[*n] = '[';
+ *n += 1;
+ for(i=0; i<dimensions[0]; i++) {
+ if (dump_data(string, n, max_n,
+ data+(*strides)*i,
+ nd-1, dimensions+1,
+ strides+1, self) < 0)
+ return -1;
+ CHECK_MEMORY
+ if (i<dimensions[0]-1) {
+ (*string)[*n] = ',';
+ (*string)[*n+1] = ' ';
+ *n += 2;
+ }
+ }
+ CHECK_MEMORY
+ (*string)[*n] = ']'; *n += 1;
+ return 0;
+ }
+
+#undef CHECK_MEMORY
+}
+
+/* Fallback repr used when no Python-level repr callback is installed:
+   builds "array([...], 'T')" by hand via dump_data.  The initial
+   buffer guess is 4 bytes per data byte plus room for "array(". */
+static PyObject *
+array_repr_builtin(PyArrayObject *self)
+{
+ PyObject *ret;
+ char *string;
+ int n, max_n;
+
+ max_n = PyArray_NBYTES(self)*4*sizeof(char) + 7;
+
+ if ((string = (char *)_pya_malloc(max_n)) == NULL) {
+ PyErr_SetString(PyExc_MemoryError, "out of memory");
+ return NULL;
+ }
+
+ /* n counts bytes written; 6 == strlen("array(") */
+ n = 6;
+ sprintf(string, "array(");
+
+ if (dump_data(&string, &n, &max_n, self->data,
+ self->nd, self->dimensions,
+ self->strides, self) < 0) {
+ _pya_free(string); return NULL;
+ }
+
+ if (PyArray_ISEXTENDED(self)) {
+ /* flexible types append the element size after the type char */
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%d", self->descr->elsize);
+ sprintf(string+n, ", '%c%s')", self->descr->type, buf);
+ ret = PyString_FromStringAndSize(string, n+6+strlen(buf));
+ }
+ else {
+ /* 6 == strlen(", 'X')") for the type-code suffix */
+ sprintf(string+n, ", '%c')", self->descr->type);
+ ret = PyString_FromStringAndSize(string, n+6);
+ }
+
+
+ _pya_free(string);
+ return ret;
+}
+
+/* Optional Python-level callbacks used by array_str/array_repr below;
+   NULL means "fall back to the builtin C formatter". */
+static PyObject *PyArray_StrFunction=NULL;
+static PyObject *PyArray_ReprFunction=NULL;
+
+/*OBJECT_API
+ Set the array print function to be a Python function.
+*/
+static void
+PyArray_SetStringFunction(PyObject *op, int repr)
+{
+	/* Select which callback slot (repr or str) to update. */
+	PyObject **slot = repr ? &PyArray_ReprFunction : &PyArray_StrFunction;
+
+	Py_XINCREF(op);          /* keep the new callback alive (op may be NULL) */
+	Py_XDECREF(*slot);       /* drop the previously installed callback */
+	*slot = op;
+}
+
+/* tp_repr: use the installed Python callback if any, else the builtin
+   C formatter. */
+static PyObject *
+array_repr(PyArrayObject *self)
+{
+    PyObject *result;
+
+    if (PyArray_ReprFunction != NULL) {
+        PyObject *args = Py_BuildValue("(O)", self);
+        result = PyEval_CallObject(PyArray_ReprFunction, args);
+        Py_DECREF(args);
+    }
+    else {
+        result = array_repr_builtin(self);
+    }
+    return result;
+}
+
+/* tp_str: use the installed Python callback if any, else fall back to
+   repr. */
+static PyObject *
+array_str(PyArrayObject *self)
+{
+    PyObject *result;
+
+    if (PyArray_StrFunction != NULL) {
+        PyObject *args = Py_BuildValue("(O)", self);
+        result = PyEval_CallObject(PyArray_StrFunction, args);
+        Py_DECREF(args);
+    }
+    else {
+        result = array_repr(self);
+    }
+    return result;
+}
+
+/* tp_richcompare: dispatch to the registered comparison ufuncs.
+   For ==/!= the other operand is first coerced to an array; when that
+   coercion (or the comparison itself) fails, the error is swallowed
+   and False/True is returned instead of propagating, preserving old
+   "incomparable objects are unequal" semantics. */
+static PyObject *
+array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
+{
+ PyObject *array_other, *result;
+
+ switch (cmp_op)
+ {
+ case Py_LT:
+ return PyArray_GenericBinaryFunction(self, other,
+ n_ops.less);
+ case Py_LE:
+ return PyArray_GenericBinaryFunction(self, other,
+ n_ops.less_equal);
+ case Py_EQ:
+ /* Try to convert other to an array */
+ array_other = PyArray_FromObject(other,
+ PyArray_NOTYPE, 0, 0);
+ /* If not successful, then return the integer
+ object 0. This fixes code that used to
+ allow equality comparisons between arrays
+ and other objects which would give a result
+ of 0
+ */
+ if ((array_other == NULL) || \
+ (array_other == Py_None)) {
+ Py_XDECREF(array_other);
+ PyErr_Clear();
+ Py_INCREF(Py_False);
+ return Py_False;
+ }
+ result = PyArray_GenericBinaryFunction(self,
+ array_other,
+ n_ops.equal);
+ /* If the comparison results in NULL, then the
+ two array objects can not be compared together so
+ return zero
+ */
+ Py_DECREF(array_other);
+ if (result == NULL) {
+ PyErr_Clear();
+ Py_INCREF(Py_False);
+ return Py_False;
+ }
+ return result;
+ case Py_NE:
+ /* Try to convert other to an array */
+ array_other = PyArray_FromObject(other,
+ PyArray_NOTYPE, 0, 0);
+ /* If not successful, then objects cannot be
+ compared and cannot be equal, therefore,
+ return True;
+ */
+ if ((array_other == NULL) || \
+ (array_other == Py_None)) {
+ Py_XDECREF(array_other);
+ PyErr_Clear();
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+ result = PyArray_GenericBinaryFunction(self,
+ array_other,
+ n_ops.not_equal);
+ Py_DECREF(array_other);
+ if (result == NULL) {
+ PyErr_Clear();
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+ return result;
+ case Py_GT:
+ return PyArray_GenericBinaryFunction(self, other,
+ n_ops.greater);
+ case Py_GE:
+ return PyArray_GenericBinaryFunction(self,
+ other,
+ n_ops.greater_equal);
+ }
+ /* unreachable for the six standard cmp_op values */
+ return NULL;
+}
+
+/* Normalize *axis for a reduction over arr and return a NEW reference
+   to the array to operate on:
+     - axis >= MAX_DIMS (the "no axis" sentinel) or 0-d arr: flatten
+       via PyArray_Ravel and reset *axis to 0;
+     - otherwise return arr itself (optionally re-converted with the
+       given requirements flags), with a negative *axis wrapped and
+       range-checked against arr->nd.
+   Returns NULL with an exception set on error. */
+static PyObject *
+_check_axis(PyArrayObject *arr, int *axis, int flags)
+{
+ PyObject *temp;
+ int n = arr->nd;
+
+ if ((*axis >= MAX_DIMS) || (n==0)) {
+ temp = PyArray_Ravel(arr,0);
+ *axis = 0;
+ return temp;
+ }
+ else {
+ if (flags) {
+ temp = PyArray_FromAny((PyObject *)arr, NULL,
+ 0, 0, flags);
+ if (temp == NULL) return NULL;
+ }
+ else {
+ Py_INCREF(arr);
+ temp = (PyObject *)arr;
+ }
+ }
+ if (*axis < 0) *axis += n;
+ if ((*axis < 0) || (*axis >= n)) {
+ PyErr_Format(PyExc_ValueError,
+ "axis(=%d) out of bounds", *axis);
+ Py_DECREF(temp);
+ return NULL;
+ }
+ return temp;
+}
+
+#include "arraymethods.c"
+
+/* Lifted from numarray */
+/* Build a Python tuple of ints/longs from len intp values.
+   Returns NULL (with the partial tuple released) on failure. */
+static PyObject *
+PyArray_IntTupleFromIntp(int len, intp *vals)
+{
+    int k;
+    PyObject *tup = PyTuple_New(len);
+
+    if (tup == NULL)
+        return NULL;
+    for (k = 0; k < len; k++) {
+        /* Pick the smallest Python integer type that can hold an intp. */
+#if SIZEOF_INTP <= SIZEOF_LONG
+        PyObject *item = PyInt_FromLong((long) vals[k]);
+#else
+        PyObject *item = PyLong_FromLongLong((longlong) vals[k]);
+#endif
+        if (item == NULL) {
+            Py_DECREF(tup);
+            return NULL;
+        }
+        PyTuple_SET_ITEM(tup, k, item);
+    }
+    return tup;
+}
+
+/* Returns the number of dimensions or -1 if an error occurred */
+/* vals must be large enough to hold maxvals */
+/*MULTIARRAY_API
+ PyArray_IntpFromSequence
+*/
+/* Convert seq (a Python sequence or a single number) into the intp
+   vector vals.  A non-sequence that converts to an int is treated as
+   a length-1 result.  At most maxvals entries are written, but the
+   full sequence length is still returned -- callers compare the
+   return value against maxvals themselves.  Returns the number of
+   dimensions, or -1 with an exception set on error. */
+static int
+PyArray_IntpFromSequence(PyObject *seq, intp *vals, int maxvals)
+{
+        int nd, i;
+        PyObject *op;
+
+        /* Check to see if sequence is a single integer first.
+           or, can be made into one */
+        if ((nd=PySequence_Length(seq)) == -1) {
+                if (PyErr_Occurred()) PyErr_Clear();
+                if (!(op = PyNumber_Int(seq))) return -1;
+                nd = 1;
+                /* NOTE(review): PyInt_AsLong may truncate values wider
+                   than a C long on LP64/LLP64 splits -- confirm range */
+                vals[0] = (intp ) PyInt_AsLong(op);
+                Py_DECREF(op);
+        } else {
+                for(i=0; i < MIN(nd,maxvals); i++) {
+                        op = PySequence_GetItem(seq, i);
+                        if (op == NULL) return -1;
+                        vals[i]=(intp )PyInt_AsLong(op);
+                        Py_DECREF(op);
+                        if(PyErr_Occurred()) return -1;
+                }
+        }
+        return nd;
+}
+
+
+/* Check whether the given array is stored contiguously (row-wise) in
+   memory. */
+/* Walks the strides from the last (fastest-varying) axis outward,
+   requiring each stride to equal the running product of element size
+   and trailing dimensions.  0-d arrays and arrays containing a
+   zero-length dimension are contiguous by definition.  Returns 1 or 0. */
+static int
+_IsContiguous(PyArrayObject *ap)
+{
+        intp sd;
+        int i;
+
+        if (ap->nd == 0) return 1;
+        sd = ap->descr->elsize;
+        if (ap->nd == 1) return sd == ap->strides[0];
+        for (i = ap->nd-1; i >= 0; --i) {
+                /* contiguous by definition */
+                if (ap->dimensions[i] == 0) return 1;
+
+                if (ap->strides[i] != sd) return 0;
+                sd *= ap->dimensions[i];
+        }
+        return 1;
+}
+
+
+/* Mirror of _IsContiguous for Fortran (column-major) order: walks the
+   strides from the first (fastest-varying in Fortran order) axis
+   forward.  Returns 1 if the array is Fortran-contiguous, else 0. */
+static int
+_IsFortranContiguous(PyArrayObject *ap)
+{
+        intp sd;
+        int i;
+
+        if (ap->nd == 0) return 1;
+        sd = ap->descr->elsize;
+        if (ap->nd == 1) return sd == ap->strides[0];
+        for (i=0; i< ap->nd; ++i) {
+                /* contiguous by definition */
+                if (ap->dimensions[i] == 0) return 1;
+
+                if (ap->strides[i] != sd) return 0;
+                sd *= ap->dimensions[i];
+        }
+        return 1;
+}
+
+/* Return 1 if every element of ap starts on a properly aligned
+   address: the data pointer and all strides must be multiples of the
+   descriptor's alignment.  STRING and VOID arrays (and any type with
+   alignment 1) are always considered aligned. */
+static int
+_IsAligned(PyArrayObject *ap)
+{
+        int i, alignment, aligned=1;
+        intp ptr;
+        int type = ap->descr->type_num;
+
+        if ((type == PyArray_STRING) || (type == PyArray_VOID))
+                return 1;
+
+        alignment = ap->descr->alignment;
+        if (alignment == 1) return 1;
+
+        /* check the base pointer, then every stride */
+        ptr = (intp) ap->data;
+        aligned = (ptr % alignment) == 0;
+        for (i=0; i <ap->nd; i++)
+                aligned &= ((ap->strides[i] % alignment) == 0);
+        return aligned != 0;
+}
+
+/* Decide whether ap's memory may be written to.  An array that owns
+   its data (or has no base) is writeable.  Otherwise the chain of
+   base objects is followed to its end: a data-owning array base
+   answers with its own WRITEABLE flag, a string base is accepted
+   (pickle support), and anything else must expose a writeable
+   single-segment buffer. */
+static Bool
+_IsWriteable(PyArrayObject *ap)
+{
+        PyObject *base=ap->base;
+        void *dummy;
+        int n;
+
+        /* If we own our own data, then no-problem */
+        if ((base == NULL) || (ap->flags & OWN_DATA)) return TRUE;
+
+        /* Get to the final base object
+           If it is a writeable array, then return TRUE
+           If we can find an array object
+           or a writeable buffer object as the final base object
+           or a string object (for pickling support memory savings).
+           - this last could be removed if a proper pickleable
+           buffer was added to Python.
+        */
+
+        while(PyArray_Check(base)) {
+                if (PyArray_CHKFLAGS(base, OWN_DATA))
+                        return (Bool) (PyArray_ISWRITEABLE(base));
+                base = PyArray_BASE(base);
+        }
+
+        /* here so pickle support works seamlessly
+           and unpickled array can be set and reset writeable
+           -- could be abused -- */
+        /* (the macro expansion supplies the parentheses for this if) */
+        if PyString_Check(base) return TRUE;
+
+        if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0)
+                return FALSE;
+
+        return TRUE;
+}
+
+
+/*OBJECT_API
+ Update Several Flags at once.
+*/
+/* Recompute the flag bits named in flagmask (FORTRAN, CONTIGUOUS,
+   ALIGNED, WRITEABLE) from the array's current shape/strides/data.
+   For nd > 1, CONTIGUOUS and FORTRAN are kept mutually exclusive;
+   WRITEABLE is only recomputed when explicitly requested since it is
+   not part of UPDATE_ALL_FLAGS. */
+static void
+PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
+{
+
+        if (flagmask & FORTRAN) {
+                if (_IsFortranContiguous(ret)) {
+                        ret->flags |= FORTRAN;
+                        if (ret->nd > 1) ret->flags &= ~CONTIGUOUS;
+                }
+                else ret->flags &= ~FORTRAN;
+        }
+        if (flagmask & CONTIGUOUS) {
+                if (_IsContiguous(ret)) {
+                        ret->flags |= CONTIGUOUS;
+                        if (ret->nd > 1) ret->flags &= ~FORTRAN;
+                }
+                else ret->flags &= ~CONTIGUOUS;
+        }
+        if (flagmask & ALIGNED) {
+                if (_IsAligned(ret)) ret->flags |= ALIGNED;
+                else ret->flags &= ~ALIGNED;
+        }
+        /* This is not checked by default WRITEABLE is not part of UPDATE_ALL_FLAGS */
+        if (flagmask & WRITEABLE) {
+                if (_IsWriteable(ret)) ret->flags |= WRITEABLE;
+                else ret->flags &= ~WRITEABLE;
+        }
+        return;
+}
+
+/* This routine checks to see if newstrides (of length nd) will not
+ walk outside of the memory implied by a single segment array of the provided
+ dimensions and element size. If numbytes is 0 it will be calculated from
+ the provided shape and element size.
+*/
+/*OBJECT_API*/
+/* Verify that for every axis the farthest element reachable through
+   newstrides (stride * (dim-1) plus one element) stays inside the
+   numbytes-sized buffer.  numbytes == 0 means "compute it from dims
+   and elsize".  Returns TRUE when the strides are safe.
+   NOTE(review): negative strides are not specially handled here --
+   the product with (dims[i]-1) simply goes negative and passes. */
+static Bool
+PyArray_CheckStrides(int elsize, int nd, intp numbytes,
+                     intp *dims, intp *newstrides)
+{
+        int i;
+
+        if (numbytes == 0)
+                numbytes = PyArray_MultiplyList(dims, nd) * elsize;
+
+        for (i=0; i<nd; i++) {
+                if (newstrides[i]*(dims[i]-1)+elsize > numbytes) {
+                        return FALSE;
+                }
+        }
+        return TRUE;
+
+}
+
+
+/* This is the main array creation routine. */
+
+/* Flags argument has multiple related meanings
+ depending on data and strides:
+
+ If data is given, then flags is flags associated with data.
+ If strides is not given, then a contiguous strides array will be created
+ and the CONTIGUOUS bit will be set. If the flags argument
+ has the FORTRAN bit set, then a FORTRAN-style strides array will be
+ created (and of course the FORTRAN flag bit will be set).
+
+ If data is not given but created here, then flags will be DEFAULT_FLAGS
+ and a non-zero flags argument can be used to indicate a FORTRAN style
+ array is desired.
+*/
+
+/* Fill strides with contiguous strides for the given dims/itemsize.
+   A Fortran-order layout is produced when inflag has FORTRAN set but
+   not CONTIGUOUS; otherwise C order.  Zero-length dimensions are
+   treated as length 1 for the stride product.  *objflags gets the
+   matching CONTIGUOUS/FORTRAN bits (both set for nd <= 1).  Returns
+   the total buffer size in bytes. */
+static intp
+_array_fill_strides(intp *strides, intp *dims, int nd, intp itemsize,
+                    int inflag, int *objflags)
+{
+        int i;
+        /* Only make Fortran strides if not contiguous as well */
+        if ((inflag & FORTRAN) && !(inflag & CONTIGUOUS)) {
+                for (i=0; i<nd; i++) {
+                        strides[i] = itemsize;
+                        itemsize *= dims[i] ? dims[i] : 1;
+                }
+                *objflags |= FORTRAN;
+                if (nd > 1) *objflags &= ~CONTIGUOUS;
+                else *objflags |= CONTIGUOUS;
+        }
+        else {
+                for (i=nd-1;i>=0;i--) {
+                        strides[i] = itemsize;
+                        itemsize *= dims[i] ? dims[i] : 1;
+                }
+                *objflags |= CONTIGUOUS;
+                if (nd > 1) *objflags &= ~FORTRAN;
+                else *objflags |= FORTRAN;
+        }
+        return itemsize;
+}
+
+/*OBJECT_API
+ Generic new array creation routine.
+*/
+/* Convenience wrapper around PyArray_NewFromDescr that builds the
+   descriptor from type_num.  For flexible types (elsize == 0) a
+   positive itemsize must be supplied and is patched into a private
+   copy of the descriptor.  Returns a new array or NULL on error. */
+static PyObject *
+PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num,
+            intp *strides, void *data, int itemsize, int flags,
+            PyObject *obj)
+{
+        PyArray_Descr *descr;
+        PyObject *new;
+
+        descr = PyArray_DescrFromType(type_num);
+        if (descr == NULL) return NULL;
+        if (descr->elsize == 0) {
+                if (itemsize < 1) {
+                        PyErr_SetString(PyExc_ValueError,
+                                        "data type must provide an itemsize");
+                        Py_DECREF(descr);
+                        return NULL;
+                }
+                /* copy-on-write the descriptor before mutating it */
+                PyArray_DESCR_REPLACE(descr);
+                descr->elsize = itemsize;
+        }
+        /* NewFromDescr steals the descr reference (even on failure) */
+        new = PyArray_NewFromDescr(subtype, descr, nd, dims, strides,
+                                   data, flags, obj);
+        return new;
+}
+
+/* Change a sub-array field to the base descriptor */
+/* Replace *des (a descriptor with a subarray) by its base descriptor
+   and append the subarray's shape to newdims (and matching C-order
+   strides to newstrides when non-NULL) starting at position oldnd.
+   The caller's arrays must have room for the extra dimensions.
+   Returns the new total number of dimensions; *des gets a new
+   reference while the old descriptor is released. */
+static int
+_update_descr_and_dimensions(PyArray_Descr **des, intp *newdims,
+                             intp *newstrides, int oldnd)
+{
+        PyArray_Descr *old;
+        int newnd;
+        int numnew;
+        intp *mydim;
+        int i;
+
+        old = *des;
+        *des = old->subarray->base;
+
+        mydim = newdims + oldnd;
+        /* shape may be a tuple or a single integer */
+        if (PyTuple_Check(old->subarray->shape)) {
+                numnew = PyTuple_GET_SIZE(old->subarray->shape);
+
+                for (i=0; i<numnew; i++) {
+                        mydim[i] = (intp) PyInt_AsLong \
+                                (PyTuple_GET_ITEM(old->subarray->shape, i));
+                }
+        }
+        else {
+                numnew = 1;
+                mydim[0] = (intp) PyInt_AsLong(old->subarray->shape);
+        }
+
+        newnd = oldnd + numnew;
+
+        if (newstrides) {
+                intp tempsize;
+                intp *mystrides;
+                mystrides = newstrides + oldnd;
+                /* Make new strides */
+                tempsize = (*des)->elsize;
+                for (i=numnew-1; i>=0; i--) {
+                        mystrides[i] = tempsize;
+                        tempsize *= mydim[i] ? mydim[i] : 1;
+                }
+        }
+        Py_INCREF(*des);
+        Py_DECREF(old);
+        return newnd;
+}
+
+
+/* steals a reference to descr (even on failure) */
+/*OBJECT_API
+ Generic new array creation routine.
+*/
+/* Core array constructor.  Steals a reference to descr even on
+   failure.  Handles sub-array descriptors by expanding them into
+   trailing dimensions and recursing; validates nd and dims; allocates
+   the dimensions/strides block and (when data == NULL) the data
+   buffer; finally invokes __array_finalize__ on subtypes when obj is
+   given.  Returns the new array object or NULL with an exception
+   set. */
+static PyObject *
+PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
+                     intp *dims, intp *strides, void *data,
+                     int flags, PyObject *obj)
+{
+        PyArrayObject *self;
+        register int i;
+        intp sd;
+
+        /* A sub-array descriptor becomes extra trailing dimensions on
+           the base descriptor; restart with the expanded shape. */
+        if (descr->subarray) {
+                PyObject *ret;
+                intp newdims[2*MAX_DIMS];
+                intp *newstrides=NULL;
+                memcpy(newdims, dims, nd*sizeof(intp));
+                if (strides) {
+                        newstrides = newdims + MAX_DIMS;
+                        memcpy(newstrides, strides, nd*sizeof(intp));
+                }
+                nd =_update_descr_and_dimensions(&descr, newdims,
+                                                 newstrides, nd);
+                ret = PyArray_NewFromDescr(subtype, descr, nd, newdims,
+                                           newstrides,
+                                           data, flags, obj);
+                return ret;
+        }
+
+        if (nd < 0) {
+                PyErr_SetString(PyExc_ValueError,
+                                "number of dimensions must be >=0");
+                Py_DECREF(descr);
+                return NULL;
+        }
+        if (nd > MAX_DIMS) {
+                PyErr_Format(PyExc_ValueError,
+                             "maximum number of dimensions is %d", MAX_DIMS);
+                Py_DECREF(descr);
+                return NULL;
+        }
+
+        /* Check dimensions */
+        for (i=nd-1;i>=0;i--) {
+                if (dims[i] < 0) {
+                        PyErr_SetString(PyExc_ValueError,
+                                        "negative dimensions " \
+                                        "are not allowed");
+                        Py_DECREF(descr);
+                        return NULL;
+                }
+        }
+
+        self = (PyArrayObject *) subtype->tp_alloc(subtype, 0);
+        if (self == NULL) {
+                Py_DECREF(descr);
+                return NULL;
+        }
+        self->dimensions = NULL;
+        if (data == NULL) { /* strides is NULL too */
+                self->flags = DEFAULT_FLAGS;
+                if (flags) {
+                        self->flags |= FORTRAN;
+                        if (nd > 1) self->flags &= ~CONTIGUOUS;
+                        flags = FORTRAN;
+                }
+        }
+        else self->flags = (flags & ~UPDATEIFCOPY);
+
+        sd = descr->elsize;
+
+        if (nd > 0) {
+                /* one allocation holds both dimensions and strides */
+                self->dimensions = PyDimMem_NEW(2*nd);
+                if (self->dimensions == NULL) {
+                        PyErr_NoMemory();
+                        goto fail;
+                }
+                self->strides = self->dimensions + nd;
+                memcpy(self->dimensions, dims, sizeof(intp)*nd);
+                if (strides == NULL) { /* fill it in */
+                        sd = _array_fill_strides(self->strides, dims, nd, sd,
+                                                 flags, &(self->flags));
+                }
+                else {
+                        if (data == NULL) {
+                                PyErr_SetString(PyExc_ValueError,
+                                                "if 'strides' is given in " \
+                                                "array creation, data must " \
+                                                "be given too");
+                                PyDimMem_FREE(self->dimensions);
+                                self->ob_type->tp_free((PyObject *)self);
+                                return NULL;
+                        }
+                        memcpy(self->strides, strides, sizeof(intp)*nd);
+                }
+        }
+
+        self->descr = descr;
+
+
+        if (data == NULL) {
+
+                /* Allocate something even for zero-space arrays
+                   e.g. shape=(0,) -- otherwise buffer exposure
+                   (a.data) doesn't work as it should. */
+
+                if (sd==0) sd = sizeof(intp);
+
+                if ((data = PyDataMem_NEW(sd))==NULL) {
+                        PyErr_NoMemory();
+                        goto fail;
+                }
+                self->flags |= OWN_DATA;
+
+                /* It is bad to have uninitialized OBJECT pointers */
+                if (descr == &OBJECT_Descr) {
+                        memset(data, 0, sd);
+                }
+        }
+        else {
+                self->flags &= ~OWN_DATA;  /* If data is passed in,
+                                              this object won't own it
+                                              by default.
+                                              Caller must arrange for
+                                              this to be reset if truly
+                                              desired */
+        }
+        self->data = data;
+        self->nd = nd;
+        self->base = (PyObject *)NULL;
+        self->weakreflist = (PyObject *)NULL;
+
+        /* call the __array_finalize__
+           method if a subtype and some object passed in */
+        if ((obj != NULL) && (subtype != &PyArray_Type) &&
+            (subtype != &PyBigArray_Type)) {
+                PyObject *res;
+                if (!(self->flags & OWNDATA)) { /* did not allocate own data */
+                        /* update flags before calling back into
+                           Python */
+                        PyArray_UpdateFlags(self, UPDATE_ALL_FLAGS);
+                }
+                res = PyObject_CallMethod((PyObject *)self,
+                                          "__array_finalize__",
+                                          "O", obj);
+                if (res == NULL) {
+                        /* BUGFIX: the original freed the object struct
+                           itself (PyDataMem_FREE(self)) which leaked
+                           the data buffer and freed live object
+                           memory; release the data pointer instead. */
+                        if (self->flags & OWNDATA)
+                                PyDataMem_FREE(self->data);
+                        PyDimMem_FREE(self->dimensions);
+                        /* theoretically should free self
+                           but this causes segmentation faults...
+                           Not sure why */
+                        return NULL;
+                }
+                else Py_DECREF(res);
+        }
+
+        return (PyObject *)self;
+
+ fail:
+        Py_DECREF(descr);
+        PyDimMem_FREE(self->dimensions);
+        subtype->tp_free((PyObject *)self);
+        return NULL;
+
+}
+
+
+
+/*OBJECT_API
+ Resize (reallocate data). Only works if nothing else is referencing
+ this array and it is contiguous.
+*/
+/* Reallocate self's data to the shape in newshape.  Requires a
+   contiguous array; when the element count changes the array must own
+   its data, have no base, no weak references, and a refcount <= 2.
+   Newly exposed memory is filled with zeros (Python ints of 0 for
+   OBJECT arrays).  The dimensions/strides block is reallocated when
+   the rank changes, and strides are rebuilt in C order.  Returns
+   None, or NULL with an exception set. */
+static PyObject *
+PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape)
+{
+        intp oldsize, newsize;
+        int new_nd=newshape->len, k, n, elsize;
+        int refcnt;
+        intp* new_dimensions=newshape->ptr;
+        intp new_strides[MAX_DIMS];
+        intp sd;
+        intp *dimptr;
+        char *new_data;
+
+        if (!PyArray_ISCONTIGUOUS(self)) {
+                PyErr_SetString(PyExc_ValueError,
+                                "resize only works on contiguous arrays");
+                return NULL;
+        }
+
+
+        newsize = PyArray_MultiplyList(new_dimensions, new_nd);
+
+        if (newsize == 0) {
+                PyErr_SetString(PyExc_ValueError,
+                                "newsize is zero; cannot delete an array "\
+                                "in this way");
+                return NULL;
+        }
+        oldsize = PyArray_SIZE(self);
+
+        if (oldsize != newsize) {
+                if (!(self->flags & OWN_DATA)) {
+                        PyErr_SetString(PyExc_ValueError,
+                                        "cannot resize this array: " \
+                                        "it does not own its data");
+                        return NULL;
+                }
+
+                /* refuse when anything else could be holding a view */
+                refcnt = REFCOUNT(self);
+                if ((refcnt > 2) || (self->base != NULL) || \
+                    (self->weakreflist != NULL)) {
+                        PyErr_SetString(PyExc_ValueError,
+                                        "cannot resize an array that has "\
+                                        "been referenced or is referencing\n"\
+                                        "another array in this way. Use the "\
+                                        "resize function");
+                        return NULL;
+                }
+
+                /* Reallocate space if needed */
+                new_data = PyDataMem_RENEW(self->data,
+                                           newsize*(self->descr->elsize));
+                if (new_data == NULL) {
+                        PyErr_SetString(PyExc_MemoryError,
+                                        "cannot allocate memory for array");
+                        return NULL;
+                }
+                self->data = new_data;
+        }
+
+        if ((newsize > oldsize) && PyArray_ISWRITEABLE(self)) {
+                /* Fill new memory with zeros */
+                elsize = self->descr->elsize;
+                if ((PyArray_TYPE(self) == PyArray_OBJECT)) {
+                        /* object arrays get real int(0) references */
+                        PyObject *zero = PyInt_FromLong(0);
+                        PyObject **optr;
+                        optr = ((PyObject **)self->data) + oldsize;
+                        n = newsize - oldsize;
+                        for (k=0; k<n; k++) {
+                                Py_INCREF(zero);
+                                *optr++ = zero;
+                        }
+                        Py_DECREF(zero);
+                }
+                else{
+                        memset(self->data+oldsize*elsize, 0,
+                               (newsize-oldsize)*elsize);
+                }
+        }
+
+        if (self->nd != new_nd) { /* Different number of dimensions. */
+                self->nd = new_nd;
+
+                /* Need new dimensions and strides arrays */
+                dimptr = PyDimMem_RENEW(self->dimensions, 2*new_nd);
+                if (dimptr == NULL) {
+                        PyErr_SetString(PyExc_MemoryError,
+                                        "cannot allocate memory for array " \
+                                        "(array may be corrupted)");
+                        return NULL;
+                }
+                self->dimensions = dimptr;
+                self->strides = dimptr + new_nd;
+        }
+
+        /* make new_strides variable */
+        sd = (intp) self->descr->elsize;
+        sd = _array_fill_strides(new_strides, new_dimensions, new_nd, sd,
+                                 0, &(self->flags));
+
+
+        memmove(self->dimensions, new_dimensions, new_nd*sizeof(intp));
+        memmove(self->strides, new_strides, new_nd*sizeof(intp));
+
+        Py_INCREF(Py_None);
+        return Py_None;
+
+}
+
+
+/* Assumes contiguous */
+/*OBJECT_API*/
+/* Fill every slot of a (contiguous) OBJECT array with obj, taking a
+   new reference per slot; obj == NULL stores NULL pointers.
+   NOTE(review): existing slot contents are overwritten without a
+   DECREF -- callers appear to use this only on fresh arrays. */
+static void
+PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj)
+{
+        PyObject **optr;
+        intp i,n;
+        optr = (PyObject **)(arr->data);
+        n = PyArray_SIZE(arr);
+        if (obj == NULL) {
+                for (i=0; i<n; i++) {
+                        *optr++ = NULL;
+                }
+        }
+        else {
+                for (i=0; i<n; i++) {
+                        Py_INCREF(obj);
+                        *optr++ = obj;
+                }
+        }
+}
+
+/*OBJECT_API*/
+/* Fill every element of arr with the scalar obj, converted to arr's
+   own dtype.  Uses the dtype's copyswap (honouring arr's byte order)
+   either over the raw buffer for one-segment arrays or through an
+   iterator otherwise.  Returns 0 on success, -1 on error. */
+static int
+PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
+{
+        PyObject *newarr;
+        int itemsize, swap;
+        void *fromptr;
+        PyArray_Descr *descr;
+        intp size;
+        PyArray_CopySwapFunc *copyswap;
+
+        descr = PyArray_DESCR(arr);
+        itemsize = descr->elsize;
+        /* FromAny steals this reference to descr */
+        Py_INCREF(descr);
+        newarr = PyArray_FromAny(obj, descr, 0,0, ALIGNED);
+        if (newarr == NULL) return -1;
+        fromptr = PyArray_DATA(newarr);
+        size=PyArray_SIZE(arr);
+        swap=!PyArray_ISNOTSWAPPED(arr);
+        copyswap = arr->descr->f->copyswap;
+        if (PyArray_ISONESEGMENT(arr)) {
+                char *toptr=PyArray_DATA(arr);
+                while (size--) {
+                        copyswap(toptr, fromptr, swap, itemsize);
+                        toptr += itemsize;
+                }
+        }
+        else {
+                PyArrayIterObject *iter;
+
+                iter = (PyArrayIterObject *)\
+                        PyArray_IterNew((PyObject *)arr);
+                if (iter == NULL) {
+                        Py_DECREF(newarr);
+                        return -1;
+                }
+                while(size--) {
+                        copyswap(iter->dataptr, fromptr, swap, itemsize);
+                        PyArray_ITER_NEXT(iter);
+                }
+                Py_DECREF(iter);
+        }
+        Py_DECREF(newarr);
+        return 0;
+}
+
+/* tp_new for ndarray: ndarray(shape, dtype, buffer, offset, strides,
+   fortran).  With no buffer a fresh (empty) array is allocated and
+   OBJECT arrays are filled with None.  With a buffer, offset is
+   applied, a shape of (-1,) is inferred from the buffer length, the
+   buffer size and any explicit strides are validated, and the new
+   array keeps a reference to the buffer's base object.  Returns the
+   new array or NULL with an exception set. */
+static PyObject *
+array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
+{
+        static char *kwlist[] = {"shape", "dtype", "buffer",
+                                 "offset", "strides",
+                                 "fortran", NULL};
+        PyArray_Descr *descr=NULL;
+        int type_num;
+        int itemsize;
+        PyArray_Dims dims = {NULL, 0};
+        PyArray_Dims strides = {NULL, 0};
+        PyArray_Chunk buffer;
+        longlong offset=0;
+        int fortran = 0;
+        PyArrayObject *ret;
+
+        buffer.ptr = NULL;
+        /* Usually called with shape and type
+           but can also be called with buffer, strides, and swapped info
+        */
+
+        /* For now, let's just use this to create an empty, contiguous
+           array of a specific type and shape.
+        */
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&LO&i",
+                                         kwlist, PyArray_IntpConverter,
+                                         &dims,
+                                         PyArray_DescrConverter,
+                                         &descr,
+                                         PyArray_BufferConverter,
+                                         &buffer,
+                                         &offset,
+                                         &PyArray_IntpConverter,
+                                         &strides,
+                                         &fortran))
+                goto fail;
+
+        type_num = descr->type_num;
+        itemsize = descr->elsize;
+
+        if (dims.ptr == NULL) {
+                PyErr_SetString(PyExc_ValueError, "need to give a "\
+                                "valid shape as the first argument");
+                goto fail;
+        }
+        if (buffer.ptr == NULL) {
+                /* no buffer: allocate fresh storage */
+                ret = (PyArrayObject *)\
+                        PyArray_NewFromDescr(subtype, descr,
+                                             (int)dims.len,
+                                             dims.ptr,
+                                             NULL, NULL, fortran, NULL);
+                /* descr reference was stolen even on failure */
+                if (ret == NULL) {descr=NULL;goto fail;}
+                if (type_num == PyArray_OBJECT) { /* place Py_None */
+                        PyArray_FillObjectArray(ret, Py_None);
+                }
+        }
+        else { /* buffer given -- use it */
+                buffer.len -= offset;
+                buffer.ptr += offset;
+                /* shape (-1,) means "whatever fits in the buffer" */
+                if (dims.len == 1 && dims.ptr[0] == -1) {
+                        dims.ptr[0] = buffer.len / itemsize;
+                }
+                else if (buffer.len < itemsize* \
+                         PyArray_MultiplyList(dims.ptr, dims.len)) {
+                        PyErr_SetString(PyExc_TypeError,
+                                        "buffer is too small for " \
+                                        "requested array");
+                        goto fail;
+                }
+                if (strides.ptr != NULL) {
+                        if (strides.len != dims.len) {
+                                PyErr_SetString(PyExc_ValueError,
+                                                "strides, if given, must be "\
+                                                "the same length as shape");
+                                goto fail;
+                        }
+                        if (!PyArray_CheckStrides(itemsize, strides.len,
+                                                  buffer.len,
+                                                  dims.ptr, strides.ptr)) {
+                                PyErr_SetString(PyExc_ValueError,
+                                                "strides is incompatible "\
+                                                "with shape of requested"\
+                                                "array and size of buffer");
+                                goto fail;
+                        }
+                }
+                if (type_num == PyArray_OBJECT) {
+                        PyErr_SetString(PyExc_TypeError, "cannot construct "\
+                                        "an object array from buffer data");
+                        goto fail;
+                }
+                /* get writeable and aligned */
+                if (fortran) buffer.flags |= FORTRAN;
+                ret = (PyArrayObject *)\
+                        PyArray_NewFromDescr(subtype, descr,
+                                             dims.len, dims.ptr,
+                                             strides.ptr,
+                                             (char *)buffer.ptr,
+                                             buffer.flags, NULL);
+                if (ret == NULL) {descr=NULL; goto fail;}
+                PyArray_UpdateFlags(ret, UPDATE_ALL_FLAGS);
+                /* keep the buffer's owner alive through ret->base */
+                ret->base = buffer.base;
+                Py_INCREF(buffer.base);
+        }
+
+        PyDimMem_FREE(dims.ptr);
+        if (strides.ptr) PyDimMem_FREE(strides.ptr);
+        return (PyObject *)ret;
+
+ fail:
+        Py_XDECREF(descr);
+        if (dims.ptr) PyDimMem_FREE(dims.ptr);
+        if (strides.ptr) PyDimMem_FREE(strides.ptr);
+        return NULL;
+}
+
+
+/* tp_iter slot: iterate an array as a sequence.  0-d arrays are not
+   iterable and raise TypeError. */
+static PyObject *
+array_iter(PyArrayObject *arr)
+{
+        if (arr->nd > 0)
+                return PySeqIter_New((PyObject *)arr);
+        PyErr_SetString(PyExc_TypeError,
+                        "iteration over a scalar (0-dim array)");
+        return NULL;
+}
+
+
+/******************* array attribute get and set routines ******************/
+
+/* .ndim attribute: number of array dimensions as a Python int. */
+static PyObject *
+array_ndim_get(PyArrayObject *self)
+{
+        long ndim = (long) self->nd;
+        return PyInt_FromLong(ndim);
+}
+
+/* .flags attribute: build a flagsobj wrapper (defined in the Python
+   side _scipy_internal module) around this array's flag bits. */
+static PyObject *
+array_flags_get(PyArrayObject *self)
+{
+        return PyObject_CallMethod(_scipy_internal, "flagsobj", "Oii",
+                                   self, self->flags, 0);
+}
+
+/* .shape attribute: the dimensions as a tuple of ints. */
+static PyObject *
+array_shape_get(PyArrayObject *self)
+{
+        int ndim = self->nd;
+        return PyArray_IntTupleFromIntp(ndim, self->dimensions);
+}
+
+
+/* Setter for .shape: reshape in place.  Performs the reshape through
+   PyArray_Reshape to validate the new shape, then copies the
+   resulting dimensions/strides into a freshly allocated block on
+   self and refreshes the contiguity flags.  Returns 0 or -1. */
+static int
+array_shape_set(PyArrayObject *self, PyObject *val)
+{
+        int nd;
+        PyObject *ret;
+
+        ret = PyArray_Reshape(self, val);
+        if (ret == NULL) return -1;
+
+        /* Free old dimensions and strides */
+        PyDimMem_FREE(self->dimensions);
+        nd = PyArray_NDIM(ret);
+        self->nd = nd;
+        if (nd > 0) {  /* create new dimensions and strides */
+                self->dimensions = PyDimMem_NEW(2*nd);
+                if (self->dimensions == NULL) {
+                        Py_DECREF(ret);
+                        PyErr_SetString(PyExc_MemoryError,"");
+                        return -1;
+                }
+                self->strides = self->dimensions + nd;
+                memcpy(self->dimensions, PyArray_DIMS(ret),
+                       nd*sizeof(intp));
+                memcpy(self->strides, PyArray_STRIDES(ret),
+                       nd*sizeof(intp));
+        }
+        else {self->dimensions=NULL; self->strides=NULL;}
+        Py_DECREF(ret);
+        PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN);
+        return 0;
+}
+
+
+/* .strides attribute: byte steps per dimension, as a tuple. */
+static PyObject *
+array_strides_get(PyArrayObject *self)
+{
+        int ndim = self->nd;
+        return PyArray_IntTupleFromIntp(ndim, self->strides);
+}
+
+/* Setter for .strides: install new strides after validating them
+   against the memory of the ultimate base array.  Walks the base
+   chain to find the owning array so views cannot be given strides
+   that step outside the real buffer.  Returns 0 or -1. */
+static int
+array_strides_set(PyArrayObject *self, PyObject *obj)
+{
+        PyArray_Dims newstrides = {NULL, 0};
+        PyArrayObject *new;
+        intp numbytes;
+
+        if (!PyArray_IntpConverter(obj, &newstrides) || \
+            newstrides.ptr == NULL) {
+                PyErr_SetString(PyExc_TypeError, "invalid strides");
+                return -1;
+        }
+        if (newstrides.len != self->nd) {
+                PyErr_Format(PyExc_ValueError, "strides must be " \
+                             " same length as shape (%d)", self->nd);
+                goto fail;
+        }
+        new = self;
+        /* BUGFIX: the original loop never terminated when a non-NULL
+           base was not an array object (e.g. a buffer); stop walking
+           at the first non-array base. */
+        while(new->base != NULL) {
+                if (PyArray_Check(new->base))
+                        new = (PyArrayObject *)new->base;
+                else break;
+        }
+        numbytes = PyArray_MultiplyList(new->dimensions,
+                                        new->nd)*new->descr->elsize;
+
+        if (!PyArray_CheckStrides(self->descr->elsize, self->nd, numbytes,
+                                  self->dimensions, newstrides.ptr)) {
+                PyErr_SetString(PyExc_ValueError, "strides is not "\
+                                "compatible with available memory");
+                goto fail;
+        }
+        memcpy(self->strides, newstrides.ptr, sizeof(intp)*newstrides.len);
+        PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN);
+        PyDimMem_FREE(newstrides.ptr);
+        return 0;
+
+ fail:
+        PyDimMem_FREE(newstrides.ptr);
+        return -1;
+}
+
+
+/* __array_strides__ protocol attribute: None for C-contiguous arrays
+   (meaning "default strides"), otherwise the strides tuple.
+   (The if-without-parens works because the macro supplies them.) */
+static PyObject *
+array_protocol_strides_get(PyArrayObject *self)
+{
+        if PyArray_ISCONTIGUOUS(self) {
+                Py_INCREF(Py_None);
+                return Py_None;
+        }
+        return PyArray_IntTupleFromIntp(self->nd, self->strides);
+}
+
+/* __array_priority__ attribute: ranks array types for mixed-type
+   operations (exact ndarray < bigarray < subclasses). */
+static PyObject *
+array_priority_get(PyArrayObject *self)
+{
+        double priority;
+
+        if (PyArray_CheckExact(self))
+                priority = PyArray_PRIORITY;
+        else if (PyBigArray_CheckExact(self))
+                priority = PyArray_BIG_PRIORITY;
+        else
+                priority = PyArray_SUBTYPE_PRIORITY;
+        return PyFloat_FromDouble(priority);
+}
+
+
+/* __array_data__ protocol attribute: a 2-tuple of the data pointer
+   formatted as a hex string and a read-only flag (True when the
+   array is NOT writeable).  "N" steals the string reference, "O"
+   increfs the bool singleton. */
+static PyObject *
+array_dataptr_get(PyArrayObject *self)
+{
+        return Py_BuildValue("NO",
+                             PyString_FromFormat("%p", self->data),
+                             (self->flags & WRITEABLE ? Py_False :
+                              Py_True));
+}
+
+/* .data attribute getter: expose the array memory as a Python buffer
+   object (read-write when the array is writeable).  Only one-segment
+   arrays can be exposed this way. */
+static PyObject *
+array_data_get(PyArrayObject *self)
+{
+        intp nbytes;
+        if (!(PyArray_ISONESEGMENT(self))) {
+                PyErr_SetString(PyExc_AttributeError, "cannot get single-"\
+                                "segment buffer for discontiguous array");
+                return NULL;
+        }
+        nbytes = PyArray_NBYTES(self);
+        if PyArray_ISWRITEABLE(self)
+                return PyBuffer_FromReadWriteObject((PyObject *)self, 0,
+                                                    (int) nbytes);
+        else
+                return PyBuffer_FromObject((PyObject *)self, 0, (int) nbytes);
+}
+
+/* .data attribute setter: repoint the array at op's buffer memory.
+   op must expose a single-segment buffer large enough for the array;
+   a read-only buffer is accepted but clears WRITEABLE.  Previously
+   owned data is released (with object-array DECREFs), any
+   UPDATEIFCOPY obligation on the old base is resolved, and op becomes
+   the new base.  Note the flags are reset wholesale to CARRAY_FLAGS.
+   Returns 0 or -1. */
+static int
+array_data_set(PyArrayObject *self, PyObject *op)
+{
+        void *buf;
+        int buf_len;
+        int writeable=1;
+
+        if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) {
+                writeable = 0;
+                if (PyObject_AsReadBuffer(op, (const void **)&buf,
+                                          &buf_len) < 0) {
+                        PyErr_SetString(PyExc_AttributeError,
+                                        "object does not have single-segment " \
+                                        "buffer interface");
+                        return -1;
+                }
+        }
+        if (!PyArray_ISONESEGMENT(self)) {
+                PyErr_SetString(PyExc_AttributeError, "cannot set single-" \
+                                "segment buffer for discontiguous array");
+                return -1;
+        }
+        if (PyArray_NBYTES(self) > buf_len) {
+                PyErr_SetString(PyExc_AttributeError,
+                                "not enough data for array");
+                return -1;
+        }
+        if (self->flags & OWN_DATA) {
+                /* drop object references held in the old buffer */
+                PyArray_XDECREF(self);
+                PyDataMem_FREE(self->data);
+        }
+        if (self->base) {
+                if (self->flags & UPDATEIFCOPY) {
+                        ((PyArrayObject *)self->base)->flags |= WRITEABLE;
+                        self->flags &= ~UPDATEIFCOPY;
+                }
+                Py_DECREF(self->base);
+        }
+        Py_INCREF(op);
+        self->base = op;
+        self->data = buf;
+        self->flags = CARRAY_FLAGS;
+        if (!writeable)
+                self->flags &= ~WRITEABLE;
+        return 0;
+}
+
+
+/* .itemsize attribute: bytes per element, as a Python int. */
+static PyObject *
+array_itemsize_get(PyArrayObject *self)
+{
+        long itemsize = (long) self->descr->elsize;
+        return PyInt_FromLong(itemsize);
+}
+
+/* .size attribute: total element count.  Uses PyInt when intp fits
+   in a C long; otherwise falls back to PyLong only when the value is
+   actually out of long range. */
+static PyObject *
+array_size_get(PyArrayObject *self)
+{
+        intp size=PyArray_SIZE(self);
+#if SIZEOF_INTP <= SIZEOF_LONG
+        return PyInt_FromLong((long) size);
+#else
+        if (size > MAX_LONG || size < MIN_LONG)
+                return PyLong_FromLongLong(size);
+        else
+                return PyInt_FromLong((long) size);
+#endif
+}
+
+/* .nbytes attribute: total buffer size in bytes (size * itemsize),
+   with the same int/long fallback strategy as array_size_get. */
+static PyObject *
+array_nbytes_get(PyArrayObject *self)
+{
+        intp nbytes = PyArray_NBYTES(self);
+#if SIZEOF_INTP <= SIZEOF_LONG
+        return PyInt_FromLong((long) nbytes);
+#else
+        if (nbytes > MAX_LONG || nbytes < MIN_LONG)
+                return PyLong_FromLongLong(nbytes);
+        else
+                return PyInt_FromLong((long) nbytes);
+#endif
+}
+
+
+/* .dtypechar-style attribute: the type character, with the element
+   size appended for extended (flexible) types, e.g. "S5". */
+static PyObject *
+array_typechar_get(PyArrayObject *self)
+{
+        if PyArray_ISEXTENDED(self)
+                return PyString_FromFormat("%c%d", (self->descr->type),
+                                           self->descr->elsize);
+        else
+                return PyString_FromStringAndSize(&(self->descr->type), 1);
+}
+
+static PyObject *arraydescr_protocol_typestr_get(PyArray_Descr *);
+
+/* __array_typestr__ protocol attribute: delegate to the descriptor's
+   typestr getter. */
+static PyObject *
+array_typestr_get(PyArrayObject *self)
+{
+        PyArray_Descr *descr = self->descr;
+        return arraydescr_protocol_typestr_get(descr);
+}
+
+/* .dtype attribute getter: a new reference to the descriptor. */
+static PyObject *
+array_descr_get(PyArrayObject *self)
+{
+        PyArray_Descr *descr = self->descr;
+
+        Py_INCREF(descr);
+        return (PyObject *)descr;
+}
+
+
+/* If the type is changed.
+ Also needing change: strides, itemsize
+
+ Either itemsize is exactly the same
+ or the array is single-segment (contiguous or fortran) with
+ compatibile dimensions
+
+ The shape and strides will be adjusted in that case as well.
+*/
+
+/* Setter for .dtype: reinterpret the array's memory with a new
+   descriptor.  Either the itemsizes match exactly, or the array is
+   one-segment and the last (first for Fortran order) dimension is
+   rescaled so total bytes are preserved.  OBJECT arrays may not be
+   reinterpreted in either direction.  A sub-array target descriptor
+   is expanded by building a temporary array and adopting its
+   shape/strides/descr.  Returns 0 or -1. */
+static int
+array_descr_set(PyArrayObject *self, PyObject *arg)
+{
+        PyArray_Descr *newtype=NULL;
+        intp newdim;
+        int index;
+        char *msg = "new type not compatible with array.";
+
+        if (!(PyArray_DescrConverter(arg, &newtype)) ||
+            newtype == NULL) {
+                PyErr_SetString(PyExc_TypeError, "invalid type for array");
+                return -1;
+        }
+        if (newtype->type_num == PyArray_OBJECT || \
+            self->descr->type_num == PyArray_OBJECT) {
+                PyErr_SetString(PyExc_TypeError, \
+                                "Cannot change descriptor for object"\
+                                "array.");
+                Py_DECREF(newtype);
+                return -1;
+        }
+
+        if ((newtype->elsize != self->descr->elsize) && \
+            (self->nd == 0 || !PyArray_ISONESEGMENT(self) || \
+             newtype->subarray)) goto fail;
+
+        /* axis whose extent absorbs the itemsize change */
+        if (PyArray_ISCONTIGUOUS(self)) index = self->nd - 1;
+        else index = 0;
+
+        if (newtype->elsize < self->descr->elsize) {
+                /* if it is compatible increase the size of the
+                   dimension at end (or at the front for FORTRAN)
+                */
+                if (self->descr->elsize % newtype->elsize != 0)
+                        goto fail;
+                newdim = self->descr->elsize / newtype->elsize;
+                self->dimensions[index] *= newdim;
+                self->strides[index] = newtype->elsize;
+        }
+
+        else if (newtype->elsize > self->descr->elsize) {
+
+                /* Determine if last (or first if FORTRAN) dimension
+                   is compatible */
+
+                newdim = self->dimensions[index] * self->descr->elsize;
+                if ((newdim % newtype->elsize) != 0) goto fail;
+
+                self->dimensions[index] = newdim / newtype->elsize;
+                self->strides[index] = newtype->elsize;
+        }
+
+        /* fall through -- adjust type*/
+
+        Py_DECREF(self->descr);
+        if (newtype->subarray) {
+                /* create new array object from data and update
+                   dimensions, strides and descr from it */
+                PyArrayObject *temp;
+
+                /* NOTE(review): temp is not checked for NULL before
+                   being dereferenced -- confirm NewFromDescr cannot
+                   fail here */
+                temp = (PyArrayObject *)\
+                        PyArray_NewFromDescr(&PyArray_Type, newtype, self->nd,
+                                             self->dimensions, self->strides,
+                                             self->data, self->flags, NULL);
+                PyDimMem_FREE(self->dimensions);
+                self->dimensions = temp->dimensions;
+                self->nd = temp->nd;
+                self->strides = temp->strides;
+                Py_DECREF(newtype);
+                newtype = temp->descr;
+                /* Fool deallocator */
+                temp->nd = 0;
+                temp->dimensions = NULL;
+                temp->descr = NULL;
+                Py_DECREF(temp);
+        }
+
+        self->descr = newtype;
+        PyArray_UpdateFlags(self, UPDATE_ALL_FLAGS);
+
+        return 0;
+
+ fail:
+        PyErr_SetString(PyExc_ValueError, msg);
+        Py_DECREF(newtype);
+        return -1;
+}
+
+/* __array_descr__ protocol attribute: the descriptor's "arrdescr"
+   attribute when it has one, otherwise a default one-entry list
+   [("", typestr)]. */
+static PyObject *
+array_protocol_descr_get(PyArrayObject *self)
+{
+        PyObject *res;
+        PyObject *dobj;
+
+        res = PyObject_GetAttrString((PyObject *)self->descr, "arrdescr");
+        if (res) return res;
+        PyErr_Clear();
+
+        /* get default */
+        dobj = PyTuple_New(2);
+        if (dobj == NULL) return NULL;
+        PyTuple_SET_ITEM(dobj, 0, PyString_FromString(""));
+        PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self));
+        res = PyList_New(1);
+        if (res == NULL) {Py_DECREF(dobj); return NULL;}
+        PyList_SET_ITEM(res, 0, dobj);
+        return res;
+}
+
+/* __array_struct__ protocol attribute: a CObject wrapping a malloc'd
+   PyArrayInterface that aliases this array's shape/strides/data.
+   The array itself is INCREF'd and handed to the CObject as the
+   "desc" so gentype_struct_free can release both.
+   NOTE(review): _pya_malloc result is not checked for NULL. */
+static PyObject *
+array_struct_get(PyArrayObject *self)
+{
+        PyArrayInterface *inter;
+
+        inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface));
+        inter->version = 2;
+        inter->nd = self->nd;
+        inter->typekind = self->descr->kind;
+        inter->itemsize = self->descr->elsize;
+        inter->flags = self->flags;
+        /* reset unused flags */
+        inter->flags &= ~(UPDATEIFCOPY | OWNDATA);
+        if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NOTSWAPPED;
+        inter->strides = self->strides;
+        inter->shape = self->dimensions;
+        inter->data = self->data;
+        Py_INCREF(self);
+        return PyCObject_FromVoidPtrAndDesc(inter, self, gentype_struct_free);
+}
+
+/* .dtypetype-style attribute: the scalar type object associated with
+   this array's descriptor (new reference). */
+static PyObject *
+array_type_get(PyArrayObject *self)
+{
+        PyObject *typeobj = (PyObject *)self->descr->typeobj;
+
+        Py_INCREF(typeobj);
+        return typeobj;
+}
+
+
+
+/* .base attribute: the object this array is a view of, or None. */
+static PyObject *
+array_base_get(PyArrayObject *self)
+{
+        PyObject *base = self->base;
+
+        if (base == NULL)
+                base = Py_None;
+        Py_INCREF(base);
+        return base;
+}
+
+
+/* .real attribute getter: for a complex array, a strided view of the
+   real halves (same strides over the float type whose type_num is
+   NUM_FLOATTYPE below the complex one) with self as base; for any
+   other array, self itself (INCREF'd). */
+static PyObject *
+array_real_get(PyArrayObject *self)
+{
+        PyArrayObject *ret;
+
+        if (PyArray_ISCOMPLEX(self)) {
+                ret = (PyArrayObject *)PyArray_New(self->ob_type,
+                                                   self->nd,
+                                                   self->dimensions,
+                                                   self->descr->type_num - \
+                                                   PyArray_NUM_FLOATTYPE,
+                                                   self->strides,
+                                                   self->data,
+                                                   0,
+                                                   self->flags, (PyObject *)self);
+                if (ret == NULL) return NULL;
+                /* the view skips half of every element */
+                ret->flags &= ~CONTIGUOUS;
+                ret->flags &= ~FORTRAN;
+                Py_INCREF(self);
+                ret->base = (PyObject *)self;
+                return (PyObject *)ret;
+        }
+        else {
+                Py_INCREF(self);
+                return (PyObject *)self;
+        }
+}
+
+
+/* .real attribute setter: copy val into the real part.  For complex
+   arrays a real-part view (as in array_real_get) is built and val is
+   copied into it; otherwise val is copied into self directly.
+   Returns 0 or -1. */
+static int
+array_real_set(PyArrayObject *self, PyObject *val)
+{
+        PyArrayObject *ret;
+        PyArrayObject *new;
+        int rint;
+
+        new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0);
+        if (new == NULL) return -1;
+
+        if (PyArray_ISCOMPLEX(self)) {
+                ret = (PyArrayObject *)PyArray_New(self->ob_type,
+                                                   self->nd,
+                                                   self->dimensions,
+                                                   self->descr->type_num - \
+                                                   PyArray_NUM_FLOATTYPE,
+                                                   self->strides,
+                                                   self->data,
+                                                   0,
+                                                   self->flags, (PyObject *)self);
+                if (ret == NULL) {Py_DECREF(new); return -1;}
+                ret->flags &= ~CONTIGUOUS;
+                ret->flags &= ~FORTRAN;
+                Py_INCREF(self);
+                ret->base = (PyObject *)self;
+        }
+        else {
+                Py_INCREF(self);
+                ret = self;
+        }
+        rint = PyArray_CopyInto(ret, new);
+        Py_DECREF(ret);
+        Py_DECREF(new);
+        return rint;
+}
+
+/* .imag attribute getter: for a complex array, a strided read/write
+   view onto the imaginary halves (data offset by one float) with
+   self as base; otherwise a fresh read-only array of zeros of the
+   same shape and dtype. */
+static PyObject *
+array_imag_get(PyArrayObject *self)
+{
+        PyArrayObject *ret;
+        PyArray_Descr *type;
+
+        if (PyArray_ISCOMPLEX(self)) {
+                type = PyArray_DescrFromType(self->descr->type_num -
+                                             PyArray_NUM_FLOATTYPE);
+                ret = (PyArrayObject *) \
+                        PyArray_NewFromDescr(self->ob_type,
+                                             type,
+                                             self->nd,
+                                             self->dimensions,
+                                             self->strides,
+                                             self->data + type->elsize,
+                                             self->flags, (PyObject *)self);
+                if (ret == NULL) return NULL;
+                /* the view skips half of every element */
+                ret->flags &= ~CONTIGUOUS;
+                ret->flags &= ~FORTRAN;
+                Py_INCREF(self);
+                ret->base = (PyObject *)self;
+                return (PyObject *) ret;
+        }
+        else {
+                type = self->descr;
+                Py_INCREF(type);
+                ret = (PyArrayObject *)PyArray_Zeros(self->nd,
+                                                     self->dimensions,
+                                                     type,
+                                                     PyArray_ISFORTRAN(self));
+                /* BUGFIX: guard against allocation failure before
+                   touching ret->flags (original dereferenced NULL). */
+                if (ret == NULL) return NULL;
+                ret->flags &= ~WRITEABLE;
+                return (PyObject *)ret;
+        }
+}
+
+/* .imag attribute setter: copy val into the imaginary halves of a
+   complex array via an offset strided view; raises TypeError for
+   non-complex arrays.  Returns 0 or -1. */
+static int
+array_imag_set(PyArrayObject *self, PyObject *val)
+{
+        if (PyArray_ISCOMPLEX(self)) {
+                PyArrayObject *ret;
+                PyArrayObject *new;
+                int rint;
+
+                new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0);
+                if (new == NULL) return -1;
+                /* view of the float halves, offset by half an element */
+                ret = (PyArrayObject *)PyArray_New(self->ob_type,
+                                                   self->nd,
+                                                   self->dimensions,
+                                                   self->descr->type_num - \
+                                                   PyArray_NUM_FLOATTYPE,
+                                                   self->strides,
+                                                   self->data + \
+                                                   (self->descr->elsize >> 1),
+                                                   0,
+                                                   self->flags, (PyObject *)self);
+                if (ret == NULL) {
+                        Py_DECREF(new);
+                        return -1;
+                }
+                ret->flags &= ~CONTIGUOUS;
+                ret->flags &= ~FORTRAN;
+                Py_INCREF(self);
+                ret->base = (PyObject *)self;
+                rint = PyArray_CopyInto(ret, new);
+                Py_DECREF(ret);
+                Py_DECREF(new);
+                return rint;
+        }
+        else {
+                PyErr_SetString(PyExc_TypeError, "does not have imaginary " \
+                                "part to set");
+                return -1;
+        }
+}
+
+/* .flat attribute getter: a flat iterator over the array. */
+static PyObject *
+array_flat_get(PyArrayObject *self)
+{
+        PyObject *arr = (PyObject *)self;
+
+        return PyArray_IterNew(arr);
+}
+
+/* .flat attribute setter: broadcast-assign val across all elements
+   in flat order.  val is converted to self's dtype and iterated
+   cyclically (reset when exhausted) so a shorter source repeats.
+   OBJECT arrays get reference-counted pointer copies; other dtypes
+   use memmove plus a byte-order fixup when the byte orders differ.
+   Returns 0 or -1. */
+static int
+array_flat_set(PyArrayObject *self, PyObject *val)
+{
+        PyObject *arr=NULL;
+        int retval = -1;
+        PyArrayIterObject *selfit=NULL, *arrit=NULL;
+        PyArray_Descr *typecode;
+        int swap;
+        PyArray_CopySwapFunc *copyswap;
+
+        typecode = self->descr;
+        Py_INCREF(typecode);
+        arr = PyArray_FromAny(val, typecode,
+                              0, 0, FORCECAST | FORTRAN_IF(self));
+        if (arr == NULL) return -1;
+        arrit = (PyArrayIterObject *)PyArray_IterNew(arr);
+        if (arrit == NULL) goto exit;
+        selfit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+        if (selfit == NULL) goto exit;
+
+        swap = PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(arr);
+        copyswap = self->descr->f->copyswap;
+        if (PyArray_ISOBJECT(self)) {
+                while(selfit->index < selfit->size) {
+                        Py_XDECREF(*((PyObject **)selfit->dataptr));
+                        Py_INCREF(*((PyObject **)arrit->dataptr));
+                        memmove(selfit->dataptr, arrit->dataptr,
+                                sizeof(PyObject *));
+                        PyArray_ITER_NEXT(selfit);
+                        PyArray_ITER_NEXT(arrit);
+                        if (arrit->index == arrit->size)
+                                PyArray_ITER_RESET(arrit);
+                }
+                retval = 0;
+                goto exit;
+        }
+
+        while(selfit->index < selfit->size) {
+                memmove(selfit->dataptr, arrit->dataptr, self->descr->elsize);
+                copyswap(selfit->dataptr, NULL, swap, self->descr->elsize);
+                PyArray_ITER_NEXT(selfit);
+                PyArray_ITER_NEXT(arrit);
+                if (arrit->index == arrit->size)
+                        PyArray_ITER_RESET(arrit);
+        }
+        retval = 0;
+ exit:
+        Py_XDECREF(selfit);
+        Py_XDECREF(arrit);
+        Py_XDECREF(arr);
+        return retval;
+}
+
+/* get/set table for ndarray attributes, installed in tp_getset below.
+ The "__array_*__" entries implement the array protocol for consumers
+ of foreign arrays. NULL setter means the attribute is read-only. */
+static PyGetSetDef array_getsetlist[] = {
+ {"ndim",
+ (getter)array_ndim_get,
+ NULL,
+ "number of array dimensions"},
+ {"flags",
+ (getter)array_flags_get,
+ NULL,
+ "special dictionary of flags"},
+ {"shape",
+ (getter)array_shape_get,
+ (setter)array_shape_set,
+ "tuple of array dimensions"},
+ {"strides",
+ (getter)array_strides_get,
+ (setter)array_strides_set,
+ "tuple of bytes steps in each dimension"},
+ {"data",
+ (getter)array_data_get,
+ (setter)array_data_set,
+ "pointer to start of data"},
+ {"itemsize",
+ (getter)array_itemsize_get,
+ NULL,
+ "length of one element in bytes"},
+ {"size",
+ (getter)array_size_get,
+ NULL,
+ "number of elements in the array"},
+ {"nbytes",
+ (getter)array_nbytes_get,
+ NULL,
+ "number of bytes in the array"},
+ {"base",
+ (getter)array_base_get,
+ NULL,
+ "base object"},
+ {"dtype",
+ (getter)array_type_get,
+ NULL,
+ "get array type class"},
+ {"dtypechar",
+ (getter)array_typechar_get,
+ NULL,
+ "get array type character code"},
+ {"dtypestr",
+ (getter)array_typestr_get,
+ NULL,
+ "get array type string"},
+ {"dtypedescr",
+ (getter)array_descr_get,
+ (setter)array_descr_set,
+ "get(set) data-type-descriptor for array"},
+ {"real",
+ (getter)array_real_get,
+ (setter)array_real_set,
+ "real part of array"},
+ {"imag",
+ (getter)array_imag_get,
+ (setter)array_imag_set,
+ "imaginary part of array"},
+ {"flat",
+ (getter)array_flat_get,
+ (setter)array_flat_set,
+ "a 1-d view of a contiguous array"},
+ {"__array_data__",
+ (getter)array_dataptr_get,
+ NULL,
+ "Array protocol: data"},
+ {"__array_typestr__",
+ (getter)array_typestr_get,
+ NULL,
+ "Array protocol: typestr"},
+ {"__array_descr__",
+ (getter)array_protocol_descr_get,
+ NULL,
+ "Array protocol: descr"},
+ {"__array_shape__",
+ (getter)array_shape_get,
+ NULL,
+ "Array protocol: shape"},
+ {"__array_strides__",
+ (getter)array_protocol_strides_get,
+ NULL,
+ "Array protocol: strides"},
+ {"__array_struct__",
+ (getter)array_struct_get,
+ NULL,
+ "Array protocol: struct"},
+ {"__array_priority__",
+ (getter)array_priority_get,
+ NULL,
+ "Array priority"},
+ {NULL, NULL, NULL, NULL}, /* Sentinel */
+};
+
+/****************** end of attribute get and set routines *******************/
+
+
+/* tp_alloc slot: raw allocation of an uninitialized PyArrayObject.
+ Returns NULL with MemoryError set on allocation failure. */
+static PyObject *
+array_alloc(PyTypeObject *type, int nitems)
+{
+ PyObject *obj;
+ /* nitems will always be 0 */
+ obj = (PyObject *)_pya_malloc(sizeof(PyArrayObject));
+ /* BUGFIX: report allocation failure instead of letting
+ PyObject_Init dereference NULL */
+ if (obj == NULL) return PyErr_NoMemory();
+ PyObject_Init(obj, type);
+ return obj;
+}
+
+
+/* Docstring shared by both array types (tp_doc below). NOTE(review):
+ text is user-visible; content left untouched. */
+static char Arraytype__doc__[] =
+ "A array object represents a multidimensional, homogeneous array\n"
+ " of fixed-size items. An associated data-type-descriptor object\n"
+ " details the data-type in an array (including byteorder and any\n"
+ " fields). An array can be constructed using the scipy.array\n"
+ " command. Arrays are sequence, mapping and numeric objects.\n"
+ " More information is available in the scipy module and by looking\n"
+ " at the methods and attributes of an array.\n\n"
+ " ndarray.__new__(subtype, shape=, dtype=int_, buffer=None, \n"
+ " offset=0, strides=None, fortran=False)\n\n"
+ " There are two modes of creating an array using __new__:\n"
+ " 1) If buffer is None, then only shape, dtype, and fortran \n"
+ " are used\n"
+ " 2) If buffer is an object exporting the buffer interface, then\n"
+ " all keywords are interpreted.\n"
+ " The dtype parameter can be any object that can be interpreted \n"
+ " as a scipy.dtypedescr object.\n\n"
+ " No __init__ method is needed because the array is fully \n"
+ " initialized after the __new__ method.";
+
+/* Type object for scipy.bigndarray: the base array type without the
+ sequence and buffer protocol tables (PyArray_Type subclasses it and
+ adds those). Uses custom tp_alloc/tp_free (array_alloc/_pya_free). */
+static PyTypeObject PyBigArray_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "scipy.bigndarray", /*tp_name*/
+ sizeof(PyArrayObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)array_dealloc, /*tp_dealloc */
+ (printfunc)NULL, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ (cmpfunc)0, /*tp_compare*/
+ (reprfunc)array_repr, /*tp_repr*/
+ &array_as_number, /*tp_as_number*/
+ NULL, /*tp_as_sequence*/
+ &array_as_mapping, /*tp_as_mapping*/
+ (hashfunc)0, /*tp_hash*/
+ (ternaryfunc)0, /*tp_call*/
+ (reprfunc)array_str, /*tp_str*/
+
+ (getattrofunc)0, /*tp_getattro*/
+ (setattrofunc)0, /*tp_setattro*/
+ NULL, /*tp_as_buffer*/
+ (Py_TPFLAGS_DEFAULT
+ | Py_TPFLAGS_BASETYPE
+ | Py_TPFLAGS_CHECKTYPES), /*tp_flags*/
+ /*Documentation string */
+ Arraytype__doc__, /*tp_doc*/
+
+ (traverseproc)0, /*tp_traverse */
+ (inquiry)0, /*tp_clear */
+ (richcmpfunc)array_richcompare,
+ offsetof(PyArrayObject, weakreflist), /*tp_weaklistoffset */
+
+ /* Iterator support (use standard) */
+
+ (getiterfunc)array_iter, /* tp_iter */
+ (iternextfunc)0, /* tp_iternext */
+
+ /* Sub-classing (new-style object) support */
+
+ array_methods, /* tp_methods */
+ 0, /* tp_members */
+ array_getsetlist, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)0, /* tp_init */
+ array_alloc, /* tp_alloc */
+ (newfunc)array_new, /* tp_new */
+ _pya_free, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0 /* tp_weaklist */
+};
+
+/* A standard array will subclass from the Big Array and
+ add the array_as_sequence table
+ and the array_as_buffer table
+ */
+
+/* Only the header fields are filled statically; the remaining slots
+ (tp_base = PyBigArray_Type, the sequence/buffer tables, etc.) are
+ presumably filled in at module initialization -- set outside this
+ chunk; TODO confirm. */
+static PyTypeObject PyArray_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "scipy.ndarray", /*tp_name*/
+ sizeof(PyArrayObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+};
+
+
+/* The rest of this code is to build the right kind of array from a python */
+/* object. */
+
+/* Recursively determine the number of dimensions a nested Python object
+ would produce. max bounds the recursion depth (guards against
+ self-referential sequences); stop_at_string / stop_at_tuple make
+ strings (resp. tuples) count as scalars instead of sequences.
+ Returns the depth (0 == scalar), or -1 on error / depth overflow. */
+static int
+discover_depth(PyObject *s, int max, int stop_at_string, int stop_at_tuple)
+{
+ int d=0;
+ PyObject *e;
+
+ if(max < 1) return -1;
+
+ /* non-sequences and old-style instances are scalars */
+ if(! PySequence_Check(s) || PyInstance_Check(s) || \
+ PySequence_Length(s) < 0) {
+ PyErr_Clear(); return 0;
+ }
+ if (PyArray_Check(s))
+ return PyArray_NDIM(s);
+ if(PyString_Check(s) || PyBuffer_Check(s) || PyUnicode_Check(s))
+ return stop_at_string ? 0:1;
+ if (stop_at_tuple && PyTuple_Check(s)) return 0;
+ /* array-protocol objects advertise their dimensionality */
+ if ((e=PyObject_GetAttrString(s, "__array_shape__")) != NULL) {
+ if (PyTuple_Check(e)) d=PyTuple_GET_SIZE(e);
+ else d=-1;
+ Py_DECREF(e);
+ if (d>-1) return d;
+ }
+ else PyErr_Clear();
+
+ if (PySequence_Length(s) == 0)
+ return 1;
+ if ((e=PySequence_GetItem(s,0)) == NULL) return -1;
+ /* e==s would mean a self-containing sequence; avoid recursing */
+ if(e!=s) {
+ d=discover_depth(e, max-1, stop_at_string, stop_at_tuple);
+ if(d >= 0) d++;
+ }
+ Py_DECREF(e);
+ return d;
+}
+
+/* Recursively find the largest item size needed to hold the leaves of
+ the nested object s (depth nd) in a flexible (string/unicode/buffer)
+ array. *itemsize is updated in place with the running maximum.
+ Returns 0 on success, -1 on error. */
+static int
+discover_itemsize(PyObject *s, int nd, int *itemsize)
+{
+ int n, r, i;
+ PyObject *e;
+
+ n = PyObject_Length(s);
+ /* BUGFIX: a failed length query used to leave a stale exception
+ set and fold the -1 error value into the MAX below. Clear the
+ error and contribute nothing, matching the old net result. */
+ if (n < 0) {
+ PyErr_Clear();
+ return 0;
+ }
+
+ if ((nd == 0) || PyString_Check(s) || \
+ PyUnicode_Check(s) || PyBuffer_Check(s)) {
+ /* unicode length counts characters; convert to bytes */
+ if PyUnicode_Check(s)
+ *itemsize = MAX(*itemsize, sizeof(Py_UNICODE)*n);
+ else
+ *itemsize = MAX(*itemsize, n);
+ return 0;
+ }
+ for (i=0; i<n; i++) {
+ if ((e=PySequence_GetItem(s,i))==NULL) return -1;
+ r=discover_itemsize(e,nd-1,itemsize);
+ Py_DECREF(e);
+ if (r == -1) return -1;
+ }
+ return 0;
+}
+
+/* Take an arbitrary object known to represent
+ an array of ndim nd, and determine the size in each dimension
+*/
+
+/* Fills d[0..nd-1] with the dimensions. When check_it is set, raises
+ ValueError on ragged nesting; otherwise the maximum sub-length is
+ recorded for each inner dimension. Returns 0 or -1 on error. */
+static int
+discover_dimensions(PyObject *s, int nd, intp *d, int check_it)
+{
+ PyObject *e;
+ int r, n, i, n_lower;
+
+ n=PyObject_Length(s);
+ *d = n;
+ if(*d < 0) return -1;
+ if(nd <= 1) return 0;
+ n_lower = 0;
+ for(i=0; i<n; i++) {
+ if ((e=PySequence_GetItem(s,i)) == NULL) return -1;
+ /* each item fills d[1..] for its own sub-tree */
+ r=discover_dimensions(e,nd-1,d+1,check_it);
+ Py_DECREF(e);
+
+ if (r == -1) return -1;
+ if (check_it && n_lower != 0 && n_lower != d[1]) {
+ PyErr_SetString(PyExc_ValueError,
+ "inconsistent shape in sequence");
+ return -1;
+ }
+ /* track the widest sub-sequence seen so far */
+ if (d[1] > n_lower) n_lower = d[1];
+ }
+ d[1] = n_lower;
+
+ return 0;
+}
+
+/* new reference */
+/* doesn't alter refcount of chktype or mintype ---
+ unless one of them is returned */
+/* Return the smaller common descriptor of chktype and mintype: the one
+ with the larger type_num, with the element size widened for flexible
+ (string/unicode/void) types so it can hold either operand. */
+static PyArray_Descr *
+_array_small_type(PyArray_Descr *chktype, PyArray_Descr* mintype)
+{
+ PyArray_Descr *outtype;
+
+ if (chktype->type_num > mintype->type_num) outtype = chktype;
+ else outtype = mintype;
+
+ Py_INCREF(outtype);
+ if (PyTypeNum_ISEXTENDED(outtype->type_num) && \
+ (PyTypeNum_ISEXTENDED(mintype->type_num) || \
+ mintype->type_num==0)) {
+ int testsize = outtype->elsize;
+ register int chksize, minsize;
+ chksize = chktype->elsize;
+ minsize = mintype->elsize;
+ /* Handle string->unicode case separately
+ because string itemsize is twice as large */
+ if (outtype->type_num == PyArray_UNICODE &&
+ mintype->type_num == PyArray_STRING) {
+ testsize = MAX(chksize, 2*minsize);
+ }
+ else {
+ testsize = MAX(chksize, minsize);
+ }
+ if (testsize != outtype->elsize) {
+ /* need a private copy before mutating elsize */
+ PyArray_DESCR_REPLACE(outtype);
+ outtype->elsize = testsize;
+ Py_XDECREF(outtype->fields);
+ outtype->fields = NULL;
+ }
+ }
+ return outtype;
+}
+
+/* op is an object to be converted to an ndarray.
+
+ minitype is the minimum type-descriptor needed.
+
+ max is the maximum number of dimensions -- used for recursive call
+ to avoid infinite recursion...
+
+*/
+
+/* Returns a new reference to the descriptor that can hold op combined
+ with minitype (NULL minitype means start from BOOL). Falls back to
+ OBJECT when nothing more specific can be determined. */
+static PyArray_Descr *
+_array_find_type(PyObject *op, PyArray_Descr *minitype, int max)
+{
+ int l;
+ PyObject *ip;
+ PyArray_Descr *chktype=NULL;
+ PyArray_Descr *outtype;
+
+ if (minitype == NULL)
+ minitype = PyArray_DescrFromType(PyArray_BOOL);
+ else Py_INCREF(minitype);
+
+ if (max < 0) goto deflt;
+
+ if (PyArray_Check(op)) {
+ chktype = PyArray_DESCR(op);
+ Py_INCREF(chktype);
+ goto finish;
+ }
+
+ if (PyArray_IsScalar(op, Generic)) {
+ chktype = PyArray_DescrFromScalar(op);
+ goto finish;
+ }
+
+ /* array protocol: typestring attribute */
+ if ((ip=PyObject_GetAttrString(op, "__array_typestr__"))!=NULL) {
+ if (PyString_Check(ip)) {
+ chktype =_array_typedescr_fromstr(PyString_AS_STRING(ip));
+ }
+ Py_DECREF(ip);
+ if (chktype) goto finish;
+ }
+ else PyErr_Clear();
+
+ /* array protocol: C-struct interface */
+ if ((ip=PyObject_GetAttrString(op, "__array_struct__")) != NULL) {
+ PyArrayInterface *inter;
+ char buf[40];
+ if (PyCObject_Check(ip)) {
+ inter=(PyArrayInterface *)PyCObject_AsVoidPtr(ip);
+ if (inter->version == 2) {
+ snprintf(buf, 40, "|%c%d", inter->typekind,
+ inter->itemsize);
+ chktype = _array_typedescr_fromstr(buf);
+ }
+ }
+ Py_DECREF(ip);
+ if (chktype) goto finish;
+ }
+ else PyErr_Clear();
+
+ if (PyString_Check(op)) {
+ chktype = PyArray_DescrNewFromType(PyArray_STRING);
+ chktype->elsize = PyString_GET_SIZE(op);
+ goto finish;
+ }
+
+ if (PyUnicode_Check(op)) {
+ chktype = PyArray_DescrNewFromType(PyArray_UNICODE);
+ chktype->elsize = PyUnicode_GET_DATA_SIZE(op);
+ goto finish;
+ }
+
+ if (PyBuffer_Check(op)) {
+ chktype = PyArray_DescrNewFromType(PyArray_VOID);
+ chktype->elsize = op->ob_type->tp_as_sequence->sq_length(op);
+ PyErr_Clear();
+ goto finish;
+ }
+
+ /* objects with an __array__ method: use the produced array's type */
+ if (PyObject_HasAttrString(op, "__array__")) {
+ ip = PyObject_CallMethod(op, "__array__", NULL);
+ if(ip && PyArray_Check(ip)) {
+ chktype = PyArray_DESCR(ip);
+ Py_INCREF(chktype);
+ Py_DECREF(ip);
+ goto finish;
+ }
+ Py_XDECREF(ip);
+ if (PyErr_Occurred()) PyErr_Clear();
+ }
+
+ if (PyInstance_Check(op)) goto deflt;
+
+ /* generic sequence: reduce over all items */
+ if (PySequence_Check(op)) {
+
+ l = PyObject_Length(op);
+ if (l < 0 && PyErr_Occurred()) {
+ PyErr_Clear();
+ goto deflt;
+ }
+ /* empty sequence with no hint: default to integer */
+ if (l == 0 && minitype->type_num == PyArray_BOOL) {
+ Py_DECREF(minitype);
+ minitype = PyArray_DescrFromType(PyArray_INTP);
+ }
+ while (--l >= 0) {
+ PyArray_Descr *newtype;
+ ip = PySequence_GetItem(op, l);
+ if (ip==NULL) {
+ PyErr_Clear();
+ goto deflt;
+ }
+ chktype = _array_find_type(ip, minitype, max-1);
+ newtype = _array_small_type(chktype, minitype);
+ Py_DECREF(minitype);
+ minitype = newtype;
+ Py_DECREF(chktype);
+ Py_DECREF(ip);
+ }
+ chktype = minitype;
+ Py_INCREF(minitype);
+ goto finish;
+ }
+
+ /* Python scalars (Bool must be tested before Int) */
+ if (PyBool_Check(op)) {
+ chktype = PyArray_DescrFromType(PyArray_BOOL);
+ goto finish;
+ }
+ else if (PyInt_Check(op)) {
+ chktype = PyArray_DescrFromType(PyArray_LONG);
+ goto finish;
+ } else if (PyFloat_Check(op)) {
+ chktype = PyArray_DescrFromType(PyArray_DOUBLE);
+ goto finish;
+ } else if (PyComplex_Check(op)) {
+ chktype = PyArray_DescrFromType(PyArray_CDOUBLE);
+ goto finish;
+ }
+
+ deflt:
+ chktype = PyArray_DescrFromType(PyArray_OBJECT);
+
+ finish:
+
+ outtype = _array_small_type(chktype, minitype);
+ Py_DECREF(chktype);
+ Py_DECREF(minitype);
+ return outtype;
+}
+
+/* Fill self element-by-element from the Python sequence v via
+ PySequence_SetItem, assigning from the last index down to the first.
+ Returns 0 on success, -1 (with an exception set) on failure. */
+static int
+Assign_Array(PyArrayObject *self, PyObject *v)
+{
+ PyObject *item;
+ int idx, status;
+
+ if (!PySequence_Check(v)) {
+ PyErr_SetString(PyExc_ValueError,
+ "assignment from non-sequence");
+ return -1;
+ }
+
+ idx = PyObject_Length(v);
+ if (idx < 0) return -1;
+
+ for (idx = idx - 1; idx >= 0; idx--) {
+ item = PySequence_GetItem(v, idx);
+ if (item == NULL) return -1;
+ status = PySequence_SetItem((PyObject *)self, idx, item);
+ Py_DECREF(item);
+ if (status == -1) return -1;
+ }
+ return 0;
+}
+
+/* "Array Scalars don't call this code" */
+/* steals reference to typecode -- no NULL*/
+/* Build a 0-d array of the given type and store the scalar op into it
+ with the type's setitem. For flexible types with unset elsize, the
+ item size is taken from the length of op. */
+static PyObject *
+Array_FromScalar(PyObject *op, PyArray_Descr *typecode)
+{
+ PyArrayObject *ret;
+ int itemsize;
+ int type;
+
+ itemsize = typecode->elsize;
+ type = typecode->type_num;
+
+ if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) {
+ itemsize = PyObject_Length(op);
+ /* unicode length is in characters, not bytes */
+ if (type == PyArray_UNICODE) itemsize *= sizeof(Py_UNICODE);
+ }
+
+ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, typecode,
+ 0, NULL,
+ NULL, NULL, 0, NULL);
+
+ if (ret == NULL) return NULL;
+
+ /* setitem reports failure via the error indicator, not a return */
+ ret->descr->f->setitem(op, ret->data, ret);
+
+ if (PyErr_Occurred()) {
+ Py_DECREF(ret);
+ return NULL;
+ } else {
+ return (PyObject *)ret;
+ }
+}
+
+
+/* steals reference to typecode unless return value is NULL*/
+static PyObject *
+Array_FromSequence(PyObject *s, PyArray_Descr *typecode, int fortran,
+ int min_depth, int max_depth)
+{
+ PyArrayObject *r;
+ int nd;
+ intp d[MAX_DIMS];
+ int stop_at_string;
+ int stop_at_tuple;
+ int type = typecode->type_num;
+ int itemsize = typecode->elsize;
+ PyArray_Descr *savetype=typecode;
+
+ stop_at_string = ((type == PyArray_OBJECT) || \
+ (type == PyArray_STRING) || \
+ (type == PyArray_UNICODE) || \
+ (type == PyArray_VOID));
+
+ stop_at_tuple = (type == PyArray_VOID && ((typecode->fields && \
+ typecode->fields!=Py_None) \
+ || (typecode->subarray)));
+
+ if (!((nd=discover_depth(s, MAX_DIMS+1, stop_at_string,
+ stop_at_tuple)) > 0)) {
+ if (nd==0)
+ return Array_FromScalar(s, typecode);
+ PyErr_SetString(PyExc_ValueError,
+ "invalid input sequence");
+ return NULL;
+ }
+
+ if ((max_depth && nd > max_depth) || \
+ (min_depth && nd < min_depth)) {
+ PyErr_SetString(PyExc_ValueError,
+ "invalid number of dimensions");
+ return NULL;
+ }
+
+ if(discover_dimensions(s,nd,d, !stop_at_string) == -1) {
+ return NULL;
+ }
+ if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) {
+ if (discover_itemsize(s, nd, &itemsize) == -1) {
+ return NULL;
+ }
+ if (type == PyArray_UNICODE) itemsize*=sizeof(Py_UNICODE);
+ }
+
+ if (itemsize != typecode->elsize) {
+ PyArray_DESCR_REPLACE(typecode);
+ typecode->elsize = itemsize;
+ }
+
+ r=(PyArrayObject*)PyArray_NewFromDescr(&PyArray_Type, typecode,
+ nd, d,
+ NULL, NULL,
+ fortran, NULL);
+
+ if(!r) {Py_XINCREF(savetype); return NULL;}
+ if(Assign_Array(r,s) == -1) {
+ Py_XINCREF(savetype);
+ Py_DECREF(r);
+ return NULL;
+ }
+ return (PyObject*)r;
+}
+
+
+/*OBJECT_API
+ Is the typenum valid?
+*/
+/* Returns TRUE if PyArray_DescrFromType accepts the typenum, FALSE
+ otherwise. NOTE(review): an exception set by DescrFromType on the
+ failure path is left in place, as before -- callers check the
+ return value. */
+static int
+PyArray_ValidType(int type)
+{
+ PyArray_Descr *descr;
+ int res=TRUE;
+
+ descr = PyArray_DescrFromType(type);
+ if (descr==NULL) res = FALSE;
+ /* BUGFIX: was Py_DECREF, which crashes when descr is NULL */
+ Py_XDECREF(descr);
+ return res;
+}
+
+
+/* If the output is not a CARRAY, then it is buffered also */
+
+/* Cast the (possibly misbehaved) array in into out, repeating in as
+ many times as needed (SIZE(out) must be a multiple of SIZE(in)).
+ Input elements are staged through an aligned, native-order buffer of
+ up to PyArray_BUFSIZE elements; the output goes through a second
+ buffer when out is not a well-behaved C array. Returns 0 / -1. */
+static int
+_bufferedcast(PyArrayObject *out, PyArrayObject *in)
+{
+ char *inbuffer, *bptr, *optr;
+ char *outbuffer=NULL;
+ PyArrayIterObject *it_in=NULL, *it_out=NULL;
+ register intp i, index;
+ intp ncopies = PyArray_SIZE(out) / PyArray_SIZE(in);
+ int elsize=in->descr->elsize;
+ int nels = PyArray_BUFSIZE;
+ int el;
+ int inswap, outswap=0;
+ int obuf=!PyArray_ISCARRAY(out);
+ int oelsize = out->descr->elsize;
+ PyArray_VectorUnaryFunc *castfunc;
+ PyArray_CopySwapFunc *in_csn;
+ PyArray_CopySwapFunc *out_csn;
+ int retval = -1;
+
+ castfunc = in->descr->f->cast[out->descr->type_num];
+ in_csn = in->descr->f->copyswap;
+ out_csn = out->descr->f->copyswap;
+
+ /* If the input or output is STRING, UNICODE, or VOID */
+ /* then getitem and setitem are used for the cast */
+ /* and byteswapping is handled by those methods */
+
+ inswap = !(PyArray_ISFLEXIBLE(in) || PyArray_ISNOTSWAPPED(in));
+
+ inbuffer = PyDataMem_NEW(PyArray_BUFSIZE*elsize);
+ if (inbuffer == NULL) return -1;
+ if (PyArray_ISOBJECT(in))
+ memset(inbuffer, 0, PyArray_BUFSIZE*elsize);
+ it_in = (PyArrayIterObject *)PyArray_IterNew((PyObject *)in);
+ if (it_in == NULL) goto exit;
+
+ if (obuf) {
+ outswap = !(PyArray_ISFLEXIBLE(out) || \
+ PyArray_ISNOTSWAPPED(out));
+ outbuffer = PyDataMem_NEW(PyArray_BUFSIZE*oelsize);
+ if (outbuffer == NULL) goto exit;
+ if (PyArray_ISOBJECT(out))
+ memset(outbuffer, 0, PyArray_BUFSIZE*oelsize);
+
+ it_out = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out);
+ if (it_out == NULL) goto exit;
+
+ nels = MIN(nels, PyArray_BUFSIZE);
+ }
+
+ optr = (obuf) ? outbuffer: out->data;
+ bptr = inbuffer;
+ el = 0;
+ while(ncopies--) {
+ index = it_in->size;
+ PyArray_ITER_RESET(it_in);
+ while(index--) {
+ /* stage one input element into the buffer */
+ in_csn(bptr, it_in->dataptr, inswap, elsize);
+ bptr += elsize;
+ PyArray_ITER_NEXT(it_in);
+ el += 1;
+ if ((el == nels) || (index == 0)) {
+ /* buffer filled, do cast */
+
+ castfunc(inbuffer, optr, el, in, out);
+
+ if (obuf) {
+ /* Copy from outbuffer to array */
+ for(i=0; i<el; i++) {
+ out_csn(it_out->dataptr,
+ optr, outswap,
+ oelsize);
+ optr += oelsize;
+ PyArray_ITER_NEXT(it_out);
+ }
+ optr = outbuffer;
+ }
+ else {
+ /* BUGFIX: advance by the number of
+ elements actually cast (el), not by
+ the full buffer size (nels) -- a
+ final partial buffer otherwise left
+ optr past the data just written,
+ corrupting later copies. */
+ optr += out->descr->elsize * el;
+ }
+ el = 0;
+ bptr = inbuffer;
+ }
+ }
+ }
+ retval = 0;
+ exit:
+ Py_XDECREF(it_in);
+ PyDataMem_FREE(inbuffer);
+ PyDataMem_FREE(outbuffer);
+ if (obuf) {
+ Py_XDECREF(it_out);
+ }
+ return retval;
+}
+
+
+/* For backward compatibility */
+
+/* steals reference to at --- cannot be NULL*/
+/*OBJECT_API
+ Cast an array using typecode structure.
+*/
+/* Returns mp itself (new reference) when the requested type is
+ equivalent and mp is well-behaved; otherwise allocates a new array
+ of type at and casts into it. */
+static PyObject *
+PyArray_CastToType(PyArrayObject *mp, PyArray_Descr *at, int fortran)
+{
+ PyObject *out;
+ int ret;
+ PyArray_Descr *mpd;
+
+ mpd = mp->descr;
+
+ /* fast path: same (or equivalent) type and behaved data */
+ if (((mpd == at) || ((mpd->type_num == at->type_num) && \
+ PyArray_EquivByteorders(mpd->byteorder,\
+ at->byteorder) && \
+ ((mpd->elsize == at->elsize) || \
+ (at->elsize==0)))) && \
+ PyArray_ISBEHAVED_RO(mp)) {
+ Py_DECREF(at);
+ Py_INCREF(mp);
+ return (PyObject *)mp;
+ }
+
+ /* unset elsize: derive it from the source's element size */
+ if (at->elsize == 0) {
+ PyArray_DESCR_REPLACE(at);
+ if (at == NULL) return NULL;
+ if (mpd->type_num == PyArray_STRING && \
+ at->type_num == PyArray_UNICODE)
+ at->elsize = mpd->elsize*sizeof(Py_UNICODE);
+ if (mpd->type_num == PyArray_UNICODE &&
+ at->type_num == PyArray_STRING)
+ at->elsize = mpd->elsize/sizeof(Py_UNICODE);
+ if (at->type_num == PyArray_VOID)
+ at->elsize = mpd->elsize;
+ }
+
+ out = PyArray_NewFromDescr(mp->ob_type, at,
+ mp->nd,
+ mp->dimensions,
+ NULL, NULL,
+ fortran,
+ (PyObject *)mp);
+
+ if (out == NULL) return NULL;
+ ret = PyArray_CastTo((PyArrayObject *)out, mp);
+ if (ret != -1) return out;
+
+ Py_DECREF(out);
+ return NULL;
+
+}
+
+/* The number of elements in out must be an integer multiple
+ of the number of elements in mp.
+*/
+
+/*OBJECT_API
+ Cast to an already created array.
+*/
+/* Casts mp into out, repeating mp to fill out. Uses the direct cast
+ function when both arrays are well-behaved in the same order, else
+ falls back to the buffered path. Returns 0 on success, -1 on error. */
+static int
+PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp)
+{
+
+ int simple;
+ intp mpsize = PyArray_SIZE(mp);
+ intp outsize = PyArray_SIZE(out);
+
+ if (mpsize == 0) return 0;
+ if (!PyArray_ISWRITEABLE(out)) {
+ PyErr_SetString(PyExc_ValueError,
+ "output array is not writeable");
+ return -1;
+ }
+ if (outsize % mpsize != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "output array must have an integer-multiple"\
+ " of the number of elements in the input "\
+ "array");
+ return -1;
+ }
+
+ /* the cast function table only covers builtin targets */
+ if (out->descr->type_num >= PyArray_NTYPES) {
+ PyErr_SetString(PyExc_ValueError,
+ "Can only cast to builtin types.");
+ return -1;
+
+ }
+
+ /* both contiguous, aligned, native order, in the same layout */
+ simple = ((PyArray_ISCARRAY_RO(mp) && PyArray_ISCARRAY(out)) || \
+ (PyArray_ISFARRAY_RO(mp) && PyArray_ISFARRAY(out)));
+
+ if (simple) {
+ char *inptr;
+ char *optr = out->data;
+ intp obytes = out->descr->elsize * outsize;
+ intp ncopies = outsize / mpsize;
+
+ /* NOTE(review): obytes is the full output size; the optr
+ advance below looks like it should be elsize*mpsize per
+ copy -- harmless only when ncopies == 1; verify. */
+ while(ncopies--) {
+ inptr = mp->data;
+ mp->descr->f->cast[out->descr->type_num](inptr,
+ optr,
+ mpsize,
+ mp, out);
+ optr += obytes;
+ }
+ return 0;
+ }
+
+ /* If not a well-behaved cast, then use buffers */
+ if (_bufferedcast(out, mp) == -1) {
+ return -1;
+ }
+ return 0;
+}
+
+/* steals reference to newtype --- acc. NULL */
+/* Convert an existing array to an array satisfying the requested type
+ and flags (CONTIGUOUS, ALIGNED, WRITEABLE, FORTRAN, ENSURECOPY,
+ ENSUREARRAY, FORCECAST, UPDATEIFCOPY). Returns the input itself
+ (new reference), a view, or a copy/cast as needed; NULL on error. */
+static PyObject *
+array_fromarray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
+{
+
+ PyArrayObject *ret=NULL;
+ int type, itemsize;
+ int copy = 0;
+ int arrflags;
+ PyArray_Descr *oldtype;
+ char *msg = "cannot copy back to a read-only array";
+ PyTypeObject *subtype;
+
+ oldtype = PyArray_DESCR(arr);
+
+ subtype = arr->ob_type;
+
+ if (newtype == NULL) {newtype = oldtype; Py_INCREF(oldtype);}
+ type = newtype->type_num;
+ itemsize = newtype->elsize;
+
+ /* Don't copy if sizes are compatible */
+ if (PyArray_EquivTypes(oldtype, newtype)) {
+ arrflags = arr->flags;
+
+ /* copy only if a requested property is missing */
+ copy = (flags & ENSURECOPY) || \
+ ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) \
+ || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) \
+ || (arr->nd > 1 && \
+ ((flags & FORTRAN) != (arrflags & FORTRAN))) || \
+ ((flags & WRITEABLE) && (!(arrflags & WRITEABLE)));
+
+ if (copy) {
+ if ((flags & UPDATEIFCOPY) && \
+ (!PyArray_ISWRITEABLE(arr))) {
+ Py_DECREF(newtype);
+ PyErr_SetString(PyExc_ValueError, msg);
+ return NULL;
+ }
+ if ((flags & ENSUREARRAY) && \
+ (subtype != &PyBigArray_Type)) {
+ subtype = &PyArray_Type;
+ }
+ ret = (PyArrayObject *) \
+ PyArray_NewFromDescr(subtype, newtype,
+ arr->nd,
+ arr->dimensions,
+ NULL, NULL,
+ flags & FORTRAN,
+ (PyObject *)arr);
+ if (ret == NULL) return NULL;
+ if (PyArray_CopyInto(ret, arr) == -1)
+ {Py_DECREF(ret); return NULL;}
+ if (flags & UPDATEIFCOPY) {
+ /* source is locked read-only until the copy
+ is written back at deallocation time */
+ ret->flags |= UPDATEIFCOPY;
+ ret->base = (PyObject *)arr;
+ PyArray_FLAGS(ret->base) &= ~WRITEABLE;
+ Py_INCREF(arr);
+ }
+ }
+ /* If no copy then just increase the reference
+ count and return the input */
+ else {
+ if ((flags & ENSUREARRAY) && \
+ (subtype != &PyBigArray_Type)) {
+ /* make a base-class view over the same data */
+ Py_DECREF(newtype);
+ Py_INCREF(arr->descr);
+ ret = (PyArrayObject *) \
+ PyArray_NewFromDescr(&PyArray_Type,
+ arr->descr,
+ arr->nd,
+ arr->dimensions,
+ arr->strides,
+ arr->data,
+ arr->flags,NULL);
+ if (ret == NULL) return NULL;
+ ret->base = (PyObject *)arr;
+ }
+ else {
+ ret = arr;
+ }
+ Py_INCREF(arr);
+ }
+ }
+
+ /* The desired output type is different than the input
+ array type */
+ else {
+ /* Cast to the desired type if we can do it safely
+ Also cast if source is a ndim-0 array to mimic
+ behavior with Python scalars */
+ if (flags & FORCECAST || PyArray_NDIM(arr)==0 ||
+ PyArray_CanCastTo(oldtype, newtype)) {
+ if ((flags & UPDATEIFCOPY) && \
+ (!PyArray_ISWRITEABLE(arr))) {
+ Py_DECREF(newtype);
+ PyErr_SetString(PyExc_ValueError, msg);
+ return NULL;
+ }
+ if ((flags & ENSUREARRAY) && \
+ (subtype != &PyBigArray_Type)) {
+ subtype = &PyArray_Type;
+ }
+ ret = (PyArrayObject *)\
+ PyArray_NewFromDescr(subtype,
+ newtype,
+ arr->nd,
+ arr->dimensions,
+ NULL, NULL,
+ flags & FORTRAN,
+ (PyObject *)arr);
+ if (ret == NULL) return NULL;
+ if (PyArray_CastTo(ret, arr) < 0) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+ if (flags & UPDATEIFCOPY) {
+ ret->flags |= UPDATEIFCOPY;
+ ret->base = (PyObject *)arr;
+ PyArray_FLAGS(ret->base) &= ~WRITEABLE;
+ Py_INCREF(arr);
+ }
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "array cannot be safely cast " \
+ "to required type");
+ ret = NULL;
+ }
+ }
+ return (PyObject *)ret;
+}
+
+/* new reference */
+/* Parse an array-protocol typestring like "<i4" (byteorder char,
+ typekind char, byte size) into a descriptor. Returns a new reference
+ or NULL with ValueError set for unsupported strings. */
+static PyArray_Descr *
+_array_typedescr_fromstr(char *str)
+{
+ PyArray_Descr *descr;
+ int type_num;
+ char typechar;
+ int size;
+ char msg[] = "unsupported typestring";
+ int swap;
+ char swapchar;
+
+ swapchar = str[0];
+ str += 1;
+
+#define _MY_FAIL { \
+ PyErr_SetString(PyExc_ValueError, msg); \
+ return NULL; \
+ }
+
+ typechar = str[0];
+ size = atoi(str + 1);
+ /* map (kind, byte size) to the first builtin type that matches on
+ this platform; order determines the preference among same-size
+ types */
+ switch (typechar) {
+ case 'b':
+ if (size == sizeof(Bool))
+ type_num = PyArray_BOOL;
+ else _MY_FAIL
+ break;
+ case 'u':
+ if (size == sizeof(uintp))
+ type_num = PyArray_UINTP;
+ else if (size == sizeof(char))
+ type_num = PyArray_UBYTE;
+ else if (size == sizeof(short))
+ type_num = PyArray_USHORT;
+ else if (size == sizeof(ulong))
+ type_num = PyArray_ULONG;
+ else if (size == sizeof(int))
+ type_num = PyArray_UINT;
+ else if (size == sizeof(ulonglong))
+ type_num = PyArray_ULONGLONG;
+ else _MY_FAIL
+ break;
+ case 'i':
+ if (size == sizeof(intp))
+ type_num = PyArray_INTP;
+ else if (size == sizeof(char))
+ type_num = PyArray_BYTE;
+ else if (size == sizeof(short))
+ type_num = PyArray_SHORT;
+ else if (size == sizeof(long))
+ type_num = PyArray_LONG;
+ else if (size == sizeof(int))
+ type_num = PyArray_INT;
+ else if (size == sizeof(longlong))
+ type_num = PyArray_LONGLONG;
+ else _MY_FAIL
+ break;
+ case 'f':
+ if (size == sizeof(float))
+ type_num = PyArray_FLOAT;
+ else if (size == sizeof(double))
+ type_num = PyArray_DOUBLE;
+ else if (size == sizeof(longdouble))
+ type_num = PyArray_LONGDOUBLE;
+ else _MY_FAIL
+ break;
+ case 'c':
+ if (size == sizeof(float)*2)
+ type_num = PyArray_CFLOAT;
+ else if (size == sizeof(double)*2)
+ type_num = PyArray_CDOUBLE;
+ else if (size == sizeof(longdouble)*2)
+ type_num = PyArray_CLONGDOUBLE;
+ else _MY_FAIL
+ break;
+ case 'O':
+ if (size == sizeof(PyObject *))
+ type_num = PyArray_OBJECT;
+ else _MY_FAIL
+ break;
+ case 'S':
+ type_num = PyArray_STRING;
+ break;
+ case 'U':
+ type_num = PyArray_UNICODE;
+ /* typestring counts characters; descriptor wants bytes */
+ size *= sizeof(Py_UNICODE);
+ break;
+ case 'V':
+ type_num = PyArray_VOID;
+ break;
+ default:
+ _MY_FAIL
+ }
+
+#undef _MY_FAIL
+
+ descr = PyArray_DescrFromType(type_num);
+ if (descr == NULL) return NULL;
+ swap = !PyArray_ISNBO(swapchar);
+ if (descr->elsize == 0 || swap) {
+ /* Need to make a new PyArray_Descr */
+ PyArray_DESCR_REPLACE(descr);
+ if (descr==NULL) return NULL;
+ if (descr->elsize == 0)
+ descr->elsize = size;
+ if (swap)
+ descr->byteorder = swapchar;
+ }
+ return descr;
+}
+
+/* steals a reference to intype unless NotImplemented */
+/* Build an array from an object exposing the __array_struct__
+ C-interface (a CObject wrapping a PyArrayInterface, version 2).
+ Returns Py_NotImplemented (borrowed, as callers expect) when the
+ attribute is absent, NULL on error, or the converted array. */
+static PyObject *
+array_fromstructinterface(PyObject *input, PyArray_Descr *intype, int flags)
+{
+ PyArray_Descr *thetype;
+ char buf[40];
+ PyArrayInterface *inter;
+ PyObject *attr, *r, *ret;
+ char endian = PyArray_NATBYTE;
+
+ attr = PyObject_GetAttrString(input, "__array_struct__");
+ if (attr == NULL) {
+ PyErr_Clear();
+ return Py_NotImplemented;
+ }
+ if (!PyCObject_Check(attr) || \
+ ((inter=((PyArrayInterface *)\
+ PyCObject_AsVoidPtr(attr)))->version != 2)) {
+ PyErr_SetString(PyExc_ValueError, "invalid __array_struct__");
+ Py_XDECREF(intype);
+ Py_DECREF(attr);
+ return NULL;
+ }
+ /* normalize: record the foreign byte order in the typestring */
+ if ((inter->flags & NOTSWAPPED) != NOTSWAPPED) {
+ endian = PyArray_OPPBYTE;
+ inter->flags &= ~NOTSWAPPED;
+ }
+
+ snprintf(buf, 40, "%c%c%d", endian, inter->typekind, inter->itemsize);
+ if (!(thetype=_array_typedescr_fromstr(buf))) {
+ Py_XDECREF(intype);
+ Py_DECREF(attr);
+ return NULL;
+ }
+
+ r = PyArray_NewFromDescr(&PyArray_Type, thetype,
+ inter->nd, inter->shape,
+ inter->strides, inter->data,
+ inter->flags, NULL);
+ /* BUGFIX: the result was dereferenced without a NULL check.
+ (thetype has already been stolen by NewFromDescr.) */
+ if (r == NULL) {
+ Py_XDECREF(intype);
+ Py_DECREF(attr);
+ return NULL;
+ }
+ Py_INCREF(input);
+ PyArray_BASE(r) = input;
+ Py_DECREF(attr);
+ PyArray_UpdateFlags((PyArrayObject *)r, UPDATE_ALL_FLAGS);
+ ret = array_fromarray((PyArrayObject*)r, intype, flags);
+ Py_DECREF(r);
+ return ret;
+}
+
+/* steals a reference to intype unless NotImplemented */
+/* Build an array from an object exposing the Python-level array
+ protocol attributes (__array_data__/__array_offset__ or the buffer
+ interface, __array_shape__, __array_typestr__, __array_strides__).
+ Returns Py_NotImplemented (borrowed) when the required attributes
+ are absent, NULL on error, or the converted array. */
+static PyObject *
+array_frominterface(PyObject *input, PyArray_Descr *intype, int flags)
+{
+ PyObject *attr=NULL, *item=NULL, *r;
+ PyObject *tstr=NULL, *shape=NULL;
+ PyArrayObject *ret=NULL;
+ PyArray_Descr *type=NULL;
+ char *data;
+ int buffer_len;
+ int res, i, n;
+ intp dims[MAX_DIMS], strides[MAX_DIMS];
+ int dataflags = BEHAVED_FLAGS;
+
+ /* Get the memory from __array_data__ and __array_offset__ */
+ /* Get the shape */
+ /* Get the typestring -- ignore array_descr */
+ /* Get the strides */
+
+ shape = PyObject_GetAttrString(input, "__array_shape__");
+ if (shape == NULL) {PyErr_Clear(); return Py_NotImplemented;}
+ tstr = PyObject_GetAttrString(input, "__array_typestr__");
+ if (tstr == NULL) {Py_DECREF(shape); PyErr_Clear(); return Py_NotImplemented;}
+
+ attr = PyObject_GetAttrString(input, "__array_data__");
+ if ((attr == NULL) || (attr==Py_None) || (!PyTuple_Check(attr))) {
+ /* buffer-interface form: use __array_data__ itself (or the
+ input) as the buffer, writable if possible */
+ if (attr && (attr != Py_None)) item=attr;
+ else item=input;
+ res = PyObject_AsWriteBuffer(item, (void **)&data,
+ &buffer_len);
+ if (res < 0) {
+ PyErr_Clear();
+ res = PyObject_AsReadBuffer(item, (const void **)&data,
+ &buffer_len);
+ if (res < 0) goto fail;
+ dataflags &= ~WRITEABLE;
+ }
+ Py_XDECREF(attr);
+ attr = PyObject_GetAttrString(input, "__array_offset__");
+ if (attr) {
+ long num = PyInt_AsLong(attr);
+ if (error_converting(num)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_offset__ "\
+ "must be an integer");
+ goto fail;
+ }
+ data += num;
+ }
+ else PyErr_Clear();
+ }
+ else {
+ /* 2-tuple form: ("<hex pointer>", read-only flag) */
+ if (PyTuple_GET_SIZE(attr) != 2) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_data__ must return " \
+ "a 2-tuple with ('data pointer "\
+ "string', read-only flag)");
+ goto fail;
+ }
+ res = sscanf(PyString_AsString(PyTuple_GET_ITEM(attr,0)),
+ "%p", (void **)&data);
+ if (res < 1) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_data__ string cannot be " \
+ "converted");
+ goto fail;
+ }
+ if (PyObject_IsTrue(PyTuple_GET_ITEM(attr,1))) {
+ dataflags &= ~WRITEABLE;
+ }
+ }
+ Py_XDECREF(attr);
+ attr = tstr;
+ if (!PyString_Check(attr)) {
+ PyErr_SetString(PyExc_TypeError, "__array_typestr__ must be a string");
+ Py_INCREF(attr); /* decref'd twice below */
+ goto fail;
+ }
+ type = _array_typedescr_fromstr(PyString_AS_STRING(attr));
+ Py_DECREF(attr); attr=NULL; tstr=NULL;
+ if (type==NULL) goto fail;
+ attr = shape;
+ if (!PyTuple_Check(attr)) {
+ PyErr_SetString(PyExc_TypeError, "__array_shape__ must be a tuple");
+ Py_INCREF(attr); /* decref'd twice below */
+ Py_DECREF(type);
+ goto fail;
+ }
+ n = PyTuple_GET_SIZE(attr);
+ for (i=0; i<n; i++) {
+ item = PyTuple_GET_ITEM(attr, i);
+ dims[i] = PyArray_PyIntAsIntp(item);
+ if (error_converting(dims[i])) break;
+ }
+ Py_DECREF(attr); shape=NULL;
+
+ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, type,
+ n, dims,
+ NULL, data,
+ dataflags, NULL);
+ if (ret == NULL) {Py_XDECREF(intype); return NULL;}
+ /* keep the exporting object alive as long as the view exists */
+ Py_INCREF(input);
+ ret->base = input;
+
+ /* optional strides: override the default C-contiguous layout */
+ attr = PyObject_GetAttrString(input, "__array_strides__");
+ if (attr != NULL && attr != Py_None) {
+ if (!PyTuple_Check(attr)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_strides__ must be a tuple");
+ Py_DECREF(attr);
+ Py_DECREF(ret);
+ Py_XDECREF(intype);
+ return NULL;
+ }
+ if (n != PyTuple_GET_SIZE(attr)) {
+ PyErr_SetString(PyExc_ValueError,
+ "mismatch in length of "\
+ "__array_strides__ and "\
+ "__array_shape__");
+ Py_DECREF(attr);
+ Py_DECREF(ret);
+ Py_XDECREF(intype);
+ return NULL;
+ }
+ for (i=0; i<n; i++) {
+ item = PyTuple_GET_ITEM(attr, i);
+ strides[i] = PyArray_PyIntAsIntp(item);
+ if (error_converting(strides[i])) break;
+ }
+ Py_DECREF(attr);
+ if (PyErr_Occurred()) PyErr_Clear();
+ memcpy(ret->strides, strides, n*sizeof(intp));
+ }
+ else PyErr_Clear();
+ PyArray_UpdateFlags(ret, UPDATE_ALL_FLAGS);
+ r = array_fromarray(ret, intype, flags);
+ Py_DECREF(ret);
+ return r;
+
+ fail:
+ Py_XDECREF(intype);
+ Py_XDECREF(attr);
+ Py_XDECREF(shape);
+ Py_XDECREF(tstr);
+ return NULL;
+}
+
+/* steals a reference to typecode */
+static PyObject *
+array_fromattr(PyObject *op, PyArray_Descr *typecode, int flags)
+{
+ PyObject *new, *r;
+
+ if (typecode == NULL) {
+ new = PyObject_CallMethod(op, "__array__", NULL);
+ } else {
+ PyObject *obj;
+
+ if (PyTypeNum_ISEXTENDED(typecode->type_num)) {
+ obj = PyString_FromFormat("%c%d", typecode->type,
+ typecode->elsize);
+ }
+ else {
+ obj = (PyObject *)(typecode->typeobj); Py_INCREF(obj);
+ }
+ new = PyObject_CallMethod(op, "__array__", "N", obj);
+ }
+ if (new == NULL) {Py_XDECREF(typecode); return NULL;}
+ if (!PyArray_Check(new)) {
+ PyErr_SetString(PyExc_ValueError,
+ "object __array__ method not " \
+ "producing an array");
+ Py_DECREF(new);
+ Py_DECREF(typecode);
+ return NULL;
+ }
+ r = array_fromarray((PyArrayObject *)new, typecode, flags);
+ Py_DECREF(new);
+ return r;
+}
+
/* Steals a reference to newtype --- which can be NULL */
static PyObject *
array_fromobject(PyObject *op, PyArray_Descr *newtype, int min_depth,
                 int max_depth, int flags)
{
        /* This is the main code to make a SciPy array from a Python
           Object.  It is called from lots of different places which
           is why there are so many checks.  The comments try to
           explain some of the checks.

           Conversion strategies are tried in order: already an array,
           array scalar, struct array interface, attribute array interface,
           __array__ method, and finally generic sequence/scalar. */

        PyObject *r=NULL;
        int seq = FALSE;

        /* Is input object already an array? */
        /* This is where the flags are used */
        if (PyArray_Check(op))
                r = array_fromarray((PyArrayObject *)op, newtype, flags);
        else if (PyArray_IsScalar(op, Generic)) {
                /* array scalar -> 0-d array */
                r = PyArray_FromScalar(op, newtype);
        }
        else if ((r = array_fromstructinterface(op, newtype, flags)) != \
                 Py_NotImplemented) {
                /* handled by the struct-based array interface */
        }
        else if ((r = array_frominterface(op, newtype, flags)) != \
                 Py_NotImplemented) {
                /* handled by the attribute-based array interface */
        }
        else if (PyObject_HasAttrString(op, "__array__")) {
                /* Code that returns the object to convert for a non
                   multiarray input object from the __array__ attribute of the
                   object. */
                r = array_fromattr(op, newtype, flags);
        }
        else {
                if (newtype == NULL) {
                        /* no dtype requested: infer one from the object */
                        newtype = _array_find_type(op, NULL, MAX_DIMS);
                }
                if (PySequence_Check(op)) {
                        /* necessary but not sufficient */

                        /* NOTE(review): if Array_FromSequence consumes
                           newtype even on failure, the fallback call below
                           would reuse a dead reference -- verify the
                           callee's reference semantics. */
                        r = Array_FromSequence(op, newtype, flags & FORTRAN,
                                               min_depth, max_depth);
                        if (PyErr_Occurred() && r == NULL)
                                /* It wasn't really a sequence after all.
                                 * Try interpreting it as a scalar */
                                PyErr_Clear();
                        else
                                seq = TRUE;
                }
                if (!seq)
                        r = Array_FromScalar(op, newtype);
        }

        /* If we didn't succeed return NULL */
        if (r == NULL) return NULL;

        /* Be sure we succeed here */

        if(!PyArray_Check(r)) {
                PyErr_SetString(PyExc_RuntimeError,
                                "internal error: array_fromobject "\
                                "not producing an array");
                Py_DECREF(r);
                return NULL;
        }

        /* Enforce the caller's dimensionality bounds (0 means "no bound") */
        if (min_depth != 0 && ((PyArrayObject *)r)->nd < min_depth) {
                PyErr_SetString(PyExc_ValueError,
                                "object of too small depth for desired array");
                Py_DECREF(r);
                return NULL;
        }
        if (max_depth != 0 && ((PyArrayObject *)r)->nd > max_depth) {
                PyErr_SetString(PyExc_ValueError,
                                "object too deep for desired array");
                Py_DECREF(r);
                return NULL;
        }
        return r;
}
+
/* new reference -- accepts NULL for mintype*/
/* Thin public wrapper: find the minimal descriptor that can represent op,
   at least as wide as mintype. */
/*OBJECT_API*/
static PyArray_Descr *
PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype)
{
        return _array_find_type(op, mintype, MAX_DIMS);
}
+
+/*OBJECT_API
+ Return the typecode of the array a Python object would be converted
+ to
+*/
+static int
+PyArray_ObjectType(PyObject *op, int minimum_type)
+{
+ PyArray_Descr *intype;
+ PyArray_Descr *outtype;
+ int ret;
+
+ intype = PyArray_DescrFromType(minimum_type);
+ if (intype == NULL) PyErr_Clear();
+ outtype = _array_find_type(op, intype, MAX_DIMS);
+ ret = outtype->type_num;
+ Py_DECREF(outtype);
+ Py_DECREF(intype);
+ return ret;
+}
+
+
+/* flags is any of
+ CONTIGUOUS,
+ FORTRAN,
+ ALIGNED,
+ WRITEABLE,
+ NOTSWAPPED,
+ ENSURECOPY,
+ UPDATEIFCOPY,
+ FORCECAST,
+ ENSUREARRAY
+
+ or'd (|) together
+
+ Any of these flags present means that the returned array should
+ guarantee that aspect of the array. Otherwise the returned array
+ won't guarantee it -- it will depend on the object as to whether or
+ not it has such features.
+
+ Note that ENSURECOPY is enough
+ to guarantee CONTIGUOUS, ALIGNED and WRITEABLE
+ and therefore it is redundant to include those as well.
+
+   BEHAVED_FLAGS = ALIGNED | WRITEABLE
+   CARRAY_FLAGS  = CONTIGUOUS | BEHAVED_FLAGS
+   FARRAY_FLAGS  = FORTRAN | BEHAVED_FLAGS
+
+ FORTRAN can be set in the FLAGS to request a FORTRAN array.
+ Fortran arrays are always behaved (aligned,
+ notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d).
+
+ UPDATEIFCOPY flag sets this flag in the returned array if a copy is
+ made and the base argument points to the (possibly) misbehaved array.
+ When the new array is deallocated, the original array held in base
+ is updated with the contents of the new array.
+
+ FORCECAST will cause a cast to occur regardless of whether or not
+ it is safe.
+*/
+
+
+/* steals a reference to descr -- accepts NULL */
+/*OBJECT_API*/
+static PyObject *
+PyArray_FromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
+ int max_depth, int requires)
+{
+ if (requires & ENSURECOPY) {
+ requires |= DEFAULT_FLAGS;
+ }
+ if (requires & NOTSWAPPED) {
+ if (!descr && PyArray_Check(op) && \
+ !PyArray_ISNBO(PyArray_DESCR(op)->byteorder)) {
+ descr = PyArray_DescrNew(PyArray_DESCR(op));
+ }
+ else if ((descr && !PyArray_ISNBO(descr->byteorder))) {
+ PyArray_DESCR_REPLACE(descr);
+ }
+ descr->byteorder = PyArray_NATIVE;
+ }
+
+ return array_fromobject(op, descr, min_depth, max_depth,
+ requires);
+}
+
+/* This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0,
+ ENSUREARRAY) */
+/* that special cases Arrays and PyArray_Scalars up front */
+/* It *steals a reference* to the object */
+/* It also guarantees that the result is PyArray_Type or PyBigArray_Type */
+
+/* Because it decrefs op if any conversion needs to take place
+ so it can be used like PyArray_EnsureArray(some_function(...)) */
+
+/*OBJECT_API*/
+static PyObject *
+PyArray_EnsureArray(PyObject *op)
+{
+ PyObject *new;
+
+ if (op == NULL) return NULL;
+
+ if (PyArray_CheckExact(op) || PyBigArray_CheckExact(op)) return op;
+
+ if (PyArray_IsScalar(op, Generic)) {
+ new = PyArray_FromScalar(op, NULL);
+ Py_DECREF(op);
+ return new;
+ }
+ new = PyArray_FROM_OF(op, ENSUREARRAY);
+ Py_DECREF(op);
+ return new;
+}
+
+
+
+/*OBJECT_API
+ Check the type coercion rules.
+*/
+static int
+PyArray_CanCastSafely(int fromtype, int totype)
+{
+ PyArray_Descr *from, *to;
+ register int felsize, telsize;
+
+ if (fromtype == totype) return 1;
+ if (fromtype == PyArray_BOOL) return 1;
+ if (totype == PyArray_BOOL) return 0;
+ if (totype == PyArray_OBJECT || totype == PyArray_VOID) return 1;
+ if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) return 0;
+
+ from = PyArray_DescrFromType(fromtype);
+ to = PyArray_DescrFromType(totype);
+ telsize = to->elsize;
+ felsize = from->elsize;
+ Py_DECREF(from);
+ Py_DECREF(to);
+
+ switch(fromtype) {
+ case PyArray_BYTE:
+ case PyArray_SHORT:
+ case PyArray_INT:
+ case PyArray_LONG:
+ case PyArray_LONGLONG:
+ if (PyTypeNum_ISINTEGER(totype)) {
+ if (PyTypeNum_ISUNSIGNED(totype)) {
+ return (telsize > felsize);
+ }
+ else {
+ return (telsize >= felsize);
+ }
+ }
+ else if (PyTypeNum_ISFLOAT(totype)) {
+ if (felsize < 8)
+ return (telsize > felsize);
+ else
+ return (telsize >= felsize);
+ }
+ else if (PyTypeNum_ISCOMPLEX(totype)) {
+ if (felsize < 8)
+ return ((telsize >> 1) > felsize);
+ else
+ return ((telsize >> 1) >= felsize);
+ }
+ else return totype > fromtype;
+ case PyArray_UBYTE:
+ case PyArray_USHORT:
+ case PyArray_UINT:
+ case PyArray_ULONG:
+ case PyArray_ULONGLONG:
+ if (PyTypeNum_ISINTEGER(totype)) {
+ if (PyTypeNum_ISSIGNED(totype)) {
+ return (telsize > felsize);
+ }
+ else {
+ return (telsize >= felsize);
+ }
+ }
+ else if (PyTypeNum_ISFLOAT(totype)) {
+ if (felsize < 8)
+ return (telsize > felsize);
+ else
+ return (telsize >= felsize);
+ }
+ else if (PyTypeNum_ISCOMPLEX(totype)) {
+ if (felsize < 8)
+ return ((telsize >> 1) > felsize);
+ else
+ return ((telsize >> 1) >= felsize);
+ }
+ else return totype > fromtype;
+ case PyArray_FLOAT:
+ case PyArray_DOUBLE:
+ case PyArray_LONGDOUBLE:
+ if (PyTypeNum_ISCOMPLEX(totype))
+ return ((telsize >> 1) >= felsize);
+ else
+ return (totype > fromtype);
+ case PyArray_CFLOAT:
+ case PyArray_CDOUBLE:
+ case PyArray_CLONGDOUBLE:
+ return (totype > fromtype);
+ case PyArray_STRING:
+ case PyArray_UNICODE:
+ return (totype > fromtype);
+ default:
+ return 0;
+ }
+}
+
+/* leaves reference count alone --- cannot be NULL*/
+/*OBJECT_API*/
+static Bool
+PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to)
+{
+ int fromtype=from->type_num;
+ int totype=to->type_num;
+ Bool ret;
+
+ ret = (Bool) PyArray_CanCastSafely(fromtype, totype);
+ if (ret) { /* Check String and Unicode more closely */
+ if (fromtype == PyArray_STRING) {
+ if (totype == PyArray_STRING) {
+ ret = (from->elsize <= to->elsize);
+ }
+ else if (totype == PyArray_UNICODE) {
+ ret = (from->elsize * sizeof(Py_UNICODE)\
+ <= to->elsize);
+ }
+ }
+ else if (fromtype == PyArray_UNICODE) {
+ if (totype == PyArray_UNICODE) {
+ ret = (from->elsize <= to->elsize);
+ }
+ }
+ /* TODO: If totype is STRING or unicode
+ see if the length is long enough to hold the
+ stringified value of the object.
+ */
+ }
+ return ret;
+}
+
+
+
+/*********************** Element-wise Array Iterator ***********************/
+/* Aided by Peter J. Verveer's nd_image package and scipy's arraymap ****/
+/* and Python's array iterator ***/
+
+
+/*OBJECT_API
+ Get Iterator.
+*/
+static PyObject *
+PyArray_IterNew(PyObject *obj)
+{
+ PyArrayIterObject *it;
+ int i, nd;
+ PyArrayObject *ao = (PyArrayObject *)obj;
+
+ if (!PyArray_Check(ao)) {
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+
+ it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject));
+ PyObject_Init((PyObject *)it, &PyArrayIter_Type);
+ /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/
+ if (it == NULL)
+ return NULL;
+
+ nd = ao->nd;
+ PyArray_UpdateFlags(ao, CONTIGUOUS);
+ it->contiguous = 0;
+ if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1;
+ Py_INCREF(ao);
+ it->ao = ao;
+ it->size = PyArray_SIZE(ao);
+ it->nd_m1 = nd - 1;
+ it->factors[nd-1] = 1;
+ for (i=0; i < nd; i++) {
+ it->dims_m1[i] = it->ao->dimensions[i] - 1;
+ it->strides[i] = it->ao->strides[i];
+ it->backstrides[i] = it->strides[i] * \
+ it->dims_m1[i];
+ if (i > 0)
+ it->factors[nd-i-1] = it->factors[nd-i] * \
+ it->ao->dimensions[nd-i];
+ }
+ PyArray_ITER_RESET(it);
+
+ return (PyObject *)it;
+}
+
+
+/*OBJECT_API
+ Get Iterator that iterates over all but one axis (don't use this with
+ PyArray_ITER_GOTO1D)
+*/
+static PyObject *
+PyArray_IterAllButAxis(PyObject *obj, int axis)
+{
+ PyArrayIterObject *it;
+ it = (PyArrayIterObject *)PyArray_IterNew(obj);
+ if (it == NULL) return NULL;
+
+ /* adjust so that will not iterate over axis */
+ it->contiguous = 0;
+ if (it->size != 0) {
+ it->size /= PyArray_DIM(obj,axis);
+ }
+ it->dims_m1[axis] = 0;
+ it->backstrides[axis] = 0;
+
+ /* (won't fix factors so don't use
+ PyArray_ITER_GOTO1D with this iterator) */
+ return (PyObject *)it;
+}
+
+/* Returns an array scalar holding the element desired */
+
+static PyObject *
+arrayiter_next(PyArrayIterObject *it)
+{
+ PyObject *ret;
+
+ if (it->index < it->size) {
+ ret = PyArray_ToScalar(it->dataptr, it->ao);
+ PyArray_ITER_NEXT(it);
+ return ret;
+ }
+ return NULL;
+}
+
/* tp_dealloc: drop the reference to the backing array and free the
   iterator storage (allocated with _pya_malloc in PyArray_IterNew). */
static void
arrayiter_dealloc(PyArrayIterObject *it)
{
        Py_XDECREF(it->ao);
        _pya_free(it);
}
+
/* mp_length: total number of elements iterated over.
   NOTE(review): the inquiry slot forces a cast from intp to int, which
   can truncate for very large arrays on 64-bit platforms. */
static int
iter_length(PyArrayIterObject *self)
{
        return (int) self->size;
}
+
+
+static PyObject *
+iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind)
+{
+ int index, strides, itemsize;
+ intp count=0;
+ char *dptr, *optr;
+ PyObject *r;
+ int swap;
+ PyArray_CopySwapFunc *copyswap;
+
+
+ if (ind->nd != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "boolean index array should have 1 dimension");
+ return NULL;
+ }
+ index = (ind->dimensions[0]);
+ strides = ind->strides[0];
+ dptr = ind->data;
+ /* Get size of return array */
+ while(index--) {
+ if (*((Bool *)dptr) != 0)
+ count++;
+ dptr += strides;
+ }
+ itemsize = self->ao->descr->elsize;
+ Py_INCREF(self->ao->descr);
+ r = PyArray_NewFromDescr(self->ao->ob_type,
+ self->ao->descr, 1, &count,
+ NULL, NULL,
+ 0, (PyObject *)self->ao);
+ if (r==NULL) return NULL;
+
+ /* Set up loop */
+ optr = PyArray_DATA(r);
+ index = ind->dimensions[0];
+ dptr = ind->data;
+
+ copyswap = self->ao->descr->f->copyswap;
+ /* Loop over Boolean array */
+ swap = !(PyArray_ISNOTSWAPPED(self->ao));
+ while(index--) {
+ if (*((Bool *)dptr) != 0) {
+ copyswap(optr, self->dataptr, swap, itemsize);
+ optr += itemsize;
+ }
+ dptr += strides;
+ PyArray_ITER_NEXT(self);
+ }
+ PyArray_ITER_RESET(self);
+ return r;
+}
+
+static PyObject *
+iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind)
+{
+ intp num;
+ PyObject *r;
+ PyArrayIterObject *ind_it;
+ int itemsize;
+ int swap;
+ char *optr;
+ int index;
+ PyArray_CopySwapFunc *copyswap;
+
+ itemsize = self->ao->descr->elsize;
+ if (ind->nd == 0) {
+ num = *((intp *)ind->data);
+ PyArray_ITER_GOTO1D(self, num);
+ r = PyArray_ToScalar(self->dataptr, self->ao);
+ PyArray_ITER_RESET(self);
+ return r;
+ }
+
+ Py_INCREF(self->ao->descr);
+ r = PyArray_NewFromDescr(self->ao->ob_type, self->ao->descr,
+ ind->nd, ind->dimensions,
+ NULL, NULL,
+ 0, (PyObject *)self->ao);
+ if (r==NULL) return NULL;
+
+ optr = PyArray_DATA(r);
+ ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind);
+ if (ind_it == NULL) {Py_DECREF(r); return NULL;}
+ index = ind_it->size;
+ copyswap = PyArray_DESCR(r)->f->copyswap;
+ swap = !PyArray_ISNOTSWAPPED(self->ao);
+ while(index--) {
+ num = *((intp *)(ind_it->dataptr));
+ if (num < 0) num += self->size;
+ if (num < 0 || num >= self->size) {
+ PyErr_Format(PyExc_IndexError,
+ "index %d out of bounds" \
+ " 0<=index<%d", (int) num,
+ (int) self->size);
+ Py_DECREF(ind_it);
+ Py_DECREF(r);
+ PyArray_ITER_RESET(self);
+ return NULL;
+ }
+ PyArray_ITER_GOTO1D(self, num);
+ copyswap(optr, self->dataptr, swap, itemsize);
+ optr += itemsize;
+ PyArray_ITER_NEXT(ind_it);
+ }
+ Py_DECREF(ind_it);
+ PyArray_ITER_RESET(self);
+ return r;
+}
+
+
/* mp_subscript for the flat iterator: supports Ellipsis, booleans,
   integers, slices, boolean mask arrays and integer index arrays.
   Returns an array scalar or a new 1-d array; NULL on error. */
static PyObject *
iter_subscript(PyArrayIterObject *self, PyObject *ind)
{
        PyArray_Descr *indtype=NULL;
        intp start, step_size;
        intp n_steps;
        PyObject *r;
        char *dptr;
        int size;                 /* element size in bytes */
        PyObject *obj = NULL;
        int swap;
        PyArray_CopySwapFunc *copyswap;

        if (ind == Py_Ellipsis) {
                /* flat[...] is equivalent to flat[:] */
                ind = PySlice_New(NULL, NULL, NULL);
                obj = iter_subscript(self, ind);
                Py_DECREF(ind);
                return obj;
        }
        if (PyTuple_Check(ind)) {
                int len;
                len = PyTuple_GET_SIZE(ind);
                if (len > 1) goto fail;
                /* unwrap a 1-tuple; borrowed reference */
                ind = PyTuple_GET_ITEM(ind, 0);
        }

        /* Tuples >1d not accepted --- i.e. no NewAxis */
        /* Could implement this with adjusted strides
           and dimensions in iterator */

        /* Check for Boolean -- this is first becasue
           Bool is a subclass of Int */
        PyArray_ITER_RESET(self);

        if (PyBool_Check(ind)) {
                if (PyObject_IsTrue(ind)) {
                        /* flat[True] -> first element as a scalar */
                        return PyArray_ToScalar(self->dataptr, self->ao);
                }
                else { /* empty array */
                        intp ii = 0;
                        Py_INCREF(self->ao->descr);
                        r = PyArray_NewFromDescr(self->ao->ob_type,
                                                 self->ao->descr,
                                                 1, &ii,
                                                 NULL, NULL, 0,
                                                 (PyObject *)self->ao);
                        return r;
                }
        }

        /* Check for Integer or Slice */

        if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) {
                start = parse_subindex(ind, &step_size, &n_steps,
                                       self->size);
                if (start == -1)
                        goto fail;
                if (n_steps == RubberIndex || n_steps == PseudoIndex) {
                        PyErr_SetString(PyExc_IndexError,
                                        "cannot use Ellipsis or NewAxes here");
                        goto fail;
                }
                PyArray_ITER_GOTO1D(self, start)
                if (n_steps == SingleIndex) { /* Integer */
                        r = PyArray_ToScalar(self->dataptr, self->ao);
                        PyArray_ITER_RESET(self);
                        return r;
                }
                /* slice: copy the strided selection into a new 1-d array */
                size = self->ao->descr->elsize;
                Py_INCREF(self->ao->descr);
                r = PyArray_NewFromDescr(self->ao->ob_type,
                                         self->ao->descr,
                                         1, &n_steps,
                                         NULL, NULL,
                                         0, (PyObject *)self->ao);
                if (r==NULL) goto fail;
                dptr = PyArray_DATA(r);
                swap = !PyArray_ISNOTSWAPPED(self->ao);
                copyswap = PyArray_DESCR(r)->f->copyswap;
                while(n_steps--) {
                        copyswap(dptr, self->dataptr, swap, size);
                        start += step_size;
                        PyArray_ITER_GOTO1D(self, start)
                        dptr += size;
                }
                PyArray_ITER_RESET(self);
                return r;
        }

        /* convert to INTP array if Integer array scalar or List */

        indtype = PyArray_DescrFromType(PyArray_INTP);
        if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) {
                Py_INCREF(indtype);
                /* PyArray_FromAny steals the extra indtype reference */
                obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST);
                if (obj == NULL) goto fail;
        }
        else {
                Py_INCREF(ind);
                obj = ind;
        }

        if (PyArray_Check(obj)) {
                /* Check for Boolean object */
                if (PyArray_TYPE(obj)==PyArray_BOOL) {
                        r = iter_subscript_Bool(self, (PyArrayObject *)obj);
                        Py_DECREF(indtype);
                }
                /* Check for integer array */
                else if (PyArray_ISINTEGER(obj)) {
                        PyObject *new;
                        /* consumes indtype */
                        new = PyArray_FromAny(obj, indtype, 0, 0,
                                              FORCECAST | ALIGNED);
                        if (new==NULL) goto fail;
                        Py_DECREF(obj);
                        obj = new;
                        r = iter_subscript_int(self, (PyArrayObject *)obj);
                }
                else {
                        goto fail;
                }
                Py_DECREF(obj);
                return r;
        }
        else Py_DECREF(indtype);


 fail:
        if (!PyErr_Occurred())
                PyErr_SetString(PyExc_IndexError, "unsupported iterator index");
        Py_XDECREF(indtype);
        Py_XDECREF(obj);
        return NULL;

}
+
+
+static int
+iter_ass_sub_Bool(PyArrayIterObject *self, PyArrayObject *ind,
+ PyArrayIterObject *val, int swap)
+{
+ int index, strides, itemsize;
+ char *dptr;
+ PyArray_CopySwapFunc *copyswap;
+
+ if (ind->nd != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "boolean index array should have 1 dimension");
+ return -1;
+ }
+ itemsize = self->ao->descr->elsize;
+ index = ind->dimensions[0];
+ strides = ind->strides[0];
+ dptr = ind->data;
+ PyArray_ITER_RESET(self);
+ /* Loop over Boolean array */
+ copyswap = self->ao->descr->f->copyswap;
+ while(index--) {
+ if (*((Bool *)dptr) != 0) {
+ copyswap(self->dataptr, val->dataptr, swap,
+ itemsize);
+ PyArray_ITER_NEXT(val);
+ if (val->index==val->size)
+ PyArray_ITER_RESET(val);
+ }
+ dptr += strides;
+ PyArray_ITER_NEXT(self);
+ }
+ PyArray_ITER_RESET(self);
+ return 0;
+}
+
+static int
+iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind,
+ PyArrayIterObject *val, int swap)
+{
+ PyArray_Descr *typecode;
+ intp num;
+ PyArrayIterObject *ind_it;
+ int itemsize;
+ int index;
+ PyArray_CopySwapFunc *copyswap;
+
+ typecode = self->ao->descr;
+ itemsize = typecode->elsize;
+ copyswap = self->ao->descr->f->copyswap;
+ if (ind->nd == 0) {
+ num = *((intp *)ind->data);
+ PyArray_ITER_GOTO1D(self, num);
+ copyswap(self->dataptr, val->dataptr, swap, itemsize);
+ return 0;
+ }
+ ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind);
+ if (ind_it == NULL) return -1;
+ index = ind_it->size;
+ while(index--) {
+ num = *((intp *)(ind_it->dataptr));
+ if (num < 0) num += self->size;
+ if ((num < 0) || (num >= self->size)) {
+ PyErr_Format(PyExc_IndexError,
+ "index %d out of bounds" \
+ " 0<=index<%d", (int) num,
+ (int) self->size);
+ Py_DECREF(ind_it);
+ return -1;
+ }
+ PyArray_ITER_GOTO1D(self, num);
+ copyswap(self->dataptr, val->dataptr, swap, itemsize);
+ PyArray_ITER_NEXT(ind_it);
+ PyArray_ITER_NEXT(val);
+ if (val->index == val->size)
+ PyArray_ITER_RESET(val);
+ }
+ Py_DECREF(ind_it);
+ return 0;
+}
+
/* mp_ass_subscript for the flat iterator: assignment counterpart of
   iter_subscript.  Converts val to an array once, then dispatches on the
   index kind.  Returns 0 on success, -1 on error. */
static int
iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
{
        PyObject *arrval=NULL;
        PyArrayIterObject *val_it=NULL;
        PyArray_Descr *type;
        PyArray_Descr *indtype=NULL;
        int swap, retval=-1;
        int itemsize;
        intp start, step_size;
        intp n_steps;
        PyObject *obj=NULL;
        PyArray_CopySwapFunc *copyswap;


        if (ind == Py_Ellipsis) {
                /* flat[...] = val is flat[:] = val */
                ind = PySlice_New(NULL, NULL, NULL);
                retval = iter_ass_subscript(self, ind, val);
                Py_DECREF(ind);
                return retval;
        }

        if (PyTuple_Check(ind)) {
                int len;
                len = PyTuple_GET_SIZE(ind);
                if (len > 1) goto finish;
                /* unwrap a 1-tuple; borrowed reference */
                ind = PyTuple_GET_ITEM(ind, 0);
        }

        type = self->ao->descr;
        itemsize = type->elsize;

        /* convert val once to the destination dtype */
        Py_INCREF(type);
        arrval = PyArray_FromAny(val, type, 0, 0, 0);
        if (arrval==NULL) return -1;
        val_it = (PyArrayIterObject *)PyArray_IterNew(arrval);
        if (val_it==NULL) goto finish;

        /* Check for Boolean -- this is first becasue
           Bool is a subclass of Int */

        copyswap = PyArray_DESCR(arrval)->f->copyswap;
        /* swap only when source and destination byte orders differ */
        swap = (PyArray_ISNOTSWAPPED(self->ao)!=PyArray_ISNOTSWAPPED(arrval));
        if (PyBool_Check(ind)) {
                if (PyObject_IsTrue(ind)) {
                        /* flat[True] = val assigns the first element */
                        copyswap(self->dataptr, PyArray_DATA(arrval),
                                  swap, itemsize);
                }
                retval=0;
                goto finish;
        }

        /* Check for Integer or Slice */

        if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) {
                start = parse_subindex(ind, &step_size, &n_steps,
                                       self->size);
                if (start == -1) goto finish;
                if (n_steps == RubberIndex || n_steps == PseudoIndex) {
                        PyErr_SetString(PyExc_IndexError,
                                        "cannot use Ellipsis or NewAxes here");
                        goto finish;
                }
                PyArray_ITER_GOTO1D(self, start);
                if (n_steps == SingleIndex) { /* Integer */
                        copyswap(self->dataptr, PyArray_DATA(arrval),
                                 swap, itemsize);
                        PyArray_ITER_RESET(self);
                        retval=0;
                        goto finish;
                }
                /* slice: strided assignment, cycling val as needed */
                while(n_steps--) {
                        copyswap(self->dataptr, val_it->dataptr,
                                 swap, itemsize);
                        start += step_size;
                        PyArray_ITER_GOTO1D(self, start)
                        PyArray_ITER_NEXT(val_it);
                        if (val_it->index == val_it->size)
                                PyArray_ITER_RESET(val_it);
                }
                PyArray_ITER_RESET(self);
                retval = 0;
                goto finish;
        }

        /* convert to INTP array if Integer array scalar or List */

        indtype = PyArray_DescrFromType(PyArray_INTP);
        if (PyArray_IsScalar(ind, Integer)) {
                Py_INCREF(indtype);
                obj = PyArray_FromScalar(ind, indtype);
        }
        else if (PyList_Check(ind)) {
                Py_INCREF(indtype);
                obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST);
        }
        else {
                Py_INCREF(ind);
                obj = ind;
        }

        if (PyArray_Check(obj)) {
                /* Check for Boolean object */
                if (PyArray_TYPE(obj)==PyArray_BOOL) {
                        if (iter_ass_sub_Bool(self, (PyArrayObject *)obj,
                                              val_it, swap) < 0)
                                goto finish;
                        retval=0;
                }
                /* Check for integer array */
                else if (PyArray_ISINTEGER(obj)) {
                        PyObject *new;
                        Py_INCREF(indtype);
                        /* consumes the extra indtype reference */
                        new = PyArray_FromAny(obj, indtype, 0, 0,
                                              FORCECAST | BEHAVED_FLAGS);
                        Py_DECREF(obj);
                        obj = new;
                        if (new==NULL) goto finish;
                        if (iter_ass_sub_int(self, (PyArrayObject *)obj,
                                             val_it, swap) < 0)
                                goto finish;
                        retval=0;
                }
        }

 finish:
        if (!PyErr_Occurred() && retval < 0)
                PyErr_SetString(PyExc_IndexError,
                                "unsupported iterator index");
        Py_XDECREF(indtype);
        Py_XDECREF(obj);
        Py_XDECREF(val_it);
        Py_XDECREF(arrval);
        return retval;

}
+
+
/* Mapping protocol table for the flat iterator: len(), indexing and
   index assignment. */
static PyMappingMethods iter_as_mapping = {
        (inquiry)iter_length,              /*mp_length*/
        (binaryfunc)iter_subscript,        /*mp_subscript*/
        (objobjargproc)iter_ass_subscript, /*mp_ass_subscript*/
};
+
static char doc_iter_array[] = "__array__(type=None)\n Get array "\
        "from iterator";

/* __array__ for the flat iterator: a 1-d view of the underlying array
   when it is contiguous, otherwise a contiguous copy with UPDATEIFCOPY
   set so changes are written back when the copy is deallocated. */
static PyObject *
iter_array(PyArrayIterObject *it, PyObject *op)
{

        PyObject *r;
        intp size;

        /* Any argument ignored */

        /* Two options:
            1) underlying array is contiguous
               -- return 1-d wrapper around it
            2) underlying array is not contiguous
               -- make new 1-d contiguous array with updateifcopy flag set
                  to copy back to the old array
        */

        size = PyArray_SIZE(it->ao);
        Py_INCREF(it->ao->descr);
        if (PyArray_ISCONTIGUOUS(it->ao)) {
                /* view sharing the original data */
                r = PyArray_NewFromDescr(it->ao->ob_type,
                                         it->ao->descr,
                                         1, &size,
                                         NULL, it->ao->data,
                                         it->ao->flags,
                                         (PyObject *)it->ao);
                if (r==NULL) return NULL;
        }
        else {
                r = PyArray_NewFromDescr(it->ao->ob_type,
                                         it->ao->descr,
                                         1, &size,
                                         NULL, NULL,
                                         0, (PyObject *)it->ao);
                if (r==NULL) return NULL;
                if (PyArray_CopyInto((PyArrayObject *)r, it->ao) < 0) {
                        Py_DECREF(r);
                        return NULL;
                }
                /* original becomes read-only until the copy writes back */
                PyArray_FLAGS(r) |= UPDATEIFCOPY;
                it->ao->flags &= ~WRITEABLE;
        }
        /* keep the underlying array alive via base */
        Py_INCREF(it->ao);
        PyArray_BASE(r) = (PyObject *)it->ao;
        return r;

}
+
static char doc_iter_copy[] = "copy()\n Get a copy of 1-d array";

/* copy(): flatten the underlying array into a new independent 1-d array
   (C order). */
static PyObject *
iter_copy(PyArrayIterObject *it, PyObject *args)
{
        if (!PyArg_ParseTuple(args, "")) return NULL;
        return PyArray_Flatten(it->ao, 0);
}
+
/* Method table for the flat iterator. */
static PyMethodDef iter_methods[] = {
        /* to get array */
        {"__array__", (PyCFunction)iter_array, 1, doc_iter_array},
        {"copy", (PyCFunction)iter_copy, 1, doc_iter_copy},
        {NULL, NULL}           /* sentinel */
};
+
/* Attribute table: expose the backing array as the read-only .base. */
static PyMemberDef iter_members[] = {
        {"base", T_OBJECT, offsetof(PyArrayIterObject, ao), RO, NULL},
        {NULL},
};
+
/* Type object for the flat iterator returned by .flat; supports the
   iterator protocol (tp_iternext) and the mapping protocol. */
static PyTypeObject PyArrayIter_Type = {
        PyObject_HEAD_INIT(NULL)
        0,                                       /* ob_size */
        "scipy.flatiter",                        /* tp_name */
        sizeof(PyArrayIterObject),               /* tp_basicsize */
        0,                                       /* tp_itemsize */
        /* methods */
        (destructor)arrayiter_dealloc,           /* tp_dealloc */
        0,                                       /* tp_print */
        0,                                       /* tp_getattr */
        0,                                       /* tp_setattr */
        0,                                       /* tp_compare */
        0,                                       /* tp_repr */
        0,                                       /* tp_as_number */
        0,                                       /* tp_as_sequence */
        &iter_as_mapping,                        /* tp_as_mapping */
        0,                                       /* tp_hash */
        0,                                       /* tp_call */
        0,                                       /* tp_str */
        0,                                       /* tp_getattro */
        0,                                       /* tp_setattro */
        0,                                       /* tp_as_buffer */
        Py_TPFLAGS_DEFAULT,                      /* tp_flags */
        0,                                       /* tp_doc */
        0,                                       /* tp_traverse */
        0,                                       /* tp_clear */
        0,                                       /* tp_richcompare */
        0,                                       /* tp_weaklistoffset */
        0,                                       /* tp_iter */
        (iternextfunc)arrayiter_next,            /* tp_iternext */
        iter_methods,                            /* tp_methods */
        iter_members,                            /* tp_members */
        0,                                       /* tp_getset */

};
+
+/** END of Array Iterator **/
+
+
+
+/*********************** Subscript Array Iterator *************************
+ * *
+ * This object handles subscript behavior for array objects. *
+ * It is an iterator object with a next method *
+ * It abstracts the n-dimensional mapping behavior to make the looping *
+ * code more understandable (maybe) *
+ * and so that indexing can be set up ahead of time *
+ */
+
/* This checks the args for any fancy indexing objects */

/* Classification of an indexing expression: */
#define SOBJ_NOTFANCY 0   /* plain indexing (ints, slices, Ellipsis, None) */
#define SOBJ_ISFANCY 1    /* fancy indexing (index arrays/sequences) */
#define SOBJ_BADARRAY 2   /* array index of unusable dtype */
#define SOBJ_TOOMANY 3    /* more indices than MAX_DIMS */
#define SOBJ_LISTTUP 4    /* nested sequence/array inside a sequence index */

/* Classify an index expression; returns one of the SOBJ_* codes above. */
static int
fancy_indexing_check(PyObject *args)
{
        int i, n;
        PyObject *obj;
        int retval = SOBJ_NOTFANCY;

        if (PyTuple_Check(args)) {
                n = PyTuple_GET_SIZE(args);
                if (n >= MAX_DIMS) return SOBJ_TOOMANY;
                for (i=0; i<n; i++) {
                        obj = PyTuple_GET_ITEM(args,i);
                        if (PyArray_Check(obj)) {
                                /* only integer arrays are valid fancy
                                   indices inside a tuple */
                                if (PyArray_ISINTEGER(obj))
                                        retval = SOBJ_ISFANCY;
                                else {
                                        retval = SOBJ_BADARRAY;
                                        break;
                                }
                        }
                        else if (PySequence_Check(obj)) {
                                retval = SOBJ_ISFANCY;
                        }
                }
        }
        else if (PyArray_Check(args)) {
                if ((PyArray_TYPE(args)==PyArray_BOOL) ||
                    (PyArray_ISINTEGER(args)))
                        return SOBJ_ISFANCY;
                else
                        return SOBJ_BADARRAY;
        }
        else if (PySequence_Check(args)) {
                /* Sequences < MAX_DIMS with any slice objects
                   or NewAxis, or Ellipsis is considered standard
                   as long as there are also no Arrays and or additional
                   sequences embedded.
                */
                retval = SOBJ_ISFANCY;
                n = PySequence_Size(args);
                if (n<0 || n>=MAX_DIMS) return SOBJ_ISFANCY;
                for (i=0; i<n; i++) {
                        obj = PySequence_GetItem(args, i);
                        if (obj == NULL) return SOBJ_ISFANCY;
                        if (PyArray_Check(obj)) {
                                if (PyArray_ISINTEGER(obj))
                                        retval = SOBJ_LISTTUP;
                                else
                                        retval = SOBJ_BADARRAY;
                        }
                        else if (PySequence_Check(obj)) {
                                retval = SOBJ_LISTTUP;
                        }
                        else if (PySlice_Check(obj) || obj == Py_Ellipsis || \
                                 obj == Py_None) {
                                retval = SOBJ_NOTFANCY;
                        }
                        Py_DECREF(obj);
                        /* BADARRAY and LISTTUP are terminal */
                        if (retval > SOBJ_ISFANCY) return retval;
                }
        }

        return retval;
}
+
+/* convert an indexing object to an INTP indexing array iterator
+ if possible -- otherwise, it is a Slice or Ellipsis object
+ and has to be interpreted on bind to a particular
+ array so leave it NULL for now.
+ */
+static int
+_convert_obj(PyObject *obj, PyArrayIterObject **iter)
+{
+ PyArray_Descr *indtype;
+ PyObject *arr;
+
+ if (PySlice_Check(obj) || (obj == Py_Ellipsis))
+ *iter = NULL;
+ else {
+ indtype = PyArray_DescrFromType(PyArray_INTP);
+ arr = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST);
+ if (arr == NULL) return -1;
+ *iter = (PyArrayIterObject *)PyArray_IterNew(arr);
+ Py_DECREF(arr);
+ if (*iter == NULL) return -1;
+ }
+ return 0;
+}
+
/* Adjust dimensionality and strides for index object iterators
   --- i.e. broadcast
 */
/*OBJECT_API*/
static int
PyArray_Broadcast(PyArrayMultiIterObject *mit)
{
        int i, nd, k, j;
        intp tmp;
        PyArrayIterObject *it;

        /* Discover the broadcast number of dimensions */
        for (i=0, nd=0; i<mit->numiter; i++)
                nd = MAX(nd, mit->iters[i]->ao->nd);
        mit->nd = nd;

        /* Discover the broadcast shape in each dimension */
        for (i=0; i<nd; i++) {
                mit->dimensions[i] = 1;
                for (j=0; j<mit->numiter; j++) {
                        it = mit->iters[j];
                        /* This prepends 1 to shapes not already
                           equal to nd */
                        k = i + it->ao->nd - nd;
                        if (k>=0) {
                                tmp = it->ao->dimensions[k];
                                /* length-1 axes broadcast against anything */
                                if (tmp == 1) continue;
                                if (mit->dimensions[i] == 1)
                                        mit->dimensions[i] = tmp;
                                else if (mit->dimensions[i] != tmp) {
                                        PyErr_SetString(PyExc_ValueError,
                                                        "index objects are " \
                                                        "not broadcastable " \
                                                        "to a single shape");
                                        return -1;
                                }
                        }
                }
        }

        /* Reset the iterator dimensions and strides of each iterator
           object -- using 0 valued strides for broadcasting */

        tmp = PyArray_MultiplyList(mit->dimensions, mit->nd);
        mit->size = tmp;
        for (i=0; i<mit->numiter; i++) {
                it = mit->iters[i];
                it->nd_m1 = mit->nd - 1;
                it->size = tmp;
                nd = it->ao->nd;
                it->factors[mit->nd-1] = 1;
                for (j=0; j < mit->nd; j++) {
                        it->dims_m1[j] = mit->dimensions[j] - 1;
                        k = j + nd - mit->nd;
                        /* If this dimension was added or shape
                           of underlying array was 1 */
                        if ((k < 0) || \
                            it->ao->dimensions[k] != mit->dimensions[j]) {
                                /* broadcast axis: stride 0 repeats data */
                                it->contiguous = 0;
                                it->strides[j] = 0;
                        }
                        else {
                                it->strides[j] = it->ao->strides[k];
                        }
                        it->backstrides[j] = it->strides[j] * \
                                it->dims_m1[j];
                        if (j > 0)
                                it->factors[mit->nd-j-1] = \
                                        it->factors[mit->nd-j] * \
                                        mit->dimensions[mit->nd-j];
                }
                PyArray_ITER_RESET(it);
        }
        return 0;
}
+
/* Reset the map iterator to the beginning.
   Rebuilds the starting coordinate from the index iterators and
   positions the underlying-array iterator (and subspace iterator, if
   any) there. */
static void
PyArray_MapIterReset(PyArrayMapIterObject *mit)
{
        int i,j; intp coord[MAX_DIMS];
        PyArrayIterObject *it;
        PyArray_CopySwapFunc *copyswap;

        mit->index = 0;

        /* index iterators were built over INTP arrays in _convert_obj,
           so copying sizeof(intp) per axis reads one index value */
        copyswap = mit->iters[0]->ao->descr->f->copyswap;

        if (mit->subspace != NULL) {
                /* start from the base coordinate of the subspace, then
                   fill in the fancy-indexed axes */
                memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd);
                PyArray_ITER_RESET(mit->subspace);
                for (i=0; i<mit->numiter; i++) {
                        it = mit->iters[i];
                        PyArray_ITER_RESET(it);
                        j = mit->iteraxes[i];
                        copyswap(coord+j,it->dataptr,
                                 !PyArray_ISNOTSWAPPED(it->ao),
                                 sizeof(intp));
                }
                PyArray_ITER_GOTO(mit->ait, coord);
                mit->subspace->dataptr = mit->ait->dataptr;
                mit->dataptr = mit->subspace->dataptr;
        }
        else {
                /* no subspace: every axis is fancy-indexed */
                for (i=0; i<mit->numiter; i++) {
                        it = mit->iters[i];
                        PyArray_ITER_RESET(it);
                        copyswap(coord+i,it->dataptr,
                                 !PyArray_ISNOTSWAPPED(it->ao),
                                 sizeof(intp));
                }
                PyArray_ITER_GOTO(mit->ait, coord);
                mit->dataptr = mit->ait->dataptr;
        }
        return;
}
+
/* This function needs to update the state of the map iterator
   and point mit->dataptr to the memory-location of the next object.
   With a subspace, the subspace iterator is cycled first and the index
   iterators advance only when it wraps around. */
static void
PyArray_MapIterNext(PyArrayMapIterObject *mit)
{
        int i, j;
        intp coord[MAX_DIMS];
        PyArrayIterObject *it;
        PyArray_CopySwapFunc *copyswap;

        mit->index += 1;
        if (mit->index >= mit->size) return;   /* exhausted */
        copyswap = mit->iters[0]->ao->descr->f->copyswap;
        /* Sub-space iteration */
        if (mit->subspace != NULL) {
                PyArray_ITER_NEXT(mit->subspace);
                if (mit->subspace->index == mit->subspace->size) {
                        /* reset coord to coordinates of
                           beginning of the subspace */
                        memcpy(coord, mit->bscoord,
                               sizeof(intp)*mit->ait->ao->nd);
                        PyArray_ITER_RESET(mit->subspace);
                        /* advance the fancy indices and rebuild the
                           coordinate on the indexed axes */
                        for (i=0; i<mit->numiter; i++) {
                                it = mit->iters[i];
                                PyArray_ITER_NEXT(it);
                                j = mit->iteraxes[i];
                                copyswap(coord+j,it->dataptr,
                                         !PyArray_ISNOTSWAPPED(it->ao),
                                         sizeof(intp));
                        }
                        PyArray_ITER_GOTO(mit->ait, coord);
                        mit->subspace->dataptr = mit->ait->dataptr;
                }
                mit->dataptr = mit->subspace->dataptr;
        }
        else {
                /* no subspace: all index iterators advance in lockstep */
                for (i=0; i<mit->numiter; i++) {
                        it = mit->iters[i];
                        PyArray_ITER_NEXT(it);
                        copyswap(coord+i,it->dataptr,
                                 !PyArray_ISNOTSWAPPED(it->ao),
                                 sizeof(intp));
                }
                PyArray_ITER_GOTO(mit->ait, coord);
                mit->dataptr = mit->ait->dataptr;
        }
        return;
}
+
+/* Bind a mapiteration to a particular array */
+
+/* Determine if subspace iteration is necessary. If so,
+ 1) Fill in mit->iteraxes
+ 2) Create subspace iterator
+ 3) Update nd, dimensions, and size.
+
+ Subspace iteration is necessary if: arr->nd > mit->numiter
+*/
+
+/* Need to check for index-errors somewhere.
+
+ Let's do it at bind time and also convert all <0 values to >0 here
+ as well.
+*/
+/* On failure an exception is set and mit->ait / mit->subspace are
+ cleared; callers must check PyErr_Occurred() since the function
+ returns void. */
+static void
+PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
+{
+ int subnd;
+ PyObject *sub, *obj=NULL;
+ int i, j, n, curraxis, ellipexp, noellip;
+ PyArrayIterObject *it;
+ intp dimsize;
+ intp *indptr;
+
+ subnd = arr->nd - mit->numiter;
+ if (subnd < 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "too many indices for array");
+ return;
+ }
+
+ mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
+ if (mit->ait == NULL) return;
+
+ /* If this is just a view, then do nothing more */
+ /* views are handled by just adjusting the strides
+ and dimensions of the object.
+ */
+
+ if (mit->view) return;
+
+ /* no subspace iteration needed. Finish up and Return */
+ if (subnd == 0) {
+ n = arr->nd;
+ for (i=0; i<n; i++) {
+ mit->iteraxes[i] = i;
+ }
+ goto finish;
+ }
+
+ /* all indexing arrays have been converted to 0
+ therefore we can extract the subspace with a simple
+ getitem call which will use view semantics
+ */
+
+ sub = PyObject_GetItem((PyObject *)arr, mit->indexobj);
+ if (sub == NULL) goto fail;
+ mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub);
+ Py_DECREF(sub);
+ if (mit->subspace == NULL) goto fail;
+
+ /* Expand dimensions of result */
+ n = mit->subspace->ao->nd;
+ for (i=0; i<n; i++)
+ mit->dimensions[mit->nd+i] = mit->subspace->ao->dimensions[i];
+ mit->nd += n;
+
+ /* Now, we still need to interpret the ellipsis and slice objects
+ to determine which axes the indexing arrays are referring to
+ */
+ n = PyTuple_GET_SIZE(mit->indexobj);
+
+ /* The number of dimensions an ellipsis takes up */
+ ellipexp = arr->nd - n + 1;
+ /* Now fill in iteraxes -- remember indexing arrays have been
+ converted to 0's in mit->indexobj */
+ curraxis = 0;
+ j = 0;
+ noellip = 1; /* Only expand the first ellipsis */
+ memset(mit->bscoord, 0, sizeof(intp)*arr->nd);
+ for (i=0; i<n; i++) {
+ /* We need to fill in the starting coordinates for
+ the subspace */
+ obj = PyTuple_GET_ITEM(mit->indexobj, i);
+ /* a 0 placeholder (int) marks a fancy-indexed axis */
+ if (PyInt_Check(obj) || PyLong_Check(obj))
+ mit->iteraxes[j++] = curraxis++;
+ else if (noellip && obj == Py_Ellipsis) {
+ curraxis += ellipexp;
+ noellip = 0;
+ }
+ else {
+ intp start=0;
+ intp stop, step;
+ /* Should be slice object or
+ another Ellipsis */
+ if (obj == Py_Ellipsis) {
+ /* any later ellipsis covers a single axis */
+ mit->bscoord[curraxis] = 0;
+ }
+ else if (!PySlice_Check(obj) || \
+ (slice_GetIndices((PySliceObject *)obj,
+ arr->dimensions[curraxis],
+ &start, &stop, &step,
+ &dimsize) < 0)) {
+ PyErr_Format(PyExc_ValueError,
+ "unexpected object " \
+ "(%s) in selection position %d",
+ obj->ob_type->tp_name, i);
+ goto fail;
+ }
+ else {
+ /* slice start is the subspace origin
+ along this axis */
+ mit->bscoord[curraxis] = start;
+ }
+ curraxis += 1;
+ }
+ }
+ finish:
+ /* Here check the indexes (now that we have iteraxes) */
+ /* also normalizes negative indices in-place in the index arrays */
+ mit->size = PyArray_MultiplyList(mit->dimensions, mit->nd);
+ for (i=0; i<mit->numiter; i++) {
+ it = mit->iters[i];
+ PyArray_ITER_RESET(it);
+ dimsize = arr->dimensions[mit->iteraxes[i]];
+ while(it->index < it->size) {
+ indptr = ((intp *)it->dataptr);
+ if (*indptr < 0) *indptr += dimsize;
+ if (*indptr < 0 || *indptr >= dimsize) {
+ PyErr_Format(PyExc_IndexError,
+ "index (%d) out of range "\
+ "(0<=index<=%d) in dimension %d",
+ (int) *indptr, (int) (dimsize-1),
+ mit->iteraxes[i]);
+ goto fail;
+ }
+ PyArray_ITER_NEXT(it);
+ }
+ PyArray_ITER_RESET(it);
+ }
+ return;
+
+ fail:
+ Py_XDECREF(mit->subspace);
+ Py_XDECREF(mit->ait);
+ mit->subspace = NULL;
+ mit->ait = NULL;
+ return;
+}
+
+/* This function takes a Boolean array and constructs index objects and
+ iterators as if nonzero(Bool) had been called
+
+ Fills iters[0..nd-1] with iterators over newly-created intp index
+ arrays (one per dimension of the Boolean array). Returns the number
+ of dimensions, or -1 with an exception set on failure.
+*/
+static int
+_nonzero_indices(PyObject *myBool, PyArrayIterObject **iters)
+{
+ PyArray_Descr *typecode;
+ PyArrayObject *ba =NULL, *new=NULL;
+ int nd, j;
+ intp size, i, count;
+ Bool *ptr;
+ intp coords[MAX_DIMS], dims_m1[MAX_DIMS];
+ intp *dptr[MAX_DIMS];
+
+ typecode=PyArray_DescrFromType(PyArray_BOOL);
+ ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0,
+ CARRAY_FLAGS);
+ if (ba == NULL) return -1;
+ nd = ba->nd;
+ for (j=0; j<nd; j++) iters[j] = NULL;
+ size = PyArray_SIZE(ba);
+ ptr = (Bool *)ba->data;
+ count = 0;
+
+ /* pre-determine how many nonzero entries there are */
+ for (i=0; i<size; i++)
+ if (*(ptr++)) count++;
+
+ /* create count-sized index arrays for each dimension */
+ for (j=0; j<nd; j++) {
+ new = (PyArrayObject *)PyArray_New(&PyArray_Type, 1, &count,
+ PyArray_INTP, NULL, NULL,
+ 0, 0, NULL);
+ if (new == NULL) goto fail;
+ /* the iterator holds the only reference to the new array */
+ iters[j] = (PyArrayIterObject *) \
+ PyArray_IterNew((PyObject *)new);
+ Py_DECREF(new);
+ if (iters[j] == NULL) goto fail;
+ dptr[j] = (intp *)iters[j]->ao->data;
+ coords[j] = 0;
+ dims_m1[j] = ba->dimensions[j]-1;
+ }
+
+ ptr = (Bool *)ba->data;
+
+ if (count == 0) goto finish;
+
+ /* Loop through the Boolean array and copy coordinates
+ for non-zero entries */
+ for (i=0; i<size; i++) {
+ if (*(ptr++)) {
+ for (j=0; j<nd; j++)
+ *(dptr[j]++) = coords[j];
+ }
+ /* Borrowed from ITER_NEXT macro */
+ /* odometer-style advance of the C-contiguous coordinate */
+ for (j=nd-1; j>=0; j--) {
+ if (coords[j] < dims_m1[j]) {
+ coords[j]++;
+ break;
+ }
+ else {
+ coords[j] = 0;
+ }
+ }
+ }
+
+ finish:
+ Py_DECREF(ba);
+ return nd;
+
+ fail:
+ for (j=0; j<nd; j++) {
+ Py_XDECREF(iters[j]);
+ }
+ Py_XDECREF(ba);
+ return -1;
+
+}
+
+/* Create a new map (fancy-indexing) iterator for indexobj.
+
+ Returns a new PyArrayMapIterObject or NULL with an exception set.
+ If the index is not fancy, mit->view is set and the caller should
+ use view semantics instead. If oned is nonzero the caller only
+ needs the (converted) index object for the 1-d special case, so
+ no iterators are constructed and no broadcasting is done.
+*/
+static PyObject *
+PyArray_MapIterNew(PyObject *indexobj, int oned)
+{
+ PyArrayMapIterObject *mit;
+ int fancy=0;
+ PyArray_Descr *indtype;
+ PyObject *arr = NULL;
+ int i, n, started, nonindex;
+
+
+ mit = (PyArrayMapIterObject *)_pya_malloc(sizeof(PyArrayMapIterObject));
+ /* BUG FIX: check for allocation failure *before* PyObject_Init
+ dereferences the pointer */
+ if (mit == NULL)
+ return NULL;
+ PyObject_Init((PyObject *)mit, &PyArrayMapIter_Type);
+ for (i=0; i<MAX_DIMS; i++)
+ mit->iters[i] = NULL;
+ mit->view = 0;
+ mit->index = 0;
+ mit->ait = NULL;
+ mit->subspace = NULL;
+ mit->numiter = 0;
+ mit->consec = 1;
+ fancy = fancy_indexing_check(indexobj);
+ Py_INCREF(indexobj);
+ mit->indexobj = indexobj;
+ if (fancy == SOBJ_NOTFANCY) { /* bail out: plain view indexing */
+ mit->view = 1;
+ goto ret;
+ }
+
+ if (fancy == SOBJ_BADARRAY) {
+ PyErr_SetString(PyExc_IndexError, \
+ "arrays used as indices must be of " \
+ "integer type");
+ goto fail;
+ }
+ if (fancy == SOBJ_TOOMANY) {
+ PyErr_SetString(PyExc_IndexError, "too many indices");
+ goto fail;
+ }
+
+ if (fancy == SOBJ_LISTTUP) {
+ /* a list/sequence of indexing objects behaves like a tuple */
+ PyObject *newobj;
+ newobj = PySequence_Tuple(indexobj);
+ if (newobj == NULL) goto fail;
+ Py_DECREF(indexobj);
+ indexobj = newobj;
+ mit->indexobj = indexobj;
+ }
+
+#undef SOBJ_NOTFANCY
+#undef SOBJ_ISFANCY
+#undef SOBJ_BADARRAY
+#undef SOBJ_TOOMANY
+#undef SOBJ_LISTTUP
+
+ if (oned) return (PyObject *)mit;
+
+ /* Must have some kind of fancy indexing if we are here */
+ /* indexobj is either a list, an arrayobject, or a tuple
+ (with at least 1 list or arrayobject or Bool object), */
+
+ /* convert all inputs to iterators */
+ if (PyArray_Check(indexobj) && \
+ (PyArray_TYPE(indexobj) == PyArray_BOOL)) {
+ /* Boolean mask: equivalent to indexing with
+ nonzero(mask) */
+ mit->numiter = _nonzero_indices(indexobj, mit->iters);
+ if (mit->numiter < 0) goto fail;
+ mit->nd = 1;
+ mit->dimensions[0] = mit->iters[0]->dims_m1[0]+1;
+ Py_DECREF(mit->indexobj);
+ mit->indexobj = PyTuple_New(mit->numiter);
+ if (mit->indexobj == NULL) goto fail;
+ for (i=0; i<mit->numiter; i++) {
+ /* 0 placeholders mark the fancy-indexed axes */
+ PyTuple_SET_ITEM(mit->indexobj, i,
+ PyInt_FromLong(0));
+ }
+ }
+
+ else if (PyArray_Check(indexobj) || !PyTuple_Check(indexobj)) {
+ /* a single index array (or something convertible to one) */
+ mit->numiter = 1;
+ indtype = PyArray_DescrFromType(PyArray_INTP);
+ arr = PyArray_FromAny(indexobj, indtype, 0, 0, FORCECAST);
+ if (arr == NULL) goto fail;
+ mit->iters[0] = (PyArrayIterObject *)PyArray_IterNew(arr);
+ if (mit->iters[0] == NULL) {Py_DECREF(arr); goto fail;}
+ mit->nd = PyArray_NDIM(arr);
+ memcpy(mit->dimensions,PyArray_DIMS(arr),mit->nd*sizeof(intp));
+ mit->size = PyArray_SIZE(arr);
+ Py_DECREF(arr);
+ Py_DECREF(mit->indexobj);
+ mit->indexobj = Py_BuildValue("(N)", PyInt_FromLong(0));
+ }
+ else { /* must be a tuple */
+ PyObject *obj;
+ PyArrayIterObject *iter;
+ PyObject *new;
+ /* Make a copy of the tuple -- we will be replacing
+ index objects with 0's */
+ n = PyTuple_GET_SIZE(indexobj);
+ new = PyTuple_New(n);
+ if (new == NULL) goto fail;
+ started = 0;
+ nonindex = 0;
+ for (i=0; i<n; i++) {
+ obj = PyTuple_GET_ITEM(indexobj,i);
+ if (_convert_obj(obj, &iter) < 0) {
+ Py_DECREF(new);
+ goto fail;
+ }
+ if (iter!= NULL) {
+ started = 1;
+ /* a slice/ellipsis *between* index arrays
+ breaks consecutivity */
+ if (nonindex) mit->consec = 0;
+ mit->iters[(mit->numiter)++] = iter;
+ PyTuple_SET_ITEM(new,i,
+ PyInt_FromLong(0));
+ }
+ else {
+ if (started) nonindex = 1;
+ Py_INCREF(obj);
+ PyTuple_SET_ITEM(new,i,obj);
+ }
+ }
+ Py_DECREF(mit->indexobj);
+ mit->indexobj = new;
+ /* Store the number of iterators actually converted */
+ /* These will be mapped to actual axes at bind time */
+ if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0)
+ goto fail;
+ }
+
+ ret:
+ return (PyObject *)mit;
+
+ fail:
+ Py_DECREF(mit);
+ return NULL;
+}
+
+
+/* Destructor for map iterators: drop all owned references and free
+ the structure (allocated with _pya_malloc in PyArray_MapIterNew). */
+static void
+arraymapiter_dealloc(PyArrayMapIterObject *mit)
+{
+ int i;
+ Py_XDECREF(mit->indexobj);
+ Py_XDECREF(mit->ait);
+ Py_XDECREF(mit->subspace);
+ /* iters[i] may be NULL if construction failed part-way */
+ for (i=0; i<mit->numiter; i++)
+ Py_XDECREF(mit->iters[i]);
+ _pya_free(mit);
+}
+
+/* The mapiter object must be created new each time. It does not work
+ to bind to a new array, and continue.
+
+ This was the original intention, but currently that does not work.
+ Do not expose the MapIter_Type to Python.
+
+ It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a);
+ mapiter is equivalent to a[indexobj].flat but the latter gets to use
+ slice syntax.
+*/
+
+/* Type object for the (internal, not exposed) map iterator. */
+static PyTypeObject PyArrayMapIter_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+ "scipy.mapiter", /* tp_name */
+ /* BUG FIX: basicsize must describe the map-iterator struct,
+ not the plain iterator struct */
+ sizeof(PyArrayMapIterObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)arraymapiter_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ (traverseproc)0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ (iternextfunc)0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0 /* tp_weaklist */
+
+};
+
+/** END of Subscript Iterator **/
+
+
+/*OBJECT_API
+ Get MultiIterator,
+*/
+/* Construct a broadcast (multi) iterator over n array-like varargs.
+ Returns a new PyArrayMultiIterObject or NULL with an exception set.
+*/
+static PyObject *
+PyArray_MultiIterNew(int n, ...)
+{
+ va_list va;
+ PyArrayMultiIterObject *multi;
+ PyObject *current;
+ PyObject *arr;
+
+ int i, err=0;
+
+ if (n < 2 || n > MAX_DIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "Need between 2 and (%d) " \
+ "array objects (inclusive).", MAX_DIMS);
+ /* BUG FIX: previously fell through with an exception
+ set and wrote past/under the iters array bounds */
+ return NULL;
+ }
+
+ /* fprintf(stderr, "multi new...");*/
+ multi = PyObject_New(PyArrayMultiIterObject, &PyArrayMultiIter_Type);
+ if (multi == NULL)
+ return NULL;
+
+ /* pre-clear so dealloc is safe on a partially built object */
+ for (i=0; i<n; i++) multi->iters[i] = NULL;
+ multi->numiter = n;
+ multi->index = 0;
+
+ va_start(va, n);
+ for (i=0; i<n; i++) {
+ current = va_arg(va, PyObject *);
+ arr = PyArray_FROM_O(current);
+ if (arr==NULL) {
+ err=1; break;
+ }
+ else {
+ multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr);
+ Py_DECREF(arr);
+ /* BUG FIX: a failed IterNew was previously ignored,
+ leaving a NULL iterator for Broadcast to crash on */
+ if (multi->iters[i] == NULL) {
+ err=1; break;
+ }
+ }
+ }
+
+ va_end(va);
+
+ if (!err && PyArray_Broadcast(multi) < 0) err=1;
+
+ if (err) {
+ Py_DECREF(multi);
+ return NULL;
+ }
+
+ PyArray_MultiIter_RESET(multi);
+
+ return (PyObject *)multi;
+}
+
+/* tp_new for scipy.broadcast: build a multi-iterator from a tuple of
+ array-likes. Accepts 2..MAX_DIMS positional arguments, no keywords. */
+static PyObject *
+arraymultiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
+{
+
+ int n, i;
+ PyArrayMultiIterObject *multi;
+ PyObject *arr;
+
+ if (kwds != NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "keyword arguments not accepted.");
+ return NULL;
+ }
+
+ n = PyTuple_Size(args);
+ if (n < 2 || n > MAX_DIMS) {
+ if (PyErr_Occurred()) return NULL;
+ PyErr_Format(PyExc_ValueError,
+ "Need at least two and fewer than (%d) " \
+ "array objects.", MAX_DIMS);
+ return NULL;
+ }
+
+ multi = _pya_malloc(sizeof(PyArrayMultiIterObject));
+ if (multi == NULL) return PyErr_NoMemory();
+ PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type);
+
+ multi->numiter = n;
+ multi->index = 0;
+ for (i=0; i<n; i++) multi->iters[i] = NULL;
+ for (i=0; i<n; i++) {
+ arr = PyArray_FromAny(PyTuple_GET_ITEM(args, i), NULL, 0, 0, 0);
+ if (arr == NULL) goto fail;
+ /* BUG FIX: drop our reference to arr before testing for
+ iterator failure -- previously the reference leaked
+ on the error path */
+ multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr);
+ Py_DECREF(arr);
+ if (multi->iters[i] == NULL) goto fail;
+ }
+ if (PyArray_Broadcast(multi) < 0) goto fail;
+ PyArray_MultiIter_RESET(multi);
+
+ return (PyObject *)multi;
+
+ fail:
+ Py_DECREF(multi);
+ return NULL;
+}
+
+/* tp_iternext for scipy.broadcast: return a tuple of scalars (one per
+ sub-iterator) and advance all iterators; NULL when exhausted. */
+static PyObject *
+arraymultiter_next(PyArrayMultiIterObject *multi)
+{
+ PyObject *ret;
+ int i, n;
+
+ /* BUG FIX: test for exhaustion before allocating the result
+ tuple -- the tuple used to leak on the final call */
+ if (multi->index >= multi->size) return NULL;
+
+ n = multi->numiter;
+ ret = PyTuple_New(n);
+ if (ret == NULL) return NULL;
+ for (i=0; i < n; i++) {
+ PyArrayIterObject *it=multi->iters[i];
+ PyTuple_SET_ITEM(ret, i,
+ PyArray_ToScalar(it->dataptr, it->ao));
+ PyArray_ITER_NEXT(it);
+ }
+ multi->index++;
+ return ret;
+}
+
+/* Destructor for multi-iterators.
+ NOTE(review): instances are created both via PyObject_New
+ (PyArray_MultiIterNew) and _pya_malloc (arraymultiter_new) but are
+ always released with _pya_free here -- confirm the two allocators
+ are compatible. */
+static void
+arraymultiter_dealloc(PyArrayMultiIterObject *multi)
+{
+ int i;
+
+ for (i=0; i<multi->numiter; i++)
+ Py_XDECREF(multi->iters[i]);
+ _pya_free(multi);
+}
+
+/* .size attribute: total number of elements in the broadcast result.
+ Uses a Python long only when the value cannot fit in a C long. */
+static PyObject *
+arraymultiter_size_get(PyArrayMultiIterObject *self)
+{
+#if SIZEOF_INTP <= SIZEOF_LONG
+ return PyInt_FromLong((long) self->size);
+#else
+ if (self->size < MAX_LONG)
+ return PyInt_FromLong((long) self->size);
+ else
+ return PyLong_FromLongLong((longlong) self->size);
+#endif
+}
+
+/* .index attribute: current flat position in the broadcast result.
+ (Testing self->size is sufficient since index <= size.) */
+static PyObject *
+arraymultiter_index_get(PyArrayMultiIterObject *self)
+{
+#if SIZEOF_INTP <= SIZEOF_LONG
+ return PyInt_FromLong((long) self->index);
+#else
+ if (self->size < MAX_LONG)
+ return PyInt_FromLong((long) self->index);
+ else
+ return PyLong_FromLongLong((longlong) self->index);
+#endif
+}
+
+/* .shape attribute: broadcast result shape as a tuple of ints. */
+static PyObject *
+arraymultiter_shape_get(PyArrayMultiIterObject *self)
+{
+ return PyArray_IntTupleFromIntp(self->nd, self->dimensions);
+}
+
+/* .iters attribute: tuple of the individual sub-iterators
+ (new references). */
+static PyObject *
+arraymultiter_iters_get(PyArrayMultiIterObject *self)
+{
+ PyObject *res;
+ int i, n;
+ n = self->numiter;
+ res = PyTuple_New(n);
+ if (res == NULL) return res;
+ for (i=0; i<n; i++) {
+ /* SET_ITEM steals, so INCREF first */
+ Py_INCREF(self->iters[i]);
+ PyTuple_SET_ITEM(res, i, (PyObject *)self->iters[i]);
+ }
+ return res;
+}
+
+/* Computed attributes of scipy.broadcast objects. */
+static PyGetSetDef arraymultiter_getsetlist[] = {
+ {"size",
+ (getter)arraymultiter_size_get,
+ NULL,
+ "total size of broadcasted result"},
+ {"index",
+ (getter)arraymultiter_index_get,
+ NULL,
+ "current index in broadcasted result"},
+ {"shape",
+ (getter)arraymultiter_shape_get,
+ NULL,
+ "shape of broadcasted result"},
+ {"iters",
+ (getter)arraymultiter_iters_get,
+ NULL,
+ "tuple of individual iterators"},
+ {NULL, NULL, NULL, NULL},
+};
+
+/* Read-only struct members exposed directly to Python. */
+static PyMemberDef arraymultiter_members[] = {
+ {"numiter", T_INT, offsetof(PyArrayMultiIterObject, numiter),
+ RO, NULL},
+ {"nd", T_INT, offsetof(PyArrayMultiIterObject, nd), RO, NULL},
+ {NULL},
+};
+
+/* reset() method: rewind all sub-iterators to the start. */
+static PyObject *
+arraymultiter_reset(PyArrayMultiIterObject *self, PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) return NULL;
+
+ PyArray_MultiIter_RESET(self);
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+/* Method table for scipy.broadcast objects. */
+static PyMethodDef arraymultiter_methods[] = {
+ {"reset", (PyCFunction) arraymultiter_reset, METH_VARARGS, NULL},
+ {NULL, NULL},
+};
+
+/* Type object for scipy.broadcast (the multi/broadcast iterator);
+ supports the iterator protocol via arraymultiter_next. */
+static PyTypeObject PyArrayMultiIter_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+ "scipy.broadcast", /* tp_name */
+ sizeof(PyArrayMultiIterObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)arraymultiter_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ (iternextfunc)arraymultiter_next, /* tp_iternext */
+ arraymultiter_methods, /* tp_methods */
+ arraymultiter_members, /* tp_members */
+ arraymultiter_getsetlist, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)0, /* tp_init */
+ 0, /* tp_alloc */
+ arraymultiter_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0 /* tp_weaklist */
+};
+
+/*OBJECT_API*/
+/* Return a fresh (mutable) copy of the builtin descriptor for
+ type_num; the caller owns the returned reference. */
+static PyArray_Descr *
+PyArray_DescrNewFromType(int type_num)
+{
+ PyArray_Descr *old;
+ PyArray_Descr *new;
+
+ old = PyArray_DescrFromType(type_num);
+ new = PyArray_DescrNew(old);
+ Py_DECREF(old);
+ return new;
+}
+
+/*** Array Descr Objects for dynamic types **/
+
+/** There are some statically-defined PyArray_Descr objects corresponding
+ to the basic built-in types.
+ These can and should be DECREF'd and INCREF'd as appropriate, anyway.
+ If a mistake is made in reference counting, deallocation on these
+ builtins will be attempted leading to problems.
+
+ This lets us deal with all PyArray_Descr objects using reference
+ counting (regardless of whether they are statically or dynamically
+ allocated).
+**/
+
+/* base cannot be NULL */
+/*OBJECT_API*/
+/* Shallow-copy a descriptor, taking new references to everything it
+ points at. A fields value of Py_None (builtin marker) becomes NULL
+ so the copy is recognized as non-builtin. Returns a new reference
+ or NULL on allocation failure. */
+static PyArray_Descr *
+PyArray_DescrNew(PyArray_Descr *base)
+{
+ PyArray_Descr *new;
+
+ new = PyObject_New(PyArray_Descr, &PyArrayDescr_Type);
+ if (new == NULL) return NULL;
+ /* Don't copy PyObject_HEAD part */
+ memcpy((char *)new+sizeof(PyObject),
+ (char *)base+sizeof(PyObject),
+ sizeof(PyArray_Descr)-sizeof(PyObject));
+
+ if (new->fields == Py_None) new->fields = NULL;
+ Py_XINCREF(new->fields);
+ if (new->subarray) {
+ /* deep-copy the subarray struct so the copy owns it;
+ NOTE(review): _pya_malloc result is not NULL-checked */
+ new->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr));
+ memcpy(new->subarray, base->subarray,
+ sizeof(PyArray_ArrayDescr));
+ Py_INCREF(new->subarray->shape);
+ Py_INCREF(new->subarray->base);
+ }
+ Py_INCREF(new->typeobj);
+ return new;
+}
+
+/* should never be called for builtin-types unless
+ there is a reference-count problem
+*/
+static void
+arraydescr_dealloc(PyArray_Descr *self)
+{
+ Py_XDECREF(self->typeobj);
+ Py_XDECREF(self->fields);
+ if (self->subarray) {
+ /* the subarray struct is owned by this descriptor
+ (see PyArray_DescrNew) */
+ Py_DECREF(self->subarray->shape);
+ Py_DECREF(self->subarray->base);
+ _pya_free(self->subarray);
+ }
+ self->ob_type->tp_free(self);
+}
+
+/* we need to be careful about setting attributes because these
+ objects are pointed to by arrays that depend on them for interpreting
+ data. Currently no attributes of dtypedescr objects can be set.
+*/
+/* All members are read-only (RO) for that reason. */
+static PyMemberDef arraydescr_members[] = {
+ {"dtype", T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL},
+ {"kind", T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL},
+ {"char", T_CHAR, offsetof(PyArray_Descr, type), RO, NULL},
+ {"num", T_INT, offsetof(PyArray_Descr, type_num), RO, NULL},
+ {"byteorder", T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL},
+ {"itemsize", T_INT, offsetof(PyArray_Descr, elsize), RO, NULL},
+ {"alignment", T_INT, offsetof(PyArray_Descr, alignment), RO, NULL},
+ {NULL},
+};
+
+/* .subdescr attribute: (base-descr, shape) for subarray descriptors,
+ None otherwise. */
+static PyObject *
+arraydescr_subdescr_get(PyArray_Descr *self)
+{
+ if (self->subarray == NULL) {
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+ return Py_BuildValue("OO", (PyObject *)self->subarray->base,
+ self->subarray->shape);
+}
+
+/* .dtypestr attribute: array-protocol typestring such as "<i4".
+ Native-order ('=') is resolved to an explicit '<' or '>'. */
+static PyObject *
+arraydescr_protocol_typestr_get(PyArray_Descr *self)
+{
+ char basic_=self->kind;
+ char endian = self->byteorder;
+
+ if (endian == '=') {
+ endian = '<';
+ if (!PyArray_IsNativeByteOrder(endian)) endian = '>';
+ }
+
+ return PyString_FromFormat("%c%c%d", endian, basic_,
+ self->elsize);
+}
+
+/* .arrdescr attribute: array-protocol descriptor list. For simple
+ types this is [("", typestr)]; for record types the work is
+ delegated to the Python helper scipy.base._internal._array_descr. */
+static PyObject *
+arraydescr_protocol_descr_get(PyArray_Descr *self)
+{
+ PyObject *dobj, *res;
+
+ if (self->fields == NULL || self->fields == Py_None) {
+ /* get default */
+ dobj = PyTuple_New(2);
+ if (dobj == NULL) return NULL;
+ PyTuple_SET_ITEM(dobj, 0, PyString_FromString(""));
+ PyTuple_SET_ITEM(dobj, 1, \
+ arraydescr_protocol_typestr_get(self));
+ res = PyList_New(1);
+ if (res == NULL) {Py_DECREF(dobj); return NULL;}
+ PyList_SET_ITEM(res, 0, dobj);
+ return res;
+ }
+
+ return PyObject_CallMethod(_scipy_internal, "_array_descr",
+ "O", self);
+}
+
+/* returns 1 for a builtin type
+ and 2 for a user-defined data-type descriptor
+ return 0 if neither (i.e. it's a copy of one)
+*/
+static PyObject *
+arraydescr_isbuiltin_get(PyArray_Descr *self)
+{
+ long val;
+ val = 0;
+ /* fields == Py_None marks a static builtin descriptor */
+ if (self->fields == Py_None) val = 1;
+ if (PyTypeNum_ISUSERDEF(self->type_num)) val = 2;
+ return PyInt_FromLong(val);
+}
+
+/* .isnative attribute: True if the byteorder is native. */
+static PyObject *
+arraydescr_isnative_get(PyArray_Descr *self)
+{
+ PyObject *ret;
+
+ ret = (PyArray_ISNBO(self->byteorder) ? Py_True : Py_False);
+ Py_INCREF(ret);
+ return ret;
+}
+
+/* .fields attribute: a read-only proxy of the fields dict,
+ or None for non-record types. */
+static PyObject *
+arraydescr_fields_get(PyArray_Descr *self)
+{
+ if (self->fields == NULL || self->fields == Py_None) {
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+ return PyDictProxy_New(self->fields);
+}
+
+/* Computed attributes of dtypedescr objects. */
+static PyGetSetDef arraydescr_getsets[] = {
+ {"subdescr",
+ (getter)arraydescr_subdescr_get,
+ NULL,
+ "A tuple of (descr, shape) or None."},
+ {"arrdescr",
+ (getter)arraydescr_protocol_descr_get,
+ NULL,
+ "The array_protocol type descriptor."},
+ {"dtypestr",
+ (getter)arraydescr_protocol_typestr_get,
+ NULL,
+ "The array_protocol typestring."},
+ {"isbuiltin",
+ (getter)arraydescr_isbuiltin_get,
+ NULL,
+ /* BUG FIX: corrected "buillt-in" typo in user-visible doc */
+ "Is this a built-in data-type descriptor?"},
+ {"isnative",
+ (getter)arraydescr_isnative_get,
+ NULL,
+ "Is the byte-order of this descriptor native?"},
+ {"fields",
+ (getter)arraydescr_fields_get,
+ NULL,
+ NULL},
+ {NULL, NULL, NULL, NULL},
+};
+
+/* Forward declarations of data-type-descriptor converters defined
+ later in this file; used by arraydescr_new below. */
+static PyArray_Descr *_convert_from_list(PyObject *obj, int align, int try_descr);
+static PyArray_Descr *_convert_from_dict(PyObject *obj, int align);
+static PyArray_Descr *_convert_from_commastring(PyObject *obj, int align);
+static PyArray_Descr *_convert_from_array_descr(PyObject *obj);
+
+/* tp_new for dtypedescr: dtypedescr(obj, align=0, copy=False).
+ With align set, only dict/list/comma-string inputs are accepted
+ (they are converted with alignment); otherwise the generic
+ PyArray_DescrConverter is used. copy forces a fresh descriptor
+ when the converter returned a builtin one. */
+static PyObject *
+arraydescr_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
+{
+ PyObject *odescr;
+ PyArray_Descr *descr, *conv;
+ int align=0;
+ Bool copy=FALSE;
+
+ if (!PyArg_ParseTuple(args, "O|iO&", &odescr, &align,
+ PyArray_BoolConverter, &copy))
+ return NULL;
+
+ if (align) {
+ conv = NULL;
+ if PyDict_Check(odescr)
+ conv = _convert_from_dict(odescr, 1);
+ else if PyList_Check(odescr)
+ conv = _convert_from_list(odescr, 1, 0);
+ else if PyString_Check(odescr)
+ conv = _convert_from_commastring(odescr,
+ 1);
+ else {
+ PyErr_SetString(PyExc_ValueError,
+ "align can only be non-zero for" \
+ "dictionary, list, and string objects.");
+ }
+ if (conv) return (PyObject *)conv;
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ValueError,
+ "data-type-descriptor not understood");
+ }
+ return NULL;
+ }
+
+ if PyList_Check(odescr) {
+ /* try array-protocol descriptor form first, then the
+ plain list form */
+ conv = _convert_from_array_descr(odescr);
+ if (!conv) {
+ PyErr_Clear();
+ conv = _convert_from_list(odescr, 0, 0);
+ }
+ return (PyObject *)conv;
+ }
+
+ if (!PyArray_DescrConverter(odescr, &conv))
+ return NULL;
+ /* Get a new copy of it unless it's already a copy */
+ if (copy && conv->fields == Py_None) {
+ descr = PyArray_DescrNew(conv);
+ Py_DECREF(conv);
+ conv = descr;
+ }
+ return (PyObject *)conv;
+}
+
+static char doc_arraydescr_reduce[] = "self.__reduce__() for pickling.";
+
+/* return a tuple of (callable object, args, state) */
+/* The callable is scipy.base.multiarray.dtypedescr, args rebuilds the
+ base descriptor from its typestring (or scalar type object for
+ user-defined / named void types), and state carries byteorder,
+ subarray, fields, elsize, and alignment. */
+static PyObject *
+arraydescr_reduce(PyArray_Descr *self, PyObject *args)
+{
+ PyObject *ret, *mod, *obj;
+ PyObject *state;
+ char endian;
+ int elsize, alignment;
+
+ ret = PyTuple_New(3);
+ if (ret == NULL) return NULL;
+ mod = PyImport_ImportModule("scipy.base.multiarray");
+ if (mod == NULL) {Py_DECREF(ret); return NULL;}
+ obj = PyObject_GetAttrString(mod, "dtypedescr");
+ Py_DECREF(mod);
+ if (obj == NULL) {Py_DECREF(ret); return NULL;}
+ PyTuple_SET_ITEM(ret, 0, obj);
+ if (PyTypeNum_ISUSERDEF(self->type_num) || \
+ ((self->type_num == PyArray_VOID && \
+ self->typeobj != &PyVoidArrType_Type))) {
+ /* reconstruct from the scalar type object */
+ obj = (PyObject *)self->typeobj;
+ Py_INCREF(obj);
+ }
+ else {
+ /* reconstruct from kind + itemsize, e.g. "i4" */
+ obj = PyString_FromFormat("%c%d",self->kind, self->elsize);
+ }
+ PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1));
+
+ /* Now return the state which is at least
+ byteorder, subarray, and fields */
+ endian = self->byteorder;
+ if (endian == '=') {
+ /* pickle an explicit byteorder so the pickle is
+ portable across platforms */
+ endian = '<';
+ if (!PyArray_IsNativeByteOrder(endian)) endian = '>';
+ }
+ state = PyTuple_New(5);
+ PyTuple_SET_ITEM(state, 0, PyString_FromFormat("%c", endian));
+ PyTuple_SET_ITEM(state, 1, arraydescr_subdescr_get(self));
+ if (self->fields && self->fields != Py_None) {
+ Py_INCREF(self->fields);
+ PyTuple_SET_ITEM(state, 2, self->fields);
+ }
+ else {
+ PyTuple_SET_ITEM(state, 2, Py_None);
+ Py_INCREF(Py_None);
+ }
+
+ /* for extended types it also includes elsize and alignment */
+ if (PyTypeNum_ISEXTENDED(self->type_num)) {
+ elsize = self->elsize;
+ alignment = self->alignment;
+ }
+ else {elsize = -1; alignment = -1;}
+
+ PyTuple_SET_ITEM(state, 3, PyInt_FromLong(elsize));
+ PyTuple_SET_ITEM(state, 4, PyInt_FromLong(alignment));
+
+ PyTuple_SET_ITEM(ret, 2, state);
+ return ret;
+}
+
+/* state is at least byteorder, subarray, and fields but could include elsize
+ and alignment for EXTENDED arrays
+*/
+static char doc_arraydescr_setstate[] = "self.__setstate__() for pickling.";
+
+/* Restore descriptor state produced by arraydescr_reduce. A no-op on
+ builtin descriptors (fields == Py_None), which must never be
+ mutated. */
+static PyObject *
+arraydescr_setstate(PyArray_Descr *self, PyObject *args)
+{
+ int elsize = -1, alignment = -1;
+ char endian;
+ PyObject *subarray, *fields;
+
+ if (self->fields == Py_None) {Py_INCREF(Py_None); return Py_None;}
+
+ if (!PyArg_ParseTuple(args, "(cOOii)", &endian, &subarray, &fields,
+ &elsize, &alignment)) return NULL;
+
+ /* collapse explicit native order back to '=' */
+ if (PyArray_IsNativeByteOrder(endian)) endian = '=';
+
+ self->byteorder = endian;
+ if (self->subarray) {
+ /* drop any existing subarray before installing the
+ pickled one */
+ Py_XDECREF(self->subarray->base);
+ Py_XDECREF(self->subarray->shape);
+ _pya_free(self->subarray);
+ }
+ self->subarray = NULL;
+
+ if (subarray != Py_None) {
+ self->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr));
+ self->subarray->base = (PyArray_Descr *)PyTuple_GET_ITEM(subarray, 0);
+ Py_INCREF(self->subarray->base);
+ self->subarray->shape = PyTuple_GET_ITEM(subarray, 1);
+ Py_INCREF(self->subarray->shape);
+ }
+
+ if (fields != Py_None) {
+ Py_XDECREF(self->fields);
+ self->fields = fields;
+ Py_INCREF(fields);
+ }
+
+ if (PyTypeNum_ISEXTENDED(self->type_num)) {
+ self->elsize = elsize;
+ self->alignment = alignment;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+
+/* returns a copy of the PyArray_Descr structure with the byteorder
+ altered:
+ no arguments: The byteorder is swapped (in all subfields as well)
+ single argument: The byteorder is forced to the given state
+ (in all subfields as well)
+
+ Valid states: ('big', '>') or ('little', '<')
+ ('native', or '=')
+
+ If a descr structure with | is encountered it's own
+ byte-order is not changed but any fields are:
+*/
+
+/*OBJECT_API
+ Deep byteorder change of a data-type descriptor
+*/
+/* Return a copy of self with the byteorder set to newendian
+ (PyArray_SWAP means "flip"); fields and subarray descriptors are
+ recursively converted. Descriptors with '|' (ignore) keep their
+ own order but their fields are still converted. */
+static PyArray_Descr *
+PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian)
+{
+ PyArray_Descr *new;
+ char endian;
+
+ new = PyArray_DescrNew(self);
+ endian = new->byteorder;
+ if (endian != PyArray_IGNORE) {
+ if (newendian == PyArray_SWAP) { /* swap byteorder */
+ if PyArray_ISNBO(endian) endian = PyArray_OPPBYTE;
+ else endian = PyArray_NATBYTE;
+ new->byteorder = endian;
+ }
+ else if (newendian != PyArray_IGNORE) {
+ new->byteorder = newendian;
+ }
+ }
+ if (new->fields) {
+ PyObject *newfields;
+ PyObject *key, *value;
+ PyObject *newvalue;
+ PyObject *old;
+ PyArray_Descr *newdescr;
+ int pos = 0, len, i;
+ newfields = PyDict_New();
+ /* make new dictionary with replaced */
+ /* PyArray_Descr Objects */
+ /* NOTE(review): newfields is not NULL-checked here */
+ while(PyDict_Next(self->fields, &pos, &key, &value)) {
+ /* the -1 key is a special marker entry: copy
+ it through unchanged */
+ if (PyInt_Check(key) && \
+ PyInt_AsLong(key) == -1) {
+ PyDict_SetItem(newfields, key, value);
+ continue;
+ }
+ if (!PyString_Check(key) || \
+ !PyTuple_Check(value) || \
+ ((len=PyTuple_GET_SIZE(value)) < 2))
+ continue;
+
+ old = PyTuple_GET_ITEM(value, 0);
+ if (!PyArray_DescrCheck(old)) continue;
+ newdescr = PyArray_DescrNewByteorder \
+ ((PyArray_Descr *)old, newendian);
+ if (newdescr == NULL) {
+ Py_DECREF(newfields); Py_DECREF(new);
+ return NULL;
+ }
+ /* rebuild the (descr, offset[, title]) tuple with
+ the converted descriptor in slot 0 */
+ newvalue = PyTuple_New(len);
+ PyTuple_SET_ITEM(newvalue, 0, \
+ (PyObject *)newdescr);
+ for(i=1; i<len; i++) {
+ old = PyTuple_GET_ITEM(value, i);
+ Py_INCREF(old);
+ PyTuple_SET_ITEM(newvalue, i, old);
+ }
+ PyDict_SetItem(newfields, key, newvalue);
+ Py_DECREF(newvalue);
+ }
+ Py_DECREF(new->fields);
+ new->fields = newfields;
+ }
+ if (new->subarray) {
+ /* NOTE(review): recursive result is not NULL-checked */
+ Py_DECREF(new->subarray->base);
+ new->subarray->base = PyArray_DescrNewByteorder \
+ (self->subarray->base, newendian);
+ }
+ return new;
+}
+
+
+static char doc_arraydescr_newbyteorder[] = "self.newbyteorder(<endian>)"
+ " returns a copy of the dtypedescr object\n"
+ " with altered byteorders. If <endian> is not given all byteorders\n"
+ " are swapped. Otherwise endian can be '>', '<', or '=' to force\n"
+ " a byteorder. Descriptors in all fields are also updated in the\n"
+ " new dtypedescr object.";
+
+/* Python-level wrapper around PyArray_DescrNewByteorder;
+ default (no argument) is a byteorder swap. */
+static PyObject *
+arraydescr_newbyteorder(PyArray_Descr *self, PyObject *args)
+{
+ char endian=PyArray_SWAP;
+
+ if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter,
+ &endian)) return NULL;
+
+ return (PyObject *)PyArray_DescrNewByteorder(self, endian);
+}
+
+/* Method table for dtypedescr objects. */
+static PyMethodDef arraydescr_methods[] = {
+ /* for pickling */
+ {"__reduce__", (PyCFunction)arraydescr_reduce, METH_VARARGS,
+ doc_arraydescr_reduce},
+ {"__setstate__", (PyCFunction)arraydescr_setstate, METH_VARARGS,
+ doc_arraydescr_setstate},
+
+ {"newbyteorder", (PyCFunction)arraydescr_newbyteorder, METH_VARARGS,
+ doc_arraydescr_newbyteorder},
+ {NULL, NULL} /* sentinel */
+};
+
+/* tp_str for dtypedescr. Record types render as
+ ('typestr', [field descr list]); subarray types as
+ (base-str,shape); simple types as 'typestr'.
+ Built with PyString_Concat/ConcatAndDel, which manage the
+ intermediate references (ConcatAndDel consumes its second arg). */
+static PyObject *
+arraydescr_str(PyArray_Descr *self)
+{
+ PyObject *sub;
+
+ if (self->fields && self->fields != Py_None) {
+ PyObject *lst;
+ lst = arraydescr_protocol_descr_get(self);
+ if (!lst) sub = PyString_FromString("<err>");
+ else sub = PyObject_Str(lst);
+ Py_XDECREF(lst);
+ if (self->type_num != PyArray_VOID) {
+ /* prepend the quoted typestring:
+ ('typestr', <descr list>) */
+ PyObject *p;
+ PyObject *t=PyString_FromString("'");
+ p = arraydescr_protocol_typestr_get(self);
+ PyString_Concat(&p, t);
+ PyString_ConcatAndDel(&t, p);
+ p = PyString_FromString("(");
+ PyString_ConcatAndDel(&p, t);
+ PyString_ConcatAndDel(&p, PyString_FromString(", "));
+ PyString_ConcatAndDel(&p, sub);
+ PyString_ConcatAndDel(&p, PyString_FromString(")"));
+ sub = p;
+ }
+ }
+ else if (self->subarray) {
+ PyObject *p;
+ PyObject *t = PyString_FromString("(");
+ p = arraydescr_str(self->subarray->base);
+ PyString_ConcatAndDel(&t, p);
+ PyString_ConcatAndDel(&t, PyString_FromString(","));
+ PyString_ConcatAndDel(&t, PyObject_Str(self->subarray->shape));
+ PyString_ConcatAndDel(&t, PyString_FromString(")"));
+ sub = t;
+ }
+ else {
+ /* simple type: surround the typestring with quotes */
+ PyObject *t=PyString_FromString("'");
+ sub = arraydescr_protocol_typestr_get(self);
+ PyString_Concat(&sub, t);
+ PyString_ConcatAndDel(&t, sub);
+ sub = t;
+ }
+ return sub;
+}
+
+/* tp_repr for dtypedescr: "dtypedescr(<str form>)". */
+static PyObject *
+arraydescr_repr(PyArray_Descr *self)
+{
+ PyObject *sub, *s;
+ s = PyString_FromString("dtypedescr(");
+ sub = arraydescr_str(self);
+ PyString_ConcatAndDel(&s, sub);
+ sub = PyString_FromString(")");
+ PyString_ConcatAndDel(&s, sub);
+ return s;
+}
+
+/* tp_compare for dtypedescr: 0 when equivalent; otherwise -1 if self
+ can be safely cast to other, 1 if not. This gives a cast-based
+ ordering rather than a strict total order. */
+static int
+arraydescr_compare(PyArray_Descr *self, PyObject *other)
+{
+ if (!PyArray_DescrCheck(other)) {
+ PyErr_SetString(PyExc_TypeError,
+ "not a dtypedescr object.");
+ return -1;
+ }
+ if (PyArray_EquivTypes(self, (PyArray_Descr *)other)) return 0;
+ if (PyArray_CanCastTo(self, (PyArray_Descr *)other)) return -1;
+ return 1;
+}
+
+/* Type object for scipy.dtypedescr (data-type descriptors). */
+static PyTypeObject PyArrayDescr_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+ "scipy.dtypedescr", /* tp_name */
+ sizeof(PyArray_Descr), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)arraydescr_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ (cmpfunc)arraydescr_compare, /* tp_compare */
+ (reprfunc)arraydescr_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ (reprfunc)arraydescr_str, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ arraydescr_methods, /* tp_methods */
+ arraydescr_members, /* tp_members */
+ arraydescr_getsets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ arraydescr_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0 /* tp_weaklist */
+};
diff --git a/numpy/base/src/arraytypes.inc.src b/numpy/base/src/arraytypes.inc.src
new file mode 100644
index 000000000..322eafef8
--- /dev/null
+++ b/numpy/base/src/arraytypes.inc.src
@@ -0,0 +1,1913 @@
+/* -*- c -*- */
+
+/* Like PyLong_AsUnsignedLong but also accepts a plain Python int.
+   A negative int raises OverflowError (mirroring the PyLong behavior)
+   and returns (ulong)-1. */
+static ulong
+MyPyLong_AsUnsignedLong(PyObject *vv)
+{
+    if ((vv != NULL) && PyInt_Check(vv)) {
+        long val = PyInt_AsLong(vv);
+        if (val < 0) {
+            PyErr_SetString(PyExc_OverflowError,
+                            "can't convert negative value to unsigned long");
+            return (ulong) -1;
+        }
+        return val;
+    }
+    return PyLong_AsUnsignedLong(vv);
+}
+
+/* Like PyLong_AsUnsignedLongLong but also accepts a plain Python int.
+   A negative int raises OverflowError and returns (ulonglong)-1. */
+static ulonglong
+MyPyLong_AsUnsignedLongLong(PyObject *vv)
+{
+    if ((vv != NULL) && PyInt_Check(vv)) {
+        longlong val = PyInt_AsLong(vv);
+        if (val < 0) {
+            /* fixed copy-pasted message: this is the long long variant */
+            PyErr_SetString(PyExc_OverflowError,
+                            "can't convert negative value to unsigned long long");
+            return (ulonglong) -1;
+        }
+        return val;
+    }
+    return PyLong_AsUnsignedLongLong(vv);
+}
+
+/****************** getitem and setitem **********************/
+
+/**begin repeat
+
+#TYP=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,LONG,UINT,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE#
+#func1=PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, PyLong_FromLongLong, PyLong_FromUnsignedLongLong, PyFloat_FromDouble*2#
+#func2=PyObject_IsTrue, PyInt_AsLong*6, MyPyLong_AsUnsignedLong*2, PyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, PyFloat_AsDouble*2#
+#typ=Bool, byte, ubyte, short, ushort, int, long, uint, ulong, longlong, ulonglong, float, double#
+#typ1=long*7, ulong*2, longlong, ulonglong, float, double#
+#kind=Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, LongLong, ULongLong, Float, Double#
+*/
+
+/* getitem for the simple numeric types: read one @typ@ element at ip
+   and box it as the matching Python scalar via @func1@.  When the array
+   is misaligned or byte-swapped (not BEHAVED_RO), go through copyswap
+   into an aligned, native-order temporary first. */
+static PyObject *
+@TYP@_getitem(char *ip, PyArrayObject *ap) {
+    @typ@ t1;
+
+    if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) {
+        t1 = *((@typ@ *)ip);
+        return @func1@((@typ1@)t1);
+    }
+    else {
+        ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap),
+                               ap->descr->elsize);
+        return @func1@((@typ1@)t1);
+    }
+}
+
+/* setitem for the simple numeric types: convert op to @typ@ (fast path
+   for the matching array scalar, @func2@ otherwise) and store it at ov.
+   ap==NULL is treated as behaved (aligned, native-order) memory.
+   Returns 0 on success, -1 with an exception set on conversion error. */
+static int
+@TYP@_setitem(PyObject *op, char *ov, PyArrayObject *ap) {
+    @typ@ temp;  /* ensures alignment */
+
+    if (PyArray_IsScalar(op, @kind@)) {
+        temp = ((Py@kind@ScalarObject *)op)->obval;
+    }
+    else {
+        temp = (@typ@)@func2@(op);
+    }
+    if (PyErr_Occurred()) return -1;
+    if (ap == NULL || PyArray_ISBEHAVED(ap))
+        *((@typ@ *)ov)=temp;
+    else {
+        /* misaligned or byte-swapped destination */
+        ap->descr->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap),
+                               ap->descr->elsize);
+    }
+
+    return 0;
+}
+
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYP=CFLOAT,CDOUBLE#
+#typ=float, double#
+*/
+
+/* getitem for CFLOAT/CDOUBLE: box the (real, imag) pair at ip as a
+   Python complex.  Non-behaved arrays copy each component through
+   copy_and_swap into aligned native-order temporaries first. */
+static PyObject *
+@TYP@_getitem(char *ip, PyArrayObject *ap) {
+    @typ@ t1, t2;
+
+    if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) {
+        return PyComplex_FromDoubles((double)((@typ@ *)ip)[0],
+                                     (double)((@typ@ *)ip)[1]);
+    }
+    else {
+        int size = sizeof(@typ@);
+        Bool swap = !PyArray_ISNOTSWAPPED(ap);
+        copy_and_swap(&t1, ip, size, 1, 0, swap);
+        copy_and_swap(&t2, ip+size, size, 1, 0, swap);
+        return PyComplex_FromDoubles((double)t1, (double)t2);
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYP=CFLOAT, CDOUBLE, CLONGDOUBLE#
+#typ=float, double, longdouble#
+#kind=CFloat, CDouble, CLongDouble#
+*/
+/* setitem for the complex types: convert op to a c@typ@ (fast path for
+   the matching array scalar; 0-d arrays are unboxed first) and store it
+   at ov, byte-swapping if the array is non-native order.
+   Returns 0 on success, -1 with an exception set on conversion error. */
+static int
+@TYP@_setitem(PyObject *op, char *ov, PyArrayObject *ap)
+{
+    Py_complex oop;
+    PyObject *op2;
+    c@typ@ temp;
+
+    if (!(PyArray_IsScalar(op, @kind@))) {
+        if (PyArray_Check(op) && (PyArray_NDIM(op)==0)) {
+            /* 0-d array: extract its scalar value first */
+            op2 = ((PyArrayObject *)op)->descr->f->getitem(
+                        ((PyArrayObject *)op)->data,
+                        (PyArrayObject *)op);
+        }
+        else {
+            op2 = op; Py_INCREF(op);
+        }
+        oop = PyComplex_AsCComplex (op2);
+        Py_DECREF(op2);
+        if (PyErr_Occurred()) return -1;
+        temp.real = (@typ@) oop.real;
+        temp.imag = (@typ@) oop.imag;
+    }
+    else {
+        temp = ((Py@kind@ScalarObject *)op)->obval;
+    }
+
+    /* NOTE(review): unlike the scalar setitem variants this assumes
+       ap != NULL -- confirm no caller passes NULL here. */
+    memcpy(ov, &temp, ap->descr->elsize);
+    if (!PyArray_ISNOTSWAPPED(ap))
+        byte_swap_vector(ov, 2, sizeof(@typ@));
+    /* fix: removed a trailing copy_and_swap(ov, &temp, ...) that
+       redundantly re-stored the same value a second time -- the
+       memcpy + byte_swap_vector above already wrote the (possibly
+       swapped) result */
+    return 0;
+}
+/**end repeat**/
+
+/* getitem for LONGDOUBLE: no exact Python builtin exists, so return an
+   array scalar built from the raw bytes. */
+static PyObject *
+LONGDOUBLE_getitem(char *ip, PyArrayObject *ap)
+{
+    return PyArray_Scalar(ip, ap->descr, NULL);
+}
+
+/* setitem for LONGDOUBLE: fast path for LongDouble array scalars,
+   otherwise fall back to the (precision-losing) double conversion.
+   ap==NULL is treated as behaved memory. */
+static int
+LONGDOUBLE_setitem(PyObject *op, char *ov, PyArrayObject *ap) {
+    longdouble temp;  /* ensures alignment */
+
+    if (PyArray_IsScalar(op, LongDouble)) {
+        temp = ((PyLongDoubleScalarObject *)op)->obval;
+    }
+    else {
+        temp = (longdouble)PyFloat_AsDouble(op);
+    }
+    if (PyErr_Occurred()) return -1;
+    if (ap == NULL || PyArray_ISBEHAVED(ap))
+        *((longdouble *)ov)=temp;
+    else {
+        copy_and_swap(ov, &temp, ap->descr->elsize, 1, 0,
+                      !PyArray_ISNOTSWAPPED(ap));
+    }
+
+    return 0;
+}
+
+/* getitem for CLONGDOUBLE: like LONGDOUBLE, return an array scalar to
+   avoid precision loss through Python complex. */
+static PyObject *
+CLONGDOUBLE_getitem(char *ip, PyArrayObject *ap)
+{
+    return PyArray_Scalar(ip, ap->descr, NULL);
+}
+
+
+
+/* UNICODE */
+/* getitem for UNICODE: build a Python unicode object covering the full
+   fixed-width element (elsize bytes / sizeof(Py_UNICODE) code units),
+   byte-swapping the fresh copy in place when the array is non-native. */
+static PyObject *
+UNICODE_getitem(char *ip, PyArrayObject *ap)
+{
+    PyObject *obj;
+    size_t size = sizeof(Py_UNICODE);
+
+    obj = PyUnicode_FromUnicode((const Py_UNICODE *)ip,
+                                ap->descr->elsize / size);
+    if (!PyArray_ISNOTSWAPPED(ap) && (obj != NULL)) {
+        byte_swap_vector(PyUnicode_AS_UNICODE(obj),
+                         ap->descr->elsize / size, size);
+    }
+    return obj;
+}
+
+/* setitem for UNICODE: coerce op to unicode, copy up to elsize bytes of
+   its UCS data into ov, zero-pad the remainder, and byte-swap in place
+   for non-native arrays.  Returns 0 on success, -1 on error. */
+static int
+UNICODE_setitem(PyObject *op, char *ov, PyArrayObject *ap)
+{
+    PyObject *temp;
+    Py_UNICODE *ptr;
+    int datalen;
+    size_t size = sizeof(Py_UNICODE);
+
+    if ((temp=PyObject_Unicode(op)) == NULL) return -1;
+
+    ptr = PyUnicode_AS_UNICODE(temp);
+    if ((ptr == NULL) || (PyErr_Occurred())) {
+        Py_DECREF(temp);
+        return -1;
+    }
+    /* bug fix: measure the *converted* object, not the original op --
+       PyUnicode_GET_DATA_SIZE(op) reads garbage when op is not itself
+       a unicode object */
+    datalen = PyUnicode_GET_DATA_SIZE(temp);
+
+    memcpy(ov, ptr, MIN(ap->descr->elsize, datalen));
+    /* Fill in the rest of the space with 0 */
+    if (ap->descr->elsize > datalen) {
+        memset(ov + datalen, 0, (ap->descr->elsize - datalen));
+    }
+
+    if (!PyArray_ISNOTSWAPPED(ap))
+        byte_swap_vector(ov, ap->descr->elsize / size, size);
+
+    Py_DECREF(temp);
+    return 0;
+}
+
+/* STRING -- can handle both NULL-terminated and not NULL-terminated cases */
+/* STRING -- can handle both NULL-terminated and not NULL-terminated cases:
+   if the last byte of the fixed-width element is non-zero the element is
+   full, so return all elsize bytes; otherwise stop at the NUL. */
+static PyObject *
+STRING_getitem(char *ip, PyArrayObject *ap)
+{
+    if (ip[ap->descr->elsize-1])
+        return PyString_FromStringAndSize(ip,ap->descr->elsize);
+    else
+        return PyString_FromString(ip);
+}
+
+/* setitem for STRING: coerce op via str(), copy up to elsize bytes and
+   zero-pad the remainder.  Longer inputs are silently truncated.
+   Returns 0 on success, -1 on error. */
+static int
+STRING_setitem(PyObject *op, char *ov, PyArrayObject *ap)
+{
+    char *ptr;
+    int len;
+    PyObject *temp=PyObject_Str(op);
+
+    if (temp == NULL) return -1;
+
+    if (PyString_AsStringAndSize(temp, &ptr, &len) == -1) {
+        Py_DECREF(temp);
+        return -1;
+    }
+    memcpy(ov, ptr, MIN(ap->descr->elsize,len));
+    /* zero-fill any trailing space */
+    if (ap->descr->elsize > len) {
+        memset(ov + len, 0, (ap->descr->elsize - len));
+    }
+    Py_DECREF(temp);
+    return 0;
+}
+
+/* OBJECT */
+
+/* getitem for OBJECT: the element is a borrowed PyObject*; return a new
+   reference to it. */
+static PyObject *
+OBJECT_getitem(char *ip, PyArrayObject *ap)
+{
+    Py_INCREF(*(PyObject **)ip);
+    return *(PyObject **)ip;
+}
+
+/* setitem for OBJECT: drop the reference currently held in the slot
+   (XDECREF tolerates NULL for fresh memory) and store a new reference
+   to op. */
+static int
+OBJECT_setitem(PyObject *op, char *ov, PyArrayObject *ap)
+{
+    Py_XDECREF(*(PyObject **)ov);
+    Py_INCREF(op);
+    *(PyObject **)ov = op;
+    return PyErr_Occurred() ? -1:0;
+}
+
+/* VOID */
+
+/* VOID */
+
+/* getitem for VOID:
+   - fields present: return a tuple with one entry per field, obtained by
+     temporarily swapping ap->descr (and the ALIGNED flag) to each field's
+     descriptor and calling that field's getitem at its offset;
+   - subarray: return a view array of the base type over this element;
+   - otherwise: return a buffer object wrapping the raw bytes. */
+static PyObject *
+VOID_getitem(char *ip, PyArrayObject *ap)
+{
+    PyObject *u=NULL;
+    PyArray_Descr* descr;
+    int itemsize;
+
+    descr = ap->descr;
+    if (descr->fields && descr->fields != Py_None) {
+        PyObject *key;
+        PyObject *names;
+        int i, n;
+        PyObject *ret;
+        PyObject *tup, *title;
+        PyArray_Descr *new;
+        int offset;
+        int savedflags;
+
+        /* get the names from the fields dictionary: the ordered name
+           list is stored under the integer key -1 */
+        key = PyInt_FromLong(-1);
+        names = PyDict_GetItem(descr->fields, key);
+        Py_DECREF(key);
+        if (!names) goto finish;
+        n = PyList_GET_SIZE(names);
+        /* NOTE(review): PyTuple_New result is not checked for NULL */
+        ret = PyTuple_New(n);
+        savedflags = ap->flags;
+        for (i=0; i<n; i++) {
+            key = PyList_GET_ITEM(names, i);
+            tup = PyDict_GetItem(descr->fields, key);
+            if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset,
+                                  &title)) {
+                Py_DECREF(ret);
+                ap->descr = descr;  /* restore before bailing */
+                return NULL;
+            }
+            ap->descr = new;
+            /* update alignment based on offset */
+            if ((new->alignment > 1) && \
+                ((((intp)(ip+offset)) % new->alignment) != 0))
+                ap->flags &= ~ALIGNED;
+            else
+                ap->flags |= ALIGNED;
+
+            PyTuple_SET_ITEM(ret, i, \
+                             new->f->getitem(ip+offset, ap));
+            ap->flags = savedflags;
+        }
+        ap->descr = descr;
+        return ret;
+    }
+
+    if (descr->subarray) {
+        /* return an array of the basic type */
+        PyArray_Dims shape={NULL,-1};
+        PyObject *ret;
+        if (!(PyArray_IntpConverter(descr->subarray->shape,
+                                    &shape))) {
+            PyDimMem_FREE(shape.ptr);
+            PyErr_SetString(PyExc_ValueError,
+                            "invalid shape in fixed-type tuple.");
+            return NULL;
+        }
+        ret = PyArray_NewFromDescr(&PyArray_Type,
+                                   descr->subarray->base,
+                                   shape.len, shape.ptr,
+                                   NULL, ip, ap->flags, NULL);
+        PyDimMem_FREE(shape.ptr);
+        if (!ret) return NULL;
+        /* the view keeps ap alive as its base */
+        PyArray_BASE(ret) = (PyObject *)ap;
+        Py_INCREF(ap);
+        PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL_FLAGS);
+        return ret;
+    }
+
+ finish:
+    itemsize=ap->descr->elsize;
+    if (PyArray_ISWRITEABLE(ap))
+        u = PyBuffer_FromReadWriteMemory(ip, itemsize);
+    else
+        u = PyBuffer_FromMemory(ip, itemsize);
+    if (u==NULL) goto fail;
+
+    /* default is to return buffer object pointing to current item */
+    /* a view of it */
+    return u;
+
+ fail:
+    return NULL;
+}
+
+
+
+static int PyArray_CopyObject(PyArrayObject *, PyObject *);
+
+static int PyArray_CopyObject(PyArrayObject *, PyObject *);
+
+/* setitem for VOID (mirror of VOID_getitem):
+   - fields + tuple op: set each field by temporarily swapping ap->descr
+     (and the ALIGNED flag) to the field descriptor and delegating to the
+     field's setitem at its offset;
+   - subarray: build a view of the base type over this element and copy
+     op into it with PyArray_CopyObject;
+   - otherwise: use op's read-buffer interface to copy raw bytes. */
+static int
+VOID_setitem(PyObject *op, char *ip, PyArrayObject *ap)
+{
+    PyArray_Descr* descr;
+    int itemsize=ap->descr->elsize;
+    int res;
+
+    descr = ap->descr;
+    if (descr->fields && (descr->fields != Py_None) && \
+        PyTuple_Check(op)) {
+        PyObject *key;
+        PyObject *names;
+        int i, n;
+        PyObject *tup, *title;
+        PyArray_Descr *new;
+        int offset;
+        int savedflags;
+        res = -1;
+        /* get the names from the fields dictionary: the ordered name
+           list is stored under the integer key -1 */
+        key = PyInt_FromLong(-1);
+        names = PyDict_GetItem(descr->fields, key);
+        Py_DECREF(key);
+        if (!names) goto finish;
+        n = PyList_GET_SIZE(names);
+        if (PyTuple_GET_SIZE(op) != n) {
+            PyErr_SetString(PyExc_ValueError,
+                            "size of tuple must match"\
+                            "number of fields.");
+            return -1;
+        }
+        savedflags = ap->flags;
+        for (i=0; i<n; i++) {
+            key = PyList_GET_ITEM(names, i);
+            tup = PyDict_GetItem(descr->fields, key);
+            if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset,
+                                  &title)) {
+                ap->descr = descr;  /* restore before bailing */
+                return -1;
+            }
+            ap->descr = new;
+            /* remember to update alignment flags */
+            if ((new->alignment > 1) && \
+                ((((intp)(ip+offset)) % new->alignment) != 0))
+                ap->flags &= ~ALIGNED;
+            else
+                ap->flags |= ALIGNED;
+
+            res = new->f->setitem(PyTuple_GET_ITEM(op, i),
+                                  ip+offset, ap);
+            ap->flags = savedflags;
+            if (res < 0) break;
+        }
+        ap->descr = descr;
+        return res;
+    }
+
+    if (descr->subarray) {
+        /* copy into an array of the same basic type */
+        PyArray_Dims shape={NULL,-1};
+        PyObject *ret;
+        if (!(PyArray_IntpConverter(descr->subarray->shape,
+                                    &shape))) {
+            PyDimMem_FREE(shape.ptr);
+            PyErr_SetString(PyExc_ValueError,
+                            "invalid shape in fixed-type tuple.");
+            return -1;
+        }
+        ret = PyArray_NewFromDescr(&PyArray_Type,
+                                   descr->subarray->base,
+                                   shape.len, shape.ptr,
+                                   NULL, ip, ap->flags, NULL);
+        PyDimMem_FREE(shape.ptr);
+        if (!ret) return -1;
+        PyArray_BASE(ret) = (PyObject *)ap;
+        Py_INCREF(ap);
+        PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL_FLAGS);
+        res = PyArray_CopyObject((PyArrayObject *)ret, op);
+        Py_DECREF(ret);
+        return res;
+    }
+
+ finish:
+    /* Default is to use buffer interface to set item */
+    {
+        const void *buffer;
+        int buflen;
+        res = PyObject_AsReadBuffer(op, &buffer, &buflen);
+        if (res == -1) goto fail;
+        /* truncate to the element size */
+        memcpy(ip, buffer, MIN(buflen, itemsize));
+    }
+    return 0;
+
+ fail:
+    return -1;
+}
+
+
+/****************** XXX_to_YYY *******************************/
+
+/* Assumes contiguous, and aligned, from and to */
+
+
+/**begin repeat
+#to=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*16#
+#from=BYTE*13,UBYTE*13,SHORT*13,USHORT*13,INT*13,UINT*13,LONG*13,ULONG*13,LONGLONG*13,ULONGLONG*13,FLOAT*13,DOUBLE*13,LONGDOUBLE*13,CFLOAT*13,CDOUBLE*13,CLONGDOUBLE*13#
+#totyp=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*16#
+#fromtyp=byte*13, ubyte*13, short*13, ushort*13, int*13, uint*13, long*13, ulong*13, longlong*13, ulonglong*13, float*13, double*13, longdouble*13, float*13, double*13, longdouble*13#
+#incr= ip++*169,ip+=2*39#
+*/
+/* Cast n contiguous, aligned elements from @fromtyp@ to @totyp@ with a
+   plain C cast.  For complex sources @incr@ steps ip by 2 so only the
+   real part is taken. */
+static void
+@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop) {
+    register intp i;
+    for(i=0;i<n;i++,op++) {
+        *op = (@totyp@)*ip;
+        @incr@;
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+/* Cast to BOOL: any non-zero source value becomes TRUE. */
+static void
+@from@_to_BOOL(@fromtyp@ *ip, Bool *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop) {
+    register intp i;
+    for(i=0;i<n;i++,op++,ip++) {
+        *op = (Bool)(*ip != FALSE);
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+#from=CFLOAT, CDOUBLE, CLONGDOUBLE#
+#fromtyp=cfloat, cdouble, clongdouble#
+*/
+/* Complex cast to BOOL: TRUE when either the real or imaginary part is
+   non-zero. */
+static void
+@from@_to_BOOL(@fromtyp@ *ip, Bool *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop) {
+    register intp i;
+    for(i=0;i<n;i++,op++,ip++) {
+        *op = (Bool)(((*ip).real != FALSE) || ((*ip).imag != FALSE));
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+#to=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#totyp=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+/* Cast from BOOL: normalize to exactly 0 or 1 in the target type. */
+static void
+BOOL_to_@to@(Bool *ip, @totyp@ *op, intp n, PyArrayObject *aip,
+             PyArrayObject *aop) {
+    register intp i;
+    for(i=0;i<n;i++,op++,ip++) {
+        *op = (@totyp@)(*ip != FALSE);
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*14#
+#from=BOOL*3,BYTE*3,UBYTE*3,SHORT*3,USHORT*3,INT*3,UINT*3,LONG*3,ULONG*3,LONGLONG*3,ULONGLONG*3,FLOAT*3,DOUBLE*3,LONGDOUBLE*3#
+#fromtyp=Bool*3,byte*3, ubyte*3, short*3, ushort*3, int*3, uint*3, long*3, ulong*3, longlong*3, ulonglong*3, float*3, double*3, longdouble*3#
+#totyp= (float, double, longdouble)*14#
+*/
+/* Real-to-complex cast: source value becomes the real part, imaginary
+   part is zeroed. */
+static void
+@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop) {
+    register intp i;
+    for(i=0;i<n;i++,ip++) {
+        *op++ = (@totyp@)*ip;
+        *op++ = 0.0;
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*3#
+#from=CFLOAT*3,CDOUBLE*3,CLONGDOUBLE*3#
+#totyp=(float, double, longdouble)*3#
+#fromtyp=float*3, double*3, longdouble*3#
+*/
+/* Complex-to-complex cast: treat the data as 2*n interleaved real
+   components and cast each one. */
+static void
+@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop) {
+    register intp i;
+    for(i=0;i<2*n;i++,ip++,op++) {
+        *op = (@totyp@)*ip;
+    }
+}
+
+/**end repeat**/
+
+/**begin repeat
+
+#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT#
+#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char, PyObject *#
+#skip= 1*17, aip->descr->elsize*3, 1#
+*/
+/* Cast to OBJECT: box each element via its getitem.  The destination
+   slot's old reference (possibly NULL) is dropped first.  @skip@ is the
+   source step: 1 element for fixed types, elsize bytes for the
+   flexible (char-based) types. */
+static void
+@from@_to_OBJECT(@fromtyp@ *ip, PyObject **op, intp n, PyArrayObject *aip,
+                 PyArrayObject *aop)
+{
+    register intp i;
+    int skip=@skip@;
+    for(i=0;i<n;i++,ip+=skip,op++) {
+        Py_XDECREF(*op);
+        *op = @from@_getitem((char *)ip, aip);
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#to=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE, STRING, UNICODE, VOID#
+#totyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char#
+#skip= 1*17, aip->descr->elsize*3#
+*/
+/* Cast from OBJECT: unbox each PyObject* via the target's setitem.
+   @skip@ is the destination step (1 element, or elsize bytes for the
+   char-based flexible types).  NOTE(review): setitem errors are not
+   checked here -- a failed conversion leaves the element unset. */
+static void
+OBJECT_to_@to@(PyObject **ip, @totyp@ *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop)
+{
+    register intp i;
+    int skip=@skip@;
+    for(i=0;i<n;i++,ip++,op+=skip) {
+        @to@_setitem(*ip, (char *)op, aop);
+    }
+}
+/**end repeat**/
+
+
+/**begin repeat
+
+#from=STRING*20, UNICODE*20, VOID*20#
+#fromtyp=char*60#
+#to=(BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,STRING,UNICODE,VOID)*3#
+#totyp=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, void)*3#
+#oskip=(1*17,aop->descr->elsize*3)*3#
+#convert=1*17,0*3,1*17,0*3,0*20#
+#convstr=(Int*9,Long*2,Float*3,Complex*3,Tuple*3)*3#
+*/
+/* Cast from the flexible types (STRING/UNICODE/VOID) to anything: box
+   each source element with getitem, optionally coerce it through the
+   Python constructor named by @convstr@ (e.g. str -> int/float/complex),
+   then store with the target's setitem.  A failed getitem/conversion
+   aborts the loop silently. */
+static void
+@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop)
+{
+    register intp i;
+    PyObject *temp=NULL;
+    int skip=aip->descr->elsize;
+    int oskip=@oskip@;
+    for(i=0; i<n; i++, ip+=skip, op+=oskip) {
+        temp = @from@_getitem((char *)ip, aip);
+        if (temp==NULL) return;
+        /* convert from Python object to needed one */
+        if (@convert@) {
+            PyObject *new, *args;
+            /* call out to the Python builtin given by convstr;
+               "(N)" steals the reference to temp */
+            args = Py_BuildValue("(N)", temp);
+            new = Py@convstr@_Type.tp_new(&Py@convstr@_Type, args, NULL);
+            Py_DECREF(args);
+            temp = new;
+            if (temp==NULL) return;
+        }
+
+        @to@_setitem(temp,(char *)op, aop);
+        Py_DECREF(temp);
+    }
+}
+
+/**end repeat**/
+
+/**begin repeat
+
+#to=STRING*17, UNICODE*17, VOID*17#
+#totyp=char*17, char*17, char*17#
+#from=(BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE)*3#
+#fromtyp=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble)*3#
+*/
+
+/* Cast from the fixed types to the flexible ones (STRING/UNICODE/VOID):
+   box each element with getitem and store via the target's setitem.
+   When getitem fails, Py_False is substituted so the loop keeps its
+   position instead of aborting. */
+static void
+@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip,
+               PyArrayObject *aop)
+{
+    register intp i;
+    PyObject *temp=NULL;
+    int skip=1;
+    int oskip=aop->descr->elsize;
+    for(i=0; i<n; i++, ip+=skip, op+=oskip) {
+        temp = @from@_getitem((char *)ip, aip);
+        if (temp==NULL) {
+            Py_INCREF(Py_False);
+            temp = Py_False;
+        }
+        @to@_setitem(temp,(char *)op, aop);
+        Py_DECREF(temp);
+    }
+}
+
+/**end repeat**/
+
+
+/****************** scan *************************************/
+
+/**begin repeat
+
+#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble#
+#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT,"f","lf","Lf"#
+*/
+/* fscanf one @type@ from fp into *ip, then optionally consume the
+   separator.  Returns 0 on success; -3 no match, -4 EOF, -5 other;
+   -1 when EOF is hit while consuming the separator.
+   NOTE(review): sep is passed as the fscanf format string -- callers
+   must supply a literal/trusted separator (no stray %%-conversions). */
+static int
+@fname@_scan (FILE *fp, @type@ *ip, char *sep, void *ignore)
+{
+    int num;
+    num = fscanf(fp, "%"@format@, ip);
+    if (num != 1) {
+        if (num == 0) return -3;
+        if (num == EOF) return -4;
+        return -5;
+    }
+    if (sep != NULL) {
+        num = fscanf(fp, sep);
+        if (num == 0) return 0;
+        if (num == EOF) return -1;
+    }
+    return 0;
+}
+
+/**end repeat**/
+
+/**begin repeat
+#fname=BOOL,BYTE,UBYTE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID#
+*/
+#define @fname@_scan NULL
+/**end repeat**/
+
+
+
+/****************** copyswapn *************************************/
+
+/**begin repeat
+
+#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#fsize=SHORT,SHORT,INT,INT,LONG,LONG,LONGLONG,LONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble#
+*/
+/* Copy n items of @type@ from src into dst (when src != NULL), then
+   byte-swap each item of dst in place when swap is true.  The unrolled
+   branches cover the expected SIZEOF_@fsize@ values; the #else branch
+   is a generic fallback. */
+static void
+@fname@_copyswapn (void *dst, void *src, intp n, int swap, int itemsize)
+{
+    if (src != NULL) /* copy first if needed */
+        memcpy(dst, src, n*sizeof(@type@));
+
+    if (swap) {
+        register char *a, *b, c;
+        for (a = (char *)dst; n>0; n--) {
+#if SIZEOF_@fsize@ == 2
+            b = a + 1;
+            c = *a; *a++ = *b; *b = c;
+            a += 1;
+#elif SIZEOF_@fsize@ == 4
+            b = a + 3;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 2;
+#elif SIZEOF_@fsize@ == 8
+            b = a + 7;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 4;
+#elif SIZEOF_@fsize@ == 10
+            b = a + 9;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 5;
+#elif SIZEOF_@fsize@ == 12
+            b = a + 11;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 6;
+#elif SIZEOF_@fsize@ == 16
+            b = a + 15;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 8;
+#else
+            register int i, nn;
+            b = a + (SIZEOF_@fsize@-1);
+            nn = SIZEOF_@fsize@ / 2;
+            for (i=0; i<nn; i++) {
+                c=*a; *a++ = *b; *b-- = c;
+            }
+            /* bug fix: the swap loop already advanced `a` by nn bytes,
+               so the remaining distance to the next item is
+               SIZEOF_@fsize@ - nn (was incorrectly nn/2) */
+            a += SIZEOF_@fsize@ - nn;
+#endif
+        }
+    }
+}
+
+/* Single-item variant of @fname@_copyswapn: copy one @type@ from src
+   (when given) and byte-swap dst in place when swap is true.  No
+   pointer advance is needed after the swap since only one item is
+   handled. */
+static void
+@fname@_copyswap (void *dst, void *src, int swap, int itemsize)
+{
+
+    if (src != NULL) /* copy first if needed */
+        memcpy(dst, src, sizeof(@type@));
+
+    if (swap) {
+        register char *a, *b, c;
+        a = (char *)dst;
+#if SIZEOF_@fsize@ == 2
+        b = a + 1;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 4
+        b = a + 3;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 8
+        b = a + 7;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 10
+        b = a + 9;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 12
+        b = a + 11;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 16
+        b = a + 15;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#else
+        {
+            register int i, nn;
+            b = a + (SIZEOF_@fsize@-1);
+            nn = SIZEOF_@fsize@ / 2;
+            for (i=0; i<nn; i++) {
+                c=*a; *a++ = *b; *b-- = c;
+            }
+        }
+#endif
+    }
+}
+
+
+/**end repeat**/
+
+/**begin repeat
+
+#fname=BOOL, BYTE, UBYTE#
+#type=Bool, byte, ubyte#
+*/
+/* copyswapn for the single-byte types: a straight copy; byte order is
+   meaningless at one byte, so swap is ignored. */
+static void
+@fname@_copyswapn (void *dst, void *src, intp n, int swap, int itemsize)
+{
+    if (src != NULL) /* copy first if needed */
+        memcpy(dst, src, n*sizeof(@type@));
+    /* ignore swap */
+}
+
+/* Single-item variant for the single-byte types; swap is ignored. */
+static void
+@fname@_copyswap (void *dst, void *src, int swap, int itemsize)
+{
+    if (src != NULL) /* copy first if needed */
+        memcpy(dst, src, sizeof(@type@));
+    /* ignore swap */
+}
+
+/**end repeat**/
+
+
+
+/**begin repeat
+
+#fname=CFLOAT,CDOUBLE,CLONGDOUBLE#
+#type=cfloat, cdouble, clongdouble#
+#fsize=FLOAT,DOUBLE,LONGDOUBLE#
+*/
+/* copyswapn for the complex types: copy n complex items, then swap the
+   2*n underlying real components independently (each component is one
+   SIZEOF_@fsize@-byte float). */
+static void
+@fname@_copyswapn (void *dst, void *src, intp n, int swap, int itemsize)
+{
+
+    if (src != NULL) /* copy first if needed */
+        memcpy(dst, src, n*sizeof(@type@));
+
+    if (swap) {
+        register char *a, *b, c;
+        /* complex type -- swap twice as many */
+        register intp nn = 2*n;
+        for (a = (char *)dst; nn>0; nn--) {
+#if SIZEOF_@fsize@ == 4
+            b = a + 3;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 2;
+#elif SIZEOF_@fsize@ == 8
+            b = a + 7;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 4;
+#elif SIZEOF_@fsize@ == 10
+            b = a + 9;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 5;
+#elif SIZEOF_@fsize@ == 12
+            b = a + 11;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 6;
+#elif SIZEOF_@fsize@ == 16
+            b = a + 15;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b-- = c;
+            c = *a; *a++ = *b; *b = c;
+            a += 8;
+#else
+            register int i, kn;
+
+            b = a + (SIZEOF_@fsize@-1);
+            kn = SIZEOF_@fsize@ / 2;
+            for (i=0; i<kn; i++) {
+                c=*a; *a++ = *b; *b-- = c;
+            }
+            /* bug fix: the swap loop already advanced `a` by kn bytes,
+               so the remaining distance to the next component is
+               SIZEOF_@fsize@ - kn (was incorrectly kn/2) */
+            a += SIZEOF_@fsize@ - kn;
+#endif
+        }
+    }
+}
+
+/* Single complex item: copy, then swap the two real components
+   back-to-back.  Each unrolled branch swaps the first component,
+   advances half an item, then swaps the second. */
+static void
+@fname@_copyswap (void *dst, void *src, int swap, int itemsize)
+{
+    if (src != NULL) /* copy first if needed */
+        memcpy(dst, src, sizeof(@type@));
+
+    if (swap) {
+        register char *a, *b, c;
+        a = (char *)dst;
+#if SIZEOF_@fsize@ == 4
+        b = a + 3;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+        a += 2;
+        b = a + 3;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 8
+        b = a + 7;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+        a += 4;
+        b = a + 7;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 10
+        b = a + 9;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+        a += 5;
+        b = a + 9;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 12
+        b = a + 11;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+        a += 6;
+        b = a + 11;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#elif SIZEOF_@fsize@ == 16
+        b = a + 15;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+        a += 8;
+        b = a + 15;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b-- = c;
+        c = *a; *a++ = *b; *b = c;
+#else
+        {
+            register int i, nn;
+            b = a + (SIZEOF_@fsize@-1);
+            nn = SIZEOF_@fsize@ / 2;
+            for (i=0; i<nn; i++) {
+                c=*a; *a++ = *b; *b-- = c;
+            }
+            /* NOTE(review): this fallback advances a += nn/2 before the
+               second component -- looks like the same off-by stride as
+               the copyswapn fallback; branch is unreachable for the
+               standard float sizes above.  TODO confirm. */
+            a += nn / 2;
+            b = a + (SIZEOF_@fsize@-1);
+            nn = SIZEOF_@fsize@ / 2;
+            for (i=0; i<nn; i++) {
+                c=*a; *a++ = *b; *b-- = c;
+            }
+        }
+#endif
+    }
+}
+
+
+/**end repeat**/
+
+/* copyswapn for OBJECT: transfer n references, dropping whatever the
+   destination slots previously held (XDECREF tolerates NULL).
+   NOTE(review): assumes dst holds valid pointers or NULL, not
+   uninitialized memory.  Byte order is irrelevant for pointers. */
+static void
+OBJECT_copyswapn (PyObject **dst, PyObject **src, intp n, int swap, int itemsize)
+{
+    register int i, nn=n;
+    PyObject **dp=dst, **sp=src;
+    if (src != NULL) {
+        for (i=0; i<nn; i++) {
+            Py_XDECREF(*dp);
+            Py_INCREF(*sp);
+            *dp++ = *sp++;
+        }
+    }
+    /* ignore swap */
+    return;
+}
+
+/* ignore swap */
+/* ignore swap: strings are byte data, so copyswapn is a plain copy of
+   n items of itemsize bytes each */
+static void
+STRING_copyswapn (char *dst, char *src, intp n, int swap, int itemsize)
+{
+    if (src != NULL)
+        memcpy(dst, src, itemsize * n);
+
+    return;
+}
+
+/* ignore swap */
+/* ignore swap: raw void elements are copied byte-for-byte */
+static void
+VOID_copyswapn (char *dst, char *src, intp n, int swap, int itemsize)
+{
+    if (src != NULL)
+        memcpy(dst, src, itemsize * n);
+    return;
+}
+
+/* copyswapn for UNICODE: copy n*itemsize bytes, then swap each
+   sizeof(Py_UNICODE)-sized code unit in place when swap is true.
+   NOTE(review): the swap loop steps per code unit but the outer loop
+   counts n elements -- with itemsize > size only the first unit per
+   pass is swapped; confirm against callers. */
+static void
+UNICODE_copyswapn (char *dst, char *src, intp n, int swap, int itemsize)
+{
+    int size = sizeof(Py_UNICODE);
+
+    if (src != NULL)
+        memcpy(dst, src, itemsize * n);
+
+    if (swap) {
+        register char *a, *b, c;
+        int j, i = size / 2;
+        for (a = (char *)dst; n>0; n--) {
+            b = a + (size-1);
+            for (j=0; j<i; j++) {
+                c=*a; *a++ = *b; *b-- = c;
+            }
+            /* bug fix: the inner loop advanced `a` by i bytes, so the
+               remaining distance to the next code unit is size - i
+               (the old `a += i/2` is 0 for 2-byte Py_UNICODE, making
+               every subsequent swap straddle two units) */
+            a += size - i;
+        }
+    }
+}
+
+
+/* Single-item OBJECT copyswap: delegate to the n-item version. */
+static void
+OBJECT_copyswap (PyObject **dst, PyObject **src, int swap, int itemsize)
+{
+    OBJECT_copyswapn(dst, src, 1, swap, itemsize);
+}
+
+/* Single-item STRING copyswap: plain byte copy; swap is ignored. */
+static void
+STRING_copyswap (char *dst, char *src, int swap, int itemsize)
+{
+    if (src != NULL)
+        memcpy(dst, src, itemsize);
+
+}
+
+/* ignore swap */
+/* Single-item VOID copyswap: plain byte copy. */
+static void
+VOID_copyswap (char *dst, char *src, int swap, int itemsize)
+{
+    if (src != NULL)
+        memcpy(dst, src, itemsize);
+    return;
+}
+
+/* Single-item UNICODE copyswap: copy itemsize bytes, then swap one
+   code unit in place when requested.
+   NOTE(review): only the first sizeof(Py_UNICODE) unit is swapped even
+   when itemsize spans several code units -- confirm against callers. */
+static void
+UNICODE_copyswap (char *dst, char *src, int swap, int itemsize)
+{
+    int size = sizeof(Py_UNICODE);
+
+    if (src != NULL)
+        memcpy(dst, src, itemsize);
+
+    if (swap) {
+        register char *a, *b, c;
+        int j, i = size / 2;
+        a = (char *)dst;
+        b = a + (size-1);
+        for (j=0; j<i; j++) {
+            c=*a; *a++ = *b; *b-- = c;
+        }
+    }
+}
+
+
+/****************** nonzero **********************************/
+
+/**begin repeat
+#fname=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#type=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+/* nonzero for the simple numeric types.  For misaligned arrays the
+   value is memcpy'd into an aligned temporary first; swap is irrelevant
+   since only zero/non-zero is tested. */
+static Bool
+@fname@_nonzero (@type@ *ip, PyArrayObject *ap)
+{
+    @type@ t1;
+    if (ap==NULL || PyArray_ISBEHAVED_RO(ap))
+        return (Bool) (*ip != 0);
+    else {
+        /* don't worry about swap, since we are just testing
+           whether or not equal to 0 */
+        memcpy(&t1, ip, sizeof(@type@));
+        return (Bool) (t1 != 0);
+    }
+}
+/**end repeat**/
+
+/**begin repeat
+#fname=CFLOAT,CDOUBLE,CLONGDOUBLE#
+#type=cfloat, cdouble, clongdouble#
+*/
+/* nonzero for the complex types: TRUE when either component is
+   non-zero; same aligned-temporary trick as the real variant. */
+static Bool
+@fname@_nonzero (@type@ *ip, PyArrayObject *ap)
+{
+    @type@ t1;
+    if (ap==NULL || PyArray_ISBEHAVED_RO(ap))
+        return (Bool) ((ip->real != 0) || (ip->imag != 0));
+    else {
+        /* don't worry about swap, since we are just testing
+           whether or not equal to 0 */
+        memcpy(&t1, ip, sizeof(@type@));
+        return (Bool) ((t1.real != 0) || (t1.imag != 0));
+    }
+}
+/**end repeat**/
+
+
+
+#define WHITESPACE " \t\n\r\v\f"
+#define WHITELEN 6
+
+/* TRUE when ch is one of the WHITELEN whitespace characters in
+   WHITESPACE.  memchr is bounded to WHITELEN, so '\0' (the array
+   terminator) is correctly NOT treated as whitespace -- same behavior
+   as the original index loop, one C-level scan instead. */
+static Bool
+Py_STRING_ISSPACE(char ch)
+{
+    char white[] = WHITESPACE;
+    return (Bool) (memchr(white, ch, WHITELEN) != NULL);
+}
+
+/* nonzero for STRING: TRUE when any byte of the element is not
+   whitespace (so all-space and all-NUL strings are falsy). */
+static Bool
+STRING_nonzero (char *ip, PyArrayObject *ap)
+{
+    int len = ap->descr->elsize;
+    int i;
+    Bool nonz = FALSE;
+
+    for (i=0; i<len; i++) {
+        if (!Py_STRING_ISSPACE(*ip)) {
+            nonz = TRUE;
+            break;
+        }
+        ip++;
+    }
+    return nonz;
+}
+
+/* nonzero for UNICODE: TRUE when any code unit is not whitespace.
+   NOTE(review): `elsize >> 1` hard-codes 2-byte Py_UNICODE; on a UCS-4
+   build this scans twice as many units as the element holds -- confirm
+   build assumptions. */
+static Bool
+UNICODE_nonzero (Py_UNICODE *ip, PyArrayObject *ap)
+{
+    int len = ap->descr->elsize >> 1;
+    int i;
+    Bool nonz = FALSE;
+
+    for (i=0; i<len; i++) {
+        if (!Py_UNICODE_ISSPACE(*ip)) {
+            nonz = TRUE;
+            break;
+        }
+        ip++;
+    }
+    return nonz;
+}
+
+/* nonzero for OBJECT: delegate to Python truth testing. */
+static Bool
+OBJECT_nonzero (PyObject **ip, PyArrayObject *ap)
+{
+    return (Bool) PyObject_IsTrue(*ip);
+}
+
+/* If subclass has _nonzero method call it with buffer
+ object wrapping current item. Otherwise, just compare with '\0'.
+*/
+/* If subclass has _nonzero method call it with buffer
+   object wrapping current item.  Otherwise, just compare with '\0'.
+   (As implemented: TRUE when any byte of the element is non-zero.) */
+static Bool
+VOID_nonzero (char *ip, PyArrayObject *ap)
+{
+    int i;
+    int len = ap->descr->elsize;
+    Bool nonz = FALSE;
+
+    for (i=0; i<len; i++) {
+        if (*ip != '\0') {
+            nonz = TRUE;
+            break;
+        }
+        ip++;
+    }
+    return nonz;
+}
+
+
+/****************** compare **********************************/
+
+/* compare for BOOL: FALSE < TRUE; equal bools compare 0. */
+static int
+BOOL_compare(Bool *ip1, Bool *ip2, PyArrayObject *ap)
+{
+    return (*ip1 ? (*ip2 ? 0 : 1) : (*ip2 ? -1 : 0));
+}
+
+/**begin repeat
+#fname=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#type=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+
+/* Three-way compare for the simple numeric types (-1/0/1). */
+static int
+@fname@_compare (@type@ *ip1, @type@ *ip2, PyArrayObject *ap)
+{
+    return *ip1 < *ip2 ? -1 : *ip1 == *ip2 ? 0 : 1;
+}
+
+/**end repeat**/
+
+/* compare the real parts first, then the imaginary parts if the real parts are equal */
+/**begin repeat
+#fname=CFLOAT, CDOUBLE, CLONGDOUBLE#
+#type= float, double, longdouble#
+*/
+
+/* Lexicographic complex compare: real parts first (ip[0]); if equal,
+   order by imaginary parts (ip[1]). */
+static int
+@fname@_compare (@type@ *ip1, @type@ *ip2, PyArrayObject *ap)
+{
+    if (*ip1 == *ip2) {
+        return ip1[1]<ip2[1] ? -1 : (ip1[1] == ip2[1] ? 0 : 1);
+    }
+    else {
+        return *ip1 < *ip2 ? -1 : 1;
+    }
+}
+ /**end repeat**/
+
+/* compare for OBJECT: delegate to Python's cmp protocol. */
+static int
+OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *ap)
+{
+    return PyObject_Compare(*ip1, *ip2);
+}
+
+/* compare for STRING: byte-wise over the fixed element width. */
+static int
+STRING_compare(char *ip1, char *ip2, PyArrayObject *ap)
+{
+    return strncmp(ip1, ip2, ap->descr->elsize);
+}
+
+/* taken from Python */
+/* taken from Python: code-unit-wise three-way compare over the fixed
+   element width. */
+static int
+UNICODE_compare(register Py_UNICODE *ip1, register Py_UNICODE *ip2,
+                PyArrayObject *ap)
+{
+    /* bug fix: elsize is in BYTES but the loop walks Py_UNICODE units;
+       using elsize directly read sizeof(Py_UNICODE) times too far past
+       the element */
+    register int itemsize = ap->descr->elsize / sizeof(Py_UNICODE);
+    register Py_UNICODE c1, c2;
+
+    if (itemsize < 0) return 0;
+
+    while(itemsize-- > 0) {
+        c1 = *ip1++;
+        c2 = *ip2++;
+
+        if (c1 != c2)
+            return (c1 < c2) ? -1 : 1;
+    }
+    return 0;
+}
+
+/* possibly redefine compare in terms of fields and subarrays if any */
+
+/* as it is, it compares raw-bytes as it they were strings */
+#define VOID_compare STRING_compare
+
/****************** argfunc **********************************/

/**begin repeat

#fname= BOOL,BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE#
#type= Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, float, double, longdouble#
#incr= ip++*14, ip+=2*3#
*/

/* Store in *max_ind the index of the largest of the n contiguous
   elements at ip.  @incr@ expands to ip++ for the 14 real types and
   ip+=2 for the 3 complex types (whose @type@ is the component type),
   so for complex arrays only the REAL parts are compared; imaginary
   parts are ignored. */
static int
@fname@_argmax(@type@ *ip, intp n, intp *max_ind, PyArrayObject *aip)
{
    register intp i;
    @type@ mp=*ip;          /* running maximum, seeded with element 0 */
    *max_ind=0;
    for (i=1; i<n; i++) {
        @incr@;
        if (*ip > mp) {     /* strict >, so ties keep the first index */
            mp = *ip;
            *max_ind = i;
        }
    }
    return 0;
}

/**end repeat**/
+
/* argmax over an array of PyObject* using Python 2.x three-way
   comparison.  NOTE(review): PyObject_Compare() errors are not
   checked; a failing comparison is treated as "not greater". */
static int
OBJECT_argmax(PyObject **ip, intp n, intp *max_ind, PyArrayObject *aip)
{
    register intp i;
    PyObject *mp=ip[0]; *max_ind=0;   /* seed with element 0 (borrowed) */
    for(i=1; i<n; i++) {
        ip++;
        if (PyObject_Compare(*ip,mp) > 0) {
            mp = *ip;
            *max_ind=i;
        }
    }
    return 0;
}
+
+/**begin repeat
+
+#fname= STRING, UNICODE#
+#type= char, Py_UNICODE#
+
+*/
+static int
+@fname@_argmax(@type@ *ip, intp n, intp *max_ind, PyArrayObject *aip)
+{
+ register intp i;
+ int elsize = aip->descr->elsize;
+ @type@ *mp = (@type@ *)_pya_malloc(elsize);
+
+ if (mp==NULL) return 0;
+ memcpy(mp, ip, elsize);
+ *max_ind = 0;
+ for(i=1; i<n; i++) {
+ ip += elsize;
+ if (@fname@_compare(ip,mp,aip) > 0) {
+ memcpy(mp, ip, elsize);
+ *max_ind=i;
+ }
+ }
+ _pya_free(mp);
+ return 0;
+}
+
+/**end repeat**/
+
+#define VOID_argmax NULL
+
+static void
+BOOL_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n,
+ void *ignore)
+{
+ register Bool tmp=FALSE;
+ register intp i;
+ for(i=0;i<n;i++,ip1+=is1,ip2+=is2) {
+ if ((*((Bool *)ip1) != 0) && (*((Bool *)ip2) != 0)) {
+ tmp = TRUE;
+ break;
+ }
+ }
+ *((Bool *)op) = tmp;
+}
+
/**begin repeat
#name=BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE#
#type= byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
#out= long, ulong, long, ulong, long, ulong, long, ulong, longlong, ulonglong, float, double, longdouble#
*/
/* Dot (inner) product of n strided elements.  The accumulator uses
   the wider @out@ type (long/ulong for the small integer types) and
   the sum is cast back to @type@ on store.  Strides are in bytes. */
static void
@name@_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n,
           void *ignore)
{
    register @out@ tmp=(@out@)0;
    register intp i;
    for(i=0;i<n;i++,ip1+=is1,ip2+=is2) {
        tmp += (@out@)(*((@type@ *)ip1)) * \
            (@out@)(*((@type@ *)ip2));
    }
    *((@type@ *)op) = (@type@) tmp;
}
/**end repeat**/
+
+
/**begin repeat
#name=CFLOAT, CDOUBLE, CLONGDOUBLE#
#type= float, double, longdouble#
*/
/* Complex dot product: accumulates sum(a*b) -- NOT conjugated --
   componentwise into (tmpr, tmpi); each element is a pair of @type@
   (real at [0], imaginary at [1]).  Strides are in bytes. */
static void @name@_dot(char *ip1, intp is1, char *ip2, intp is2,
                       char *op, intp n, void *ignore)
{
    @type@ tmpr=(@type@)0.0, tmpi=(@type@)0.0;
    intp i;
    for(i=0;i<n;i++,ip1+=is1,ip2+=is2) {
        tmpr += ((@type@ *)ip1)[0] * ((@type@ *)ip2)[0]
            - ((@type@ *)ip1)[1] * ((@type@ *)ip2)[1];
        tmpi += ((@type@ *)ip1)[1] * ((@type@ *)ip2)[0]
            + ((@type@ *)ip1)[0] * ((@type@ *)ip2)[1];
    }
    ((@type@ *)op)[0] = tmpr; ((@type@ *)op)[1] = tmpi;
}

/**end repeat**/
+
/* Dot product over PyObject* elements via the PyNumber protocol:
   tmp = sum(ip1[i] * ip2[i]).  On a failing PyNumber call the
   partial sum is released and the output slot is left untouched
   (the failing call set the Python error indicator).
   NOTE(review): if n == 0, tmp stays NULL and a NULL pointer is
   stored at *op after releasing the previous entry -- presumably
   callers never invoke this with n == 0; confirm. */
static void
OBJECT_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n,
           void *ignore)
{
    intp i;
    PyObject *tmp1, *tmp2, *tmp=NULL;
    PyObject **tmp3;
    for(i=0;i<n;i++,ip1+=is1,ip2+=is2) {
        tmp1 = PyNumber_Multiply(*((PyObject **)ip1),
                                 *((PyObject **)ip2));
        if (!tmp1) { Py_XDECREF(tmp); return;}
        if (i == 0) {
            tmp = tmp1;        /* first product seeds the sum */
        } else {
            tmp2 = PyNumber_Add(tmp, tmp1);
            Py_XDECREF(tmp);
            Py_XDECREF(tmp1);
            if (!tmp2) return;
            tmp = tmp2;
        }
    }
    /* replace the old output object with the new sum */
    tmp3 = (PyObject**) op;
    tmp2 = *tmp3;
    *((PyObject **)op) = tmp;
    Py_XDECREF(tmp2);
}
+
+#define BOOL_fill NULL
+
+/* this requires buffer to be filled with objects or NULL */
+static void
+OBJECT_fill(PyObject **buffer, intp length, void *ignored)
+{
+ intp i;
+ PyObject *start = buffer[0];
+ PyObject *delta = buffer[1];
+ delta = PyNumber_Subtract(delta, start);
+ if (!delta) return;
+ start = PyNumber_Add(start, delta);
+ if (!start) goto finish;
+ buffer += 2;
+
+ for (i=2; i<length; i++, buffer++) {
+ start = PyNumber_Add(start, delta);
+ if (!start) goto finish;
+ Py_XDECREF(*buffer);
+ *buffer = start;
+ }
+
+ finish:
+ Py_DECREF(delta);
+ return;
+}
+
/**begin repeat
#NAME=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
#typ=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble#
*/
/* Fill buffer[2..length-1] with the arithmetic progression whose
   first two terms are buffer[0] and buffer[1]:
   delta = buffer[1] - buffer[0], and start is advanced to
   buffer[0] + 2*delta (the value of buffer[2]) before the loop. */
static void
@NAME@_fill(@typ@ *buffer, intp length, void *ignored)
{
    intp i;
    @typ@ start = buffer[0];
    @typ@ delta = buffer[1];
    delta -= start;
    start += (delta + delta);
    buffer += 2;
    for (i=2; i<length; i++, buffer++) {
        *buffer = start;
        start += delta;
    }
}
/**end repeat**/
+
/**begin repeat
#NAME=CFLOAT,CDOUBLE,CLONGDOUBLE#
#typ=cfloat,cdouble,clongdouble#
*/
/* Complex version of the fill above: the same arithmetic progression
   is applied independently to the real and imaginary components. */
static void
@NAME@_fill(@typ@ *buffer, intp length, void *ignored)
{
    intp i;
    @typ@ start;
    @typ@ delta;

    start.real = buffer->real;
    start.imag = buffer->imag;
    delta.real = buffer[1].real;
    delta.imag = buffer[1].imag;
    delta.real -= start.real;         /* delta = buffer[1] - buffer[0] */
    delta.imag -= start.imag;
    start.real += (delta.real + delta.real);   /* start = buffer[2] value */
    start.imag += (delta.imag + delta.imag);
    buffer += 2;
    for (i=2; i<length; i++, buffer++) {
        buffer->real = start.real;
        buffer->imag = start.imag;
        start.real += delta.real;
        start.imag += delta.imag;
    }
}
/**end repeat**/
+
+
/* Alignment of `type`: the offset of a `type` member placed right
   after a single char inside a struct. */
#define _ALIGN(type) offsetof(struct {char c; type v;},v)

/**begin repeat

#from= VOID, STRING, UNICODE#
#align= char, char, Py_UNICODE#
#NAME= Void, String, Unicode#
#endian= |, |, =#
*/

/* Function table for the flexible types.  Entry order must match the
   PyArray_ArrFuncs layout: the 21-entry cast table, then getitem,
   setitem, compare, argmax, dot (none for flexible types), scan,
   copyswapn, copyswap, nonzero, fill (none), and two groups of
   reserved NULL slots. */
static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
    {
        (PyArray_VectorUnaryFunc*)@from@_to_BOOL,
        (PyArray_VectorUnaryFunc*)@from@_to_BYTE,
        (PyArray_VectorUnaryFunc*)@from@_to_UBYTE,
        (PyArray_VectorUnaryFunc*)@from@_to_SHORT,
        (PyArray_VectorUnaryFunc*)@from@_to_USHORT,
        (PyArray_VectorUnaryFunc*)@from@_to_INT,
        (PyArray_VectorUnaryFunc*)@from@_to_UINT,
        (PyArray_VectorUnaryFunc*)@from@_to_LONG,
        (PyArray_VectorUnaryFunc*)@from@_to_ULONG,
        (PyArray_VectorUnaryFunc*)@from@_to_LONGLONG,
        (PyArray_VectorUnaryFunc*)@from@_to_ULONGLONG,
        (PyArray_VectorUnaryFunc*)@from@_to_FLOAT,
        (PyArray_VectorUnaryFunc*)@from@_to_DOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_LONGDOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_CFLOAT,
        (PyArray_VectorUnaryFunc*)@from@_to_CDOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_CLONGDOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_OBJECT,
        (PyArray_VectorUnaryFunc*)@from@_to_STRING,
        (PyArray_VectorUnaryFunc*)@from@_to_UNICODE,
        (PyArray_VectorUnaryFunc*)@from@_to_VOID
    },
    (PyArray_GetItemFunc*)@from@_getitem,
    (PyArray_SetItemFunc*)@from@_setitem,
    (PyArray_CompareFunc*)@from@_compare,
    (PyArray_ArgFunc*)@from@_argmax,
    (PyArray_DotFunc*)NULL,
    (PyArray_ScanFunc*)@from@_scan,
    (PyArray_CopySwapNFunc*)@from@_copyswapn,
    (PyArray_CopySwapFunc*)@from@_copyswap,
    (PyArray_NonzeroFunc*)@from@_nonzero,
    (PyArray_FillFunc*)NULL,
    {
        NULL, NULL, NULL, NULL
    },
    {
        NULL, NULL, NULL, NULL
    }
};

/* Static descriptor for the flexible types.  Kind char and type char
   are both @from@LTR; byte order is '@endian@'.  The 0 after the type
   number is presumably the element size, left 0 because flexible
   types get their size per-array -- confirm against the PyArray_Descr
   layout in arrayobject.h. */
static PyArray_Descr @from@_Descr = {
    PyObject_HEAD_INIT(&PyArrayDescr_Type)
    &Py@NAME@ArrType_Type,
    PyArray_@from@LTR,
    PyArray_@from@LTR,
    '@endian@',
    PyArray_@from@, 0,
    _ALIGN(@align@),
    NULL,
    NULL,
    &_Py@NAME@_ArrFuncs,
};

/**end repeat**/
+
+
/**begin repeat

#from= BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT#
#num= 1*14,2*3,1#
#fromtyp= Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, float, double, longdouble, PyObject *#
#NAME= Bool, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, Object#
#kind= GENBOOL, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, FLOATING, FLOATING, FLOATING, COMPLEX, COMPLEX, COMPLEX, OBJECT#
#endian= |*3, =*14, |#
*/

/* Function table for the fixed-size builtin types; same slot order
   as the flexible table above, but these types do provide dot and
   fill implementations. */
static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
    {
        (PyArray_VectorUnaryFunc*)@from@_to_BOOL,
        (PyArray_VectorUnaryFunc*)@from@_to_BYTE,
        (PyArray_VectorUnaryFunc*)@from@_to_UBYTE,
        (PyArray_VectorUnaryFunc*)@from@_to_SHORT,
        (PyArray_VectorUnaryFunc*)@from@_to_USHORT,
        (PyArray_VectorUnaryFunc*)@from@_to_INT,
        (PyArray_VectorUnaryFunc*)@from@_to_UINT,
        (PyArray_VectorUnaryFunc*)@from@_to_LONG,
        (PyArray_VectorUnaryFunc*)@from@_to_ULONG,
        (PyArray_VectorUnaryFunc*)@from@_to_LONGLONG,
        (PyArray_VectorUnaryFunc*)@from@_to_ULONGLONG,
        (PyArray_VectorUnaryFunc*)@from@_to_FLOAT,
        (PyArray_VectorUnaryFunc*)@from@_to_DOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_LONGDOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_CFLOAT,
        (PyArray_VectorUnaryFunc*)@from@_to_CDOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_CLONGDOUBLE,
        (PyArray_VectorUnaryFunc*)@from@_to_OBJECT,
        (PyArray_VectorUnaryFunc*)@from@_to_STRING,
        (PyArray_VectorUnaryFunc*)@from@_to_UNICODE,
        (PyArray_VectorUnaryFunc*)@from@_to_VOID
    },
    (PyArray_GetItemFunc*)@from@_getitem,
    (PyArray_SetItemFunc*)@from@_setitem,
    (PyArray_CompareFunc*)@from@_compare,
    (PyArray_ArgFunc*)@from@_argmax,
    (PyArray_DotFunc*)@from@_dot,
    (PyArray_ScanFunc*)@from@_scan,
    (PyArray_CopySwapNFunc*)@from@_copyswapn,
    (PyArray_CopySwapFunc*)@from@_copyswap,
    (PyArray_NonzeroFunc*)@from@_nonzero,
    (PyArray_FillFunc*)@from@_fill,
    {
        NULL, NULL, NULL, NULL
    },
    {
        NULL, NULL, NULL, NULL
    }
};

/* Static descriptor: kind char @kind@LTR, type char @from@LTR, byte
   order '@endian@', type number, element size (@num@ components --
   2 for complex -- times the component size), and alignment. */
static PyArray_Descr @from@_Descr = {
    PyObject_HEAD_INIT(&PyArrayDescr_Type)
    &Py@NAME@ArrType_Type,
    PyArray_@kind@LTR,
    PyArray_@from@LTR,
    '@endian@',
    PyArray_@from@,
    @num@*sizeof(@fromtyp@),
    _ALIGN(@fromtyp@),
    NULL,
    NULL,
    &_Py@NAME@_ArrFuncs,
};

/**end repeat**/
+
/* Highest type-character value handled by the letter lookup table. */
#define _MAX_LETTER 128
/* Maps a type character (e.g. 'f') to its type number; every entry is
   initialized in set_typeinfo(), unused letters to PyArray_NTYPES. */
static char _letter_to_num[_MAX_LETTER];

/* Builtin descriptors indexed by type number (PyArray_DescrFromType
   indexes this directly, so the order must follow the type enum). */
static PyArray_Descr *_builtin_descrs[] = {
    &BOOL_Descr,
    &BYTE_Descr,
    &UBYTE_Descr,
    &SHORT_Descr,
    &USHORT_Descr,
    &INT_Descr,
    &UINT_Descr,
    &LONG_Descr,
    &ULONG_Descr,
    &LONGLONG_Descr,
    &ULONGLONG_Descr,
    &FLOAT_Descr,
    &DOUBLE_Descr,
    &LONGDOUBLE_Descr,
    &CFLOAT_Descr,
    &CDOUBLE_Descr,
    &CLONGDOUBLE_Descr,
    &OBJECT_Descr,
    &STRING_Descr,
    &UNICODE_Descr,
    &VOID_Descr,
};
+
+/*OBJECT_API
+ Get the PyArray_Descr structure for a type.
+*/
+static PyArray_Descr *
+PyArray_DescrFromType(int type)
+{
+ PyArray_Descr *ret=NULL;
+
+ if (type < PyArray_NTYPES) {
+ ret = _builtin_descrs[type];
+ }
+ else if (type == PyArray_NOTYPE) {
+ /* This needs to not raise an error so
+ that PyArray_DescrFromType(PyArray_NOTYPE)
+ works for backwards-compatible C-API
+ */
+ return NULL;
+ }
+ else if PyTypeNum_ISUSERDEF(type) {
+ ret = userdescrs[type-PyArray_USERDEF];
+ }
+ else {
+ int num=PyArray_NTYPES;
+ if (type < _MAX_LETTER)
+ num = (int) _letter_to_num[type];
+ if (num >= PyArray_NTYPES)
+ ret = NULL;
+ else
+ ret = _builtin_descrs[num];
+ }
+ if (ret==NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "Invalid type for array");
+ }
+ else Py_INCREF(ret);
+ return ret;
+}
+
+
/* Build the module-level type information:
   - fill _letter_to_num so type characters map to type numbers,
   - point every builtin descriptor's fields slot at Py_None,
   - install a "typeinfo" dict of per-type tuples (typechar, number,
     bits, alignment[, max, min], scalar type object) plus the
     abstract array-scalar base types into the module dictionary.
   Returns 0 on success, -1 if the info dict cannot be created.
   NOTE(review): each Py_DECREF(s) assumes Py_BuildValue succeeded;
   a NULL there would crash -- presumably acceptable at module-init
   time, confirm. */
static int
set_typeinfo(PyObject *dict)
{
    PyObject *infodict, *s;
    int i;

    /* default: every letter maps to "no such type" */
    for (i=0; i<_MAX_LETTER; i++) {
        _letter_to_num[i] = PyArray_NTYPES;
    }

/**begin repeat
#name=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,INTP,UINTP,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID#
*/
    _letter_to_num[PyArray_@name@LTR] = PyArray_@name@;
/**end repeat**/
    /* the alternate string type character maps to the same type */
    _letter_to_num[PyArray_STRINGLTR2] = PyArray_STRING;


/**begin repeat
#name=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID#
*/
    @name@_Descr.fields = Py_None;
/**end repeat**/



    /* Set a dictionary with type information */
    infodict = PyDict_New();
    if (infodict == NULL) return -1;

#define BITSOF_INTP CHAR_BIT*SIZEOF_PY_INTPTR_T
#define BITSOF_BYTE CHAR_BIT

/**begin repeat

#name=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,INTP,UINTP,LONG,ULONG,LONGLONG,ULONGLONG#
#uname=BOOL,BYTE*2,SHORT*2,INT*2,INTP*2,LONG*2,LONGLONG*2#
#Name=Bool,Byte,UByte,Short,UShort,Int,UInt,Intp,UIntp,Long,ULong,LongLong,ULongLong#
#type=Bool,byte,ubyte,short,ushort,int,uint,intp,uintp,long,ulong,longlong,ulonglong#
#max=1,MAX_BYTE,MAX_UBYTE,MAX_SHORT,MAX_USHORT,MAX_INT,PyLong_FromUnsignedLong(MAX_UINT),PyLong_FromLongLong((longlong) MAX_INTP),PyLong_FromUnsignedLongLong((ulonglong) MAX_UINTP),MAX_LONG,PyLong_FromUnsignedLong((unsigned long) MAX_ULONG),PyLong_FromLongLong((longlong) MAX_LONGLONG), PyLong_FromUnsignedLongLong((ulonglong) MAX_ULONGLONG)#
#min=0,MIN_BYTE,0,MIN_SHORT,0,MIN_INT,0,PyLong_FromLongLong((longlong) MIN_INTP),0,MIN_LONG,0,PyLong_FromLongLong((longlong) MIN_LONGLONG),0#
#cx=i*6,N,N,N,l,N,N,N#
#cn=i*7,N,i,l,i,N,i#
*/
    /* integer entry; the @cx@/@cn@ format codes are chosen so that
       PyLong_From* results are passed as "N" (reference consumed by
       Py_BuildValue, no leak) and plain macros as "i"/"l" */
    PyDict_SetItemString(infodict, "@name@",
                         s=Py_BuildValue("ciii@cx@@cn@O",
                                         PyArray_@name@LTR,
                                         PyArray_@name@,
                                         BITSOF_@uname@,
                                         _ALIGN(@type@),
                                         @max@, @min@,
                                         (PyObject *)&Py@Name@ArrType_Type));
    Py_DECREF(s);
/**end repeat**/

#define BITSOF_CFLOAT 2*BITSOF_FLOAT
#define BITSOF_CDOUBLE 2*BITSOF_DOUBLE
#define BITSOF_CLONGDOUBLE 2*BITSOF_LONGDOUBLE

/**begin repeat

#type=float,double,longdouble,cfloat,cdouble,clongdouble#
#name=FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE#
#Name=Float,Double,LongDouble,CFloat,CDouble,CLongDouble#
*/
    /* floating/complex entry: no max/min fields */
    PyDict_SetItemString(infodict, "@name@",
                         s=Py_BuildValue("ciiiO", PyArray_@name@LTR,
                                         PyArray_@name@, BITSOF_@name@,
                                         _ALIGN(@type@),
                                         (PyObject *)\
                                         &Py@Name@ArrType_Type));
    Py_DECREF(s);
/**end repeat**/

    /* object and flexible types: the flexible types report 0 bits
       because their size is per-array */
    PyDict_SetItemString(infodict, "OBJECT",
                         s=Py_BuildValue("ciiiO", PyArray_OBJECTLTR,
                                         PyArray_OBJECT,
                                         sizeof(PyObject *)*CHAR_BIT,
                                         _ALIGN(PyObject *),
                                         (PyObject *)\
                                         &PyObjectArrType_Type));
    Py_DECREF(s);
    PyDict_SetItemString(infodict, "STRING",
                         s=Py_BuildValue("ciiiO", PyArray_STRINGLTR,
                                         PyArray_STRING, 0,
                                         _ALIGN(char),
                                         (PyObject *)\
                                         &PyStringArrType_Type));
    Py_DECREF(s);
    PyDict_SetItemString(infodict, "UNICODE",
                         s=Py_BuildValue("ciiiO", PyArray_UNICODELTR,
                                         PyArray_UNICODE, 0,
                                         _ALIGN(Py_UNICODE),
                                         (PyObject *)\
                                         &PyUnicodeArrType_Type));
    Py_DECREF(s);
    PyDict_SetItemString(infodict, "VOID",
                         s=Py_BuildValue("ciiiO", PyArray_VOIDLTR,
                                         PyArray_VOID, 0,
                                         _ALIGN(char),
                                         (PyObject *)\
                                         &PyVoidArrType_Type));
    Py_DECREF(s);

/* install the abstract array-scalar base types under their names */
#define SETTYPE(name) \
    Py_INCREF(&Py##name##ArrType_Type); \
    PyDict_SetItemString(infodict, #name, \
                         (PyObject *)&Py##name##ArrType_Type);

    SETTYPE(Generic)
    SETTYPE(Numeric)
    SETTYPE(Integer)
    SETTYPE(Inexact)
    SETTYPE(SignedInteger)
    SETTYPE(UnsignedInteger)
    SETTYPE(Floating)
    SETTYPE(ComplexFloating)
    SETTYPE(Flexible)
    SETTYPE(Character)

#undef SETTYPE

    PyDict_SetItemString(dict, "typeinfo", infodict);
    Py_DECREF(infodict);
    return 0;
}

#undef _MAX_LETTER

+
diff --git a/numpy/base/src/multiarraymodule.c b/numpy/base/src/multiarraymodule.c
new file mode 100644
index 000000000..2cce9ad03
--- /dev/null
+++ b/numpy/base/src/multiarraymodule.c
@@ -0,0 +1,5507 @@
+/*
+ Python Multiarray Module -- A useful collection of functions for creating and
+ using ndarrays
+
+ Original file
+ Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu
+
+ Modified for scipy_core in 2005
+
+ Travis E. Oliphant
+ Assistant Professor at
+ Brigham Young University
+
+*/
+
+/* $Id: multiarraymodule.c,v 1.36 2005/09/14 00:14:00 teoliphant Exp $ */
+
+#include "Python.h"
+#include "structmember.h"
+/*#include <string.h>
+#include <math.h>
+*/
+
+#define _MULTIARRAYMODULE
+#include "scipy/arrayobject.h"
+
+#define PyAO PyArrayObject
+
+static PyObject *typeDict=NULL; /* Must be explicitly loaded */
+static PyObject *_scipy_internal=NULL; /* A Python module for callbacks */
+
+
/* Try to obtain a PyArray_Descr from an object's "dtypedescr"
   attribute.  Returns the converted descriptor on success, or NULL
   with no exception set (errors are deliberately cleared) when the
   attribute is missing or does not convert. */
static PyArray_Descr *
_arraydescr_fromobj(PyObject *obj)
{
    PyObject *dtypedescr;
    PyArray_Descr *new;
    int ret;

    dtypedescr = PyObject_GetAttrString(obj, "dtypedescr");
    PyErr_Clear();   /* swallow AttributeError from the lookup */
    if (dtypedescr) {
        ret = PyArray_DescrConverter(dtypedescr, &new);
        Py_DECREF(dtypedescr);
        if (ret) return new;
        PyErr_Clear();   /* conversion failure is also non-fatal */
    }
    return NULL;
}
+
+
+/* Including this file is the only way I know how to declare functions
+ static in each file, and store the pointers from functions in both
+ arrayobject.c and multiarraymodule.c for the C-API
+
   Declaring an external pointer-containing variable in arrayobject.c
+ and trying to copy it to PyArray_API, did not work.
+
+ Think about two modules with a common api that import each other...
+
+ This file would just be the module calls.
+*/
+
+#include "arrayobject.c"
+
+
+/* An Error object -- rarely used? */
+static PyObject *MultiArrayError;
+
/*MULTIARRAY_API
 Multiply a List of ints
*/
/* Product of the first n entries of l1; an empty list (n == 0)
   yields 1. */
static int
PyArray_MultiplyIntList(register int *l1, register int n)
{
    register int prod = 1;
    register int k;

    for (k = 0; k < n; k++) {
        prod *= l1[k];
    }
    return prod;
}
+
+/*MULTIARRAY_API
+ Multiply a List
+*/
+static intp
+PyArray_MultiplyList(register intp *l1, register int n)
+{
+ register intp s=1;
+ while (n--) s *= (*l1++);
+ return s;
+}
+
+/*MULTIARRAY_API
+ Produce a pointer into array
+*/
+static char *
+PyArray_GetPtr(PyArrayObject *obj, register intp* ind)
+{
+ register int n = obj->nd;
+ register intp *strides = obj->strides;
+ register char *dptr = obj->data;
+
+ while (n--) dptr += (*strides++) * (*ind++);
+ return dptr;
+}
+
/*MULTIARRAY_API
 Get axis from an object (possibly None) -- a converter function,
*/
/* Converter for "axis" arguments: Py_None maps to MAX_DIMS (used as
   a sentinel by the axis-checking helpers); anything else must be
   convertible to a C int.  Returns PY_SUCCEED or PY_FAIL. */
static int
PyArray_AxisConverter(PyObject *obj, int *axis)
{
    if (obj == Py_None) {
        *axis = MAX_DIMS;
    }
    else {
        *axis = (int) PyInt_AsLong(obj);
        if (PyErr_Occurred()) {
            return PY_FAIL;
        }
    }
    return PY_SUCCEED;
}
+
+/*MULTIARRAY_API
+ Compare Lists
+*/
+static int
+PyArray_CompareLists(intp *l1, intp *l2, int n)
+{
+ int i;
+ for(i=0;i<n;i++) {
+ if (l1[i] != l2[i]) return 0;
+ }
+ return 1;
+}
+
+/* steals a reference to type -- accepts NULL */
+/*MULTIARRAY_API
+ View
+*/
+static PyObject *
+PyArray_View(PyArrayObject *self, PyArray_Descr *type)
+{
+ PyObject *new=NULL;
+
+ Py_INCREF(self->descr);
+ new = PyArray_NewFromDescr(self->ob_type,
+ self->descr,
+ self->nd, self->dimensions,
+ self->strides,
+ self->data,
+ self->flags, (PyObject *)self);
+
+ if (new==NULL) return NULL;
+ Py_INCREF(self);
+ PyArray_BASE(new) = (PyObject *)self;
+
+ if (type != NULL) {
+ if (PyObject_SetAttrString(new, "dtypedescr",
+ (PyObject *)type) < 0) {
+ Py_DECREF(new);
+ Py_DECREF(type);
+ return NULL;
+ }
+ Py_DECREF(type);
+ }
+ return new;
+}
+
/*MULTIARRAY_API
 Ravel
*/
/* Return a 1-d version of the array: a reshaped view when C order is
   requested and the data is already C-contiguous, otherwise a
   flattened copy.  fortran < 0 means "use the array's own order". */
static PyObject *
PyArray_Ravel(PyArrayObject *a, int fortran)
{
    PyArray_Dims newdim = {NULL,1};
    intp val[1] = {-1};   /* -1: infer the (full) length */

    if (fortran < 0) fortran = PyArray_ISFORTRAN(a);

    newdim.ptr = val;
    if (!fortran && PyArray_ISCONTIGUOUS(a)) {
        if (a->nd == 1) {
            /* already flat: return the same array */
            Py_INCREF(a);
            return (PyObject *)a;
        }
        return PyArray_Newshape(a, &newdim);
    }
    else
        return PyArray_Flatten(a, fortran);
}
+
/*MULTIARRAY_API
 Flatten
*/
/* Return a new 1-d array containing a copy of the data; Fortran
   order is produced by copying from the transpose.  fortran < 0
   means "use the array's own order". */
static PyObject *
PyArray_Flatten(PyArrayObject *a, int fortran)
{
    PyObject *ret, *new;
    intp size;

    if (fortran < 0) fortran = PyArray_ISFORTRAN(a);

    size = PyArray_SIZE(a);
    Py_INCREF(a->descr);
    ret = PyArray_NewFromDescr(a->ob_type,
                               a->descr,
                               1, &size,
                               NULL,
                               NULL,
                               0, (PyObject *)a);

    if (ret== NULL) return NULL;
    if (fortran) {
        /* copying the transpose in C order yields Fortran order */
        new = PyArray_Transpose(a, NULL);
        if (new == NULL) {
            Py_DECREF(ret);
            return NULL;
        }
    }
    else {
        Py_INCREF(a);
        new = (PyObject *)a;
    }
    if (PyArray_CopyInto((PyArrayObject *)ret, (PyArrayObject *)new) < 0) {
        Py_DECREF(ret);
        Py_DECREF(new);
        return NULL;
    }
    Py_DECREF(new);
    return ret;
}
+
+
/* For backward compatibility -- not recommended */
+
/*MULTIARRAY_API
 Reshape an array
*/
/* Convert `shape` (a Python int or sequence of ints) to a
   PyArray_Dims and delegate to PyArray_Newshape. */
static PyObject *
PyArray_Reshape(PyArrayObject *self, PyObject *shape)
{
    PyObject *ret;
    PyArray_Dims newdims;

    if (!PyArray_IntpConverter(shape, &newdims)) return NULL;
    ret = PyArray_Newshape(self, &newdims);
    PyDimMem_FREE(newdims.ptr);
    return ret;
}
+
/* Helper for PyArray_Newshape: succeed (return 0) when the requested
   shape differs from self's only by inserting and/or removing
   length-1 dimensions.  On success `strides` is filled with matching
   strides (0 for inserted 1-dims); returns -1 otherwise. */
static int
_check_ones(PyArrayObject *self, int newnd, intp* newdims, intp *strides)
{
    int nd;
    intp *dims;
    Bool done=FALSE;
    int j, k;

    nd = self->nd;
    dims = self->dimensions;

    for (k=0, j=0; !done && (j<nd || k<newnd);) {
        if ((j<nd) && (k<newnd) && (newdims[k]==dims[j])) {
            /* dimensions agree: carry the stride over */
            strides[k] = self->strides[j];
            j++; k++;
        }
        else if ((k<newnd) && (newdims[k]==1)) {
            /* inserted length-1 dimension: stride 0 */
            strides[k] = 0;
            k++;
        }
        else if ((j<nd) && (dims[j]==1)) {
            /* removed length-1 dimension */
            j++;
        }
        else done=TRUE;   /* shapes differ in a real dimension */
    }
    if (done) return -1;
    return 0;
}
+
/* Returns a new array
   with the new shape from the data
   in the old array
*/

/*MULTIARRAY_API
 New shape for an array
*/
/* Reshape self to newdims, returning a view where possible.  One
   dimension may be negative (unknown) and is inferred from the total
   size.  The general case requires a C-contiguous source; the pure
   insert/remove-ones case works on any strides via _check_ones(). */
static PyObject *
PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims)
{
    intp i, s_original, i_unknown, s_known;
    intp *dimensions = newdims->ptr;
    PyArrayObject *ret;
    char msg[] = "total size of new array must be unchanged";
    int n = newdims->len;
    Bool same;
    intp *strides = NULL;
    intp newstrides[MAX_DIMS];

    /* Quick check to make sure anything needs to be done */
    if (n == self->nd) {
        same = TRUE;
        i=0;
        while(same && i<n) {
            if (PyArray_DIM(self,i) != dimensions[i])
                same=FALSE;
            i++;
        }
        /* identical shape: just return a view */
        if (same) return PyArray_View(self, NULL);
    }

    /* Returns a pointer to an appropriate strides array
       if all we are doing is inserting ones into the shape,
       or removing ones from the shape
       or doing a combination of the two*/
    i=_check_ones(self, n, dimensions, newstrides);
    if (i==0) strides=newstrides;

    if (strides==NULL) {
        /* general reshape: requires contiguous data */
        if (!PyArray_ISCONTIGUOUS(self)) {
            PyErr_SetString(PyExc_ValueError,
                            "changing shape that way "\
                            "only works on contiguous arrays");
            return NULL;
        }

        s_known = 1;
        i_unknown = -1;

        /* product of the known dimensions; remember the (single)
           unknown one */
        for(i=0; i<n; i++) {
            if (dimensions[i] < 0) {
                if (i_unknown == -1) {
                    i_unknown = i;
                } else {
                    PyErr_SetString(PyExc_ValueError,
                                    "can only specify one" \
                                    " unknown dimension");
                    return NULL;
                }
            } else {
                s_known *= dimensions[i];
            }
        }

        s_original = PyArray_SIZE(self);

        if (i_unknown >= 0) {
            /* infer the unknown dimension; must divide evenly */
            if ((s_known == 0) || (s_original % s_known != 0)) {
                PyErr_SetString(PyExc_ValueError, msg);
                return NULL;
            }
            dimensions[i_unknown] = s_original/s_known;
        } else {
            if (s_original != s_known) {
                PyErr_SetString(PyExc_ValueError, msg);
                return NULL;
            }
        }
    }

    Py_INCREF(self->descr);
    ret = (PyAO *)PyArray_NewFromDescr(self->ob_type,
                                       self->descr,
                                       n, dimensions,
                                       strides,
                                       self->data,
                                       self->flags, (PyObject *)self);

    if (ret== NULL) return NULL;

    /* the result is a view: keep self alive as its base */
    Py_INCREF(self);
    ret->base = (PyObject *)self;
    PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN);

    return (PyObject *)ret;
}
+
/* return a new view of the array object with all of its unit-length
   dimensions squeezed out if needed, otherwise
   return the same array.
 */

/*MULTIARRAY_API*/
static PyObject *
PyArray_Squeeze(PyArrayObject *self)
{
    int nd = self->nd;
    int newnd = nd;
    intp dimensions[MAX_DIMS];
    intp strides[MAX_DIMS];
    int i,j;
    PyObject *ret;

    if (nd == 0) {
        /* 0-d arrays have nothing to squeeze */
        Py_INCREF(self);
        return (PyObject *)self;
    }
    /* keep every dimension that is not of length 1 */
    for (j=0, i=0; i<nd; i++) {
        if (self->dimensions[i] == 1) {
            newnd -= 1;
        }
        else {
            dimensions[j] = self->dimensions[i];
            strides[j++] = self->strides[i];
        }
    }

    Py_INCREF(self->descr);
    ret = PyArray_NewFromDescr(self->ob_type,
                               self->descr,
                               newnd, dimensions,
                               strides, self->data,
                               self->flags,
                               (PyObject *)self);
    if (ret == NULL) return NULL;
    /* the view must not own the data; keep self alive as its base */
    PyArray_FLAGS(ret) &= ~OWN_DATA;
    PyArray_BASE(ret) = (PyObject *)self;
    Py_INCREF(self);
    return (PyObject *)ret;
}
+
+
/*MULTIARRAY_API
 Mean
*/
/* mean along `axis`: add.reduce(self, axis) divided (via the Python
   number protocol) by the length of that axis; `rtype` is the
   accumulation type. */
static PyObject *
PyArray_Mean(PyArrayObject *self, int axis, int rtype)
{
    PyObject *obj1=NULL, *obj2=NULL;
    PyObject *new, *ret;

    if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;

    obj1 = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis,
                                         rtype);
    obj2 = PyFloat_FromDouble((double) PyArray_DIM(new,axis));
    Py_DECREF(new);
    if (obj1 == NULL || obj2 == NULL) {
        Py_XDECREF(obj1);
        Py_XDECREF(obj2);
        return NULL;
    }

    ret = PyNumber_Divide(obj1, obj2);
    Py_DECREF(obj1);
    Py_DECREF(obj2);
    return ret;
}
+
/* Set variance to 1 to by-pass square-root calculation and return variance */
/*MULTIARRAY_API
 Std
*/
/* Sample standard deviation (or variance) along `axis`:
   sqrt(add.reduce((x - mean)**2, axis) / (N-1)), with the divisor
   clamped to at least 1.  `rtype` is the accumulation type. */
static PyObject *
PyArray_Std(PyArrayObject *self, int axis, int rtype, int variance)
{
    PyObject *obj1=NULL, *obj2=NULL, *new=NULL;
    PyObject *ret=NULL, *newshape=NULL;
    int i, n;
    intp val;

    if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;

    /* Compute and reshape mean */
    obj1 = PyArray_EnsureArray(PyArray_Mean((PyAO *)new, axis, rtype));
    if (obj1 == NULL) {Py_DECREF(new); return NULL;}
    n = PyArray_NDIM(new);
    /* shape of the mean with a 1 in the reduced axis so it
       broadcasts against `new` */
    newshape = PyTuple_New(n);
    if (newshape == NULL) {Py_DECREF(obj1); Py_DECREF(new); return NULL;}
    for (i=0; i<n; i++) {
        if (i==axis) val = 1;
        else val = PyArray_DIM(new,i);
        PyTuple_SET_ITEM(newshape, i, PyInt_FromLong((long)val));
    }
    obj2 = PyArray_Reshape((PyAO *)obj1, newshape);
    Py_DECREF(obj1);
    Py_DECREF(newshape);
    if (obj2 == NULL) {Py_DECREF(new); return NULL;}

    /* Compute x = x - mx */
    obj1 = PyNumber_Subtract((PyObject *)new, obj2);
    Py_DECREF(obj2);
    if (obj1 == NULL) {Py_DECREF(new); return NULL;}

    /* Compute x * x */
    obj2 = PyArray_EnsureArray(PyNumber_Multiply(obj1, obj1));
    Py_DECREF(obj1);
    if (obj2 == NULL) {Py_DECREF(new); return NULL;}

    /* Compute add.reduce(x*x,axis) */
    obj1 = PyArray_GenericReduceFunction((PyAO *)obj2, n_ops.add,
                                         axis, rtype);
    Py_DECREF(obj2);
    if (obj1 == NULL) {Py_DECREF(new); return NULL;}

    /* divide by N-1 (sample variance), clamping the divisor to 1 */
    n = PyArray_DIM(new,axis)-1;
    Py_DECREF(new);
    if (n<=0) n=1;
    obj2 = PyFloat_FromDouble(1.0/((double )n));
    if (obj2 == NULL) {Py_DECREF(obj1); return NULL;}
    ret = PyNumber_Multiply(obj1, obj2);
    Py_DECREF(obj1);
    Py_DECREF(obj2);

    if (variance) return ret;

    /* NOTE(review): ret is not checked for NULL before EnsureArray /
       sqrt below -- presumably non-NULL here, confirm */
    ret = PyArray_EnsureArray(ret);

    /* sqrt() */
    obj1 = PyArray_GenericUnaryFunction((PyAO *)ret, n_ops.sqrt);
    Py_DECREF(ret);

    return obj1;
}
+
+
+/*MULTIARRAY_API
+ Sum
+*/
+static PyObject *
+PyArray_Sum(PyArrayObject *self, int axis, int rtype)
+{
+ PyObject *new, *ret;
+
+ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;
+
+ ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis,
+ rtype);
+ Py_DECREF(new);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Prod
+*/
+static PyObject *
+PyArray_Prod(PyArrayObject *self, int axis, int rtype)
+{
+ PyObject *new, *ret;
+
+ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;
+
+ ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.multiply, axis,
+ rtype);
+ Py_DECREF(new);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ CumSum
+*/
+static PyObject *
+PyArray_CumSum(PyArrayObject *self, int axis, int rtype)
+{
+ PyObject *new, *ret;
+
+ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;
+
+ ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.add, axis,
+ rtype);
+ Py_DECREF(new);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ CumProd
+*/
+static PyObject *
+PyArray_CumProd(PyArrayObject *self, int axis, int rtype)
+{
+ PyObject *new, *ret;
+
+ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;
+
+ ret = PyArray_GenericAccumulateFunction((PyAO *)new,
+ n_ops.multiply, axis,
+ rtype);
+ Py_DECREF(new);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Any
+*/
+static PyObject *
+PyArray_Any(PyArrayObject *self, int axis)
+{
+ PyObject *new, *ret;
+
+ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;
+
+ ret = PyArray_GenericReduceFunction((PyAO *)new,
+ n_ops.logical_or, axis,
+ PyArray_BOOL);
+ Py_DECREF(new);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ All
+*/
+static PyObject *
+PyArray_All(PyArrayObject *self, int axis)
+{
+ PyObject *new, *ret;
+
+ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL;
+
+ ret = PyArray_GenericReduceFunction((PyAO *)new,
+ n_ops.logical_and, axis,
+ PyArray_BOOL);
+ Py_DECREF(new);
+ return ret;
+}
+
+
+/*MULTIARRAY_API
+ Compress
+*/
+static PyObject *
+PyArray_Compress(PyArrayObject *self, PyObject *condition, int axis)
+{
+ PyArrayObject *cond;
+ PyObject *res, *ret;
+
+ cond = (PyAO *)PyArray_FromAny(condition, NULL, 0, 0, 0);
+ if (cond == NULL) return NULL;
+
+ if (cond->nd != 1) {
+ Py_DECREF(cond);
+ PyErr_SetString(PyExc_ValueError,
+ "condition must be 1-d array");
+ return NULL;
+ }
+
+ res = PyArray_Nonzero(cond);
+ Py_DECREF(cond);
+ ret = PyArray_Take(self, res, axis);
+ Py_DECREF(res);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Nonzero
+*/
+static PyObject *
+PyArray_Nonzero(PyArrayObject *self)
+{
+ int n=self->nd, j;
+ intp count=0, i, size;
+ PyArrayIterObject *it=NULL;
+ PyObject *ret=NULL, *item;
+ intp *dptr[MAX_DIMS];
+
+ it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (it==NULL) return NULL;
+
+ size = it->size;
+ for (i=0; i<size; i++) {
+ if (self->descr->f->nonzero(it->dataptr, self)) count++;
+ PyArray_ITER_NEXT(it);
+ }
+
+ PyArray_ITER_RESET(it);
+ if (n==1) {
+ ret = PyArray_New(self->ob_type, 1, &count, PyArray_INTP,
+ NULL, NULL, 0, 0, (PyObject *)self);
+ if (ret == NULL) goto fail;
+ dptr[0] = (intp *)PyArray_DATA(ret);
+
+ for (i=0; i<size; i++) {
+ if (self->descr->f->nonzero(it->dataptr, self))
+ *(dptr[0])++ = i;
+ PyArray_ITER_NEXT(it);
+ }
+ }
+ else {
+ ret = PyTuple_New(n);
+ for (j=0; j<n; j++) {
+ item = PyArray_New(self->ob_type, 1, &count,
+ PyArray_INTP, NULL, NULL, 0, 0,
+ (PyObject *)self);
+ if (item == NULL) goto fail;
+ PyTuple_SET_ITEM(ret, j, item);
+ dptr[j] = (intp *)PyArray_DATA(item);
+ }
+
+ /* reset contiguous so that coordinates gets updated */
+ it->contiguous = 0;
+ for (i=0; i<size; i++) {
+ if (self->descr->f->nonzero(it->dataptr, self))
+ for (j=0; j<n; j++)
+ *(dptr[j])++ = it->coordinates[j];
+ PyArray_ITER_NEXT(it);
+ }
+ }
+
+ Py_DECREF(it);
+ return ret;
+
+ fail:
+ Py_XDECREF(ret);
+ Py_XDECREF(it);
+ return NULL;
+
+}
+
+/*MULTIARRAY_API
+ Clip
+*/
+static PyObject *
+PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max)
+{
+ PyObject *selector=NULL, *newtup=NULL, *ret=NULL;
+ PyObject *res1=NULL, *res2=NULL, *res3=NULL;
+ PyObject *two;
+
+ two = PyInt_FromLong((long)2);
+ res1 = PyArray_GenericBinaryFunction(self, max, n_ops.greater);
+ res2 = PyArray_GenericBinaryFunction(self, min, n_ops.less);
+ if ((res1 == NULL) || (res2 == NULL)) {
+ Py_DECREF(two);
+ Py_XDECREF(res1);
+ Py_XDECREF(res2);
+ }
+ res3 = PyNumber_Multiply(two, res1);
+ Py_DECREF(two);
+ Py_DECREF(res1);
+ if (res3 == NULL) return NULL;
+
+ selector = PyArray_EnsureArray(PyNumber_Add(res2, res3));
+ Py_DECREF(res2);
+ Py_DECREF(res3);
+ if (selector == NULL) return NULL;
+
+ newtup = Py_BuildValue("(OOO)", (PyObject *)self, min, max);
+ if (newtup == NULL) {Py_DECREF(selector); return NULL;}
+ ret = PyArray_Choose((PyAO *)selector, newtup);
+ Py_DECREF(selector);
+ Py_DECREF(newtup);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Conjugate
+*/
+static PyObject *
+PyArray_Conjugate(PyArrayObject *self)
+{
+ if (PyArray_ISCOMPLEX(self)) {
+ PyObject *new;
+ intp size, i;
+ /* Make a copy */
+ new = PyArray_NewCopy(self, -1);
+ if (new==NULL) return NULL;
+ size = PyArray_SIZE(new);
+ if (self->descr->type_num == PyArray_CFLOAT) {
+ cfloat *dptr = (cfloat *) PyArray_DATA(new);
+ for (i=0; i<size; i++) {
+ dptr->imag = -dptr->imag;
+ dptr++;
+ }
+ }
+ else if (self->descr->type_num == PyArray_CDOUBLE) {
+ cdouble *dptr = (cdouble *)PyArray_DATA(new);
+ for (i=0; i<size; i++) {
+ dptr->imag = -dptr->imag;
+ dptr++;
+ }
+ }
+ else if (self->descr->type_num == PyArray_CLONGDOUBLE) {
+ clongdouble *dptr = (clongdouble *)PyArray_DATA(new);
+ for (i=0; i<size; i++) {
+ dptr->imag = -dptr->imag;
+ dptr++;
+ }
+ }
+ return new;
+ }
+ else {
+ Py_INCREF(self);
+ return (PyObject *) self;
+ }
+}
+
+/*MULTIARRAY_API
+ Trace
+*/
+static PyObject *
+PyArray_Trace(PyArrayObject *self, int offset, int axis1, int axis2,
+int rtype)
+{
+    /* Sum of the offset-diagonal over axis1/axis2: implemented as a
+       reduce-add along the last axis of PyArray_Diagonal's result,
+       accumulating in type rtype. */
+    PyObject *diag=NULL, *ret=NULL;
+
+    diag = PyArray_Diagonal(self, offset, axis1, axis2);
+    if (diag == NULL) return NULL;
+    ret = PyArray_GenericReduceFunction((PyAO *)diag, n_ops.add, -1, rtype);
+    Py_DECREF(diag);
+    return ret;
+}
+
+/*MULTIARRAY_API
+ Diagonal
+*/
+static PyObject *
+PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2)
+{
+    /* Return the `offset` diagonal of self taken over axis1/axis2.
+       Strategy: transpose so axis1 and axis2 become the last two
+       axes, then either gather the 2-d diagonal by flat indexing
+       (n == 2) or recurse over the leading axis (n > 2). */
+    int n = self->nd;
+    PyObject *new;
+    PyArray_Dims newaxes;
+    intp dims[MAX_DIMS];
+    int i, pos;
+
+    newaxes.ptr = dims;
+    if (n < 2) {
+        PyErr_SetString(PyExc_ValueError,
+                        "array.ndim must be >= 2");
+        return NULL;
+    }
+    /* normalize negative axes, then validate */
+    if (axis1 < 0) axis1 += n;
+    if (axis2 < 0) axis2 += n;
+    if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || \
+        (axis2 < 0) || (axis2 >= n)) {
+        PyErr_Format(PyExc_ValueError, "axis1(=%d) and axis2(=%d) "\
+                     "must be different and within range (nd=%d)",
+                     axis1, axis2, n);
+        return NULL;
+    }
+
+    newaxes.len = n;
+    /* build the permutation: all other axes in order, then
+       axis1, axis2 at the end */
+    /* insert at the end */
+    newaxes.ptr[n-2] = axis1;
+    newaxes.ptr[n-1] = axis2;
+    pos = 0;
+    for (i=0; i<n; i++) {
+        if ((i==axis1) || (i==axis2)) continue;
+        newaxes.ptr[pos++] = i;
+    }
+    new = PyArray_Transpose(self, &newaxes);
+    if (new == NULL) return NULL;
+    /* from here on `self` is the transposed view (an owned ref that
+       every exit path below releases) */
+    self = (PyAO *)new;
+
+    if (n == 2) {
+        /* 2-d case: in an n1 x n2 array the diagonal elements sit at
+           flat positions start, start+step, ... with step = n2+1 */
+        PyObject *a=NULL, *indices=NULL, *ret=NULL;
+        intp n1, n2, start, stop, step, count;
+        intp *dptr;
+        n1 = self->dimensions[0];
+        n2 = self->dimensions[1];
+        step = n2+1;
+        if (offset < 0) {
+            start = -n2 * offset;
+            stop = MIN(n2, n1+offset)*(n2+1) - n2*offset;
+        }
+        else {
+            start = offset;
+            stop = MIN(n1, n2-offset)*(n2+1) + offset;
+        }
+
+        /* count = ceil((stop-start)/step) */
+        count = ((stop-start) / step) + (((stop-start) % step) != 0);
+
+        indices = PyArray_New(&PyArray_Type, 1, &count,
+                              PyArray_INTP, NULL, NULL, 0, 0, NULL);
+        if (indices == NULL) {
+            Py_DECREF(self); return NULL;
+        }
+        dptr = (intp *)PyArray_DATA(indices);
+        for (n1=start; n1<stop; n1+=step) *dptr++ = n1;
+        /* gather through a flat iterator so strides are honored */
+        a = PyArray_IterNew((PyObject *)self);
+        Py_DECREF(self);
+        if (a == NULL) {Py_DECREF(indices); return NULL;}
+        ret = PyObject_GetItem(a, indices);
+        Py_DECREF(a);
+        Py_DECREF(indices);
+        return ret;
+    }
+
+    else {
+        /* n > 2 -- equivalent Python:
+           my_diagonal = []
+           for i in range (s [0]) :
+               my_diagonal.append (diagonal (a [i], offset))
+           return array (my_diagonal)
+        */
+        PyObject *mydiagonal=NULL, *new=NULL, *ret=NULL, *sel=NULL;
+        intp i, n1;
+        int res;
+        PyArray_Descr *typecode;
+
+        typecode = self->descr;
+
+        mydiagonal = PyList_New(0);
+        if (mydiagonal == NULL) {Py_DECREF(self); return NULL;}
+        n1 = self->dimensions[0];
+        for (i=0; i<n1; i++) {
+            new = PyInt_FromLong((long) i);
+            /* sel = array(self[i]) */
+            sel = PyArray_EnsureArray(PyObject_GetItem((PyObject *)self, new));
+            Py_DECREF(new);
+            if (sel == NULL) {
+                Py_DECREF(self);
+                Py_DECREF(mydiagonal);
+                return NULL;
+            }
+            /* recurse: in sel the two target axes are the last two */
+            new = PyArray_Diagonal((PyAO *)sel, offset, n-3, n-2);
+            Py_DECREF(sel);
+            if (new == NULL) {
+                Py_DECREF(self);
+                Py_DECREF(mydiagonal);
+                return NULL;
+            }
+            res = PyList_Append(mydiagonal, new);
+            Py_DECREF(new);
+            if (res < 0) {
+                Py_DECREF(self);
+                Py_DECREF(mydiagonal);
+                return NULL;
+            }
+        }
+        Py_DECREF(self);
+        Py_INCREF(typecode);
+        /* rebuild an array of the original element type */
+        ret = PyArray_FromAny(mydiagonal, typecode, 0, 0, 0);
+        Py_DECREF(mydiagonal);
+        return ret;
+    }
+}
+
+/* simulates a C-style 1-3 dimensional array which can be accessed using
+   ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation
+   for 2-d and 3-d.
+
+   For 2-d and up, ptr is NOT equivalent to a statically defined
+   2-d or 3-d array.  In particular, it cannot be passed into a
+   function that requires a true pointer to a fixed-size array.
+*/
+
+/* steals a reference to typedescr -- can be NULL*/
+/*MULTIARRAY_API
+ Simulate a C-array
+*/
+static int
+PyArray_AsCArray(PyObject **op, void *ptr, intp *dims, int nd,
+                 PyArray_Descr* typedescr)
+{
+    PyArrayObject *ap;
+    intp n, m, i, j;
+    char **ptr2;
+    char ***ptr3;
+
+    if ((nd < 1) || (nd > 3)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "C arrays of only 1-3 dimensions available");
+        Py_XDECREF(typedescr);
+        return -1;
+    }
+    if ((ap = (PyArrayObject*)PyArray_FromAny(*op, typedescr, nd, nd,
+                                              CARRAY_FLAGS)) == NULL)
+        return -1;
+    switch(nd) {
+    case 1:
+        *((char **)ptr) = ap->data;
+        break;
+    case 2:
+        /* array of n row pointers into the data */
+        n = ap->dimensions[0];
+        ptr2 = (char **)_pya_malloc(n * sizeof(char *));
+        if (!ptr2) goto fail;
+        for (i=0; i<n; i++) {
+            ptr2[i] = ap->data + i*ap->strides[0];
+        }
+        *((char ***)ptr) = ptr2;
+        break;
+    case 3:
+        /* single allocation: n plane pointers followed by n*m row
+           pointers (n*(m+1) pointer-sized slots in all) */
+        n = ap->dimensions[0];
+        m = ap->dimensions[1];
+        ptr3 = (char ***)_pya_malloc(n*(m+1) * sizeof(char *));
+        if (!ptr3) goto fail;
+        for (i=0; i<n; i++) {
+            /* BUG FIX: the original read the uninitialized slot
+               ptr3[n + (m-1)*i]; each plane pointer must point INTO
+               the row-pointer region at &ptr3[n + m*i]. */
+            ptr3[i] = (char **)&ptr3[n + m*i];
+            for (j=0; j<m; j++) {
+                ptr3[i][j] = ap->data + i*ap->strides[0] +
+                             j*ap->strides[1];
+            }
+        }
+        *((char ****)ptr) = ptr3;
+    }
+    memcpy(dims, ap->dimensions, nd*sizeof(intp));
+    *op = (PyObject *)ap;
+    return 0;
+
+ fail:
+    /* BUG FIX: the converted array was leaked on the error path */
+    Py_DECREF(ap);
+    PyErr_SetString(PyExc_MemoryError, "no memory");
+    return -1;
+}
+
+/* Deprecated --- Use PyArray_AsCArray instead */
+
+/*MULTIARRAY_API
+ Convert to a 1D C-array
+*/
+static int
+PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
+{
+    /* Legacy wrapper over PyArray_AsCArray for 1-d access; narrows
+       the returned intp dimension to int for the old interface. */
+    intp newd1;
+    PyArray_Descr *descr;
+
+    descr = PyArray_DescrFromType(typecode);
+    if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1)
+        return -1;
+    *d1 = (int) newd1;
+    return 0;
+}
+
+/*MULTIARRAY_API
+ Convert to a 2D C-array
+*/
+static int
+PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode)
+{
+    /* Legacy wrapper over PyArray_AsCArray for 2-d access; narrows
+       the returned intp dimensions to int for the old interface.
+       Free the row-pointer table with PyArray_Free when done. */
+    intp newdims[2];
+    PyArray_Descr *descr;
+
+    descr = PyArray_DescrFromType(typecode);
+    if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1)
+        return -1;
+
+    *d1 = (int ) newdims[0];
+    *d2 = (int ) newdims[1];
+    return 0;
+}
+
+/* End Deprecated */
+
+/*MULTIARRAY_API
+ Free pointers created if As2D is called
+*/
+static int
+PyArray_Free(PyObject *op, void *ptr)
+{
+    /* Release the pointer scaffolding built by PyArray_AsCArray
+       (or As1D/As2D) and drop the array reference those calls
+       returned through *op.  ptr is malloc'd storage only for
+       nd >= 2; for 1-d there is nothing to free. */
+    PyArrayObject *ap = (PyArrayObject *)op;
+
+    if ((ap->nd < 1) || (ap->nd > 3))
+        return -1;
+    if (ap->nd >= 2) {
+        _pya_free(ptr);
+    }
+    Py_DECREF(ap);
+    return 0;
+}
+
+
+static PyObject *
+_swap_and_concat(PyObject *op, int axis, int n)
+{
+    /* Helper for PyArray_Concatenate when axis != 0: swap `axis` to
+       the front of each of the n sequence elements, concatenate
+       along axis 0, then swap the result back. */
+    PyObject *newtup=NULL;
+    PyObject *otmp, *arr;
+    int i;
+
+    newtup = PyTuple_New(n);
+    if (newtup==NULL) return NULL;
+    for (i=0; i<n; i++) {
+        /* NOTE(review): otmp is not NULL-checked before being passed
+           to PyArray_FROM_O -- confirm a failing PySequence_GetItem
+           cannot reach here, or add a check. */
+        otmp = PySequence_GetItem(op, i);
+        arr = PyArray_FROM_O(otmp);
+        Py_DECREF(otmp);
+        if (arr==NULL) goto fail;
+        otmp = PyArray_SwapAxes((PyArrayObject *)arr, axis, 0);
+        Py_DECREF(arr);
+        if (otmp == NULL) goto fail;
+        /* the tuple steals the reference to the swapped array */
+        PyTuple_SET_ITEM(newtup, i, otmp);
+    }
+    otmp = PyArray_Concatenate(newtup, 0);
+    Py_DECREF(newtup);
+    if (otmp == NULL) return NULL;
+    arr = PyArray_SwapAxes((PyArrayObject *)otmp, axis, 0);
+    Py_DECREF(otmp);
+    return arr;
+
+ fail:
+    Py_DECREF(newtup);
+    return NULL;
+}
+
+/*op is a python object supporting the sequence interface.
+  Its elements will be concatenated together to form a single
+  multidimensional array.*/
+/* If axis is MAX_DIMS or bigger, then each sequence object will
+   be flattened before concatenation
+*/
+/*MULTIARRAY_API
+ Concatenate an arbitrary Python sequence into an array.
+*/
+static PyObject *
+PyArray_Concatenate(PyObject *op, int axis)
+{
+    PyArrayObject *ret=NULL, **mps;
+    PyObject *otmp;
+    /* context: borrowed pointer to the highest-priority input; passed
+       to PyArray_NewFromDescr so subclasses can hook creation.
+       BUG FIX: the original reused `ret` for this, so a `goto fail`
+       taken before the output existed double-decref'd that input. */
+    PyObject *context=NULL;
+    int i, n, tmp, nd=0, new_dim;
+    char *data;
+    PyTypeObject *subtype;
+    double prior1, prior2;
+    intp numbytes;
+
+    n = PySequence_Length(op);
+    if (n == -1) {
+        return NULL;
+    }
+    if (n == 0) {
+        PyErr_SetString(PyExc_ValueError,
+                        "concatenation of zero-length sequences is "\
+                        "impossible");
+        return NULL;
+    }
+
+    /* axis 0 (and the flatten case, axis >= MAX_DIMS) are handled
+       below; any other axis delegates to _swap_and_concat */
+    if ((axis < 0) || ((0 < axis) && (axis < MAX_DIMS)))
+        return _swap_and_concat(op, axis, n);
+
+    mps = PyArray_ConvertToCommonType(op, &n);
+    if (mps == NULL) return NULL;
+
+    /* Make sure these arrays are legal to concatenate. */
+    /* Must have same dimensions except d0 */
+
+    /* Pick the output subtype: the input with the highest priority */
+    prior1 = 0.0;
+    subtype = &PyArray_Type;
+    for(i=0; i<n; i++) {
+        if (axis >= MAX_DIMS) {
+            otmp = PyArray_Ravel(mps[i],0);
+            Py_DECREF(mps[i]);
+            mps[i] = (PyArrayObject *)otmp;
+        }
+        prior2 = PyArray_GetPriority((PyObject *)(mps[i]), 0.0);
+        if (prior2 > prior1) {
+            prior1 = prior2;
+            subtype = mps[i]->ob_type;
+            context = (PyObject *)mps[i];
+        }
+    }
+
+    new_dim = 0;
+    for(i=0; i<n; i++) {
+        if (mps[i] == NULL) goto fail;
+        if (i == 0) nd = mps[i]->nd;
+        else {
+            if (nd != mps[i]->nd) {
+                PyErr_SetString(PyExc_ValueError,
+                                "arrays must have same "\
+                                "number of dimensions");
+                goto fail;
+            }
+            if (!PyArray_CompareLists(mps[0]->dimensions+1,
+                                      mps[i]->dimensions+1,
+                                      nd-1)) {
+                PyErr_SetString(PyExc_ValueError,
+                                "array dimensions must "\
+                                "agree except for d_0");
+                goto fail;
+            }
+        }
+        if (nd == 0) {
+            PyErr_SetString(PyExc_ValueError,
+                            "0-d arrays can't be concatenated");
+            goto fail;
+        }
+        new_dim += mps[i]->dimensions[0];
+    }
+
+    /* temporarily patch d0 of mps[0] so the output shape can be read
+       straight from its dimensions vector */
+    tmp = mps[0]->dimensions[0];
+    mps[0]->dimensions[0] = new_dim;
+    Py_INCREF(mps[0]->descr);
+    ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+                                                mps[0]->descr, nd,
+                                                mps[0]->dimensions,
+                                                NULL, NULL, 0,
+                                                context);
+    mps[0]->dimensions[0] = tmp;
+
+    if (ret == NULL) goto fail;
+
+    /* all inputs share a common type and are behaved, so the result
+       is filled with one straight memcpy per input */
+    data = ret->data;
+    for(i=0; i<n; i++) {
+        numbytes = PyArray_NBYTES(mps[i]);
+        memcpy(data, mps[i]->data, numbytes);
+        data += numbytes;
+    }
+
+    PyArray_INCREF(ret);
+    for(i=0; i<n; i++) Py_XDECREF(mps[i]);
+    PyDataMem_FREE(mps);
+    return (PyObject *)ret;
+
+ fail:
+    Py_XDECREF(ret);
+    for(i=0; i<n; i++) Py_XDECREF(mps[i]);
+    PyDataMem_FREE(mps);
+    return NULL;
+}
+
+/*MULTIARRAY_API
+ SwapAxes
+*/
+static PyObject *
+PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2)
+{
+    /* Return a view of ap with axes a1 and a2 interchanged,
+       implemented as a transpose with a swapped permutation.
+       Equal axes or nd <= 1 return ap itself (new reference). */
+    PyArray_Dims new_axes;
+    intp dims[MAX_DIMS];
+    int n, i, val;
+    PyObject *ret;
+
+    if (a1 == a2) {
+        Py_INCREF(ap);
+        return (PyObject *)ap;
+    }
+
+    n = ap->nd;
+    if (n <= 1) {
+        Py_INCREF(ap);
+        return (PyObject *)ap;
+    }
+
+    /* normalize negative axes, then validate */
+    if (a1 < 0) a1 += n;
+    if (a2 < 0) a2 += n;
+    if ((a1 < 0) || (a1 >= n)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "bad axis1 argument to swapaxes");
+        return NULL;
+    }
+    if ((a2 < 0) || (a2 >= n)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "bad axis2 argument to swapaxes");
+        return NULL;
+    }
+    new_axes.ptr = dims;
+    new_axes.len = n;
+
+    /* identity permutation with a1 and a2 exchanged */
+    for (i=0; i<n; i++) {
+        if (i == a1) val = a2;
+        else if (i == a2) val = a1;
+        else val = i;
+        new_axes.ptr[i] = val;
+    }
+    ret = PyArray_Transpose(ap, &new_axes);
+    return ret;
+}
+
+/*MULTIARRAY_API
+ Return Transpose.
+*/
+static PyObject *
+PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute)
+{
+    /* Return a VIEW of ap with its axes permuted.  permute == NULL
+       reverses all axes; otherwise permute->ptr gives the new axis
+       order (negative entries are normalized). */
+    intp *axes, axis;
+    intp i, n;
+    intp permutation[MAX_DIMS];
+    PyArrayObject *ret = NULL;
+
+    if (permute == NULL) {
+        n = ap->nd;
+        for(i=0; i<n; i++)
+            permutation[i] = n-1-i;
+    } else {
+        n = permute->len;
+        axes = permute->ptr;
+        if (n > ap->nd) {
+            PyErr_SetString(PyExc_ValueError,
+                            "too many axes for this array");
+            return NULL;
+        }
+        /* NOTE(review): n < ap->nd is accepted and yields an
+           n-dimensional result -- confirm this truncation is
+           intentional rather than an unchecked error. */
+        for(i=0; i<n; i++) {
+            axis = axes[i];
+            if (axis < 0) axis = ap->nd+axis;
+            if (axis < 0 || axis >= ap->nd) {
+                PyErr_SetString(PyExc_ValueError,
+                                "invalid axis for this array");
+                return NULL;
+            }
+            permutation[i] = axis;
+        }
+    }
+
+    /* this allocates memory for dimensions and strides (but fills them
+       incorrectly), sets up descr, and points data at ap->data. */
+    Py_INCREF(ap->descr);
+    ret = (PyArrayObject *)\
+        PyArray_NewFromDescr(ap->ob_type,
+                             ap->descr,
+                             n, permutation,
+                             NULL, ap->data, ap->flags,
+                             (PyObject *)ap);
+    if (ret == NULL) return NULL;
+
+    /* point at true owner of memory: */
+    ret->base = (PyObject *)ap;
+    Py_INCREF(ap);
+
+    /* rewrite dimensions/strides to realize the permutation */
+    for(i=0; i<n; i++) {
+        ret->dimensions[i] = ap->dimensions[permutation[i]];
+        ret->strides[i] = ap->strides[permutation[i]];
+    }
+    /* contiguity may have changed; recompute the flags */
+    PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN);
+
+    return (PyObject *)ret;
+}
+
+/*MULTIARRAY_API
+ Repeat the array.
+*/
+static PyObject *
+PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
+{
+    /* Repeat elements of aop along `axis`.  op is either a scalar
+       count (repeats->nd == 0, same count for all) or a 1-d sequence
+       of per-element counts whose length must equal the axis length. */
+    intp *counts;
+    intp n, n_outer, i, j, k, chunk, total;
+    intp tmp;
+    int nd;
+    PyArrayObject *repeats=NULL;
+    PyObject *ap=NULL;
+    PyArrayObject *ret=NULL;
+    char *new_data, *old_data;
+
+    repeats = (PyAO *)PyArray_ContiguousFromAny(op, PyArray_INTP, 0, 1);
+    if (repeats == NULL) return NULL;
+    nd = repeats->nd;
+    counts = (intp *)repeats->data;
+
+    /* _check_axis normalizes axis and returns a behaved array */
+    if ((ap=_check_axis(aop, &axis, CARRAY_FLAGS))==NULL) {
+        Py_DECREF(repeats);
+        return NULL;
+    }
+
+    aop = (PyAO *)ap;
+
+    if (nd == 1)
+        n = repeats->dimensions[0];
+    else /* nd == 0 */
+        n = aop->dimensions[axis];
+
+    if (aop->dimensions[axis] != n) {
+        PyErr_SetString(PyExc_ValueError,
+                        "a.shape[axis] != len(repeats)");
+        goto fail;
+    }
+
+
+    /* total = new length of the repeat axis */
+    if (nd == 0)
+        total = counts[0]*n;
+    else {
+
+        total = 0;
+        for(j=0; j<n; j++) {
+            if (counts[j] < 0) {
+                PyErr_SetString(PyExc_ValueError, "count < 0");
+                goto fail;
+            }
+            total += counts[j];
+        }
+    }
+
+
+    /* Construct new array: temporarily patch the axis length so the
+       output shape can be read straight from aop->dimensions */
+    aop->dimensions[axis] = total;
+    Py_INCREF(aop->descr);
+    ret = (PyArrayObject *)PyArray_NewFromDescr(aop->ob_type,
+                                                aop->descr,
+                                                aop->nd,
+                                                aop->dimensions,
+                                                NULL, NULL, 0,
+                                                (PyObject *)aop);
+    aop->dimensions[axis] = n;
+
+    if (ret == NULL) goto fail;
+
+    new_data = ret->data;
+    old_data = aop->data;
+
+    /* chunk = bytes per element of the repeat axis (itemsize times
+       the product of all trailing dims); the memcpy walk below
+       assumes CARRAY_FLAGS made aop C-contiguous -- TODO confirm */
+    chunk = aop->descr->elsize;
+    for(i=axis+1; i<aop->nd; i++) {
+        chunk *= aop->dimensions[i];
+    }
+
+    n_outer = 1;
+    for(i=0; i<axis; i++) n_outer *= aop->dimensions[i];
+
+    for(i=0; i<n_outer; i++) {
+        for(j=0; j<n; j++) {
+            tmp = (nd ? counts[j] : counts[0]);
+            for(k=0; k<tmp; k++) {
+                memcpy(new_data, old_data, chunk);
+                new_data += chunk;
+            }
+            old_data += chunk;
+        }
+    }
+
+    Py_DECREF(repeats);
+    PyArray_INCREF(ret);
+    Py_XDECREF(aop);
+    return (PyObject *)ret;
+
+ fail:
+    Py_DECREF(repeats);
+    Py_XDECREF(aop);
+    Py_XDECREF(ret);
+    return NULL;
+}
+
+/*OBJECT_API*/
+static PyArrayObject **
+PyArray_ConvertToCommonType(PyObject *op, int *retn)
+{
+    /* Convert every element of the sequence op to an array of one
+       common type.  Returns a PyDataMem_NEW'd vector of n arrays
+       (individual entries may be NULL when their conversion failed);
+       *retn receives n.  The caller decrefs the entries and frees
+       the vector with PyDataMem_FREE. */
+    int i, n, allscalars=0;
+    PyArrayObject **mps=NULL;
+    PyObject *otmp;
+    PyArray_Descr *intype=NULL, *stype=NULL;
+    PyArray_Descr *newtype=NULL;
+
+    *retn = n = PySequence_Length(op);
+    if (PyErr_Occurred()) {*retn = 0; return NULL;}
+
+    mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *));
+    if (mps == NULL) {
+        *retn = 0;
+        return (void*)PyErr_NoMemory();
+    }
+    /* PyDataMem_NEW does not zero: initialize so the fail path can
+       safely Py_XDECREF every slot */
+    for (i=0; i<n; i++) mps[i] = NULL;
+
+    /* First pass: common type of the non-scalar elements (intype)
+       and, separately, of the scalar elements (stype) */
+    for(i=0; i<n; i++) {
+        otmp = PySequence_GetItem(op, i);
+        if (otmp == NULL) goto fail;
+        if (!PyArray_CheckAnyScalar(otmp)) {
+            newtype = PyArray_DescrFromObject(otmp, intype);
+            Py_XDECREF(intype);
+            intype = newtype;
+            mps[i] = NULL;
+        }
+        else {
+            newtype = PyArray_DescrFromObject(otmp, stype);
+            Py_XDECREF(stype);
+            stype = newtype;
+            /* mark scalar positions with Py_None for the second pass */
+            mps[i] = (PyArrayObject *)Py_None;
+            Py_INCREF(Py_None);
+        }
+        Py_DECREF(otmp);
+    }
+    if (intype==NULL) { /* all scalars */
+        allscalars = 1;
+        intype = stype;
+        /* BUG FIX: XINCREF -- stype (and hence intype) is NULL for an
+           empty sequence; Py_INCREF(NULL) would crash */
+        Py_XINCREF(intype);
+        for (i=0; i<n; i++) {
+            Py_XDECREF(mps[i]);
+            mps[i] = NULL;
+        }
+    }
+    /* Second pass: convert every element to an array of type intype */
+    for(i=0; i<n; i++) {
+        int flags = CARRAY_FLAGS;
+        if ((otmp = PySequence_GetItem(op, i)) == NULL)
+            goto fail;
+        if (!allscalars && ((PyObject *)(mps[i]) == Py_None)) {
+            /* forcecast scalars */
+            flags |= FORCECAST;
+            Py_DECREF(Py_None);
+        }
+        /* BUG FIX: PyArray_FromAny steals a descr reference, but the
+           same intype is used for every element -- take a fresh
+           reference per call instead of letting our single reference
+           be consumed n times */
+        Py_INCREF(intype);
+        mps[i] = (PyArrayObject*)
+            PyArray_FromAny(otmp, intype, 0, 0, flags);
+        Py_DECREF(otmp);
+    }
+    /* BUG FIX: release our own references exactly once (the original
+       decref'd stype inside the loop, once per element) */
+    Py_XDECREF(intype);
+    Py_XDECREF(stype);
+    return mps;
+
+ fail:
+    Py_XDECREF(intype);
+    Py_XDECREF(stype);
+    *retn = 0;
+    for (i=0; i<n; i++) Py_XDECREF(mps[i]);
+    PyDataMem_FREE(mps);
+    return NULL;
+}
+
+
+/*MULTIARRAY_API
+ Numeric.choose()
+*/
+static PyObject *
+PyArray_Choose(PyArrayObject *ip, PyObject *op)
+{
+    /* result[i] = op[ip[i]][i]: use the integer array ip to select,
+       element by element, among the choice arrays in sequence op.
+       Choices smaller than the result are reused cyclically (their
+       byte offset wraps modulo their size). */
+    intp *sizes, offset;
+    int i,n,m,elsize;
+    char *ret_data;
+    PyArrayObject **mps, *ap, *ret;
+    intp *self_data, mi;
+    ap = NULL;
+    ret = NULL;
+
+    /* Convert all inputs to arrays of a common type */
+    mps = PyArray_ConvertToCommonType(op, &n);
+    if (mps == NULL) return NULL;
+
+    sizes = (intp *)_pya_malloc(n*sizeof(intp));
+    if (sizes == NULL) goto fail;
+
+    ap = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ip,
+                                                    PyArray_INTP,
+                                                    0, 0);
+    if (ap == NULL) goto fail;
+
+    /* Check the dimensions of the arrays */
+    for(i=0; i<n; i++) {
+        if (mps[i] == NULL) goto fail;
+        if (ap->nd < mps[i]->nd) {
+            PyErr_SetString(PyExc_ValueError,
+                            "too many dimensions");
+            goto fail;
+        }
+        /* each choice's shape must match the trailing dims of ip */
+        if (!PyArray_CompareLists(ap->dimensions+(ap->nd-mps[i]->nd),
+                                  mps[i]->dimensions, mps[i]->nd)) {
+            PyErr_SetString(PyExc_ValueError,
+                            "array dimensions must agree");
+            goto fail;
+        }
+        sizes[i] = PyArray_NBYTES(mps[i]);
+    }
+
+    Py_INCREF(mps[0]->descr);
+    ret = (PyArrayObject *)PyArray_NewFromDescr(ap->ob_type,
+                                                mps[0]->descr,
+                                                ap->nd,
+                                                ap->dimensions,
+                                                NULL, NULL, 0,
+                                                (PyObject *)ap);
+    if (ret == NULL) goto fail;
+
+    elsize = ret->descr->elsize;
+    m = PyArray_SIZE(ret);
+    self_data = (intp *)ap->data;
+    ret_data = ret->data;
+
+    for (i=0; i<m; i++) {
+        mi = *self_data;
+        if (mi < 0 || mi >= n) {
+            PyErr_SetString(PyExc_ValueError,
+                            "invalid entry in choice array");
+            goto fail;
+        }
+        /* wrap the byte offset for choices smaller than the result */
+        offset = i*elsize;
+        if (offset >= sizes[mi]) {offset = offset % sizes[mi]; }
+        memmove(ret_data, mps[mi]->data+offset, elsize);
+        ret_data += elsize; self_data++;
+    }
+
+    /* NOTE: PyArray_INCREF (array API, not Py_INCREF) -- presumably
+       increfs contained objects for OBJECT-type results; confirm */
+    PyArray_INCREF(ret);
+    for(i=0; i<n; i++) Py_XDECREF(mps[i]);
+    Py_DECREF(ap);
+    PyDataMem_FREE(mps);
+    _pya_free(sizes);
+
+    return (PyObject *)ret;
+
+ fail:
+    for(i=0; i<n; i++) Py_XDECREF(mps[i]);
+    Py_XDECREF(ap);
+    PyDataMem_FREE(mps);
+    _pya_free(sizes);
+    Py_XDECREF(ret);
+    return NULL;
+}
+
+static void
+_strided_copy(char *dst, intp dststride, char *src, intp srcstride, intp num, int elsize)
+{
+    /* Copy `num` elements of `elsize` bytes each from src to dst,
+       advancing each side by its own stride per element.  Used to
+       gather strided axis data into a contiguous buffer and to
+       scatter it back. */
+    intp k;
+
+    for (k = 0; k < num; k++) {
+        memcpy(dst + k*dststride, src + k*srcstride, elsize);
+    }
+}
+
+/* These algorithms use special sorting.  They are not called unless the
+   underlying sort function for the type is available.  Note that axis is already
+   valid.  The sort functions require 1-d contiguous and well-behaved data.
+   Therefore, a copy will be made of the data if needed before handing it to the
+   sorting routine.
+   An iterator is constructed and adjusted to walk over all but the desired sorting
+   axis.
+*/
+static int
+_new_sort(PyArrayObject *op, int axis, PyArray_SORTKIND which)
+{
+    PyArrayIterObject *it;
+    int needcopy=0;
+    intp N, size;
+    int elsize;
+    intp astride;
+    PyArray_SortFunc *sort;
+    BEGIN_THREADS_DEF
+
+    it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, axis);
+    if (it == NULL) return -1;
+
+    BEGIN_THREADS
+    sort = op->descr->f->sort[which];
+    size = it->size;
+    N = op->dimensions[axis];
+    elsize = op->descr->elsize;
+    astride = op->strides[axis];
+
+    /* the type sort needs aligned, contiguous 1-d data: copy through
+       a scratch buffer when the axis is strided or unaligned */
+    needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize);
+
+    if (needcopy) {
+        char *buffer;
+        buffer = PyDataMem_NEW(N*elsize);
+        if (buffer == NULL) {
+            /* BUG FIX: the allocation was previously unchecked */
+            END_THREADS
+            Py_DECREF(it);
+            PyErr_NoMemory();
+            return -1;
+        }
+        while (size--) {
+            _strided_copy(buffer, (intp) elsize, it->dataptr,
+                          astride, N, elsize);
+            if (sort(buffer, N, op) < 0) {
+                PyDataMem_FREE(buffer); goto fail;
+            }
+            _strided_copy(it->dataptr, astride, buffer,
+                          (intp) elsize, N, elsize);
+            PyArray_ITER_NEXT(it);
+        }
+        PyDataMem_FREE(buffer);
+    }
+    else {
+        while (size--) {
+            if (sort(it->dataptr, N, op) < 0) goto fail;
+            PyArray_ITER_NEXT(it);
+        }
+    }
+
+    END_THREADS
+
+    Py_DECREF(it);
+    return 0;
+
+ fail:
+    END_THREADS
+
+    Py_DECREF(it);
+    /* BUG FIX: the failure path previously returned 0 (success),
+       masking sort errors from the caller */
+    return -1;
+}
+
+static PyObject*
+_new_argsort(PyArrayObject *op, int axis, PyArray_SORTKIND which)
+{
+    /* Type-specific argsort along `axis`: returns a new INTP array of
+       op's shape holding, along the axis, indices that sort op. */
+    PyArrayIterObject *it=NULL;
+    PyArrayIterObject *rit=NULL;
+    PyObject *ret;
+    int needcopy=0, i;
+    intp N, size;
+    int elsize;
+    intp astride, rstride, *iptr;
+    PyArray_ArgSortFunc *argsort;
+    BEGIN_THREADS_DEF
+
+    ret = PyArray_New(op->ob_type, op->nd,
+                      op->dimensions, PyArray_INTP,
+                      NULL, NULL, 0, 0, (PyObject *)op);
+    if (ret == NULL) return NULL;
+
+    it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, axis);
+    rit = (PyArrayIterObject *)PyArray_IterAllButAxis(ret, axis);
+    if (rit == NULL || it == NULL) goto fail_nothreads;
+
+    BEGIN_THREADS
+
+    argsort = op->descr->f->argsort[which];
+    size = it->size;
+    N = op->dimensions[axis];
+    elsize = op->descr->elsize;
+    astride = op->strides[axis];
+    rstride = PyArray_STRIDE(ret,axis);
+
+    /* copy through scratch buffers unless both the input data and
+       index output are aligned and contiguous along the axis */
+    needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) ||
+        (rstride != sizeof(intp));
+
+    if (needcopy) {
+        char *valbuffer, *indbuffer;
+        /* one allocation: N values followed by N intp indices */
+        valbuffer = PyDataMem_NEW(N*(elsize+sizeof(intp)));
+        if (valbuffer == NULL) {
+            /* BUG FIX: the allocation was previously unchecked */
+            END_THREADS
+            PyErr_NoMemory();
+            goto fail_nothreads;
+        }
+        indbuffer = valbuffer + (N*elsize);
+        while (size--) {
+            _strided_copy(valbuffer, (intp) elsize, it->dataptr,
+                          astride, N, elsize);
+            iptr = (intp *)indbuffer;
+            for (i=0; i<N; i++) *iptr++ = i;
+            if (argsort(valbuffer, (intp *)indbuffer, N, op) < 0) {
+                PyDataMem_FREE(valbuffer); goto fail;
+            }
+            _strided_copy(rit->dataptr, rstride, indbuffer,
+                          sizeof(intp), N, sizeof(intp));
+            PyArray_ITER_NEXT(it);
+            PyArray_ITER_NEXT(rit);
+        }
+        PyDataMem_FREE(valbuffer);
+    }
+    else {
+        while (size--) {
+            iptr = (intp *)rit->dataptr;
+            for (i=0; i<N; i++) *iptr++ = i;
+            if (argsort(it->dataptr, (intp *)rit->dataptr,
+                        N, op) < 0) goto fail;
+            PyArray_ITER_NEXT(it);
+            PyArray_ITER_NEXT(rit);
+        }
+    }
+
+    END_THREADS
+
+    Py_DECREF(it);
+    Py_DECREF(rit);
+    return ret;
+
+ fail:
+
+    END_THREADS
+
+ fail_nothreads:
+    /* separate label so paths that never took the GIL-release (or
+       already restored it) do not run END_THREADS a second time */
+    Py_DECREF(ret);
+    Py_XDECREF(it);
+    Py_XDECREF(rit);
+    return NULL;
+}
+
+
+/* Be sure to save this global_compare when necessary */
+
+/* Scratch global holding the array whose element-compare function the
+   context-free qsort(3) comparator below should use.  Callers must
+   save and restore it around use (see PyArray_Sort). */
+static PyArrayObject *global_obj;
+
+static int
+qsortCompare (const void *a, const void *b)
+{
+    /* qsort(3) comparator: defer to the compare function of the
+       array currently stashed in global_obj */
+    return global_obj->descr->f->compare(a,b,global_obj);
+}
+
+/* Consumes reference to ap (op gets it)
+   op contains a version of the array with axes swapped if
+   local variable axis is not the last dimension.
+   Locals `axis` and `orign` must be defined in the caller.
+*/
+
+#define SWAPAXES(op, ap) { \
+        orign = (ap)->nd-1; \
+        if (axis != orign) { \
+            (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \
+            Py_DECREF((ap)); \
+            if ((op) == NULL) return NULL; \
+        } \
+        else (op) = (ap); \
+    }
+
+/* Consumes reference to ap (op gets it)
+   orign must be previously defined locally.
+   SWAPAXES must have been called previously.
+   op contains the swapped version of the array.
+*/
+#define SWAPBACK(op, ap) { \
+        if (axis != orign) { \
+            (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \
+            Py_DECREF((ap)); \
+            if ((op) == NULL) return NULL; \
+        } \
+        else (op) = (ap); \
+    }
+
+/* These swap axes in-place if necessary */
+#define SWAPINTP(a,b) {intp c; c=(a); (a) = (b); (b) = c;}
+
+/* In-place variants: exchange the dimension/stride entries of `axis`
+   and the last axis directly inside ap (no new object is created);
+   SWAPBACK2 undoes SWAPAXES2.  Flags are refreshed because
+   contiguity can change. */
+#define SWAPAXES2(ap) { \
+        orign = (ap)->nd-1; \
+        if (axis != orign) { \
+            SWAPINTP(ap->dimensions[axis], ap->dimensions[orign]); \
+            SWAPINTP(ap->strides[axis], ap->strides[orign]); \
+            PyArray_UpdateFlags(ap, CONTIGUOUS | FORTRAN); \
+        } \
+    }
+
+#define SWAPBACK2(ap) { \
+        if (axis != orign) { \
+            SWAPINTP(ap->dimensions[axis], ap->dimensions[orign]); \
+            SWAPINTP(ap->strides[axis], ap->strides[orign]); \
+            PyArray_UpdateFlags(ap, CONTIGUOUS | FORTRAN); \
+        } \
+    }
+
+/*MULTIARRAY_API
+ Sort an array in-place
+*/
+static int
+PyArray_Sort(PyArrayObject *op, int axis, PyArray_SORTKIND which)
+{
+    /* In-place sort along `axis`.  Uses the type's own sort function
+       when available; otherwise falls back to qsort(3) with the
+       type's compare function, run on a contiguous UPDATEIFCOPY
+       copy whose release writes the sorted data back into op. */
+    PyArrayObject *ap=NULL, *store_arr=NULL;
+    char *ip;
+    int i, n, m, elsize, orign;
+
+    n = op->nd;
+    /* 0-d or single-element arrays are trivially sorted */
+    if ((n==0) || (PyArray_SIZE(op)==1)) return 0;
+
+    if (axis < 0) axis += n;
+    if ((axis < 0) || (axis >= n)) {
+        PyErr_Format(PyExc_ValueError,
+                     "axis(=%d) out of bounds", axis);
+        return -1;
+    }
+    if (!PyArray_ISWRITEABLE(op)) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "attempted sort on unwriteable array.");
+        return -1;
+    }
+
+    /* Determine if we should use type-specific algorithm or not */
+    if (op->descr->f->sort[which] != NULL) {
+        return _new_sort(op, axis, which);
+    }
+
+    if ((which != PyArray_QUICKSORT) || \
+        op->descr->f->compare == NULL) {
+        PyErr_SetString(PyExc_TypeError,
+                        "desired sort not supported for this type");
+        return -1;
+    }
+
+    /* swap the sort axis to the end, in place (undone on exit) */
+    SWAPAXES2(op);
+
+    ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op,
+                                          NULL, 1, 0,
+                                          DEFAULT_FLAGS | UPDATEIFCOPY);
+    if (ap == NULL) goto fail;
+
+    elsize = ap->descr->elsize;
+    m = ap->dimensions[ap->nd-1];
+    if (m == 0) goto finish;   /* empty sort axis: nothing to do */
+
+    n = PyArray_SIZE(ap)/m;
+
+    /* Store global -- allows re-entry -- restore before leaving*/
+    store_arr = global_obj;
+    global_obj = ap;
+
+    /* sort each contiguous length-m row independently */
+    for (ip=ap->data, i=0; i<n; i++, ip+=elsize*m) {
+        qsort(ip, m, elsize, qsortCompare);
+    }
+
+    global_obj = store_arr;
+
+    if (PyErr_Occurred()) goto fail;
+
+ finish:
+    Py_DECREF(ap); /* Should update op if needed */
+    SWAPBACK2(op);
+    return 0;
+ fail:
+    Py_XDECREF(ap);
+    SWAPBACK2(op);
+    return -1;
+}
+
+
+/* Base data pointer of the array currently being argsorted; used by
+   the context-free qsort comparator below.  Save/restore around use
+   (see PyArray_ArgSort). */
+static char *global_data;
+
+static int
+argsort_static_compare(const void *ip1, const void *ip2)
+{
+    /* Compare two intp indices by comparing the elements they refer
+       to inside global_data, using global_obj's compare function. */
+    int isize = global_obj->descr->elsize;
+    const intp *ipa = ip1;
+    const intp *ipb = ip2;
+    return global_obj->descr->f->compare(global_data + (isize * *ipa),
+                                         global_data + (isize * *ipb),
+                                         global_obj);
+}
+
+/*MULTIARRAY_API
+ ArgSort an array
+*/
+static PyObject *
+PyArray_ArgSort(PyArrayObject *op, int axis, PyArray_SORTKIND which)
+{
+    /* Return an INTP array of op's shape holding, along `axis`, the
+       indices that sort op.  Uses the type-specific argsort when
+       available, else qsort(3) driven through the global_obj /
+       global_data comparator state (saved and restored). */
+    PyArrayObject *ap=NULL, *ret, *store;
+    intp *ip;
+    intp i, j, n, m, orign;
+    int argsort_elsize;
+    char *store_ptr;
+
+    n = op->nd;
+    /* 0-d or single-element arrays: the only index is 0 */
+    if ((n==0) || (PyArray_SIZE(op)==1)) {
+        ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd,
+                                           op->dimensions,
+                                           PyArray_INTP,
+                                           NULL, NULL, 0, 0,
+                                           (PyObject *)op);
+        if (ret == NULL) return NULL;
+        *((intp *)ret->data) = 0;
+        return (PyObject *)ret;
+    }
+    if (axis < 0) axis += n;
+    if ((axis < 0) || (axis >= n)) {
+        PyErr_Format(PyExc_ValueError,
+                     "axis(=%d) out of bounds", axis);
+        return NULL;
+    }
+
+    /* Determine if we should use new algorithm or not */
+    if (op->descr->f->argsort[which] != NULL) {
+        return _new_argsort(op, axis, which);
+    }
+
+    if ((which != PyArray_QUICKSORT) || op->descr->f->compare == NULL) {
+        /* BUG FIX: this previously jumped to a fail label that did
+           Py_XDECREF on the still-uninitialized `ret` */
+        PyErr_SetString(PyExc_TypeError,
+                        "requested sort not available for type");
+        return NULL;
+    }
+
+    /* move the sort axis to the end, then make the data contiguous */
+    SWAPAXES(ap, op);
+
+    op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap,
+                                                    PyArray_NOTYPE,
+                                                    1, 0);
+    /* BUG FIX: release the swapped intermediate (SWAPAXES handed its
+       reference to ap); it was leaked before */
+    Py_DECREF(ap);
+    if (op == NULL) return NULL;
+
+    ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd,
+                                       op->dimensions, PyArray_INTP,
+                                       NULL, NULL, 0, 0, (PyObject *)op);
+    if (ret == NULL) {
+        /* BUG FIX: the contiguous copy was leaked on this path */
+        Py_DECREF(op);
+        return NULL;
+    }
+
+    ip = (intp *)ret->data;
+    argsort_elsize = op->descr->elsize;
+    m = op->dimensions[op->nd-1];
+    if (m == 0) goto finish;   /* empty sort axis */
+
+    n = PyArray_SIZE(op)/m;
+    /* save/restore the comparator globals so the routine stays
+       re-entrant */
+    store_ptr = global_data;
+    global_data = op->data;
+    store = global_obj;
+    global_obj = op;
+    for (i=0; i<n; i++, ip+=m, global_data += m*argsort_elsize) {
+        for(j=0; j<m; j++) ip[j] = j;
+        qsort((char *)ip, m, sizeof(intp),
+              argsort_static_compare);
+    }
+    global_data = store_ptr;
+    global_obj = store;
+
+ finish:
+    Py_DECREF(op);
+    SWAPBACK(op, ret);
+    return (PyObject *)op;
+}
+
+
+/*MULTIARRAY_API
+ LexSort an array providing indices that will sort a collection of arrays
+ lexicographically.  The first key is sorted on first, followed by the second key
+ -- requires that arg"merge"sort is available for each sort_key
+
+ Returns an index array that shows the indexes for the lexicographic sort along
+ the given axis.
+*/
+static PyObject *
+PyArray_LexSort(PyObject *sort_keys, int axis)
+{
+    PyArrayObject **mps;
+    PyArrayIterObject **its;
+    PyArrayObject *ret=NULL;
+    PyArrayIterObject *rit=NULL;
+    int n;
+    int nd;
+    int needcopy=0, i,j;
+    intp N, size;
+    int elsize;
+    int maxelsize;
+    intp astride, rstride, *iptr;
+    PyArray_ArgSortFunc *argsort;
+
+    if (!PyTuple_Check(sort_keys) ||
+        ((n=PyTuple_GET_SIZE(sort_keys)) <= 0)) {
+        PyErr_SetString(PyExc_TypeError,
+                        "need tuple of keys with len > 0 in lexsort");
+        return NULL;
+    }
+    /* BUG FIX: allocate vectors of POINTERS; the original multiplied
+       by the size of the object structs, over-allocating */
+    mps = (PyArrayObject **) _pya_malloc(n*sizeof(PyArrayObject *));
+    if (mps==NULL) return PyErr_NoMemory();
+    its = (PyArrayIterObject **) _pya_malloc(n*sizeof(PyArrayIterObject *));
+    if (its == NULL) {_pya_free(mps); return PyErr_NoMemory();}
+    for (i=0; i<n; i++) {mps[i] = NULL; its[i] = NULL;}
+    /* convert each key to an array and attach an all-but-axis
+       iterator; all keys must share one shape and support mergesort */
+    for (i=0; i<n; i++) {
+        mps[i] = (PyArrayObject *)PyArray_FROM_O(
+            PyTuple_GET_ITEM(sort_keys, i));
+        if (mps[i] == NULL) goto fail;
+        if (i>0) {
+            if ((mps[i]->nd != mps[0]->nd) ||
+                (!PyArray_CompareLists(mps[i]->dimensions,
+                                       mps[0]->dimensions,
+                                       mps[0]->nd))) {
+                PyErr_SetString(PyExc_ValueError,
+                                "all keys need to be the same shape");
+                goto fail;
+            }
+        }
+        if (!mps[i]->descr->f->argsort[PyArray_MERGESORT]) {
+            PyErr_Format(PyExc_TypeError,
+                         "merge sort not available for item %d", i);
+            goto fail;
+        }
+        its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis(
+            (PyObject *)mps[i], axis);
+        if (its[i]==NULL) goto fail;
+    }
+
+    /* Now we can check the axis */
+    nd = mps[0]->nd;
+    if ((nd==0) || (PyArray_SIZE(mps[0])==1)) {
+        /* trivial case: the single index is 0 */
+        ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd,
+                                           mps[0]->dimensions,
+                                           PyArray_INTP,
+                                           NULL, NULL, 0, 0, NULL);
+        if (ret == NULL) goto fail;
+        *((intp *)(ret->data)) = 0;
+        /* BUG FIX: this early return used to leak mps, its and all
+           the key/iterator references */
+        for (i=0; i<n; i++) {Py_XDECREF(mps[i]); Py_XDECREF(its[i]);}
+        _pya_free(mps);
+        _pya_free(its);
+        return (PyObject *)ret;
+    }
+    if (axis < 0) axis += nd;
+    if ((axis < 0) || (axis >= nd)) {
+        PyErr_Format(PyExc_ValueError,
+                     "axis(=%d) out of bounds", axis);
+        goto fail;
+    }
+
+    /* Now do the sorting */
+
+    ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd,
+                                       mps[0]->dimensions, PyArray_INTP,
+                                       NULL, NULL, 0, 0, NULL);
+    if (ret == NULL) goto fail;
+
+    rit = (PyArrayIterObject *)
+        PyArray_IterAllButAxis((PyObject *)ret, axis);
+    if (rit == NULL) goto fail;
+
+    size = rit->size;
+    N = mps[0]->dimensions[axis];
+    rstride = PyArray_STRIDE(ret,axis);
+
+    /* BUG FIX: the scratch-buffer size below used `elsize` before it
+       was ever assigned; compute the largest key itemsize up front */
+    maxelsize = mps[0]->descr->elsize;
+    needcopy = (rstride != sizeof(intp));
+    for (j=0; j<n; j++) {
+        needcopy = needcopy || !(mps[j]->flags & ALIGNED) ||
+            (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize);
+        if (mps[j]->descr->elsize > maxelsize)
+            maxelsize = mps[j]->descr->elsize;
+    }
+
+    if (needcopy) {
+        char *valbuffer, *indbuffer;
+        /* one allocation: room for N of the largest key elements
+           followed by N intp indices */
+        valbuffer = PyDataMem_NEW(N*(maxelsize+sizeof(intp)));
+        if (valbuffer == NULL) {
+            /* BUG FIX: the allocation was previously unchecked */
+            PyErr_NoMemory();
+            goto fail;
+        }
+        indbuffer = valbuffer + (N*maxelsize);
+        while (size--) {
+            iptr = (intp *)indbuffer;
+            for (i=0; i<N; i++) *iptr++ = i;
+            /* stable (merge) argsort by each key in turn yields the
+               lexicographic order with the LAST key most significant */
+            for (j=0; j<n; j++) {
+                elsize = mps[j]->descr->elsize;
+                astride = mps[j]->strides[axis];
+                argsort = mps[j]->descr->f->argsort[PyArray_MERGESORT];
+                _strided_copy(valbuffer, (intp) elsize, its[j]->dataptr,
+                              astride, N, elsize);
+                if (argsort(valbuffer, (intp *)indbuffer, N, mps[j]) < 0) {
+                    PyDataMem_FREE(valbuffer); goto fail;
+                }
+                PyArray_ITER_NEXT(its[j]);
+            }
+            _strided_copy(rit->dataptr, rstride, indbuffer,
+                          sizeof(intp), N, sizeof(intp));
+            PyArray_ITER_NEXT(rit);
+        }
+        PyDataMem_FREE(valbuffer);
+    }
+    else {
+        while (size--) {
+            iptr = (intp *)rit->dataptr;
+            for (i=0; i<N; i++) *iptr++ = i;
+            for (j=0; j<n; j++) {
+                argsort = mps[j]->descr->f->argsort[PyArray_MERGESORT];
+                if (argsort(its[j]->dataptr, (intp *)rit->dataptr,
+                            N, mps[j]) < 0) goto fail;
+                PyArray_ITER_NEXT(its[j]);
+            }
+            PyArray_ITER_NEXT(rit);
+        }
+    }
+
+    for (i=0; i<n; i++) {Py_XDECREF(mps[i]); Py_XDECREF(its[i]);}
+    Py_DECREF(rit);
+    _pya_free(mps);
+    _pya_free(its);
+    return (PyObject *)ret;
+
+ fail:
+    Py_XDECREF(rit);
+    Py_XDECREF(ret);
+    for (i=0; i<n; i++) {Py_XDECREF(mps[i]); Py_XDECREF(its[i]);}
+    _pya_free(mps);
+    _pya_free(its);
+    return NULL;
+
+}
+
+
+static void
+local_where(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject *ret)
+{
+    /* For every element of ap2, binary-search the 1-d array ap1 and
+       store into ret the LEFTMOST index at which the element could
+       be inserted keeping ap1 sorted (searchsorted semantics).
+       ap1 is assumed sorted ascending; all arrays contiguous. */
+    PyArray_CompareFunc *compare = ap2->descr->f->compare;
+    intp min_i, max_i, i, j;
+    int location, elsize = ap1->descr->elsize;
+    intp elements = ap1->dimensions[ap1->nd-1];
+    intp n = PyArray_SIZE(ap2);
+    intp *rp = (intp *)ret->data;
+    char *ip = ap2->data;
+    char *vp = ap1->data;
+
+    for (j=0; j<n; j++, ip+=elsize, rp++) {
+        min_i = 0;
+        max_i = elements;
+        /* invariant: the insertion point lies in [min_i, max_i] */
+        while (min_i != max_i) {
+            i = (max_i-min_i)/2 + min_i;
+            location = compare(ip, vp+elsize*i, ap2);
+            if (location == 0) {
+                /* exact match: scan left to the first equal entry */
+                while (i > 0) {
+                    if (compare(ip, vp+elsize*(--i), ap2) \
+                        != 0) {
+                        i = i+1; break;
+                    }
+                }
+                min_i = i;
+                break;
+            }
+            else if (location < 0) {
+                max_i = i;
+            } else {
+                min_i = i+1;
+            }
+        }
+        *rp = min_i;
+    }
+}
+
+/*MULTIARRAY_API
+ Numeric.searchsorted(a,v)
+*/
+static PyObject *
+PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2)
+{
+    /* Return, for each element of op2, its leftmost insertion index
+       into the sorted 1-d array op1 (the work is done by
+       local_where).  Both inputs are promoted to a common type and
+       made contiguous first. */
+    PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL;
+    int typenum = 0;
+
+    /*
+      PyObject *args;
+      args = Py_BuildValue("O",op2);
+      Py_DELEGATE_ARGS(((PyObject *)op1), searchsorted, args);
+      Py_XDECREF(args);
+    */
+
+    typenum = PyArray_ObjectType((PyObject *)op1, 0);
+    typenum = PyArray_ObjectType(op2, typenum);
+    ret = NULL;
+    /* op1 must be exactly 1-d; op2 may be any shape */
+    ap1 = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)op1,
+                                                     typenum,
+                                                     1, 1);
+    if (ap1 == NULL) return NULL;
+    ap2 = (PyArrayObject *)PyArray_ContiguousFromAny(op2, typenum,
+                                                     0, 0);
+    if (ap2 == NULL) goto fail;
+
+    /* result takes op2's shape, holding intp indices */
+    ret = (PyArrayObject *)PyArray_New(ap2->ob_type, ap2->nd,
+                                       ap2->dimensions, PyArray_INTP,
+                                       NULL, NULL, 0, 0, (PyObject *)ap2);
+    if (ret == NULL) goto fail;
+
+    if (ap2->descr->f->compare == NULL) {
+        PyErr_SetString(PyExc_TypeError,
+                        "compare not supported for type");
+        goto fail;
+    }
+
+    local_where(ap1, ap2, ret);
+
+    Py_DECREF(ap1);
+    Py_DECREF(ap2);
+    return (PyObject *)ret;
+
+ fail:
+    Py_XDECREF(ap1);
+    Py_XDECREF(ap2);
+    Py_XDECREF(ret);
+    return NULL;
+}
+
+
+
+/* Could perhaps be redone to not make contiguous arrays
+ */
+
/*MULTIARRAY_API
 Numeric.innerproduct(a,v)
*/
/* Inner product: sum of products over the LAST axis of both operands.
 * The result shape is op1.shape[:-1] + op2.shape[:-1].  Both operands
 * are converted to contiguous arrays of a common type; the output
 * subtype is chosen by __array_priority__.
 */
static PyObject *
PyArray_InnerProduct(PyObject *op1, PyObject *op2)
{
        PyArrayObject *ap1, *ap2, *ret=NULL;
        intp i, j, l, i1, i2, n1, n2;
        int typenum;
        intp is1, is2, os;
        char *ip1, *ip2, *op;
        intp dimensions[MAX_DIMS], nd;
        PyArray_DotFunc *dot;
        PyTypeObject *subtype;
        double prior1, prior2;

        /* common type for both operands */
        typenum = PyArray_ObjectType(op1, 0);
        typenum = PyArray_ObjectType(op2, typenum);

        ap1 = (PyArrayObject *)PyArray_ContiguousFromAny(op1, typenum,
                                                         0, 0);
        if (ap1 == NULL) return NULL;
        ap2 = (PyArrayObject *)PyArray_ContiguousFromAny(op2, typenum,
                                                         0, 0);
        if (ap2 == NULL) goto fail;

        /* a 0-d operand degenerates to plain multiplication; `ret` is
           only reused here as a scratch variable to pick the array
           whose nb_multiply slot is called */
        if (ap1->nd == 0 || ap2->nd == 0) {
                ret = (ap1->nd == 0 ? ap1 : ap2);
                ret = (PyArrayObject *)ret->ob_type->tp_as_number->\
                        nb_multiply((PyObject *)ap1, (PyObject *)ap2);
                Py_DECREF(ap1);
                Py_DECREF(ap2);
                return (PyObject *)ret;
        }

        l = ap1->dimensions[ap1->nd-1];  /* length of the summed axis */

        if (ap2->dimensions[ap2->nd-1] != l) {
                PyErr_SetString(PyExc_ValueError, "matrices are not aligned");
                goto fail;
        }

        /* n1/n2: number of length-l "rows" in each (contiguous) operand */
        if (l == 0) n1 = n2 = 0;
        else {
                n1 = PyArray_SIZE(ap1)/l;
                n2 = PyArray_SIZE(ap2)/l;
        }

        /* output shape: op1 dims minus last, then op2 dims minus last */
        nd = ap1->nd+ap2->nd-2;
        j = 0;
        for(i=0; i<ap1->nd-1; i++) {
                dimensions[j++] = ap1->dimensions[i];
        }
        for(i=0; i<ap2->nd-1; i++) {
                dimensions[j++] = ap2->dimensions[i];
        }


        /* Need to choose an output array that can hold a sum
           -- use priority to determine which subtype.
        */
        prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
        prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
        subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type);

        ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
                                           typenum, NULL, NULL, 0, 0,
                                           (PyObject *)
                                           (prior2 > prior1 ? ap2 : ap1));
        if (ret == NULL) goto fail;

        dot = (ret->descr->f->dotfunc);

        if (dot == NULL) {
                PyErr_SetString(PyExc_ValueError,
                                "dot not available for this type");
                goto fail;
        }


        is1 = ap1->strides[ap1->nd-1];
        is2 = ap2->strides[ap2->nd-1];
        op = ret->data; os = ret->descr->elsize;

        /* every row of ap1 against every row of ap2 */
        ip1 = ap1->data;
        for(i1=0; i1<n1; i1++) {
                ip2 = ap2->data;
                for(i2=0; i2<n2; i2++) {
                        dot(ip1, is1, ip2, is2, op, l, ret);
                        ip2 += is2*l;
                        op += os;
                }
                ip1 += is1*l;
        }
        /* dot for OBJECT arrays may raise; propagate */
        if (PyErr_Occurred()) goto fail;


        Py_DECREF(ap1);
        Py_DECREF(ap2);
        return (PyObject *)ret;

 fail:
        Py_XDECREF(ap1);
        Py_XDECREF(ap2);
        Py_XDECREF(ret);
        return NULL;
}
+
+
/* just like inner product but does the swapaxes stuff on the fly:
   the sum runs over the LAST axis of op1 and the SECOND-TO-LAST axis
   of op2 (matrix-multiplication semantics). */
/*MULTIARRAY_API
 Numeric.matrixproduct(a,v)
*/
static PyObject *
PyArray_MatrixProduct(PyObject *op1, PyObject *op2)
{
        PyArrayObject *ap1, *ap2, *ret=NULL;
        intp i, j, l, i1, i2, n1, n2;
        int typenum;
        intp is1, is2, os;
        char *ip1, *ip2, *op;
        intp dimensions[MAX_DIMS], nd;
        PyArray_DotFunc *dot;
        intp matchDim, otherDim, is2r, is1r;
        PyTypeObject *subtype;
        double prior1, prior2;
        PyArray_Descr *typec;

        typenum = PyArray_ObjectType(op1, 0);
        typenum = PyArray_ObjectType(op2, typenum);

        /* one extra reference because each PyArray_FromAny call below
           steals one reference to typec */
        typec = PyArray_DescrFromType(typenum);
        Py_INCREF(typec);
        ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0,
                                               DEFAULT_FLAGS);
        if (ap1 == NULL) {Py_DECREF(typec); return NULL;}
        ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0,
                                               DEFAULT_FLAGS);
        if (ap2 == NULL) goto fail;

        /* a 0-d operand degenerates to plain multiplication */
        if (ap1->nd == 0 || ap2->nd == 0) {
                ret = (ap1->nd == 0 ? ap1 : ap2);
                ret = (PyArrayObject *)ret->ob_type->tp_as_number->\
                        nb_multiply((PyObject *)ap1, (PyObject *)ap2);
                Py_DECREF(ap1);
                Py_DECREF(ap2);
                return (PyObject *)ret;
        }

        /* matchDim: the axis of ap2 summed against ap1's last axis;
           otherDim: the axis of ap2 that survives into the output.
           For 1-d ap2 they coincide. */
        l = ap1->dimensions[ap1->nd-1];
        if (ap2->nd > 1) {
                matchDim = ap2->nd - 2;
                otherDim = ap2->nd - 1;
        }
        else {
                matchDim = 0;
                otherDim = 0;
        }

        if (ap2->dimensions[matchDim] != l) {
                PyErr_SetString(PyExc_ValueError, "objects are not aligned");
                goto fail;
        }

        if (l == 0) n1 = n2 = 0;
        else {
                n1 = PyArray_SIZE(ap1)/l;
                n2 = PyArray_SIZE(ap2)/l;
        }

        /* output shape: op1 dims minus last, op2 dims minus the last
           two, then op2's last dim (if ap2 is >1-d) */
        nd = ap1->nd+ap2->nd-2;
        j = 0;
        for(i=0; i<ap1->nd-1; i++) {
                dimensions[j++] = ap1->dimensions[i];
        }
        for(i=0; i<ap2->nd-2; i++) {
                dimensions[j++] = ap2->dimensions[i];
        }
        if(ap2->nd > 1) {
                dimensions[j++] = ap2->dimensions[ap2->nd-1];
        }

        /* Choose which subtype to return */
        prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
        prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
        subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type);

        ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
                                           typenum, NULL, NULL, 0, 0,
                                           (PyObject *)
                                           (prior2 > prior1 ? ap2 : ap1));
        if (ret == NULL) goto fail;

        dot = ret->descr->f->dotfunc;
        if (dot == NULL) {
                PyErr_SetString(PyExc_ValueError,
                                "dot not available for this type");
                goto fail;
        }

        /* is1/is2: strides along the summed axes;
           is1r/is2r: strides used to advance between dot calls */
        is1 = ap1->strides[ap1->nd-1]; is2 = ap2->strides[matchDim];
        if(ap1->nd > 1)
                is1r = ap1->strides[ap1->nd-2];
        else
                is1r = ap1->strides[ap1->nd-1];
        is2r = ap2->strides[otherDim];

        op = ret->data; os = ret->descr->elsize;

        ip1 = ap1->data;
        for(i1=0; i1<n1; i1++) {
                ip2 = ap2->data;
                for(i2=0; i2<n2; i2++) {
                        dot(ip1, is1, ip2, is2, op, l, ret);
                        ip2 += is2r;
                        op += os;
                }
                ip1 += is1r;
        }
        /* dot for OBJECT arrays may raise; propagate */
        if (PyErr_Occurred()) goto fail;


        Py_DECREF(ap1);
        Py_DECREF(ap2);
        return (PyObject *)ret;

 fail:
        Py_XDECREF(ap1);
        Py_XDECREF(ap2);
        Py_XDECREF(ret);
        return NULL;
}
+
+/*MULTIARRAY_API
+ Fast Copy and Transpose
+*/
+static PyObject *
+PyArray_CopyAndTranspose(PyObject *op)
+{
+ PyObject *ret, *arr;
+ int nd;
+ intp dims[2];
+ intp i,j;
+ int elsize, str2;
+ char *iptr;
+ char *optr;
+
+ /* make sure it is well-behaved */
+ arr = PyArray_FromAny(op, NULL, 0, 0, CARRAY_FLAGS);
+ nd = PyArray_NDIM(arr);
+ if (nd == 1) { /* we will give in to old behavior */
+ ret = PyArray_Copy((PyArrayObject *)arr);
+ Py_DECREF(arr);
+ return ret;
+ }
+ else if (nd != 2) {
+ Py_DECREF(arr);
+ PyErr_SetString(PyExc_ValueError,
+ "only 2-d arrays are allowed");
+ return NULL;
+ }
+
+ /* Now construct output array */
+ dims[0] = PyArray_DIM(arr,1);
+ dims[1] = PyArray_DIM(arr,0);
+ elsize = PyArray_ITEMSIZE(arr);
+
+ Py_INCREF(PyArray_DESCR(arr));
+ ret = PyArray_NewFromDescr(arr->ob_type,
+ PyArray_DESCR(arr),
+ 2, dims,
+ NULL, NULL, 0, arr);
+
+ if (ret == NULL) {
+ Py_DECREF(arr);
+ return NULL;
+ }
+ /* do 2-d loop */
+ optr = PyArray_DATA(ret);
+ str2 = elsize*dims[0];
+ for (i=0; i<dims[0]; i++) {
+ iptr = PyArray_DATA(arr) + i*elsize;
+ for (j=0; j<dims[1]; j++) {
+ /* optr[i,j] = iptr[j,i] */
+ memcpy(optr, iptr, elsize);
+ optr += elsize;
+ iptr += str2;
+ }
+ }
+ Py_DECREF(arr);
+ return ret;
+}
+
/*MULTIARRAY_API
 Numeric.correlate(a1,a2,mode)
*/
/* 1-d cross-correlation of two vectors.  mode selects the output
 * length: 0 -> only full overlaps (n1-n2+1), 1 -> same length as the
 * longer input, 2 -> all overlaps (n1+n2-1).
 */
static PyObject *
PyArray_Correlate(PyObject *op1, PyObject *op2, int mode)
{
        PyArrayObject *ap1, *ap2, *ret=NULL;
        intp length;
        intp i, n1, n2, n, n_left, n_right;
        int typenum;
        intp is1, is2, os;
        char *ip1, *ip2, *op;
        PyArray_DotFunc *dot;
        PyArray_Descr *typec;
        double prior1, prior2;
        PyTypeObject *subtype=NULL;

        typenum = PyArray_ObjectType(op1, 0);
        typenum = PyArray_ObjectType(op2, typenum);

        /* extra reference: each PyArray_FromAny below steals one */
        typec = PyArray_DescrFromType(typenum);
        Py_INCREF(typec);
        ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1,
                                               DEFAULT_FLAGS);
        if (ap1 == NULL) {Py_DECREF(typec); return NULL;}
        ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1,
                                               DEFAULT_FLAGS);
        if (ap2 == NULL) goto fail;

        n1 = ap1->dimensions[0];
        n2 = ap2->dimensions[0];

        /* make ap1 the longer vector; `ret` is only borrowed here as a
           swap temporary */
        if (n1 < n2) {
                ret = ap1; ap1 = ap2; ap2 = ret;
                ret = NULL; i = n1;n1=n2;n2=i;
        }
        length = n1;
        n = n2;
        /* n_left/n_right: number of partial overlaps computed on each
           side of the fully-overlapping region */
        switch(mode) {
        case 0:
                length = length-n+1;
                n_left = n_right = 0;
                break;
        case 1:
                n_left = (intp)(n/2);
                n_right = n-n_left-1;
                break;
        case 2:
                n_right = n-1;
                n_left = n-1;
                length = length+n-1;
                break;
        default:
                PyErr_SetString(PyExc_ValueError,
                                "mode must be 0, 1, or 2");
                goto fail;
        }

        /* Need to choose an output array that can hold a sum
           -- use priority to determine which subtype.
        */
        prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
        prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
        subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type);

        ret = (PyArrayObject *)PyArray_New(subtype, 1,
                                           &length, typenum,
                                           NULL, NULL, 0, 0,
                                           (PyObject *)
                                           (prior2 > prior1 ? ap2 : ap1));
        if (ret == NULL) goto fail;

        dot = ret->descr->f->dotfunc;
        if (dot == NULL) {
                PyErr_SetString(PyExc_ValueError,
                                "function not available for this data type");
                goto fail;
        }

        is1 = ap1->strides[0]; is2 = ap2->strides[0];
        op = ret->data; os = ret->descr->elsize;

        /* phase 1: growing partial overlaps on the left edge */
        ip1 = ap1->data; ip2 = ap2->data+n_left*is2;
        n = n-n_left;
        for(i=0; i<n_left; i++) {
                dot(ip1, is1, ip2, is2, op, n, ret);
                n++;
                ip2 -= is2;
                op += os;
        }
        /* phase 2: full overlaps, sliding ap2 across ap1 */
        for(i=0; i<(n1-n2+1); i++) {
                dot(ip1, is1, ip2, is2, op, n, ret);
                ip1 += is1;
                op += os;
        }
        /* phase 3: shrinking partial overlaps on the right edge */
        for(i=0; i<n_right; i++) {
                n--;
                dot(ip1, is1, ip2, is2, op, n, ret);
                ip1 += is1;
                op += os;
        }
        if (PyErr_Occurred()) goto fail;
        Py_DECREF(ap1);
        Py_DECREF(ap2);
        return (PyObject *)ret;

 fail:
        Py_XDECREF(ap1);
        Py_XDECREF(ap2);
        Py_XDECREF(ret);
        return NULL;
}
+
+
+/*MULTIARRAY_API
+ ArgMin
+*/
+static PyObject *
+PyArray_ArgMin(PyArrayObject *ap, int axis)
+{
+ PyObject *obj, *new, *ret;
+
+ if (PyArray_ISFLEXIBLE(ap)) {
+ PyErr_SetString(PyExc_TypeError,
+ "argmax is unsupported for this type");
+ return NULL;
+ }
+ else if (PyArray_ISUNSIGNED(ap))
+ obj = PyInt_FromLong((long) -1);
+
+ else if (PyArray_TYPE(ap)==PyArray_BOOL)
+ obj = PyInt_FromLong((long) 1);
+
+ else
+ obj = PyInt_FromLong((long) 0);
+
+ new = PyArray_EnsureArray(PyNumber_Subtract(obj, (PyObject *)ap));
+ Py_DECREF(obj);
+ if (new == NULL) return NULL;
+ ret = PyArray_ArgMax((PyArrayObject *)new, axis);
+ Py_DECREF(new);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Max
+*/
+static PyObject *
+PyArray_Max(PyArrayObject *ap, int axis)
+{
+ PyArrayObject *arr;
+ PyObject *ret;
+
+ if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL)
+ return NULL;
+ ret = PyArray_GenericReduceFunction(arr, n_ops.maximum, axis,
+ arr->descr->type_num);
+ Py_DECREF(arr);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Min
+*/
+static PyObject *
+PyArray_Min(PyArrayObject *ap, int axis)
+{
+ PyArrayObject *arr;
+ PyObject *ret;
+
+ if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL)
+ return NULL;
+ ret = PyArray_GenericReduceFunction(arr, n_ops.minimum, axis,
+ arr->descr->type_num);
+ Py_DECREF(arr);
+ return ret;
+}
+
+/*MULTIARRAY_API
+ Ptp
+*/
+static PyObject *
+PyArray_Ptp(PyArrayObject *ap, int axis)
+{
+ PyArrayObject *arr;
+ PyObject *ret;
+ PyObject *obj1=NULL, *obj2=NULL;
+
+ if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL)
+ return NULL;
+ obj1 = PyArray_Max(arr, axis);
+ if (obj1 == NULL) goto fail;
+ obj2 = PyArray_Min(arr, axis);
+ if (obj2 == NULL) goto fail;
+ Py_DECREF(arr);
+ ret = PyNumber_Subtract(obj1, obj2);
+ Py_DECREF(obj1);
+ Py_DECREF(obj2);
+ return ret;
+
+ fail:
+ Py_XDECREF(arr);
+ Py_XDECREF(obj1);
+ Py_XDECREF(obj2);
+ return NULL;
+}
+
+
/*MULTIARRAY_API
 ArgMax
*/
/* Index of the maximum along the given axis.  The target axis is moved
 * to the end (SWAPAXES), the per-row argmax function is run over the
 * last axis, and the index array is transformed back (SWAPBACK).
 */
static PyObject *
PyArray_ArgMax(PyArrayObject *op, int axis)
{
        PyArrayObject *ap=NULL, *rp=NULL;
        PyArray_ArgFunc* arg_func;
        char *ip;
        intp *rptr;
        intp i, n, orign, m;
        int elsize;

        if ((ap=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL;

        /* NOTE(review): SWAPAXES/SWAPBACK are macros defined elsewhere
           in this file — presumably SWAPAXES stores in `op` a view of
           `ap` with `axis` moved to the last position (and consumes
           ap); confirm against the macro definitions. */
        SWAPAXES(op, ap);

        /* make the swapped array contiguous so rows of length m are
           adjacent in memory */
        ap = (PyArrayObject *)\
                PyArray_ContiguousFromAny((PyObject *)op,
                                          PyArray_NOTYPE, 1, 0);

        Py_DECREF(op);
        if (ap == NULL) return NULL;

        arg_func = ap->descr->f->argmax;
        if (arg_func == NULL) {
                PyErr_SetString(PyExc_TypeError, "data type not ordered");
                goto fail;
        }

        /* output: one intp index per row, shape = ap minus last dim */
        rp = (PyArrayObject *)PyArray_New(ap->ob_type, ap->nd-1,
                                          ap->dimensions, PyArray_INTP,
                                          NULL, NULL, 0, 0,
                                          (PyObject *)ap);
        if (rp == NULL) goto fail;


        elsize = ap->descr->elsize;
        m = ap->dimensions[ap->nd-1];   /* row length */
        if (m == 0) {
                PyErr_SetString(MultiArrayError,
                                "attempt to get argmax/argmin "\
                                "of an empty sequence??");
                goto fail;
        }
        n = PyArray_SIZE(ap)/m;         /* number of rows */
        rptr = (intp *)rp->data;
        for (ip = ap->data, i=0; i<n; i++, ip+=elsize*m) {
                arg_func(ip, m, rptr, ap);
                rptr += 1;
        }
        Py_DECREF(ap);

        SWAPBACK(op, rp); /* op now contains the return */

        return (PyObject *)op;

 fail:
        Py_DECREF(ap);
        Py_XDECREF(rp);
        return NULL;
}
+
+
/*MULTIARRAY_API
 Take
*/
/* Take elements from self0 along `axis` at the positions given by
 * indices0.  The output shape is self.shape[:axis] + indices.shape +
 * self.shape[axis+1:].  Negative indices are wrapped once; anything
 * still out of range raises IndexError.
 */
static PyObject *
PyArray_Take(PyArrayObject *self0, PyObject *indices0, int axis)
{
        PyArrayObject *self, *indices, *ret;
        intp nd, i, j, n, m, max_item, tmp, chunk;
        intp shape[MAX_DIMS];
        char *src, *dest;

        indices = ret = NULL;
        self = (PyAO *)_check_axis(self0, &axis, CARRAY_FLAGS);
        if (self == NULL) return NULL;

        indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0,
                                                             PyArray_INTP,
                                                             1, 0);
        if (indices == NULL) goto fail;

        /* n: product of dims before axis; m: number of indices;
           chunk: product of dims after axis (in elements, converted to
           bytes below) */
        n = m = chunk = 1;
        nd = self->nd + indices->nd - 1;
        for (i=0; i< nd; i++) {
                if (i < axis) {
                        shape[i] = self->dimensions[i];
                        n *= shape[i];
                } else {
                        if (i < axis+indices->nd) {
                                shape[i] = indices->dimensions[i-axis];
                                m *= shape[i];
                        } else {
                                shape[i] = self->dimensions[i-indices->nd+1];
                                chunk *= shape[i];
                        }
                }
        }
        /* NewFromDescr steals a descr reference */
        Py_INCREF(self->descr);
        ret = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type,
                                                    self->descr,
                                                    nd, shape,
                                                    NULL, NULL, 0,
                                                    (PyObject *)self);

        if (ret == NULL) goto fail;

        max_item = self->dimensions[axis];
        chunk = chunk * ret->descr->elsize;     /* now in bytes */
        src = self->data;
        dest = ret->data;

        for(i=0; i<n; i++) {
                for(j=0; j<m; j++) {
                        tmp = ((intp *)(indices->data))[j];
                        if (tmp < 0) tmp = tmp+max_item;
                        if ((tmp < 0) || (tmp >= max_item)) {
                                PyErr_SetString(PyExc_IndexError,
                                                "index out of range for "\
                                                "array");
                                goto fail;
                        }
                        memmove(dest, src+tmp*chunk, chunk);
                        dest += chunk;
                }
                src += chunk*max_item;
        }

        /* for OBJECT arrays, add a reference to every copied object */
        PyArray_INCREF(ret);

        Py_XDECREF(indices);
        Py_XDECREF(self);

        return (PyObject *)ret;


 fail:
        Py_XDECREF(ret);
        Py_XDECREF(indices);
        Py_XDECREF(self);
        return NULL;
}
+
/*MULTIARRAY_API
 Put values into an array
*/
/* self[indices] = values, with values repeated cyclically if there are
 * fewer values than indices.  self must be contiguous; negative
 * indices wrap once.  OBJECT arrays get reference-count maintenance on
 * both the stored and the overwritten objects.
 *
 * NOTE(review): the counters (i, ni, max_item, nv, tmp) are int, not
 * intp — may overflow for arrays with more than INT_MAX elements;
 * confirm whether that is a concern on this platform.
 */
static PyObject *
PyArray_Put(PyArrayObject *self, PyObject* values0, PyObject *indices0)
{
        PyArrayObject  *indices, *values;
        int i, chunk, ni, max_item, nv, tmp, thistype;
        char *src, *dest;

        indices = NULL;
        values = NULL;

        if (!PyArray_Check(self)) {
                PyErr_SetString(PyExc_TypeError, "put: first argument must be an array");
                return NULL;
        }
        if (!PyArray_ISCONTIGUOUS(self)) {
                PyErr_SetString(PyExc_ValueError, "put: first argument must be contiguous");
                return NULL;
        }
        max_item = PyArray_SIZE(self);
        dest = self->data;
        chunk = self->descr->elsize;

        indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, PyArray_INTP, 0, 0);
        if (indices == NULL) goto fail;
        ni = PyArray_SIZE(indices);

        /* convert values to self's type so raw memmove is valid */
        thistype = self->descr->type_num;
        values = (PyArrayObject *)\
                PyArray_ContiguousFromAny(values0, thistype, 0, 0);
        if (values == NULL) goto fail;
        nv = PyArray_SIZE(values);
        if (nv > 0) { /* nv == 0 for a null array */
                if (thistype == PyArray_OBJECT) {
                        /* object arrays: incref the incoming object and
                           release the one being overwritten */
                        for(i=0; i<ni; i++) {
                                src = values->data + chunk * (i % nv);
                                tmp = ((intp *)(indices->data))[i];
                                if (tmp < 0) tmp = tmp+max_item;
                                if ((tmp < 0) || (tmp >= max_item)) {
                                        PyErr_SetString(PyExc_IndexError, "index out of range for array");
                                        goto fail;
                                }
                                Py_INCREF(*((PyObject **)src));
                                Py_XDECREF(*((PyObject **)(dest+tmp*chunk)));
                                memmove(dest + tmp * chunk, src, chunk);
                        }
                }
                else {
                        for(i=0; i<ni; i++) {
                                src = values->data + chunk * (i % nv);
                                tmp = ((intp *)(indices->data))[i];
                                if (tmp < 0) tmp = tmp+max_item;
                                if ((tmp < 0) || (tmp >= max_item)) {
                                        PyErr_SetString(PyExc_IndexError, "index out of range for array");
                                        goto fail;
                                }
                                memmove(dest + tmp * chunk, src, chunk);
                        }
                }

        }

        Py_XDECREF(values);
        Py_XDECREF(indices);
        Py_INCREF(Py_None);
        return Py_None;

 fail:
        Py_XDECREF(indices);
        Py_XDECREF(values);
        return NULL;
}
+
/*MULTIARRAY_API
 Put values into an array according to a mask.
*/
/* self[mask] = values: wherever the boolean mask is true, store the
 * next value (values repeat cyclically).  The mask must have exactly
 * as many elements as self, and self must be contiguous.
 */
static PyObject *
PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
{
        PyArrayObject  *mask, *values;
        int i, chunk, ni, max_item, nv, tmp, thistype;
        char *src, *dest;

        mask = NULL;
        values = NULL;

        if (!PyArray_Check(self)) {
                PyErr_SetString(PyExc_TypeError,
                                "putmask: first argument must "\
                                "be an array");
                return NULL;
        }
        if (!PyArray_ISCONTIGUOUS(self)) {
                PyErr_SetString(PyExc_ValueError,
                                "putmask: first argument must be contiguous");
                return NULL;
        }

        max_item = PyArray_SIZE(self);
        dest = self->data;
        chunk = self->descr->elsize;

        /* FORCECAST: accept any mask convertible to boolean */
        mask = (PyArrayObject *)\
                PyArray_FROM_OTF(mask0, PyArray_BOOL, CARRAY_FLAGS | FORCECAST);
        if (mask == NULL) goto fail;
        ni = PyArray_SIZE(mask);
        if (ni != max_item) {
                PyErr_SetString(PyExc_ValueError,
                                "putmask: mask and data must be "\
                                "the same size");
                goto fail;
        }

        /* convert values to self's type so raw memmove is valid */
        thistype = self->descr->type_num;
        values = (PyArrayObject *)\
                PyArray_ContiguousFromAny(values0, thistype, 0, 0);
        if (values == NULL) goto fail;
        nv = PyArray_SIZE(values); /* zero if null array */
        if (nv > 0) {
                if (thistype == PyArray_OBJECT) {
                        /* object arrays: incref the stored object and
                           release the overwritten one */
                        for(i=0; i<ni; i++) {
                                src = values->data + chunk * (i % nv);
                                tmp = ((Bool *)(mask->data))[i];
                                if (tmp) {
                                        Py_INCREF(*((PyObject **)src));
                                        Py_XDECREF(*((PyObject **)(dest+i*chunk)));
                                        memmove(dest + i * chunk, src, chunk);
                                }
                        }
                }
                else {
                        for(i=0; i<ni; i++) {
                                src = values->data + chunk * (i % nv);
                                tmp = ((Bool *)(mask->data))[i];
                                if (tmp) memmove(dest + i * chunk, src, chunk);
                        }
                }
        }

        Py_XDECREF(values);
        Py_XDECREF(mask);
        Py_INCREF(Py_None);
        return Py_None;

 fail:
        Py_XDECREF(mask);
        Py_XDECREF(values);
        return NULL;
}
+
+
+/* This conversion function can be used with the "O&" argument for
+ PyArg_ParseTuple. It will immediately return an object of array type
+ or will convert to a CARRAY any other object.
+
+ If you use PyArray_Converter, you must DECREF the array when finished
+ as you get a new reference to it.
+*/
+
+/*MULTIARRAY_API
+ Useful to pass as converter function for O& processing in
+ PyArgs_ParseTuple.
+*/
+static int
+PyArray_Converter(PyObject *object, PyObject **address)
+{
+ if (PyArray_Check(object)) {
+ *address = object;
+ Py_INCREF(object);
+ return PY_SUCCEED;
+ }
+ else {
+ *address = PyArray_FromAny(object, NULL, 0, 0, CARRAY_FLAGS);
+ if (*address == NULL) return PY_FAIL;
+ return PY_SUCCEED;
+ }
+}
+
+/*MULTIARRAY_API
+ Convert an object to true / false
+*/
+static int
+PyArray_BoolConverter(PyObject *object, Bool *val)
+{
+ if (PyObject_IsTrue(object))
+ *val=TRUE;
+ else *val=FALSE;
+ if (PyErr_Occurred())
+ return PY_FAIL;
+ return PY_SUCCEED;
+}
+
+
/*MULTIARRAY_API
 Typestr converter
*/
/* Map a (itemsize, kind-letter) pair from an array-interface typestr
 * to a concrete type number.  Unknown or unsupported combinations
 * yield PyArray_NOTYPE.  Letters not handled here pass through
 * unchanged.
 */
static int
PyArray_TypestrConvert(int itemsize, int gentype)
{
        register int newtype = gentype;

        if (gentype == PyArray_GENBOOLLTR) {
                if (itemsize == 1)
                        newtype = PyArray_BOOL;
                else
                        newtype = PyArray_NOTYPE;
        }
        else if (gentype == PyArray_SIGNEDLTR) {
                switch(itemsize) {
                case 1:
                        newtype = PyArray_INT8;
                        break;
                case 2:
                        newtype = PyArray_INT16;
                        break;
                case 4:
                        newtype = PyArray_INT32;
                        break;
                case 8:
                        newtype = PyArray_INT64;
                        break;
#ifdef PyArray_INT128
                case 16:
                        newtype = PyArray_INT128;
                        break;
#endif
                default:
                        newtype = PyArray_NOTYPE;
                }
        }

        else if (gentype == PyArray_UNSIGNEDLTR) {
                switch(itemsize) {
                case 1:
                        newtype = PyArray_UINT8;
                        break;
                case 2:
                        newtype = PyArray_UINT16;
                        break;
                case 4:
                        newtype = PyArray_UINT32;
                        break;
                case 8:
                        newtype = PyArray_UINT64;
                        break;
/* NOTE: guard reuses the signed macro — presumably INT128 and UINT128
   are defined together; confirm in the type headers */
#ifdef PyArray_INT128
                case 16:
                        newtype = PyArray_UINT128;
                        break;
#endif
                default:
                        newtype = PyArray_NOTYPE;
                        break;
                }
        }
        else if (gentype == PyArray_FLOATINGLTR) {
                switch(itemsize) {
                case 4:
                        newtype = PyArray_FLOAT32;
                        break;
                case 8:
                        newtype = PyArray_FLOAT64;
                        break;
#ifdef PyArray_FLOAT80
                case 10:
                        newtype = PyArray_FLOAT80;
                        break;
#endif
#ifdef PyArray_FLOAT96
                case 12:
                        newtype = PyArray_FLOAT96;
                        break;
#endif
#ifdef PyArray_FLOAT128
                case 16:
                        newtype = PyArray_FLOAT128;
                        break;
#endif
                default:
                        newtype = PyArray_NOTYPE;
                }
        }

        else if (gentype == PyArray_COMPLEXLTR) {
                /* complex itemsize is twice the component float size */
                switch(itemsize) {
                case 8:
                        newtype = PyArray_COMPLEX64;
                        break;
                case 16:
                        newtype = PyArray_COMPLEX128;
                        break;
#ifdef PyArray_FLOAT80
                case 20:
                        newtype = PyArray_COMPLEX160;
                        break;
#endif
#ifdef PyArray_FLOAT96
                case 24:
                        newtype = PyArray_COMPLEX192;
                        break;
#endif
#ifdef PyArray_FLOAT128
                case 32:
                        newtype = PyArray_COMPLEX256;
                        break;
#endif
                default:
                        newtype = PyArray_NOTYPE;
                }
        }

        return newtype;
}
+
+
+/* this function takes a Python object which exposes the (single-segment)
+ buffer interface and returns a pointer to the data segment
+
+ You should increment the reference count by one of buf->base
+ if you will hang on to a reference
+
+ You only get a borrowed reference to the object. Do not free the
+ memory...
+*/
+
+
/*MULTIARRAY_API
 Get buffer chunk from object
*/
/* O& converter: fill buf with a pointer/length/flags view of a
 * single-segment buffer object.  A writable buffer is preferred; if
 * only a read buffer is available, WRITEABLE is cleared.  Py_None
 * leaves buf->ptr NULL.  buf->base is a BORROWED reference — the
 * caller must incref it to keep the memory alive.
 */
static int
PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
{
        int buflen;

        buf->ptr = NULL;
        buf->flags = BEHAVED_FLAGS;
        buf->base = NULL;

        if (obj == Py_None)
                return PY_SUCCEED;

        /* try writable first; fall back to read-only */
        if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) {
                PyErr_Clear();
                buf->flags &= ~WRITEABLE;
                if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr),
                                          &buflen) < 0)
                        return PY_FAIL;
        }
        buf->len = (intp) buflen;

        /* Point to the base of the buffer object if present.
           NOTE(review): this cast assumes PyArray_Chunk's leading
           fields line up with Python's buffer object layout — confirm
           against the PyArray_Chunk definition. */
        if (PyBuffer_Check(obj)) buf->base = ((PyArray_Chunk *)obj)->base;
        if (buf->base == NULL) buf->base = obj;

        return PY_SUCCEED;
}
+
+
+
+/* This function takes a Python sequence object and allocates and
+ fills in an intp array with the converted values.
+
+ **Remember to free the pointer seq.ptr when done using
+ PyDimMem_FREE(seq.ptr)**
+*/
+
+/*MULTIARRAY_API
+ Get intp chunk from sequence
+*/
+static int
+PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
+{
+ int len;
+ int nd;
+
+ seq->ptr = NULL;
+ if (obj == Py_None) return PY_SUCCEED;
+ len = PySequence_Size(obj);
+ if (len == -1) { /* Check to see if it is a number */
+ if (PyNumber_Check(obj)) len = 1;
+ }
+ if (len < 0) {
+ PyErr_SetString(PyExc_TypeError,
+ "expected sequence object with len >= 0");
+ return PY_FAIL;
+ }
+ if (len > MAX_DIMS) {
+ PyErr_Format(PyExc_ValueError, "sequence too large; " \
+ "must be smaller than %d", MAX_DIMS);
+ return PY_FAIL;
+ }
+ if (len > 0) {
+ seq->ptr = PyDimMem_NEW(len);
+ if (seq->ptr == NULL) {
+ PyErr_NoMemory();
+ return PY_FAIL;
+ }
+ }
+ seq->len = len;
+ nd = PyArray_IntpFromSequence(obj, (intp *)seq->ptr, len);
+ if (nd == -1 || nd != len) {
+ PyDimMem_FREE(seq->ptr);
+ seq->ptr=NULL;
+ return PY_FAIL;
+ }
+ return PY_SUCCEED;
+}
+
+
+/* A tuple type would be either (generic typeobject, typesize)
+ or (fixed-length data-type, shape)
+
+ or (inheriting data-type, new-data-type)
+ The new data-type must have the same itemsize as the inheriting data-type
+ unless the latter is 0
+
+ Thus (int32, {'real':(int16,0),'imag',(int16,2)})
+
+ is one way to specify a descriptor that will give
+ a['real'] and a['imag'] to an int32 array.
+*/
+
+/* leave type reference alone */
+static PyArray_Descr *
+_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
+{
+ PyArray_Descr *new;
+ PyArray_Descr *conv;
+
+ *errflag = 0;
+ if (!PyArray_DescrConverter(newobj, &conv)) {
+ return NULL;
+ }
+ *errflag = 1;
+ if (type == &OBJECT_Descr) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot base a new descriptor on an"\
+ " OBJECT descriptor.");
+ return NULL;
+ }
+ new = PyArray_DescrNew(type);
+ if (new == NULL) return NULL;
+
+ if (new->elsize && new->elsize != conv->elsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "mismatch in size of old"\
+ "and new data-descriptor");
+ return NULL;
+ }
+ new->elsize = conv->elsize;
+ if (conv->fields != Py_None) {
+ new->fields = conv->fields;
+ Py_XINCREF(new->fields);
+ }
+ Py_DECREF(conv);
+ *errflag = 0;
+ return new;
+}
+
+static PyArray_Descr *
+_convert_from_tuple(PyObject *obj)
+{
+ PyArray_Descr *type, *res;
+ PyObject *val;
+ int errflag;
+
+ if (PyTuple_GET_SIZE(obj) != 2) return NULL;
+
+ if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type))
+ return NULL;
+ val = PyTuple_GET_ITEM(obj,1);
+ /* try to interpret next item as a type */
+ res = _use_inherit(type, val, &errflag);
+ if (res || errflag) {
+ Py_DECREF(type);
+ if (res) return res;
+ else return NULL;
+ }
+ PyErr_Clear();
+ /* We get here if res was NULL but errflag wasn't set
+ --- i.e. the conversion to a data-descr failed in _use_inherit
+ */
+
+ if (type->elsize == 0) { /* interpret next item as a typesize */
+ int itemsize;
+ itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1));
+ if (error_converting(itemsize)) {
+ PyErr_SetString(PyExc_ValueError,
+ "invalid itemsize in generic type "\
+ "tuple");
+ goto fail;
+ }
+ PyArray_DESCR_REPLACE(type);
+ type->elsize = itemsize;
+ }
+ else {
+ /* interpret next item as shape (if it's a tuple)
+ and reset the type to PyArray_VOID with
+ anew fields attribute.
+ */
+ PyArray_Dims shape={NULL,-1};
+ PyArray_Descr *newdescr;
+ if (!(PyArray_IntpConverter(val, &shape)) ||
+ (shape.len > MAX_DIMS)) {
+ PyDimMem_FREE(shape.ptr);
+ PyErr_SetString(PyExc_ValueError,
+ "invalid shape in fixed-type tuple.");
+ goto fail;
+ }
+ newdescr = PyArray_DescrNewFromType(PyArray_VOID);
+ if (newdescr == NULL) {PyDimMem_FREE(shape.ptr); goto fail;}
+ newdescr->elsize = type->elsize;
+ newdescr->elsize *= PyArray_MultiplyList(shape.ptr,
+ shape.len);
+ PyDimMem_FREE(shape.ptr);
+ newdescr->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr));
+ newdescr->subarray->base = type;
+ Py_INCREF(val);
+ newdescr->subarray->shape = val;
+ Py_XDECREF(newdescr->fields);
+ newdescr->fields = NULL;
+ type = newdescr;
+ }
+ return type;
+
+ fail:
+ Py_XDECREF(type);
+ return NULL;
+}
+
+/* obj is a list. Each item is a tuple with
+
+(field-name, data-type (either a list or a string), and an optional
+ shape parameter).
+*/
/* Convert an array_descr-style list to a VOID descriptor.  Each item
 * is a tuple (field-name, data-type [, shape]).  Empty names default
 * to "f<i>".  Field offsets are assigned consecutively; the ordered
 * name list is stored in the fields dict under the key -1.
 */
static PyArray_Descr *
_convert_from_array_descr(PyObject *obj)
{
        int n, i, totalsize;
        int ret;
        PyObject *fields, *item, *newobj;
        PyObject *name, *key, *tup;
        PyObject *nameslist;
        PyArray_Descr *new;
        PyArray_Descr *conv;

        n = PyList_GET_SIZE(obj);
        nameslist = PyList_New(n);
        if (!nameslist) return NULL;
        totalsize = 0;
        fields = PyDict_New();
        for (i=0; i<n; i++) {
                item = PyList_GET_ITEM(obj, i);
                /* each item must be (str, ...) with >= 2 entries */
                if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2) || \
                    !PyString_Check((name = PyTuple_GET_ITEM(item,0))))
                        goto fail;
                if (PyString_GET_SIZE(name)==0) {
                        /* unnamed field: synthesize "f<i>" */
                        name = PyString_FromFormat("f%d", i);
                }
                else {
                        Py_INCREF(name);
                }
                PyList_SET_ITEM(nameslist, i, name);
                if (PyTuple_GET_SIZE(item) == 2) {
                        ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1),
                                                     &conv);
                        if (ret == PY_FAIL)
                                PyObject_Print(PyTuple_GET_ITEM(item,1),
                                               stderr, 0);
                }
                else if (PyTuple_GET_SIZE(item) == 3) {
                        /* (name, type, shape): let the converter handle
                           the (type, shape) pair */
                        newobj = PyTuple_GetSlice(item, 1, 3);
                        ret = PyArray_DescrConverter(newobj, &conv);
                        Py_DECREF(newobj);
                }
                else goto fail;
                if (ret == PY_FAIL) goto fail;
                /* fields[name] = (descr, offset); tup owns conv */
                tup = PyTuple_New(2);
                PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
                PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
                totalsize += conv->elsize;
                PyDict_SetItem(fields, name, tup);
                Py_DECREF(tup);
        }
        /* ordered name list is stashed under the integer key -1 */
        key = PyInt_FromLong(-1);
        PyDict_SetItem(fields, key, nameslist);
        Py_DECREF(key);
        Py_DECREF(nameslist);
        new = PyArray_DescrNewFromType(PyArray_VOID);
        new->fields = fields;
        new->elsize = totalsize;
        return new;

 fail:
        Py_DECREF(fields);
        Py_DECREF(nameslist);
        return NULL;

}
+
+/* a list specifying a data-type can just be
+ a list of formats. The names for the fields
+ will default to f1, f2, f3, and so forth.
+
+ or it can be an array_descr format string -- in which case
+ align must be 0.
+*/
+
/* Convert a plain list of formats to a VOID descriptor with fields
 * named f1, f2, ...  With align nonzero, offsets and the total size
 * are padded to each member's alignment.  If conversion fails and
 * try_descr is set (and align is 0), fall back to interpreting the
 * list as an array_descr structure.
 */
static PyArray_Descr *
_convert_from_list(PyObject *obj, int align, int try_descr)
{
        int n, i;
        int totalsize;
        PyObject *fields;
        PyArray_Descr *conv=NULL;
        PyArray_Descr *new;
        PyObject *key, *tup;
        PyObject *nameslist=NULL;
        int ret;
        int maxalign=0;

        n = PyList_GET_SIZE(obj);
        totalsize = 0;
        if (n==0) return NULL;
        nameslist = PyList_New(n);
        if (!nameslist) return NULL;
        fields = PyDict_New();
        for (i=0; i<n; i++) {
                /* fields["f<i+1>"] = (descr, offset); tup owns conv,
                   nameslist owns key */
                tup = PyTuple_New(2);
                key = PyString_FromFormat("f%d", i+1);
                ret = PyArray_DescrConverter(PyList_GET_ITEM(obj, i), &conv);
                PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
                PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
                PyDict_SetItem(fields, key, tup);
                Py_DECREF(tup);
                PyList_SET_ITEM(nameslist, i, key);
                if (ret == PY_FAIL) goto fail;
                totalsize += conv->elsize;
                if (align) {
                        /* round the running offset up to this member's
                           alignment and track the largest alignment */
                        int _align;
                        _align = conv->alignment;
                        if (_align > 1) totalsize = \
                                ((totalsize + _align - 1)/_align)*_align;
                        maxalign = MAX(maxalign, _align);
                }
        }
        /* ordered name list is stashed under the integer key -1 */
        key = PyInt_FromLong(-1);
        PyDict_SetItem(fields, key, nameslist);
        Py_DECREF(key);
        Py_DECREF(nameslist);
        new = PyArray_DescrNewFromType(PyArray_VOID);
        new->fields = fields;
        if (maxalign > 1) {
                /* pad the struct size to its own alignment */
                totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign;
        }
        if (align) new->alignment = maxalign;
        new->elsize = totalsize;
        return new;

 fail:
        Py_DECREF(nameslist);
        Py_DECREF(fields);
        if (!try_descr) return NULL;
        if (align) {
                PyErr_SetString(PyExc_ValueError,
                                "failed to convert from list of formats "\
                                "and align cannot be 1 for conversion from "\
                                "array_descr structure");
                return NULL;
        }
        PyErr_Clear();
        return _convert_from_array_descr(obj);
}
+
+
+/* comma-separated string */
+/* this is the format developed by the numarray records module */
+/* and implemented by the format parser in that module */
+/* this is an alternative implementation found in the _internal.py
+ file patterned after that one -- the approach is to try to convert
+ to a list (with tuples if any repeat information is present)
+ and then call the _convert_from_list)
+*/
+
+static PyArray_Descr *
+_convert_from_commastring(PyObject *obj, int align)
+{
+ PyObject *listobj;
+ PyArray_Descr *res;
+
+ if (!PyString_Check(obj)) return NULL;
+ listobj = PyObject_CallMethod(_scipy_internal, "_commastring",
+ "O", obj);
+ if (!listobj) return NULL;
+ res = _convert_from_list(listobj, align, 0);
+ Py_DECREF(listobj);
+ if (!res && !PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ValueError, "invalid data-type");
+ return NULL;
+ }
+ return res;
+}
+
+
+
+/* a dictionary specifying a data-type
+ must have at least two and up to four
+ keys These must all be sequences of the same length.
+
+ "names" --- field names
+ "formats" --- the data-type descriptors for the field.
+
+ Optional:
+
+ "offsets" --- integers indicating the offset into the
+ record of the start of the field.
+ if not given, then "consecutive offsets"
+ will be assumed and placed in the dictionary.
+
+ "titles" --- Allows the use of an additional key
+ for the fields dictionary.
+
+Attribute-lookup-based field names merely has to query the fields
+dictionary of the data-descriptor. Any result present can be used
+to return the correct field.
+
+So, the notion of what is a name and what is a title is really quite
+arbitrary.
+
+What does distinguish a title, however, is that if it is not None,
+it will be placed at the end of the tuple inserted into the
+fields dictionary.
+
+If the dictionary does not have "names" and "formats" entries,
+then it will be checked for conformity and used directly.
+*/
+
+/* Hand a fields-style dictionary to the Python helper
+ _scipy_internal._usefields, which validates it and builds the
+ corresponding data-type descriptor. Returns a new reference or NULL. */
+static PyArray_Descr *
+_use_fields_dict(PyObject *obj, int align)
+{
+ PyObject *result;
+
+ result = PyObject_CallMethod(_scipy_internal, "_usefields",
+ "Oi", obj, align);
+ return (PyArray_Descr *)result;
+}
+
+/* Build a VOID descriptor from a dictionary with "names" and "formats"
+ entries (and optionally "offsets" and "titles" of the same length).
+ Falls back to _use_fields_dict when names/formats are absent.
+ Returns a new reference, or NULL on failure. */
+static PyArray_Descr *
+_convert_from_dict(PyObject *obj, int align)
+{
+ PyArray_Descr *new;
+ PyObject *fields=NULL;
+ PyObject *names, *offsets, *descrs, *titles, *key;
+ int n, i;
+ int totalsize;
+ int maxalign=0;
+
+ fields = PyDict_New();
+ if (fields == NULL) return (PyArray_Descr *)PyErr_NoMemory();
+
+ /* Borrowed references; PyDict_GetItemString returns NULL if absent. */
+ names = PyDict_GetItemString(obj, "names");
+ descrs = PyDict_GetItemString(obj, "formats");
+
+ if (!names || !descrs) {
+ Py_DECREF(fields);
+ return _use_fields_dict(obj, align);
+ }
+ n = PyObject_Length(names);
+ offsets = PyDict_GetItemString(obj, "offsets");
+ titles = PyDict_GetItemString(obj, "titles");
+ /* Every provided sequence must cover at least n entries. */
+ if ((n > PyObject_Length(descrs)) || \
+ (offsets && (n > PyObject_Length(offsets))) || \
+ (titles && (n > PyObject_Length(titles)))) {
+ PyErr_SetString(PyExc_ValueError,
+ "all items in the dictionary must have" \
+ " the same length.");
+ goto fail;
+ }
+
+ totalsize = 0;
+ for(i=0; i<n; i++) {
+ PyObject *tup, *descr, *index, *item, *name, *off;
+ int len, ret;
+ PyArray_Descr *newdescr;
+
+ /* Build item to insert (descr, offset, [title])*/
+ len = 2;
+ item = NULL;
+ index = PyInt_FromLong(i);
+ if (titles) {
+ /* A non-None title extends the tuple to length 3. */
+ item=PyObject_GetItem(titles, index);
+ if (item && item != Py_None) len = 3;
+ else Py_XDECREF(item);
+ PyErr_Clear();
+ }
+ /* NOTE(review): tup, descr and off are used without NULL
+ checks below — confirm allocation/lookup failure here
+ cannot crash before ret is examined. */
+ tup = PyTuple_New(len);
+ descr = PyObject_GetItem(descrs, index);
+ ret = PyArray_DescrConverter(descr, &newdescr);
+ Py_DECREF(descr);
+ /* Tuple steals the new reference to newdescr. */
+ PyTuple_SET_ITEM(tup, 0, (PyObject *)newdescr);
+ if (offsets) {
+ long offset;
+ off = PyObject_GetItem(offsets, index);
+ offset = PyInt_AsLong(off);
+ PyTuple_SET_ITEM(tup, 1, off);
+ /* Offsets must be non-decreasing relative to the
+ running size. */
+ if (offset < totalsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "invalid offset (must be "\
+ "ordered)");
+ ret = PY_FAIL;
+ }
+ if (offset > totalsize) totalsize = offset;
+ }
+ else
+ /* No explicit offsets: fields are consecutive. */
+ PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize));
+ if (len == 3) PyTuple_SET_ITEM(tup, 2, item);
+ name = PyObject_GetItem(names, index);
+ Py_DECREF(index);
+
+ /* Insert into dictionary */
+ PyDict_SetItem(fields, name, tup);
+ Py_DECREF(name);
+ /* A title is registered as an alias key for the same tuple. */
+ if (len == 3) PyDict_SetItem(fields, item, tup);
+ Py_DECREF(tup);
+ if ((ret == PY_FAIL) || (newdescr->elsize == 0)) goto fail;
+ totalsize += newdescr->elsize;
+ if (align) {
+ /* Round the running size up to the field's alignment. */
+ int _align = newdescr->alignment;
+ if (_align > 1) totalsize = \
+ ((totalsize + _align - 1)/_align)*_align;
+ maxalign = MAX(maxalign,_align);
+ }
+ }
+
+ new = PyArray_DescrNewFromType(PyArray_VOID);
+ if (new == NULL) goto fail;
+ /* Pad the record to a multiple of the largest member alignment. */
+ if (maxalign > 1)
+ totalsize = ((totalsize + maxalign - 1)/maxalign)*maxalign;
+ if (align) new->alignment = maxalign;
+ new->elsize = totalsize;
+ /* Key -1 stores the ordered names sequence alongside the fields. */
+ key = PyInt_FromLong(-1);
+ PyDict_SetItem(fields, key, names);
+ Py_DECREF(key);
+ new->fields = fields;
+ return new;
+
+ fail:
+ Py_XDECREF(fields);
+ return NULL;
+}
+
+/*
+ any object with
+ the .fields attribute and/or .itemsize attribute
+ (if the .fields attribute does not give
+ the total size -- i.e. a partial record naming).
+ If itemsize is given it must be >= size computed from fields
+
+ The .fields attribute must return a convertible dictionary if
+ present. Result inherits from PyArray_VOID.
+*/
+
+
+/*MULTIARRAY_API
+ Get typenum from an object -- None goes to NULL
+*/
+static int
+PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at)
+{
+ /* None maps to "no descriptor"; anything else goes through the
+ general converter. */
+ if (obj != Py_None)
+ return PyArray_DescrConverter(obj, at);
+ *at = NULL;
+ return PY_SUCCEED;
+}
+
+/* This function takes a Python object representing a type and converts it
+ to a the correct PyArray_Descr * structure to describe the type.
+
+ Many objects can be used to represent a data-type which in SciPy is
+ quite a flexible concept.
+
+ This is the central code that converts Python objects to
+ Type-descriptor objects that are used throughout scipy.
+ */
+
+/* new reference in *at */
+/*MULTIARRAY_API
+ Get typenum from an object -- None goes to &LONG_descr
+*/
+/* Central conversion from a Python object (None, descriptor, type object,
+ typecode string, tuple, list, dict, or anything _arraydescr_fromobj
+ understands) to a PyArray_Descr. On success stores a NEW reference in
+ *at and returns PY_SUCCEED; on failure stores NULL, sets an exception
+ (TypeError "data type not understood" unless a more specific one is
+ already set) and returns PY_FAIL. */
+static int
+PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
+{
+ char *type;
+ int check_num=PyArray_NOTYPE+10; /* sentinel: "not resolved yet" */
+ int len;
+ PyObject *item;
+ int elsize = 0;
+ char endian = '=';
+
+ *at=NULL;
+
+ /* default */
+ if (obj == Py_None) {
+ *at = PyArray_DescrFromType(PyArray_LONG);
+ return PY_SUCCEED;
+ }
+
+ /* Already a descriptor: just return a new reference to it. */
+ if (PyArray_DescrCheck(obj)) {
+ *at = (PyArray_Descr *)obj;
+ Py_INCREF(*at);
+ return PY_SUCCEED;
+ }
+
+ /* Python type objects: array scalar types, or the builtin types
+ mapped to their natural array types. */
+ if (PyType_Check(obj)) {
+ if (PyType_IsSubtype((PyTypeObject *)obj,
+ &PyGenericArrType_Type)) {
+ *at = PyArray_DescrFromTypeObject(obj);
+ if (*at) return PY_SUCCEED;
+ else return PY_FAIL;
+ }
+ check_num = PyArray_OBJECT;
+ if (obj == (PyObject *)(&PyInt_Type))
+ check_num = PyArray_LONG;
+ else if (obj == (PyObject *)(&PyLong_Type))
+ check_num = PyArray_LONGLONG;
+ else if (obj == (PyObject *)(&PyFloat_Type))
+ check_num = PyArray_DOUBLE;
+ else if (obj == (PyObject *)(&PyComplex_Type))
+ check_num = PyArray_CDOUBLE;
+ else if (obj == (PyObject *)(&PyBool_Type))
+ check_num = PyArray_BOOL;
+ else if (obj == (PyObject *)(&PyString_Type))
+ check_num = PyArray_STRING;
+ else if (obj == (PyObject *)(&PyUnicode_Type))
+ check_num = PyArray_UNICODE;
+ else if (obj == (PyObject *)(&PyBuffer_Type))
+ check_num = PyArray_VOID;
+ else {
+ *at = _arraydescr_fromobj(obj);
+ if (*at) return PY_SUCCEED;
+ }
+ goto finish;
+ }
+
+ /* or a typecode string */
+
+ if (PyString_Check(obj)) {
+ /* Check for a string typecode. */
+ type = PyString_AS_STRING(obj);
+ len = PyString_GET_SIZE(obj);
+ if (len <= 0) goto fail;
+ check_num = (int) type[0];
+ /* Optional leading byte-order character. */
+ if ((char) check_num == '>' || (char) check_num == '<' || \
+ (char) check_num == '|') {
+ if (len <= 1) goto fail;
+ endian = (char) check_num;
+ type++; len--;
+ check_num = (int) type[0];
+ if (endian == '|') endian = '=';
+ }
+ if (len > 1) {
+ elsize = atoi(type+1);
+ if (len > 2 && elsize < 10) {
+ /* perhaps commas present */
+ int i;
+ for (i=1;i<len && type[i]!=',';i++);
+ if (i < len) {
+ /* see if it can be converted from
+ a comma-separated string */
+ *at = _convert_from_commastring(obj,
+ 0);
+ if (*at) return PY_SUCCEED;
+ else return PY_FAIL;
+ }
+ }
+ if (elsize == 0) {
+ check_num = PyArray_NOTYPE+10;
+ }
+ /* When specifying length of UNICODE
+ the number of characters is given to match
+ the STRING interface. Each character can be
+ more than one byte and itemsize must be
+ the number of bytes.
+ */
+ else if (check_num == PyArray_UNICODELTR) {
+ elsize *= sizeof(Py_UNICODE);
+ }
+ /* Support for generic processing
+ c4, i4, f8, etc...
+ */
+ else if ((check_num != PyArray_STRINGLTR) &&
+ (check_num != PyArray_VOIDLTR) && \
+ (check_num != PyArray_STRINGLTR2)) {
+ check_num = \
+ PyArray_TypestrConvert(elsize,
+ check_num);
+ if (check_num == PyArray_NOTYPE)
+ check_num += 10;
+ elsize = 0;
+ }
+ }
+ }
+ /* or a tuple */
+ else if (PyTuple_Check(obj)) {
+ *at = _convert_from_tuple(obj);
+ if (*at == NULL){
+ if (PyErr_Occurred()) return PY_FAIL;
+ goto fail;
+ }
+ return PY_SUCCEED;
+ }
+ /* or a list */
+ else if (PyList_Check(obj)) {
+ *at = _convert_from_list(obj,0,1);
+ if (*at == NULL) {
+ if (PyErr_Occurred()) return PY_FAIL;
+ goto fail;
+ }
+ return PY_SUCCEED;
+ }
+ /* or a dictionary */
+ else if (PyDict_Check(obj)) {
+ *at = _convert_from_dict(obj,0);
+ if (*at == NULL) {
+ if (PyErr_Occurred()) return PY_FAIL;
+ goto fail;
+ }
+ return PY_SUCCEED;
+ }
+ else {
+ *at = _arraydescr_fromobj(obj);
+ if (*at) return PY_SUCCEED;
+ if (PyErr_Occurred()) return PY_FAIL;
+ goto fail;
+ }
+ if (PyErr_Occurred()) goto fail;
+
+ /*
+ if (check_num == PyArray_NOTYPE) return PY_FAIL;
+ */
+
+ finish:
+ /* Unresolved codes get one last chance via the user-registered
+ typeDict lookup. */
+ if ((check_num == PyArray_NOTYPE+10) || \
+ (*at = PyArray_DescrFromType(check_num))==NULL) {
+ /* Now check to see if the object is registered
+ in typeDict */
+ if (typeDict != NULL) {
+ item = PyDict_GetItem(typeDict, obj);
+ if (item) return PyArray_DescrConverter(item, at);
+ }
+ goto fail;
+ }
+
+ /* Apply an explicit itemsize to flexible (zero-size) descriptors;
+ copy-on-write via PyArray_DESCR_REPLACE. */
+ if (((*at)->elsize == 0) && (elsize != 0)) {
+ PyArray_DESCR_REPLACE(*at);
+ (*at)->elsize = elsize;
+ }
+ /* Normalize an explicit native-order marker, then apply any
+ non-native byte order to a (copied) descriptor. */
+ if (endian != '=' && PyArray_ISNBO(endian)) endian = '=';
+
+ if (endian != '=' && (*at)->byteorder != '|' && \
+ (*at)->byteorder != endian) {
+ PyArray_DESCR_REPLACE(*at);
+ (*at)->byteorder = endian;
+ }
+
+ return PY_SUCCEED;
+
+ fail:
+ PyErr_SetString(PyExc_TypeError,
+ "data type not understood");
+ *at=NULL;
+ return PY_FAIL;
+}
+
+/*MULTIARRAY_API
+ Convert object to endian
+*/
+/* Parse a byte-order specification string into one of the PyArray_*
+ byte-order characters. Accepts the literal characters themselves as
+ well as the spelled-out initials b/l/n/i/s (case-insensitive).
+ Returns PY_SUCCEED/PY_FAIL; on failure *endian is left as whatever
+ was last assigned and an exception is set. */
+static int
+PyArray_ByteorderConverter(PyObject *obj, char *endian)
+{
+ char *str;
+ *endian = PyArray_SWAP;
+ str = PyString_AsString(obj);
+ if (!str) return PY_FAIL;
+ if (strlen(str) < 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Byteorder string must be at least length 1");
+ return PY_FAIL;
+ }
+ /* Only the first character is significant. */
+ *endian = str[0];
+ if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE && \
+ str[0] != PyArray_NATIVE) {
+ if (str[0] == 'b' || str[0] == 'B')
+ *endian = PyArray_BIG;
+ else if (str[0] == 'l' || str[0] == 'L')
+ *endian = PyArray_LITTLE;
+ else if (str[0] == 'n' || str[0] == 'N')
+ *endian = PyArray_NATIVE;
+ else if (str[0] == 'i' || str[0] == 'I')
+ *endian = PyArray_IGNORE;
+ else if (str[0] == 's' || str[0] == 'S')
+ *endian = PyArray_SWAP;
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "%s is an unrecognized byteorder",
+ str);
+ return PY_FAIL;
+ }
+ }
+ return PY_SUCCEED;
+}
+
+/*MULTIARRAY_API
+ Convert object to sort kind
+*/
+/* Parse a sort-kind string: only the first character is significant
+ (q=quicksort, h=heapsort, m=mergesort, t=timsort, either case).
+ Returns PY_SUCCEED/PY_FAIL; an exception is set on failure. */
+static int
+PyArray_SortkindConverter(PyObject *obj, PyArray_SORTKIND *sortkind)
+{
+ char *kindstr;
+
+ *sortkind = PyArray_QUICKSORT;
+ kindstr = PyString_AsString(obj);
+ if (kindstr == NULL) return PY_FAIL;
+ if (strlen(kindstr) < 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Sort kind string must be at least length 1");
+ return PY_FAIL;
+ }
+ switch (kindstr[0]) {
+ case 'q': case 'Q':
+ *sortkind = PyArray_QUICKSORT;
+ break;
+ case 'h': case 'H':
+ *sortkind = PyArray_HEAPSORT;
+ break;
+ case 'm': case 'M':
+ *sortkind = PyArray_MERGESORT;
+ break;
+ case 't': case 'T':
+ *sortkind = PyArray_TIMSORT;
+ break;
+ default:
+ PyErr_Format(PyExc_ValueError,
+ "%s is an unrecognized kind of sort",
+ kindstr);
+ return PY_FAIL;
+ }
+ return PY_SUCCEED;
+}
+
+
+/* This function returns true if the two typecodes are
+ equivalent (same basic kind and same itemsize).
+*/
+
+/*MULTIARRAY_API*/
+static Bool
+PyArray_EquivTypes(PyArray_Descr *typ1, PyArray_Descr *typ2)
+{
+ register int typenum1=typ1->type_num;
+ register int typenum2=typ2->type_num;
+ register int size1=typ1->elsize;
+ register int size2=typ2->elsize;
+
+ if (size1 != size2) return FALSE;
+ /* NOTE: fields dictionaries are compared by pointer identity,
+ not by contents. */
+ if (typ1->fields != typ2->fields) return FALSE;
+ /* Both native or both non-native byte order is required. */
+ if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder))
+ return FALSE;
+
+ /* VOID (record) types must additionally agree on the exact type
+ number and scalar type object. */
+ if (typenum1 == PyArray_VOID || \
+ typenum2 == PyArray_VOID) {
+ return ((typenum1 == typenum2) &&
+ (typ1->typeobj == typ2->typeobj) &&
+ (typ1->fields == typ2->fields));
+ }
+ return (typ1->kind == typ2->kind);
+}
+
+/*** END C-API FUNCTIONS **/
+
+
+#define _ARET(x) PyArray_Return((PyArrayObject *)(x))
+
+/* User-visible docstring for array(); fixed the garbled wording
+ ("formed from the given object type given", "Object can anything"). */
+static char doc_fromobject[] = "array(object, dtype=None, copy=1, fortran=0, "\
+ "subok=0)\n"\
+ "will return a new array formed from the given object.\n"\
+ "Object can be anything with an __array__ method, or any object\n"\
+ "exposing the array interface, or any (nested) sequence.\n"\
+ "If no type is given, then the type will be determined as the\n"\
+ "minimum type required to hold the objects in the sequence.\n"\
+ "If copy is zero and sequence is already an array with the right \n"\
+ "type, a reference will be returned. If the sequence is an array,\n"\
+ "type can be used only to upcast the array. For downcasting \n"\
+ "use .astype(t) method. If subok is true, then subclasses of the\n"\
+ "array may be returned. Otherwise, a base-class ndarray is returned";
+
+/* Implementation of the Python-level array() constructor.
+ Parses (object, dtype, copy, fortran, subok), takes fast paths for
+ exact ndarray/bigarray inputs, and otherwise defers to
+ PyArray_FromAny with the appropriate requirement flags. */
+static PyObject *
+_array_fromobject(PyObject *ignored, PyObject *args, PyObject *kws)
+{
+ PyObject *op, *ret=NULL;
+ static char *kwd[]= {"object", "dtype", "copy", "fortran", "subok",
+ NULL};
+ Bool subok=FALSE;
+ Bool copy=TRUE;
+ PyArray_Descr *type=NULL;
+ PyArray_Descr *oldtype=NULL;
+ Bool fortran=FALSE;
+ int flags=0;
+
+ if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&", kwd, &op,
+ PyArray_DescrConverter2,
+ &type,
+ PyArray_BoolConverter, &copy,
+ PyArray_BoolConverter, &fortran,
+ PyArray_BoolConverter, &subok))
+ return NULL;
+
+ /* fast exit if simple call */
+ if ((PyArray_CheckExact(op) || PyBigArray_CheckExact(op))) {
+ if (type==NULL) {
+ /* No conversion requested: share or plain-copy. */
+ if (!copy && fortran==PyArray_ISFORTRAN(op)) {
+ Py_INCREF(op);
+ return op;
+ }
+ else {
+ return PyArray_NewCopy((PyArrayObject*)op,
+ fortran);
+ }
+ }
+ /* One more chance */
+ /* Requested type is equivalent to the existing one: copy
+ (or share) and graft the old descriptor onto the result. */
+ oldtype = PyArray_DESCR(op);
+ if (PyArray_EquivTypes(oldtype, type)) {
+ /* NOTE(review): these returns do not release the
+ reference DescrConverter2 stored in type —
+ confirm whether this leaks. */
+ if (!copy && fortran==PyArray_ISFORTRAN(op)) {
+ Py_INCREF(op);
+ return op;
+ }
+ else {
+ ret = PyArray_NewCopy((PyArrayObject*)op,
+ fortran);
+ if (oldtype == type) return ret;
+ Py_INCREF(oldtype);
+ Py_DECREF(PyArray_DESCR(ret));
+ PyArray_DESCR(ret) = oldtype;
+ return ret;
+ }
+ }
+ }
+
+ /* Slow path: translate keyword options into FromAny flags. */
+ if (copy) {
+ flags = ENSURECOPY;
+ }
+ if (fortran) {
+ flags |= FORTRAN;
+ }
+ if (!subok) {
+ flags |= ENSUREARRAY;
+ }
+
+ if ((ret = PyArray_FromAny(op, type, 0, 0, flags)) == NULL)
+ return NULL;
+
+ return ret;
+}
+
+/* accepts NULL type */
+/* steals referenct to type */
+/*MULTIARRAY_API
+ Empty
+*/
+/* Allocate an uninitialized array of the given shape and type
+ (default PyArray_LONG when type is NULL). OBJECT arrays are filled
+ with None so they never hold garbage pointers. */
+static PyObject *
+PyArray_Empty(int nd, intp *dims, PyArray_Descr *type, int fortran)
+{
+ PyArrayObject *ret;
+
+ if (!type) type = PyArray_DescrFromType(PyArray_LONG);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
+ type, nd, dims,
+ NULL, NULL,
+ fortran, NULL);
+ if (ret == NULL) return NULL;
+
+ if ((PyArray_TYPE(ret) == PyArray_OBJECT)) {
+ PyArray_FillObjectArray(ret, Py_None);
+ }
+ return (PyObject *)ret;
+}
+
+
+static char doc_empty[] = "empty((d1,...,dn),dtype=int,fortran=0) will return a new array\n of shape (d1,...,dn) and given type with all its entries uninitialized. This can be faster than zeros.";
+
+/* Python-level empty(): parse (shape, dtype, fortran) and delegate to
+ PyArray_Empty, which steals the descriptor reference. */
+static PyObject *
+array_empty(PyObject *ignored, PyObject *args, PyObject *kwds)
+{
+
+ static char *kwlist[] = {"shape","dtype","fortran",NULL};
+ PyArray_Descr *typecode=NULL;
+ PyArray_Dims shape = {NULL, 0};
+ Bool fortran = FALSE;
+ PyObject *ret=NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&",
+ kwlist, PyArray_IntpConverter,
+ &shape,
+ PyArray_DescrConverter,
+ &typecode,
+ PyArray_BoolConverter, &fortran))
+ goto fail;
+
+ ret = PyArray_Empty(shape.len, shape.ptr, typecode, fortran);
+ PyDimMem_FREE(shape.ptr);
+ return ret;
+
+ fail:
+ /* shape.ptr may have been allocated before the failing argument. */
+ PyDimMem_FREE(shape.ptr);
+ return ret;
+}
+
+static char doc_scalar[] = "scalar(dtypedescr,obj) will return a new scalar array of the given type initialized with obj. Mainly for pickle support. The dtypedescr must be a valid data-type descriptor. If dtypedescr corresponds to an OBJECT descriptor, then obj can be any object, otherwise obj must be a string. If obj is not given it will be interpreted as None for object type and zeros for all other types.";
+
+/* Python-level scalar(): build a zero-dimensional scalar from a
+ descriptor and an optional initializer (see doc_scalar above). */
+static PyObject *
+array_scalar(PyObject *ignored, PyObject *args, PyObject *kwds)
+{
+
+ static char *kwlist[] = {"dtypedescr","obj", NULL};
+ PyArray_Descr *typecode;
+ PyObject *obj=NULL;
+ int alloc=0; /* set when dptr is heap memory we must free */
+ void *dptr;
+ PyObject *ret;
+
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|O",
+ kwlist, &PyArrayDescr_Type,
+ &typecode,
+ &obj))
+ return NULL;
+
+ if (typecode->elsize == 0) {
+ PyErr_SetString(PyExc_ValueError, \
+ "itemsize cannot be zero");
+ return NULL;
+ }
+
+ if (typecode->type_num == PyArray_OBJECT) {
+ /* For OBJECT scalars the "data" is the address of the
+ PyObject pointer itself. */
+ if (obj == NULL) obj = Py_None;
+ dptr = &obj;
+ }
+ else {
+ if (obj == NULL) {
+ /* No initializer: use a zero-filled buffer. */
+ dptr = _pya_malloc(typecode->elsize);
+ if (dptr == NULL) {
+ return PyErr_NoMemory();
+ }
+ memset(dptr, '\0', typecode->elsize);
+ alloc = 1;
+ }
+ else {
+ /* Non-object scalars must be initialized from a
+ string at least elsize bytes long. */
+ if (!PyString_Check(obj)) {
+ PyErr_SetString(PyExc_TypeError,
+ "initializing object must "\
+ "be a string");
+ return NULL;
+ }
+ if (PyString_GET_SIZE(obj) < typecode->elsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "initialization string is too"\
+ " small");
+ return NULL;
+ }
+ dptr = PyString_AS_STRING(obj);
+ }
+ }
+
+ ret = PyArray_Scalar(dptr, typecode, NULL);
+
+ /* free dptr which contains zeros */
+ if (alloc) _pya_free(dptr);
+ return ret;
+}
+
+
+/* steal a reference */
+/* accepts NULL type */
+/*MULTIARRAY_API
+ Zeros
+*/
+/* Allocate an array of the given shape and type (default PyArray_LONG)
+ with all entries zeroed. OBJECT arrays are filled with int 0 objects;
+ all other types are memset to zero bytes. */
+static PyObject *
+PyArray_Zeros(int nd, intp *dims, PyArray_Descr *type, int fortran)
+{
+ PyArrayObject *ret;
+ intp n;
+
+ if (!type) type = PyArray_DescrFromType(PyArray_LONG);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
+ type,
+ nd, dims,
+ NULL, NULL,
+ fortran, NULL);
+ if (ret == NULL) return NULL;
+
+ if ((PyArray_TYPE(ret) == PyArray_OBJECT)) {
+ PyObject *zero = PyInt_FromLong(0);
+ PyArray_FillObjectArray(ret, zero);
+ Py_DECREF(zero);
+ }
+ else {
+ n = PyArray_NBYTES(ret);
+ memset(ret->data, 0, n);
+ }
+ return (PyObject *)ret;
+
+}
+
+static char doc_zeros[] = "zeros((d1,...,dn),dtype=int,fortran=0) will return a new array of shape (d1,...,dn) and type typecode with all it's entries initialized to zero.";
+
+
+/* Python-level zeros(): parse (shape, dtype, fortran) and delegate to
+ PyArray_Zeros, which steals the descriptor reference. */
+static PyObject *
+array_zeros(PyObject *ignored, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"shape","dtype","fortran",NULL};
+ PyArray_Descr *typecode=NULL;
+ PyArray_Dims shape = {NULL, 0};
+ Bool fortran = FALSE;
+ PyObject *ret=NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&",
+ kwlist, PyArray_IntpConverter,
+ &shape,
+ PyArray_DescrConverter,
+ &typecode,
+ PyArray_BoolConverter,
+ &fortran))
+ goto fail;
+
+ ret = PyArray_Zeros(shape.len, shape.ptr, typecode, (int) fortran);
+ PyDimMem_FREE(shape.ptr);
+ return ret;
+
+ fail:
+ /* shape.ptr may have been allocated before the failing argument. */
+ PyDimMem_FREE(shape.ptr);
+ return ret;
+}
+
+static char doc_set_typeDict[] = "set_typeDict(dict) set the internal "\
+ "dictionary that can look up an array type using a registered "\
+ "code";
+
+/* Replace the module-global typeDict used by PyArray_DescrConverter for
+ user-registered type lookups. Returns None. */
+static PyObject *
+array_set_typeDict(PyObject *ignored, PyObject *args)
+{
+ PyObject *dict;
+ if (!PyArg_ParseTuple(args, "O", &dict)) return NULL;
+ Py_XDECREF(typeDict); /* Decrement old reference (if any)*/
+ typeDict = dict;
+ Py_INCREF(dict); /* Create an internal reference to it */
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+/* steals a reference to dtype -- accepts NULL */
+/*OBJECT_API*/
+/* Build a 1-d array of n items by copying raw bytes from data
+ (slen bytes total). When n < 0 the count is derived from slen, which
+ must then be a multiple of the itemsize. Object dtypes are rejected.
+ Returns a new array or NULL with an exception set. */
+static PyObject *
+PyArray_FromString(char *data, intp slen, PyArray_Descr *dtype, intp n)
+{
+ int itemsize;
+ PyArrayObject *ret;
+
+ if (dtype == NULL)
+ dtype=PyArray_DescrFromType(PyArray_LONG);
+
+ if (dtype == &OBJECT_Descr) {
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot create an object array from a"\
+ " string.");
+ Py_DECREF(dtype);
+ return NULL;
+ }
+
+ itemsize = dtype->elsize;
+ if (itemsize == 0) {
+ PyErr_SetString(PyExc_ValueError, "zero-valued itemsize");
+ Py_DECREF(dtype);
+ return NULL;
+ }
+
+ if (n < 0 ) {
+ /* Infer the count from the buffer length. */
+ if (slen % itemsize != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "string size must be a multiple"\
+ " of element size");
+ Py_DECREF(dtype);
+ return NULL;
+ }
+ n = slen/itemsize;
+ } else {
+ /* Explicit count: buffer must be large enough. */
+ if (slen < n*itemsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "string is smaller than requested"\
+ " size");
+ Py_DECREF(dtype);
+ return NULL;
+ }
+ }
+
+ if ((ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
+ dtype,
+ 1, &n,
+ NULL, NULL,
+ 0, NULL)) == NULL)
+ return NULL;
+
+ memcpy(ret->data, data, n*dtype->elsize);
+ return (PyObject *)ret;
+}
+
+static char doc_fromString[] = "fromstring(string, dtype=int, count=-1) returns a new 1d array initialized from the raw binary data in string. If count is positive, the new array will have count elements, otherwise it's size is determined by the size of string.";
+
+/* Python-level fromstring(): parse (string, dtype, count) and delegate
+ to PyArray_FromString, which steals the descriptor reference. */
+static PyObject *
+array_fromString(PyObject *ignored, PyObject *args, PyObject *keywds)
+{
+ char *data;
+ longlong nin=-1;
+ int s;
+ static char *kwlist[] = {"string", "dtype", "count", NULL};
+ PyArray_Descr *descr=NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, keywds, "s#|O&L", kwlist,
+ &data, &s,
+ PyArray_DescrConverter, &descr,
+ &nin)) {
+ return NULL;
+ }
+
+ return PyArray_FromString(data, (intp)s, descr, (intp)nin);
+}
+
+/* This needs an open file object and reads it in directly.
+ memory-mapped files handled differently through buffer interface.
+
+file pointer number in resulting 1d array
+(can easily reshape later, -1 for to end of file)
+type of array
+sep is a separator string for character-based data (or NULL for binary)
+ " " means whitespace
+*/
+
+/*OBJECT_API*/
+static PyObject *
+PyArray_FromFile(FILE *fp, PyArray_Descr *typecode, intp num, char *sep)
+{
+ PyArrayObject *r;
+ size_t nread = 0;
+ PyArray_ScanFunc *scan;
+ Bool binary;
+
+ if (typecode->elsize == 0) {
+ PyErr_SetString(PyExc_ValueError, "0-sized elements.");
+ return NULL;
+ }
+
+ /* Empty/NULL separator means raw binary reading. */
+ binary = ((sep == NULL) || (strlen(sep) == 0));
+ if (num == -1 && binary) { /* Get size for binary file*/
+ intp start, numbytes;
+ start = (intp )ftell(fp);
+ fseek(fp, 0, SEEK_END);
+ numbytes = (intp )ftell(fp) - start;
+ /* NOTE(review): rewind() repositions to the very start of
+ the file, not back to the saved start offset — confirm
+ this is intended when fp is not at the beginning. */
+ rewind(fp);
+ if (numbytes == -1) {
+ PyErr_SetString(PyExc_IOError,
+ "could not seek in file");
+ return NULL;
+ }
+ num = numbytes / typecode->elsize;
+ }
+
+ if (binary) { /* binary data */
+ r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
+ typecode,
+ 1, &num,
+ NULL, NULL,
+ 0, NULL);
+ if (r==NULL) return NULL;
+ nread = fread(r->data, typecode->elsize, num, fp);
+ }
+ else { /* character reading */
+ intp i;
+ char *dptr;
+ int done=0;
+
+ scan = typecode->f->scanfunc;
+ if (scan == NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "don't know how to read " \
+ "character files with that " \
+ "array type");
+ return NULL;
+ }
+
+ if (num != -1) { /* number to read is known */
+ r = (PyArrayObject *)\
+ PyArray_NewFromDescr(&PyArray_Type,
+ typecode,
+ 1, &num,
+ NULL, NULL,
+ 0, NULL);
+ if (r==NULL) return NULL;
+ dptr = r->data;
+ for (i=0; i < num; i++) {
+ if (done) break;
+ done = scan(fp, dptr, sep, NULL);
+ if (done < -2) break;
+ nread += 1;
+ dptr += r->descr->elsize;
+ }
+ if (PyErr_Occurred()) {
+ Py_DECREF(r);
+ return NULL;
+ }
+ }
+ else { /* we have to watch for the end of the file and
+ reallocate at the end */
+#define _FILEBUFNUM 4096
+ intp thisbuf=0;
+ intp size = _FILEBUFNUM;
+ intp bytes;
+ intp totalbytes;
+
+ r = (PyArrayObject *)\
+ PyArray_NewFromDescr(&PyArray_Type,
+ typecode,
+ 1, &size,
+ NULL, NULL,
+ 0, NULL);
+ if (r==NULL) return NULL;
+ totalbytes = bytes = size * typecode->elsize;
+ dptr = r->data;
+ while (!done) {
+ done = scan(fp, dptr, sep, NULL);
+
+ /* end of file reached trying to
+ scan value. done is 1 or 2
+ if end of file reached trying to
+ scan separator. Still good value.
+ */
+ if (done < -2) break;
+ thisbuf += 1;
+ nread += 1;
+ dptr += r->descr->elsize;
+ /* Buffer full: grow by another chunk and keep
+ scanning. */
+ if (!done && thisbuf == size) {
+ totalbytes += bytes;
+ r->data = PyDataMem_RENEW(r->data,
+ totalbytes);
+ dptr = r->data + (totalbytes - bytes);
+ thisbuf = 0;
+ }
+ }
+ if (PyErr_Occurred()) {
+ Py_DECREF(r);
+ return NULL;
+ }
+ /* Shrink to the number of items actually read. */
+ r->data = PyDataMem_RENEW(r->data, nread*r->descr->elsize);
+ PyArray_DIM(r,0) = nread;
+ num = nread;
+#undef _FILEBUFNUM
+ }
+ }
+ /* Short read: warn on stderr and trim the array. */
+ if (nread < num) {
+ fprintf(stderr, "%ld items requested but only %ld read\n",
+ (long) num, (long) nread);
+ r->data = PyDataMem_RENEW(r->data, nread * r->descr->elsize);
+ PyArray_DIM(r,0) = nread;
+ }
+ return (PyObject *)r;
+}
+
+static char doc_fromfile[] = \
+ "fromfile(file=, dtype=int, count=-1, sep='')\n" \
+ "\n"\
+ " Return an array of the given data type from a \n"\
+ " (text or binary) file. The file argument can be an open file\n"\
+ " or a string with the name of a file to read from. If\n"\
+ " count==-1, then the entire file is read, otherwise count is\n"\
+ " the number of items of the given type read in. If sep is ''\n"\
+ " then read a binary file, otherwise it gives the separator\n"\
+ " between elements in a text file.\n"\
+ "\n"\
+ " WARNING: This function should be used sparingly, as it is not\n"\
+ " a robust method of persistence. But it can be useful to\n"\
+ " read in simply-formatted or binary data quickly.";
+
+/* Python-level fromfile(): accepts an open file or a filename string,
+ picks binary ("rb") or text ("r") mode from sep, and delegates to
+ PyArray_FromFile. */
+static PyObject *
+array_fromfile(PyObject *ignored, PyObject *args, PyObject *keywds)
+{
+ PyObject *file=NULL, *ret;
+ FILE *fp;
+ char *sep="";
+ char *mode=NULL;
+ longlong nin=-1;
+ static char *kwlist[] = {"file", "dtype", "count", "sep", NULL};
+ PyArray_Descr *type=NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&Ls", kwlist,
+ &file,
+ PyArray_DescrConverter, &type,
+ &nin, &sep)) {
+ return NULL;
+ }
+
+ if (type == NULL) type = PyArray_DescrFromType(PyArray_LONG);
+
+ if (PyString_Check(file)) {
+ /* BUG FIX: the original compared (sep == "") — a pointer
+ comparison against a string literal, which is never true —
+ so filename-opened files were always opened in text mode.
+ An empty separator means binary mode. */
+ if (strlen(sep) == 0) mode="rb";
+ else mode="r";
+ file = PyFile_FromString(PyString_AS_STRING(file), mode);
+ if (file==NULL) return NULL;
+ }
+ else {
+ Py_INCREF(file);
+ }
+ fp = PyFile_AsFile(file);
+ if (fp == NULL) {
+ PyErr_SetString(PyExc_IOError,
+ "first argument must be an open file");
+ Py_DECREF(file);
+ return NULL;
+ }
+ ret = PyArray_FromFile(fp, type, (intp) nin, sep);
+ Py_DECREF(file);
+ return ret;
+}
+
+/*OBJECT_API*/
+/* Build a 1-d array that shares memory with a buffer-interface object.
+ Steals the reference to type. Rejects OBJECT and zero-itemsize types.
+ When count < 0 it is inferred from the buffer size (which must then be
+ a multiple of the itemsize). The result keeps a reference to the
+ buffer in ->base and is read-only if the buffer is. */
+static PyObject *
+PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
+ intp count, intp offset)
+{
+ PyArrayObject *ret;
+ char *data;
+ int ts;
+ intp s, n;
+ int itemsize;
+ int write=1;
+
+
+ if (type->type_num == PyArray_OBJECT) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot create an OBJECT array from memory"\
+ " buffer");
+ Py_DECREF(type);
+ return NULL;
+ }
+ if (type->elsize == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "itemsize cannot be zero in type");
+ Py_DECREF(type);
+ return NULL;
+ }
+
+ /* Objects without a direct buffer interface may still expose one
+ through a __buffer__ attribute. */
+ if (buf->ob_type->tp_as_buffer == NULL || \
+ (buf->ob_type->tp_as_buffer->bf_getwritebuffer == NULL && \
+ buf->ob_type->tp_as_buffer->bf_getreadbuffer == NULL)) {
+ PyObject *newbuf;
+ newbuf = PyObject_GetAttrString(buf, "__buffer__");
+ if (newbuf == NULL) {Py_DECREF(type); return NULL;}
+ buf = newbuf;
+ }
+ else {Py_INCREF(buf);}
+
+ /* Prefer a writable view; fall back to read-only. */
+ if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts)==-1) {
+ write = 0;
+ PyErr_Clear();
+ if (PyObject_AsReadBuffer(buf, (void *)&data, &ts)==-1) {
+ Py_DECREF(buf);
+ Py_DECREF(type);
+ return NULL;
+ }
+ }
+
+ if ((offset < 0) || (offset >= ts)) {
+ PyErr_Format(PyExc_ValueError,
+ "offset must be positive and smaller than %"
+ INTP_FMT, (intp)ts);
+ /* BUG FIX: the original set the exception but fell
+ through and continued with an out-of-range offset;
+ clean up and fail here. */
+ Py_DECREF(buf);
+ Py_DECREF(type);
+ return NULL;
+ }
+
+ data += offset;
+ s = (intp)ts - offset;
+ n = (intp)count;
+ itemsize = type->elsize;
+
+ if (n < 0 ) {
+ if (s % itemsize != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "buffer size must be a multiple"\
+ " of element size");
+ Py_DECREF(buf);
+ Py_DECREF(type);
+ return NULL;
+ }
+ n = s/itemsize;
+ } else {
+ if (s < n*itemsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "buffer is smaller than requested"\
+ " size");
+ Py_DECREF(buf);
+ Py_DECREF(type);
+ return NULL;
+ }
+ }
+
+ if ((ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
+ type,
+ 1, &n,
+ NULL, data,
+ DEFAULT_FLAGS,
+ NULL)) == NULL) {
+ Py_DECREF(buf);
+ return NULL;
+ }
+
+ if (!write) ret->flags &= ~WRITEABLE;
+
+ /* Store a reference for decref on deallocation */
+ ret->base = buf;
+ PyArray_UpdateFlags(ret, ALIGNED);
+ return (PyObject *)ret;
+}
+
+static char doc_frombuffer[] = \
+ "frombuffer(buffer=, dtype=int, count=-1, offset=0)\n"\
+ "\n" \
+ " Returns a 1-d array of data type dtype from buffer. The buffer\n"\
+ " argument must be an object that exposes the buffer interface.\n"\
+ " If count is -1 then the entire buffer is used, otherwise, count\n"\
+ " is the size of the output. If offset is given then jump that\n"\
+ " far into the buffer. If the buffer has data that is not\n" \
+ " in machine byte-order, then use a proper data type\n"\
+ " descriptor. The data will not\n" \
+ " be byteswapped, but the array will manage it in future\n"\
+ " operations.\n";
+
+/* Python-level frombuffer(): parse arguments and delegate to
+ PyArray_FromBuffer, which steals the descriptor reference. */
+static PyObject *
+array_frombuffer(PyObject *ignored, PyObject *args, PyObject *keywds)
+{
+ PyObject *obj=NULL;
+ longlong nin=-1, offset=0;
+ /* BUG FIX: the format "O|O&LL" parses four arguments but the
+ original kwlist only listed three — "offset" was missing,
+ which is undefined behavior in PyArg_ParseTupleAndKeywords
+ and made offset= unusable as a keyword. */
+ static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL};
+ PyArray_Descr *type=NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&LL", kwlist,
+ &obj,
+ PyArray_DescrConverter, &type,
+ &nin, &offset)) {
+ return NULL;
+ }
+ if (type==NULL)
+ type = PyArray_DescrFromType(PyArray_LONG);
+
+ return PyArray_FromBuffer(obj, type, (intp)nin, (intp)offset);
+}
+
+
+static char doc_concatenate[] = "concatenate((a1,a2,...),axis=None).";
+
+/* Python-level concatenate(): parse (seq, axis) and delegate to
+ PyArray_Concatenate. The axis keyword goes through
+ PyArray_AxisConverter. */
+static PyObject *
+array_concatenate(PyObject *dummy, PyObject *args, PyObject *kwds)
+{
+ PyObject *a0;
+ int axis=0;
+ static char *kwlist[] = {"seq", "axis", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist,
+ &a0,
+ PyArray_AxisConverter, &axis))
+ return NULL;
+ return PyArray_Concatenate(a0, axis);
+}
+
+static char doc_innerproduct[] = \
+ "inner(a,b) returns the dot product of two arrays, which has\n"\
+ "shape a.shape[:-1] + b.shape[:-1] with elements computed by\n" \
+ "the product of the elements from the last dimensions of a and b.";
+
+/* Python-level inner(): thin wrapper around PyArray_InnerProduct;
+ _ARET converts 0-d results to scalars. */
+static PyObject *array_innerproduct(PyObject *dummy, PyObject *args) {
+ PyObject *b0, *a0;
+
+ if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) return NULL;
+
+ return _ARET(PyArray_InnerProduct(a0, b0));
+}
+
+static char doc_matrixproduct[] = \
+ "dot(a,v) returns matrix-multiplication between a and b. \n"\
+ "The product-sum is over the last dimension of a and the \n"\
+ "second-to-last dimension of b.";
+
+/* Python-level dot(): thin wrapper around PyArray_MatrixProduct;
+ _ARET converts 0-d results to scalars. */
+static PyObject *array_matrixproduct(PyObject *dummy, PyObject *args) {
+ PyObject *v, *a;
+
+ if (!PyArg_ParseTuple(args, "OO", &a, &v)) return NULL;
+
+ return _ARET(PyArray_MatrixProduct(a, v));
+}
+
+static char doc_fastCopyAndTranspose[] = "_fastCopyAndTranspose(a)";
+
+/* Python-level _fastCopyAndTranspose(): copy-and-transpose the single
+ array argument; _ARET converts 0-d results to scalars. */
+static PyObject *array_fastCopyAndTranspose(PyObject *dummy, PyObject *args)
+{
+ PyObject *arr = NULL;
+
+ if (!PyArg_ParseTuple(args, "O", &arr)) return NULL;
+ return _ARET(PyArray_CopyAndTranspose(arr));
+}
+
+static char doc_correlate[] = "cross_correlate(a,v, mode=0)";
+
+/* Python-level cross_correlate(): delegate to PyArray_Correlate.
+ (The local named "shape" actually holds the second array v.) */
+static PyObject *array_correlate(PyObject *dummy, PyObject *args, PyObject *kwds) {
+ PyObject *shape, *a0;
+ int mode=0;
+ static char *kwlist[] = {"a", "v", "mode", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist,
+ &a0, &shape, &mode)) return NULL;
+
+ return PyArray_Correlate(a0, shape, mode);
+}
+
+
+/*MULTIARRAY_API
+ Arange,
+*/
+/* Build a 1-d array of type_num covering [start, stop) in increments of
+ step. The first two elements are set individually via setitem; the
+ rest are produced by the type's fill function. Returns a new array or
+ NULL with an exception set. */
+static PyObject *
+PyArray_Arange(double start, double stop, double step, int type_num)
+{
+ intp length;
+ PyObject *range;
+ PyArray_ArrFuncs *funcs;
+ PyObject *obj;
+ int ret;
+
+ length = (intp ) ceil((stop - start)/step);
+
+ if (length <= 0) {
+ length = 0;
+ return PyArray_New(&PyArray_Type, 1, &length, type_num,
+ NULL, NULL, 0, 0, NULL);
+ }
+
+ range = PyArray_New(&PyArray_Type, 1, &length, type_num,
+ NULL, NULL, 0, 0, NULL);
+ if (range == NULL) return NULL;
+
+ funcs = PyArray_DESCR(range)->f;
+
+ /* place start in the buffer and the next value in the second position */
+ /* if length > 2, then call the inner loop, otherwise stop */
+
+ obj = PyFloat_FromDouble(start);
+ ret = funcs->setitem(obj, PyArray_DATA(range), (PyArrayObject *)range);
+ Py_DECREF(obj);
+ if (ret < 0) goto fail;
+ if (length == 1) return range;
+
+ obj = PyFloat_FromDouble(start + step);
+ ret = funcs->setitem(obj, PyArray_DATA(range)+PyArray_ITEMSIZE(range),
+ (PyArrayObject *)range);
+ Py_DECREF(obj);
+ if (ret < 0) goto fail;
+ if (length == 2) return range;
+
+ /* The fill function extrapolates the remaining elements from the
+ first two. */
+ if (!funcs->fill) {
+ PyErr_SetString(PyExc_ValueError, "no fill-function for data-type.");
+ Py_DECREF(range);
+ return NULL;
+ }
+ funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range);
+ if (PyErr_Occurred()) goto fail;
+
+ return range;
+
+ fail:
+ Py_DECREF(range);
+ return NULL;
+}
+
+/* the formula is
+ len = (intp) ceil((stop - start) / step);
+ (comment fixed: the code computes stop - start, not start - stop)
+*/
+/* Compute the number of elements for an arange over Python objects.
+ On success returns len and, when len > 0, stores start + step in
+ *next (a new reference, for use as the second array element).
+ Returns -1 on error. For complex inputs the length is the minimum
+ over the real and imaginary parts. */
+static intp
+_calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx)
+{
+ intp len;
+ PyObject *val;
+ double value;
+
+ *next = PyNumber_Subtract(stop, start);
+ if (!(*next)) return -1;
+ val = PyNumber_TrueDivide(*next, step);
+ Py_DECREF(*next); *next=NULL;
+ if (!val) return -1;
+ if (cmplx && PyComplex_Check(val)) {
+ value = PyComplex_RealAsDouble(val);
+ if (error_converting(value)) {Py_DECREF(val); return -1;}
+ len = (intp) ceil(value);
+ value = PyComplex_ImagAsDouble(val);
+ Py_DECREF(val);
+ if (error_converting(value)) return -1;
+ len = MIN(len, (intp) ceil(value));
+ }
+ else {
+ value = PyFloat_AsDouble(val);
+ Py_DECREF(val);
+ if (error_converting(value)) return -1;
+ len = (intp) ceil(value);
+ }
+
+ if (len > 0) {
+ *next = PyNumber_Add(start, step);
+ /* BUG FIX: the original tested (!next) — the address of the
+ output parameter, which is never NULL — so a failed
+ PyNumber_Add went unreported. Test the result itself. */
+ if (!(*next)) return -1;
+ }
+ return len;
+}
+
+/* this doesn't change the references */
+/*MULTIARRAY_API
+ ArangeObj,
+*/
+/* Object-based arange: start/stop/step may be arbitrary Python
+   numbers.  If dtype is NULL it is inferred from the arguments
+   (starting from PyArray_LONG and widening).  If stop is missing,
+   the single argument is the stop value and start becomes 0.
+   Returns a new 1-d array, or NULL with an exception set. */
+static PyObject *
+PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr *dtype)
+{
+    PyObject *range = NULL;
+    PyArray_ArrFuncs *funcs;
+    PyObject *next = NULL;
+    intp length;
+
+    if (!dtype) {
+        /* infer the result type by successively promoting with each
+           provided argument */
+        PyArray_Descr *deftype;
+        PyArray_Descr *newtype;
+        deftype = PyArray_DescrFromType(PyArray_LONG);
+        newtype = PyArray_DescrFromObject(start, deftype);
+        Py_DECREF(deftype);
+        deftype = newtype;
+        if (stop && stop != Py_None) {
+            newtype = PyArray_DescrFromObject(stop, deftype);
+            Py_DECREF(deftype);
+            deftype = newtype;
+        }
+        if (step && step != Py_None) {
+            newtype = PyArray_DescrFromObject(step, deftype);
+            Py_DECREF(deftype);
+            deftype = newtype;
+        }
+        dtype = deftype;
+    }
+    else Py_INCREF(dtype);
+
+    /* after this point we own a reference to both start and step */
+    if (!step || step == Py_None) {
+        step = PyInt_FromLong(1);
+    }
+    else Py_XINCREF(step);
+
+    if (!stop || stop == Py_None) {
+        /* arange(stop) form */
+        stop = start;
+        start = PyInt_FromLong(0);
+    }
+    else Py_INCREF(start);
+
+    /* calculate the length and next = start + step */
+    length = _calc_length(start, stop, step, &next,
+                          PyTypeNum_ISCOMPLEX(dtype->type_num));
+
+    if (PyErr_Occurred()) {Py_DECREF(dtype); goto fail;}
+    if (length <= 0) {
+        length = 0;
+        range = PyArray_SimpleNewFromDescr(1, &length, dtype);
+        Py_DECREF(step); Py_DECREF(start); return range;
+    }
+
+    /* reference to dtype is stolen by PyArray_SimpleNewFromDescr */
+    range = PyArray_SimpleNewFromDescr(1, &length, dtype);
+    if (range == NULL) goto fail;
+
+    funcs = PyArray_DESCR(range)->f;
+
+    /* place start in the buffer and the next value in the second position */
+    /* if length > 2, then call the inner loop, otherwise stop */
+
+    if (funcs->setitem(start, PyArray_DATA(range), (PyArrayObject *)range) < 0)
+        goto fail;
+    if (length == 1) goto finish;
+    if (funcs->setitem(next, PyArray_DATA(range)+PyArray_ITEMSIZE(range),
+                       (PyArrayObject *)range) < 0) goto fail;
+    if (length == 2) goto finish;
+
+    if (!funcs->fill) {
+        PyErr_SetString(PyExc_ValueError, "no fill-function for data-type.");
+        goto fail;
+    }
+    funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range);
+    if (PyErr_Occurred()) goto fail;
+
+ finish:
+    Py_DECREF(start);
+    Py_DECREF(step);
+    Py_DECREF(next);
+    return range;
+
+ fail:
+    Py_DECREF(start);
+    Py_DECREF(step);
+    Py_XDECREF(next);
+    /* bug fix: the partially-built result array was previously leaked
+       when setitem or fill failed */
+    Py_XDECREF(range);
+    return NULL;
+}
+
+
+static char doc_arange[] = "arange(start, stop=None, step=1, dtype=int)\n\n Just like range() except it returns an array whose type can be\n specified by the keyword argument typecode.";
+
+/* Parse (start[, stop[, step[, dtype]]]) and delegate to
+   PyArray_ArangeObj, which performs the argument shuffling and any
+   type inference. */
+static PyObject *
+array_arange(PyObject *ignored, PyObject *args, PyObject *kws)
+{
+    static char *kwd[] = {"start", "stop", "step", "dtype", NULL};
+    PyArray_Descr *typecode = NULL;
+    PyObject *start = NULL;
+    PyObject *stop = NULL;
+    PyObject *step = NULL;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&", kwd,
+                                     &start, &stop, &step,
+                                     PyArray_DescrConverter, &typecode)) {
+        return NULL;
+    }
+    return PyArray_ArangeObj(start, stop, step, typecode);
+}
+
+
+static char
+doc_set_string_function[] = "set_string_function(f, repr=1) sets the python function f to be the function used to obtain a pretty printable string version of a array whenever a array is printed. f(M) should expect a array argument M, and should return a string consisting of the desired representation of M for printing.";
+
+/* Install a Python callable as the array str() or repr() formatter
+   (repr=1 selects repr, repr=0 selects str).  Returns None. */
+static PyObject *
+array_set_string_function(PyObject *dummy, PyObject *args, PyObject *kwds)
+{
+    static char *kwlist[] = {"f", "repr", NULL};
+    PyObject *func;
+    int use_repr = 1;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|i", kwlist,
+                                     &func, &use_repr))
+        return NULL;
+    if (!PyCallable_Check(func)) {
+        PyErr_SetString(PyExc_TypeError,
+                        "Argument must be callable.");
+        return NULL;
+    }
+    PyArray_SetStringFunction(func, use_repr);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+static char
+doc_set_ops_function[] = "set_numeric_ops(op=func, ...) sets some or all of the number methods for all array objects. Don't forget **dict can be used as the argument list. Returns the functions that were replaced -- can be stored and set later.";
+
+/* Replace some or all of the numeric operations used by arrays with
+   the callables supplied as keyword arguments.  The previous table is
+   returned as a dictionary so the caller can restore it later. */
+static PyObject *
+array_set_ops_function(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    PyObject *previous;
+
+    previous = PyArray_GetNumericOps();
+    if (previous == NULL)
+        return NULL;
+
+    /* Should probably ensure that objects are at least callable */
+    /* Leave this to the caller for now --- error will be raised
+       later when use is attempted
+    */
+    if (kwds != NULL && PyArray_SetNumericOps(kwds) == -1) {
+        Py_DECREF(previous);
+        PyErr_SetString(PyExc_ValueError,
+                        "one or more objects not callable");
+        return NULL;
+    }
+    return previous;
+}
+
+
+/*MULTIARRAY_API
+ Where
+*/
+/* where(condition, x, y): with both x and y given, build a 0/1 mask
+   from (condition != 0) and choose elementwise from (y, x); with
+   neither, behave as nonzero(condition).  Exactly one of x/y is an
+   error.  Returns a new reference or NULL with an exception set. */
+static PyObject *
+PyArray_Where(PyObject *condition, PyObject *x, PyObject *y)
+{
+ PyArrayObject *arr;
+ PyObject *tup=NULL, *obj=NULL;
+ PyObject *ret=NULL, *zero=NULL;
+
+
+ arr = (PyArrayObject *)PyArray_FromAny(condition, NULL, 0, 0, 0);
+ if (arr == NULL) return NULL;
+
+ if ((x==NULL) && (y==NULL)) {
+ ret = PyArray_Nonzero(arr);
+ Py_DECREF(arr);
+ return ret;
+ }
+
+ if ((x==NULL) || (y==NULL)) {
+ Py_DECREF(arr);
+ PyErr_SetString(PyExc_ValueError, "either both or neither "
+ "of x and y should be given");
+ return NULL;
+ }
+
+
+ /* obj becomes the integer mask (condition != 0) */
+ zero = PyInt_FromLong((long) 0);
+
+ obj = PyArray_EnsureArray(PyArray_GenericBinaryFunction(arr, zero,
+ n_ops.not_equal));
+ Py_DECREF(zero);
+ Py_DECREF(arr);
+ if (obj == NULL) return NULL;
+
+ /* choices are ordered (y, x): mask 0 selects y, mask 1 selects x */
+ tup = Py_BuildValue("(OO)", y, x);
+ if (tup == NULL) {Py_DECREF(obj); return NULL;}
+
+ ret = PyArray_Choose((PyAO *)obj, tup);
+
+ Py_DECREF(obj);
+ Py_DECREF(tup);
+ return ret;
+}
+
+static char doc_where[] = "where(condition, | x, y) is shaped like condition"\
+ " and has elements of x and y where condition is respectively true or"\
+ " false. If x or y are not given, then it is equivalent to"\
+ " nonzero(condition).";
+
+/* Python-level where(): parse (condition[, x[, y]]) and dispatch to
+   PyArray_Where. */
+static PyObject *
+array_where(PyObject *ignored, PyObject *args)
+{
+    PyObject *condition = NULL;
+    PyObject *x = NULL;
+    PyObject *y = NULL;
+
+    if (!PyArg_ParseTuple(args, "O|OO", &condition, &x, &y))
+        return NULL;
+    return PyArray_Where(condition, x, y);
+}
+
+/* Doc fix: the second sort pass uses key[1], not key[0] (the original
+   text repeated key[0], contradicting itself). */
+static char doc_lexsort[] = "lexsort(keys=, axis=-1) returns an array of indexes"\
+ " similar to argsort except the sorting is done using the provided sorting"\
+ " keys. First the sort is done using key[0], then the resulting list of"\
+ " indexes is further manipulated by sorting on key[1]. And so forth"\
+ " The result is a sort on multiple keys. If the keys represented columns" \
+ " of a spread-sheet, for example, this would sort using multiple columns."\
+ " The keys argument must be a tuple of things that can be converted to "\
+ " arrays of the same shape.";
+
+/* Python-level wrapper: parses (keys, axis=-1), requiring keys to be
+   a tuple, and defers to PyArray_LexSort. */
+static PyObject *
+array_lexsort(PyObject *ignored, PyObject *args, PyObject *kwds)
+{
+    int axis = -1;
+    PyObject *keys;
+    static char *kwlist[] = {"keys", "axis", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|i", kwlist,
+                                     &PyTuple_Type, &keys, &axis)) return NULL;
+
+    return _ARET(PyArray_LexSort(keys, axis));
+}
+
+#undef _ARET
+
+
+static char doc_register_dtype[] = \
+ "register_dtype(a) registers a new type object -- gives it a typenum";
+
+/* Register a user-defined type object with the array machinery and
+   return the type number it was assigned. */
+static PyObject *
+array_register_dtype(PyObject *dummy, PyObject *args)
+{
+    PyObject *typeobj;
+    int typenum;
+
+    if (!PyArg_ParseTuple(args, "O", &typeobj))
+        return NULL;
+
+    typenum = PyArray_RegisterDataType((PyTypeObject *)typeobj);
+    if (typenum < 0)
+        return NULL;
+    return PyInt_FromLong((long) typenum);
+}
+
+static char doc_can_cast_safely[] = \
+ "can_cast_safely(from=d1, to=d2) returns True if data type d1 "\
+ "can be cast to data type d2 without losing precision.";
+
+/* Python-level wrapper around PyArray_CanCastTo; returns Py_True or
+   Py_False.  Bug fix: the two descriptor references produced by
+   PyArray_DescrConverter were previously leaked on every call. */
+static PyObject *
+array_can_cast_safely(PyObject *dummy, PyObject *args, PyObject *kwds)
+{
+    PyArray_Descr *d1=NULL;
+    PyArray_Descr *d2=NULL;
+    Bool ret;
+    PyObject *retobj;
+    static char *kwlist[] = {"from", "to", NULL};
+
+    if(!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&", kwlist,
+                                    PyArray_DescrConverter, &d1,
+                                    PyArray_DescrConverter, &d2))
+        return NULL;
+    if (d1 == NULL || d2 == NULL) {
+        PyErr_SetString(PyExc_TypeError,
+                        "did not understand one of the types; " \
+                        "'None' not accepted");
+        Py_XDECREF(d1);
+        Py_XDECREF(d2);
+        return NULL;
+    }
+
+    ret = PyArray_CanCastTo(d1, d2);
+    Py_DECREF(d1);
+    Py_DECREF(d2);
+    retobj = (ret ? Py_True : Py_False);
+    Py_INCREF(retobj);
+    return retobj;
+}
+
+static char doc_new_buffer[] = \
+ "newbuffer(size) return a new uninitialized buffer object of size "
+ "bytes";
+
+/* Allocate a fresh, uninitialized Python buffer object of the given
+   number of bytes. */
+static PyObject *
+new_buffer(PyObject *dummy, PyObject *args)
+{
+    int nbytes;
+
+    if (!PyArg_ParseTuple(args, "i", &nbytes))
+        return NULL;
+    return PyBuffer_New(nbytes);
+}
+
+static char doc_buffer_buffer[] = \
+ "getbuffer(obj [,offset[, size]]) create a buffer object from the "\
+ "given object\n referencing a slice of length size starting at "\
+ "offset. Default\n is the entire buffer. A read-write buffer is "\
+ "attempted followed by a read-only buffer.";
+
+/* Wrap a slice of an existing object's buffer: probe for write access
+   first and fall back to a read-only view when that fails. */
+static PyObject *
+buffer_buffer(PyObject *dummy, PyObject *args, PyObject *kwds)
+{
+    static char *kwlist[] = {"object", "offset", "size", NULL};
+    PyObject *base;
+    void *scratch;
+    int offset = 0;
+    int size = Py_END_OF_BUFFER;
+    int buflen;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|ii", kwlist,
+                                     &base, &offset, &size))
+        return NULL;
+
+    if (PyObject_AsWriteBuffer(base, &scratch, &buflen) < 0) {
+        /* not writable: clear the probe error, return read-only view */
+        PyErr_Clear();
+        return PyBuffer_FromObject(base, offset, size);
+    }
+    return PyBuffer_FromReadWriteObject(base, offset, size);
+}
+
+
+/* Method table for the multiarray extension module. */
+static struct PyMethodDef array_module_methods[] = {
+ /* configuration hooks */
+ {"set_string_function", (PyCFunction)array_set_string_function,
+ METH_VARARGS|METH_KEYWORDS, doc_set_string_function},
+ {"set_numeric_ops", (PyCFunction)array_set_ops_function,
+ METH_VARARGS|METH_KEYWORDS, doc_set_ops_function},
+ {"set_typeDict", (PyCFunction)array_set_typeDict,
+ METH_VARARGS, doc_set_typeDict},
+
+ /* array construction */
+ {"array", (PyCFunction)_array_fromobject,
+ METH_VARARGS|METH_KEYWORDS, doc_fromobject},
+ {"arange", (PyCFunction)array_arange,
+ METH_VARARGS|METH_KEYWORDS, doc_arange},
+ {"zeros", (PyCFunction)array_zeros,
+ METH_VARARGS|METH_KEYWORDS, doc_zeros},
+ {"empty", (PyCFunction)array_empty,
+ METH_VARARGS|METH_KEYWORDS, doc_empty},
+ {"scalar", (PyCFunction)array_scalar,
+ METH_VARARGS|METH_KEYWORDS, doc_scalar},
+ /* selection and sorting */
+ {"where", (PyCFunction)array_where,
+ METH_VARARGS, doc_where},
+ {"lexsort", (PyCFunction)array_lexsort,
+ METH_VARARGS | METH_KEYWORDS, doc_lexsort},
+ /* construction from raw data */
+ {"fromstring",(PyCFunction)array_fromString,
+ METH_VARARGS|METH_KEYWORDS, doc_fromString},
+ {"concatenate", (PyCFunction)array_concatenate,
+ METH_VARARGS|METH_KEYWORDS, doc_concatenate},
+ /* linear algebra / correlation */
+ {"inner", (PyCFunction)array_innerproduct,
+ METH_VARARGS, doc_innerproduct},
+ {"dot", (PyCFunction)array_matrixproduct,
+ METH_VARARGS, doc_matrixproduct},
+ {"_fastCopyAndTranspose", (PyCFunction)array_fastCopyAndTranspose,
+ METH_VARARGS, doc_fastCopyAndTranspose},
+ {"correlate", (PyCFunction)array_correlate,
+ METH_VARARGS | METH_KEYWORDS, doc_correlate},
+ {"frombuffer", (PyCFunction)array_frombuffer,
+ METH_VARARGS | METH_KEYWORDS, doc_frombuffer},
+ {"fromfile", (PyCFunction)array_fromfile,
+ METH_VARARGS | METH_KEYWORDS, doc_fromfile},
+ /* type-system helpers */
+ {"register_dtype", (PyCFunction)array_register_dtype,
+ METH_VARARGS, doc_register_dtype},
+ {"can_cast", (PyCFunction)array_can_cast_safely,
+ METH_VARARGS | METH_KEYWORDS, doc_can_cast_safely},
+ /* raw buffer helpers */
+ {"newbuffer", (PyCFunction)new_buffer,
+ METH_VARARGS, doc_new_buffer},
+ {"getbuffer", (PyCFunction)buffer_buffer,
+ METH_VARARGS | METH_KEYWORDS, doc_buffer_buffer},
+ {NULL, NULL, 0} /* sentinel */
+};
+
+#include "__multiarray_api.c"
+
+/* Establish scalar-type hierarchy */
+
+/* For dual inheritance we need to make sure that the objects being
+ inherited from have the tp->mro object initialized. This is
+ not necessarily true for the basic type objects of Python (it is
+ checked for single inheritance but not dual in PyType_Ready).
+
+ Thus, we call PyType_Ready on the standard Python Types, here.
+*/
+static int
+setup_scalartypes(PyObject *dict)
+{
+
+ initialize_numeric_types();
+
+ if (PyType_Ready(&PyBool_Type) < 0) return -1;
+ if (PyType_Ready(&PyInt_Type) < 0) return -1;
+ if (PyType_Ready(&PyFloat_Type) < 0) return -1;
+ if (PyType_Ready(&PyComplex_Type) < 0) return -1;
+ if (PyType_Ready(&PyString_Type) < 0) return -1;
+ if (PyType_Ready(&PyUnicode_Type) < 0) return -1;
+
+/* Link a child scalar type to its single parent and ready it. */
+#define SINGLE_INHERIT(child, parent) \
+ Py##child##ArrType_Type.tp_base = &Py##parent##ArrType_Type; \
+ if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \
+ PyErr_Print(); \
+ PyErr_Format(PyExc_SystemError, \
+ "could not initialize Py%sArrType_Type", \
+ #child); \
+ return -1; \
+ }
+
+ if (PyType_Ready(&PyGenericArrType_Type) < 0)
+ return -1;
+
+ /* abstract hierarchy first */
+ SINGLE_INHERIT(Numeric, Generic);
+ SINGLE_INHERIT(Integer, Numeric);
+ SINGLE_INHERIT(Inexact, Numeric);
+ SINGLE_INHERIT(SignedInteger, Integer);
+ SINGLE_INHERIT(UnsignedInteger, Integer);
+ SINGLE_INHERIT(Floating, Inexact);
+ SINGLE_INHERIT(ComplexFloating, Inexact);
+ SINGLE_INHERIT(Flexible, Generic);
+ SINGLE_INHERIT(Character, Flexible);
+
+/* Dual inheritance with the array-scalar parent primary (tp_base) and
+   the Python builtin secondary; the builtin's hash is reused. */
+#define DUAL_INHERIT(child, parent1, parent2) \
+ Py##child##ArrType_Type.tp_base = &Py##parent2##ArrType_Type; \
+ Py##child##ArrType_Type.tp_bases = \
+ Py_BuildValue("(OO)", &Py##parent2##ArrType_Type, \
+ &Py##parent1##_Type); \
+ if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \
+ PyErr_Print(); \
+ PyErr_Format(PyExc_SystemError, \
+ "could not initialize Py%sArrType_Type", \
+ #child); \
+ return -1; \
+ }\
+ Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash;
+
+/* As above but with the Python builtin primary; also reuses the
+   builtin's comparison slots (used for the string/unicode scalars). */
+#define DUAL_INHERIT2(child, parent1, parent2) \
+ Py##child##ArrType_Type.tp_base = &Py##parent1##_Type; \
+ Py##child##ArrType_Type.tp_bases = \
+ Py_BuildValue("(OO)", &Py##parent1##_Type, \
+ &Py##parent2##ArrType_Type); \
+ Py##child##ArrType_Type.tp_richcompare = \
+ Py##parent1##_Type.tp_richcompare; \
+ Py##child##ArrType_Type.tp_compare = \
+ Py##parent1##_Type.tp_compare; \
+ Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; \
+ if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \
+ PyErr_Print(); \
+ PyErr_Format(PyExc_SystemError, \
+ "could not initialize Py%sArrType_Type", \
+ #child); \
+ return -1; \
+ }
+
+ /* concrete scalar types; which integers dual-inherit from Python
+    int depends on the platform sizes */
+ SINGLE_INHERIT(Bool, Generic);
+ SINGLE_INHERIT(Byte, SignedInteger);
+ SINGLE_INHERIT(Short, SignedInteger);
+#if SIZEOF_INT == SIZEOF_LONG
+ DUAL_INHERIT(Int, Int, SignedInteger);
+#else
+ SINGLE_INHERIT(Int, SignedInteger);
+#endif
+ DUAL_INHERIT(Long, Int, SignedInteger);
+#if SIZEOF_LONGLONG == SIZEOF_LONG
+ DUAL_INHERIT(LongLong, Int, SignedInteger);
+#else
+ SINGLE_INHERIT(LongLong, SignedInteger);
+#endif
+
+ /* fprintf(stderr, "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, PySignedIntegerArrType_Type.tp_free);
+ */
+ SINGLE_INHERIT(UByte, UnsignedInteger);
+ SINGLE_INHERIT(UShort, UnsignedInteger);
+ SINGLE_INHERIT(UInt, UnsignedInteger);
+ SINGLE_INHERIT(ULong, UnsignedInteger);
+ SINGLE_INHERIT(ULongLong, UnsignedInteger);
+
+ SINGLE_INHERIT(Float, Floating);
+ DUAL_INHERIT(Double, Float, Floating);
+ SINGLE_INHERIT(LongDouble, Floating);
+
+ SINGLE_INHERIT(CFloat, ComplexFloating);
+ DUAL_INHERIT(CDouble, Complex, ComplexFloating);
+ SINGLE_INHERIT(CLongDouble, ComplexFloating);
+
+ DUAL_INHERIT2(String, String, Character);
+ DUAL_INHERIT2(Unicode, Unicode, Character);
+
+ SINGLE_INHERIT(Void, Flexible);
+
+ SINGLE_INHERIT(Object, Generic);
+
+ return 0;
+
+ /* NOTE(review): DUAL_INHERIT2 is never #undef'd -- harmless unless
+    the name is redefined later in the file; confirm. */
+#undef SINGLE_INHERIT
+#undef DUAL_INHERIT
+
+ /* Clean up string and unicode array types so they act more like
+ strings -- get their tables from the standard types.
+ */
+}
+
+/* place a flag dictionary in d */
+
+/* Build the _flagdict mapping flag names (OWNDATA, FORTRAN, ...) to
+   their integer bit values and store it in the module dict d. */
+static void
+set_flaginfo(PyObject *d)
+{
+    PyObject *s;
+    PyObject *newd;
+
+    newd = PyDict_New();
+    /* bug fix: a NULL dict from an allocation failure was previously
+       passed straight to PyDict_SetItemString */
+    if (newd == NULL) return;
+
+    PyDict_SetItemString(newd, "OWNDATA", s=PyInt_FromLong(OWNDATA));
+    Py_DECREF(s);
+    PyDict_SetItemString(newd, "FORTRAN", s=PyInt_FromLong(FORTRAN));
+    Py_DECREF(s);
+    PyDict_SetItemString(newd, "CONTIGUOUS", s=PyInt_FromLong(CONTIGUOUS));
+    Py_DECREF(s);
+    PyDict_SetItemString(newd, "ALIGNED", s=PyInt_FromLong(ALIGNED));
+    Py_DECREF(s);
+
+    PyDict_SetItemString(newd, "UPDATEIFCOPY", s=PyInt_FromLong(UPDATEIFCOPY));
+    Py_DECREF(s);
+    PyDict_SetItemString(newd, "WRITEABLE", s=PyInt_FromLong(WRITEABLE));
+    Py_DECREF(s);
+
+    PyDict_SetItemString(d, "_flagdict", newd);
+    Py_DECREF(newd);
+    return;
+}
+
+
+/* Initialization function for the module */
+
+/* Creates the multiarray module: readies all type objects, exports
+   the C API capsule, version string, type objects, flag info and the
+   type-info tables.  Any failure prints the pending exception and
+   aborts via Py_FatalError. */
+DL_EXPORT(void) initmultiarray(void) {
+ PyObject *m, *d, *s;
+ PyObject *c_api;
+
+ /* Create the module and add the functions */
+ m = Py_InitModule("multiarray", array_module_methods);
+ if (!m) goto err;
+
+ /* Add some symbolic constants to the module */
+ d = PyModule_GetDict(m);
+ if (!d) goto err;
+
+ /* Create the module and add the functions */
+ if (PyType_Ready(&PyBigArray_Type) < 0)
+ return;
+
+ /* ndarray derives from bigndarray */
+ PyArray_Type.tp_base = &PyBigArray_Type;
+
+ PyArray_Type.tp_as_mapping = &array_as_mapping;
+ /* Even though, this would be inherited, it needs to be set now
+ so that the __getitem__ will map to the as_mapping descriptor
+ */
+ PyArray_Type.tp_as_number = &array_as_number;
+ /* For good measure */
+ PyArray_Type.tp_as_sequence = &array_as_sequence;
+ PyArray_Type.tp_as_buffer = &array_as_buffer;
+ PyArray_Type.tp_flags = (Py_TPFLAGS_DEFAULT
+ | Py_TPFLAGS_BASETYPE
+ | Py_TPFLAGS_CHECKTYPES);
+ PyArray_Type.tp_doc = Arraytype__doc__;
+
+ if (PyType_Ready(&PyArray_Type) < 0)
+ return;
+
+ if (setup_scalartypes(d) < 0) goto err;
+
+ /* iterator types */
+ PyArrayIter_Type.tp_iter = PyObject_SelfIter;
+ PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter;
+ if (PyType_Ready(&PyArrayIter_Type) < 0)
+ return;
+
+ if (PyType_Ready(&PyArrayMapIter_Type) < 0)
+ return;
+
+ if (PyType_Ready(&PyArrayMultiIter_Type) < 0)
+ return;
+
+ if (PyType_Ready(&PyArrayDescr_Type) < 0)
+ return;
+
+ /* export the C-API function table for other extension modules */
+ c_api = PyCObject_FromVoidPtr((void *)PyArray_API, NULL);
+ if (PyErr_Occurred()) goto err;
+ PyDict_SetItemString(d, "_ARRAY_API", c_api);
+ Py_DECREF(c_api);
+ if (PyErr_Occurred()) goto err;
+
+ MultiArrayError = PyString_FromString ("multiarray.error");
+ PyDict_SetItemString (d, "error", MultiArrayError);
+
+ s = PyString_FromString("3.0");
+ PyDict_SetItemString(d, "__version__", s);
+ Py_DECREF(s);
+ /* expose the type objects by name */
+ Py_INCREF(&PyBigArray_Type);
+ PyDict_SetItemString(d, "bigndarray", (PyObject *)&PyBigArray_Type);
+ Py_INCREF(&PyArray_Type);
+ PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type);
+ Py_INCREF(&PyArrayIter_Type);
+ PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type);
+ Py_INCREF(&PyArrayMultiIter_Type);
+ PyDict_SetItemString(d, "broadcast",
+ (PyObject *)&PyArrayMultiIter_Type);
+ Py_INCREF(&PyArrayDescr_Type);
+ PyDict_SetItemString(d, "dtypedescr", (PyObject *)&PyArrayDescr_Type);
+
+ /* Doesn't need to be exposed to Python
+ Py_INCREF(&PyArrayMapIter_Type);
+ PyDict_SetItemString(d, "mapiter", (PyObject *)&PyArrayMapIter_Type);
+ */
+ set_flaginfo(d);
+
+ if (set_typeinfo(d) != 0) goto err;
+
+ /* success exit: everything after this point is error handling */
+ _scipy_internal = \
+ PyImport_ImportModule("scipy.base._internal");
+ if (_scipy_internal != NULL) return;
+
+ err:
+ /* Check for errors */
+ if (PyErr_Occurred())
+ PyErr_Print();
+ Py_FatalError("can't initialize module multiarray");
+
+ return;
+}
+
diff --git a/numpy/base/src/scalarmathmodule.c.src b/numpy/base/src/scalarmathmodule.c.src
new file mode 100644
index 000000000..dc2c3c198
--- /dev/null
+++ b/numpy/base/src/scalarmathmodule.c.src
@@ -0,0 +1,103 @@
+/* The purpose of this module is to add faster math for array scalars
+ that does not go through the ufunc machinery
+
+ NOT FINISHED
+ */
+
+#include "scipy/arrayobject.h"
+#include "scipy/ufuncobject.h"
+
+
+/* PyNumberMethods table for each scalar-math type, laid out in the
+   Python 2 slot order (includes nb_divide/nb_coerce/nb_oct/nb_hex).
+   In-place slots are left 0 since scalars are immutable. */
+/**begin repeat
+name=bool,
+
+**/
+static PyNumberMethods @name@_as_number = {
+ (binaryfunc)@name@_add, /*nb_add*/
+ (binaryfunc)@name@_subtract, /*nb_subtract*/
+ (binaryfunc)@name@_multiply, /*nb_multiply*/
+ (binaryfunc)@name@_divide, /*nb_divide*/
+ (binaryfunc)@name@_remainder, /*nb_remainder*/
+ (binaryfunc)@name@_divmod, /*nb_divmod*/
+ (ternaryfunc)@name@_power, /*nb_power*/
+ (unaryfunc)@name@_negative, /*nb_negative*/
+ (unaryfunc)@name@_copy, /*nb_pos*/
+ (unaryfunc)@name@_absolute, /*nb_abs*/
+ (inquiry)@name@_nonzero_number, /*nb_nonzero*/
+ (unaryfunc)@name@_invert, /*nb_invert*/
+ (binaryfunc)@name@_lshift, /*nb_lshift*/
+ (binaryfunc)@name@_rshift, /*nb_rshift*/
+ (binaryfunc)@name@_and, /*nb_and*/
+ (binaryfunc)@name@_xor, /*nb_xor*/
+ (binaryfunc)@name@_or, /*nb_or*/
+ 0, /*nb_coerce*/
+ (unaryfunc)@name@_int, /*nb_int*/
+ (unaryfunc)@name@_long, /*nb_long*/
+ (unaryfunc)@name@_float, /*nb_float*/
+ (unaryfunc)@name@_oct, /*nb_oct*/
+ (unaryfunc)@name@_hex, /*nb_hex*/
+ 0, /*inplace_add*/
+ 0, /*inplace_subtract*/
+ 0, /*inplace_multiply*/
+ 0, /*inplace_divide*/
+ 0, /*inplace_remainder*/
+ 0, /*inplace_power*/
+ 0, /*inplace_lshift*/
+ 0, /*inplace_rshift*/
+ 0, /*inplace_and*/
+ 0, /*inplace_xor*/
+ 0, /*inplace_or*/
+ (binaryfunc)@name@_floor_divide, /*nb_floor_divide*/
+ (binaryfunc)@name@_true_divide, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+
+};
+
+/**end repeat**/
+
+
+/**begin repeat
+
+**/
+
+/* Placeholder rich-comparison slot.  NOTE(review): the repeat block
+   above has no name list yet (the module is marked NOT FINISHED).
+   Bug fix: the body was empty, so the function fell off the end of a
+   PyObject*-returning function (undefined behaviour); returning
+   Py_NotImplemented defers to the other operand instead. */
+static PyObject*
+@name@_richcompare(PyObject *self, PyObject *other, int cmp_op)
+{
+    Py_INCREF(Py_NotImplemented);
+    return Py_NotImplemented;
+}
+/**end repeat**/
+
+
+
+/* Install the fast scalar-math number methods and comparisons on the
+   array-scalar type objects. */
+static void
+add_scalarmath(void)
+{
+/**begin repeat
+name=bool,
+NAME=Bool
+**/
+    /* bug fix: the scalar type objects are spelled Py@NAME@ArrType_Type
+       (not PyArr@NAME@Type_Type), and tp_as_number is a pointer slot so
+       the address of the PyNumberMethods table must be taken */
+    Py@NAME@ArrType_Type.tp_as_number = &@name@_as_number;
+    Py@NAME@ArrType_Type.tp_richcompare = @name@_richcompare;
+/**end repeat**/
+}
+
+
+
+/* Module method table for the scalarmath extension module. */
+static struct PyMethodDef methods[] = {
+ {"alter_pyscalars", (PyCFunction) alter_pyscalars,
+ METH_VARARGS , doc_alterpyscalars},
+ {NULL, NULL, 0} /* sentinel */
+};
+
+/* Module initialization: create the scalarmath module, import the
+   multiarray and umath C APIs, then install the fast scalar math. */
+DL_EXPORT(void) initscalarmath(void) {
+    PyObject *m;
+
+    /* bug fix: the API is spelled Py_InitModule (capital I); also
+       guard against module-creation failure */
+    m = Py_InitModule("scalarmath", methods);
+    if (m == NULL) return;
+
+    if (import_array() < 0) return;
+    if (import_umath() < 0) return;
+
+    add_scalarmath();
+
+    return;
+}
diff --git a/numpy/base/src/scalartypes.inc.src b/numpy/base/src/scalartypes.inc.src
new file mode 100644
index 000000000..629adbcf0
--- /dev/null
+++ b/numpy/base/src/scalartypes.inc.src
@@ -0,0 +1,2165 @@
+/* -*- c -*- */
+
+/* Per-type byte offset of the C value inside each scalar object;
+   _SOFFSET_ yields a pointer to that raw value.  NOTE(review): the
+   offsets are presumably filled in elsewhere at init time -- not
+   visible in this chunk. */
+static int PyArrayScalar_Offset[PyArray_NTYPES+1];
+
+#define _SOFFSET_(obj, type_num) ((char *)(obj) + PyArrayScalar_Offset[(type_num)])
+
+/* One C struct per scalar kind: a plain object header followed by the
+   raw C value.  NOTE(review): the #type# list appears to carry one
+   more entry (trailing ",char") than the #name# list -- confirm
+   against the template expander. */
+/**begin repeat
+#name=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, Object,#
+#type=Bool, signed char, short, int, long, longlong, unsigned char, unsigned short, unsigned int, unsigned long, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, PyObject *,char#
+*/
+typedef struct {
+ PyObject_HEAD;
+ @type@ obval;
+} Py@name@ScalarObject;
+/**end repeat**/
+
+/* Inheritance established later when tp_bases is set (or tp_base for
+ single inheritance) */
+
+/**begin repeat
+
+#name=numeric, integer, signedinteger, unsignedinteger, inexact, floating, complexfloating, flexible,
+character#
+#NAME=Numeric, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, ComplexFloating, Flexible, Character#
+*/
+
+/* Minimal abstract scalar type object; only the name and basic size
+   are filled in here -- remaining slots are completed at module
+   initialization (see setup_scalartypes). */
+static PyTypeObject Py@NAME@ArrType_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "@name@_arrtype", /*tp_name*/
+ sizeof(PyObject), /*tp_basicsize*/
+};
+/**end repeat**/
+
+
+/* String and unicode scalars reuse the builtin object layouts. */
+#define PyStringScalarObject PyStringObject
+#define PyUnicodeScalarObject PyUnicodeObject
+
+/* Scalar object for VOID (record) data: points at -- rather than
+   embeds -- its data, and carries a descr, flags, and an optional
+   object that owns the memory. */
+typedef struct {
+ PyObject_VAR_HEAD;
+ char *obval;
+ PyArray_Descr *descr;
+ int flags;
+ PyObject *base;
+} PyVoidScalarObject;
+
+/* no error checking is performed -- ctypeptr must be same type as scalar */
+/* in case of flexible type, the data is not copied
+ into ctypeptr which is expected to be a pointer to pointer */
+/*OBJECT_API
+ Convert to c-type
+*/
+static void
+PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr)
+{
+    PyArray_Descr *typecode;
+    typecode = PyArray_DescrFromScalar(scalar);
+
+    if (PyTypeNum_ISEXTENDED(typecode->type_num)) {
+        /* flexible types: store a pointer to the scalar's data */
+        void **newptr = (void **)ctypeptr;
+        switch(typecode->type_num) {
+        case PyArray_STRING:
+            *newptr = (void *)PyString_AS_STRING(scalar);
+            /* bug fix: the cases previously fell through, so the
+               string/unicode pointer was clobbered by the void case */
+            break;
+        case PyArray_UNICODE:
+            *newptr = (void *)PyUnicode_AS_DATA(scalar);
+            break;
+        default:
+            *newptr = ((PyVoidScalarObject *)scalar)->obval;
+            break;
+        }
+        /* bug fix: the typecode reference was leaked on this path */
+        Py_DECREF(typecode);
+        return;
+    }
+    memcpy(ctypeptr, _SOFFSET_(scalar, typecode->type_num),
+           typecode->elsize);
+    Py_DECREF(typecode);
+    return;
+}
+
+/* The output buffer must be large-enough to receive the value */
+/* Even for flexible types which is different from ScalarAsCtype
+ where only a reference for flexible types is returned
+*/
+
+/*OBJECT_API
+ Cast Scalar to c-type
+*/
+static int
+PyArray_CastScalarToCtype(PyObject *scalar, void *ctypeptr,
+                          PyArray_Descr *outcode)
+{
+    PyArray_Descr* descr;
+
+    descr = PyArray_DescrFromScalar(scalar);
+    if (PyTypeNum_ISEXTENDED(descr->type_num) ||
+        PyTypeNum_ISEXTENDED(outcode->type_num)) {
+        /* flexible types: route through temporary 0-d arrays so the
+           generic cast machinery handles the copy */
+        PyArrayObject *ain, *aout;
+
+        ain = (PyArrayObject *)PyArray_FromScalar(scalar, NULL);
+        if (ain == NULL) {Py_DECREF(descr); return -1;}
+        aout = (PyArrayObject *)\
+            PyArray_NewFromDescr(&PyArray_Type,
+                                 outcode,
+                                 0, NULL,
+                                 NULL, ctypeptr,
+                                 CARRAY_FLAGS, NULL);
+        if (aout == NULL) {
+            /* bug fix: descr was leaked on this error path */
+            Py_DECREF(ain);
+            Py_DECREF(descr);
+            return -1;
+        }
+        descr->f->cast[outcode->type_num](ain->data,
+                                          aout->data, 1, ain, aout);
+        Py_DECREF(ain);
+        Py_DECREF(aout);
+    }
+    else {
+        descr->f->cast[outcode->type_num](_SOFFSET_(scalar,
+                                                    descr->type_num),
+                                          ctypeptr, 1, NULL, NULL);
+    }
+    Py_DECREF(descr);
+    return 0;
+}
+
+/* 0-dim array from array-scalar object */
+/* always contains a copy of the data
+ unless outcode is NULL, it is of void type and the referrer does
+ not own it either.
+*/
+
+/* steals reference to outcode */
+/*OBJECT_API
+ Get 0-dim array from scalar
+*/
+static PyObject *
+PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
+{
+    PyArray_Descr *typecode;
+    PyObject *r;
+    char *memptr;
+    PyObject *ret;
+
+    /* convert to 0-dim array of scalar typecode */
+    typecode = PyArray_DescrFromScalar(scalar);
+    if ((typecode->type_num == PyArray_VOID) && \
+        !(((PyVoidScalarObject *)scalar)->flags & OWNDATA) && \
+        outcode == NULL) {
+        /* void scalar that does not own its data: return a view that
+           keeps the scalar alive instead of copying */
+        r = PyArray_NewFromDescr(&PyArray_Type,
+                                 typecode,
+                                 0, NULL, NULL,
+                                 ((PyVoidScalarObject *)scalar)->obval,
+                                 ((PyVoidScalarObject *)scalar)->flags,
+                                 NULL);
+        /* bug fix: a NULL result was previously dereferenced */
+        if (r == NULL) return NULL;
+        PyArray_BASE(r) = (PyObject *)scalar;
+        Py_INCREF(scalar);
+        return r;
+    }
+    r = PyArray_NewFromDescr(&PyArray_Type,
+                             typecode,
+                             0, NULL,
+                             NULL, NULL, 0, NULL);
+    if (r==NULL) {Py_XDECREF(outcode); return NULL;}
+
+    /* locate the scalar's raw memory */
+    switch(typecode->type_num) {
+    case PyArray_STRING:
+        memptr = PyString_AS_STRING(scalar);
+        break;
+    case PyArray_UNICODE:
+        memptr = (char *)PyUnicode_AS_DATA(scalar);
+        break;
+    default:
+        if (PyTypeNum_ISEXTENDED(typecode->type_num)) {
+            memptr = (((PyVoidScalarObject *)scalar)->obval);
+        }
+        else {
+            memptr = _SOFFSET_(scalar, typecode->type_num);
+        }
+        break;
+    }
+
+    memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r));
+    if (PyArray_ISOBJECT(r)) {
+        /* the array now holds its own reference to the boxed object */
+        Py_INCREF(*((PyObject **)memptr));
+    }
+
+    if (outcode == NULL) return r;
+
+    if (outcode->type_num == typecode->type_num) {
+        if (!PyTypeNum_ISEXTENDED(typecode->type_num)) {
+            /* bug fix: the stolen outcode reference was leaked here */
+            Py_DECREF(outcode);
+            return r;
+        }
+        /* bug fix: a stray semicolon after this condition made the
+           return unconditional, skipping the cast whenever the type
+           numbers matched but the element sizes differed */
+        if (outcode->elsize == typecode->elsize) {
+            Py_DECREF(outcode);
+            return r;
+        }
+    }
+
+    /* cast if necessary to desired output typecode */
+    ret = PyArray_CastToType((PyArrayObject *)r, outcode, 0);
+    Py_DECREF(r);
+    return ret;
+}
+
+/* tp_alloc for array scalars: zero-filled allocation sized for
+   nitems+1 items (the extra item presumably leaves room for a
+   trailing NUL in flexible types -- TODO confirm). */
+static PyObject *
+gentype_alloc(PyTypeObject *type, int nitems)
+{
+    PyObject *obj;
+    const size_t size = _PyObject_VAR_SIZE(type, nitems+1);
+
+    obj = (PyObject *)_pya_malloc(size);
+    /* bug fix: a failed allocation was previously passed straight to
+       memset, crashing instead of raising MemoryError */
+    if (obj == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+    memset(obj, 0, size);
+    if (type->tp_itemsize == 0)
+        PyObject_INIT(obj, type);
+    else
+        (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems);
+    return obj;
+}
+
+/* tp_dealloc for array scalars: no extra state to clear, just defer
+   to the type's tp_free. */
+static void
+gentype_dealloc(PyObject *v)
+{
+ v->ob_type->tp_free(v);
+}
+
+
+/* nb_power for array scalars: convert whichever operand(s) are array
+   scalars to 0-d arrays and delegate to the array nb_power.  The
+   modulo argument is always passed as Py_None (three-argument pow is
+   not supported here). */
+static PyObject *
+gentype_power(PyObject *m1, PyObject *m2, PyObject *m3)
+{
+ PyObject *arr, *ret, *arg2;
+ char *msg="unsupported operand type(s) for ** or pow()";
+
+ if (!PyArray_IsScalar(m1,Generic)) {
+ /* m1 is not a scalar: either it is an array (use its nb_power)
+ or m2 must be the scalar -- convert m2 and retry */
+ if (PyArray_Check(m1)) {
+ ret = m1->ob_type->tp_as_number->nb_power(m1,m2,
+ Py_None);
+ }
+ else {
+ if (!PyArray_IsScalar(m2,Generic)) {
+ PyErr_SetString(PyExc_TypeError, msg);
+ return NULL;
+ }
+ arr = PyArray_FromScalar(m2, NULL);
+ if (arr == NULL) return NULL;
+ ret = arr->ob_type->tp_as_number->nb_power(m1, arr,
+ Py_None);
+ Py_DECREF(arr);
+ }
+ return ret;
+ }
+ if (!PyArray_IsScalar(m2, Generic)) {
+ /* mirror image of the branch above, with m1 the scalar */
+ if (PyArray_Check(m2)) {
+ ret = m2->ob_type->tp_as_number->nb_power(m1,m2,
+ Py_None);
+ }
+ else {
+ if (!PyArray_IsScalar(m1, Generic)) {
+ PyErr_SetString(PyExc_TypeError, msg);
+ return NULL;
+ }
+ arr = PyArray_FromScalar(m1, NULL);
+ if (arr == NULL) return NULL;
+ ret = arr->ob_type->tp_as_number->nb_power(arr, m2,
+ Py_None);
+ Py_DECREF(arr);
+ }
+ return ret;
+ }
+ /* both operands are array scalars: lift both to 0-d arrays */
+ arr=arg2=NULL;
+ arr = PyArray_FromScalar(m1, NULL);
+ arg2 = PyArray_FromScalar(m2, NULL);
+ if (arr == NULL || arg2 == NULL) {
+ Py_XDECREF(arr); Py_XDECREF(arg2); return NULL;
+ }
+ ret = arr->ob_type->tp_as_number->nb_power(arr, arg2, Py_None);
+ Py_DECREF(arr);
+ Py_DECREF(arg2);
+ return ret;
+}
+
+/* Implement a scalar method by lifting the scalar to a 0-d array,
+   looking up the method named `str` on that array, and calling it
+   with the given args/kwds.  Array results are filtered through
+   PyArray_Return so 0-d results come back as scalars. */
+static PyObject *
+gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds,
+                       char *str)
+{
+    PyObject *zerod, *bound, *result;
+
+    zerod = PyArray_FromScalar(self, NULL);
+    if (zerod == NULL)
+        return NULL;
+    bound = PyObject_GetAttrString(zerod, str);
+    Py_DECREF(zerod);
+    if (bound == NULL)
+        return NULL;
+    result = (kwds == NULL) ? PyObject_CallObject(bound, args)
+                            : PyObject_Call(bound, args, kwds);
+    Py_DECREF(bound);
+    if (result != NULL && PyArray_Check(result))
+        return PyArray_Return((PyArrayObject *)result);
+    return result;
+}
+
+/**begin repeat
+
+#name=add, subtract, divide, remainder, divmod, lshift, rshift, and, xor, or, floor_divide, true_divide#
+#PYNAME=Add, Subtract, Divide, Remainder, Divmod, Lshift, Rshift, And, Xor, Or, FloorDivide, TrueDivide#
+*/
+
+/* Generic binary nb_@name@ for array scalars: whichever operand is
+   not already an array/scalar is converted via PyArray_FromAny, and
+   the operation is delegated to the array's number protocol. */
+static PyObject *
+gentype_@name@(PyObject *m1, PyObject *m2)
+{
+ PyObject *arr, *ret=NULL, *tup;
+
+ if (!PyArray_IsScalar(m1, Generic)) {
+ if (PyArray_Check(m1)) {
+ ret = m1->ob_type->tp_as_number->nb_@name@(m1,m2);
+ }
+ else {
+ PyObject *newarr;
+ /* Convert object to Array scalar and try again */
+ newarr = PyArray_FromAny(m1, NULL, 0, 0, 0);
+ if (newarr!=NULL) {
+ ret = newarr->ob_type->tp_as_number->nb_@name@(newarr, m2);
+ Py_DECREF(newarr);
+ }
+ else ret=NULL;
+ }
+ return ret;
+ }
+ if (!PyArray_IsScalar(m2, Generic)) {
+ if (PyArray_Check(m2)) {
+ ret = m2->ob_type->tp_as_number->nb_@name@(m1,m2);
+ }
+ else {
+ PyObject *newarr;
+ /* Convert object to Array and try again */
+ newarr = PyArray_FromAny(m2, NULL, 0, 0, 0);
+ if (newarr!=NULL) {
+ ret = newarr->ob_type->tp_as_number->nb_@name@(m1, newarr);
+ Py_DECREF(newarr);
+ }
+ else ret=NULL;
+ }
+ return ret;
+ }
+ /* both operands are array scalars: lift both to 0-d arrays */
+ arr=tup=NULL;
+ arr = PyArray_FromScalar(m1, NULL);
+ tup = PyArray_FromScalar(m2, NULL);
+ if (arr == NULL || tup == NULL) {
+ Py_XDECREF(tup); Py_XDECREF(arr); return NULL;
+ }
+ ret = arr->ob_type->tp_as_number->nb_@name@(arr, tup);
+ Py_DECREF(arr);
+ Py_DECREF(tup);
+ return ret;
+}
+/**end repeat**/
+
+
+/* nb_multiply for array scalars.  Unlike the generic template above,
+   multiplication must also handle sequence repetition: when the
+   non-scalar operand has no nb_multiply, it is treated as a sequence
+   and the scalar as a repeat count, falling back to array multiply
+   if the repeat fails. */
+static PyObject *
+gentype_multiply(PyObject *m1, PyObject *m2)
+{
+ PyObject *arr, *ret=NULL, *tup;
+ long repeat;
+
+ if (!PyArray_IsScalar(m1, Generic)) {
+ if (PyArray_Check(m1)) {
+ ret = m1->ob_type->tp_as_number->nb_multiply(m1,m2);
+ }
+ else if ((m1->ob_type->tp_as_number == NULL) ||
+ (m1->ob_type->tp_as_number->nb_multiply == NULL)) {
+ /* Convert m2 to an int and assume sequence
+ repeat */
+ repeat = PyInt_AsLong(m2);
+ if (repeat == -1 && PyErr_Occurred()) return NULL;
+ ret = PySequence_Repeat(m1, (int) repeat);
+ if (ret == NULL) {
+ /* not a sequence after all: fall back to the
+ array multiply via a 0-d array for m2 */
+ PyErr_Clear();
+ arr = PyArray_FromScalar(m2, NULL);
+ if (arr == NULL) return NULL;
+ ret = arr->ob_type->tp_as_number->\
+ nb_multiply(m1, arr);
+ Py_DECREF(arr);
+ }
+ }
+ else {
+ PyObject *newarr;
+ /* Convert object to Array scalar and try again */
+ newarr = PyArray_FromAny(m1, NULL, 0, 0, 0);
+ if (newarr!=NULL) {
+ ret = newarr->ob_type->tp_as_number->nb_multiply(newarr, m2);
+ Py_DECREF(newarr);
+ }
+ else ret=NULL;
+ }
+ return ret;
+ }
+ if (!PyArray_IsScalar(m2, Generic)) {
+ /* mirror image of the branch above, with m2 non-scalar */
+ if (PyArray_Check(m2)) {
+ ret = m2->ob_type->tp_as_number->nb_multiply(m1,m2);
+ }
+ else if ((m2->ob_type->tp_as_number == NULL) ||
+ (m2->ob_type->tp_as_number->nb_multiply == NULL)) {
+ /* Convert m1 to an int and assume sequence
+ repeat */
+ repeat = PyInt_AsLong(m1);
+ if (repeat == -1 && PyErr_Occurred()) return NULL;
+ ret = PySequence_Repeat(m2, (int) repeat);
+ if (ret == NULL) {
+ PyErr_Clear();
+ arr = PyArray_FromScalar(m1, NULL);
+ if (arr == NULL) return NULL;
+ ret = arr->ob_type->tp_as_number-> \
+ nb_multiply(arr, m2);
+ Py_DECREF(arr);
+ }
+ }
+ else {
+ PyObject *newarr;
+ /* Convert object to Array scalar and try again */
+ newarr = PyArray_FromAny(m2, NULL, 0, 0, 0);
+ if (newarr!=NULL) {
+ ret = newarr->ob_type->tp_as_number->nb_multiply(m1, newarr);
+ Py_DECREF(newarr);
+ }
+ else ret =NULL;
+ }
+ return ret;
+ }
+ /* Both are array scalar objects */
+ arr=tup=NULL;
+ arr = PyArray_FromScalar(m1, NULL);
+ tup = PyArray_FromScalar(m2, NULL);
+ if (arr == NULL || tup == NULL) {
+ Py_XDECREF(tup); Py_XDECREF(arr); return NULL;
+ }
+ ret = arr->ob_type->tp_as_number->nb_multiply(arr, tup);
+ Py_DECREF(arr);
+ Py_DECREF(tup);
+ return ret;
+
+}
+
+
+
+/**begin repeat
+
+#name=negative, absolute, invert, int, long, float, oct, hex#
+*/
+
+/* Generic unary number-protocol slot: convert the scalar to a 0-d
+   array and delegate to the ndarray's nb_@name@ slot. */
+static PyObject *
+gentype_@name@(PyObject *m1)
+{
+    PyObject *arr, *ret;
+
+    arr = PyArray_FromScalar(m1, NULL);
+    if (arr == NULL) return NULL;
+    ret = arr->ob_type->tp_as_number->nb_@name@(arr);
+    Py_DECREF(arr);
+    return ret;
+}
+/**end repeat**/
+
+/* nb_nonzero: truth value of a scalar, delegated to the 0-d array.
+   Returns -1 (with an exception set) if the conversion fails. */
+static int
+gentype_nonzero_number(PyObject *m1)
+{
+    PyObject *arr;
+    int ret;
+
+    arr = PyArray_FromScalar(m1, NULL);
+    if (arr == NULL) return -1;
+    ret = arr->ob_type->tp_as_number->nb_nonzero(arr);
+    Py_DECREF(arr);
+    return ret;
+}
+
+/* tp_str: str() of the equivalent Python object, obtained by viewing
+   the scalar as a 0-d array and extracting its single item. */
+static PyObject *
+gentype_str(PyObject *self)
+{
+    PyArrayObject *zerod;
+    PyObject *item, *result;
+
+    zerod = (PyArrayObject *)PyArray_FromScalar(self, NULL);
+    if (zerod == NULL) {
+        return NULL;
+    }
+    item = zerod->descr->f->getitem(zerod->data, zerod);
+    result = PyObject_Str(item);
+    Py_XDECREF(item);
+    Py_DECREF(zerod);
+    return result;
+}
+
+/* tp_repr: repr() of the equivalent Python object, obtained by
+   viewing the scalar as a 0-d array and extracting its single item. */
+static PyObject *
+gentype_repr(PyObject *self)
+{
+    PyArrayObject *zerod;
+    PyObject *item, *result;
+
+    zerod = (PyArrayObject *)PyArray_FromScalar(self, NULL);
+    if (zerod == NULL) {
+        return NULL;
+    }
+    item = zerod->descr->f->getitem(zerod->data, zerod);
+    result = PyObject_Repr(item);
+    Py_XDECREF(item);
+    Py_DECREF(zerod);
+    return result;
+}
+
+/* Format a long double into buf with the given precision, then append
+   ".0" when the formatted text is purely digits (so the result always
+   reads back as a float, e.g. "3" -> "3.0").  A leading '-' is
+   skipped before the all-digits scan.
+   NOTE(review): the ".0\0" append assumes buflen leaves 3 spare bytes
+   past the snprintf output — true for the 100-byte callers here. */
+static void
+format_longdouble(char *buf, size_t buflen, longdouble val, int precision)
+{
+    register char *cp;
+
+    PyOS_snprintf(buf, buflen, "%.*" LONGDOUBLE_FMT, precision, val);
+    cp = buf;
+    if (*cp == '-')
+        cp++;
+    for (; *cp != '\0'; cp++) {
+        if (!isdigit(Py_CHARMASK(*cp)))
+            break;
+    }
+    if (*cp == '\0') {
+        *cp++ = '.';
+        *cp++ = '0';
+        *cp++ = '\0';
+    }
+}
+
+/* Repr/str precision: 15 digits when long double is just a double,
+   21 otherwise. */
+#if SIZEOF_LONGDOUBLE == SIZEOF_DOUBLE
+#define PREC_REPR 15
+#define PREC_STR 15
+#else
+#define PREC_REPR 21
+#define PREC_STR 21
+#endif
+
+/* tp_repr for longdouble scalars.  Uses a static buffer, so the
+   function is not re-entrant/thread-safe. */
+static PyObject *
+longdoubletype_repr(PyObject *self)
+{
+    static char buf[100];
+    format_longdouble(buf, sizeof(buf), ((PyLongDoubleScalarObject *)self)->obval, PREC_REPR);
+    return PyString_FromString(buf);
+}
+
+/* tp_repr for complex longdouble scalars: "(real+imagj)".  Uses
+   static buffers, so the function is not re-entrant/thread-safe. */
+static PyObject *
+clongdoubletype_repr(PyObject *self)
+{
+    static char buf1[100];
+    static char buf2[100];
+    static char buf3[202];
+    clongdouble x;
+    x = ((PyCLongDoubleScalarObject *)self)->obval;
+    format_longdouble(buf1, sizeof(buf1), x.real, PREC_REPR);
+    format_longdouble(buf2, sizeof(buf2), x.imag, PREC_REPR);
+
+    snprintf(buf3, sizeof(buf3), "(%s+%sj)", buf1, buf2);
+    return PyString_FromString(buf3);
+}
+
+/* str() uses the same formatting as repr() for these types */
+#define longdoubletype_str longdoubletype_repr
+#define clongdoubletype_str clongdoubletype_repr
+
+/** Could improve this with a PyLong_FromLongDouble(longdouble ldval)
+    but this would need some more work...
+**/
+
+/**begin repeat
+
+#name=(int, long, hex, oct, float)*2#
+#KIND=(Long*4, Float)*2#
+#char=,,,,,c*5#
+#CHAR=,,,,,C*5#
+#POST=,,,,,.real*5#
+*/
+/* Numeric conversions for (c)longdouble scalars: narrow to double
+   (taking .real for the complex variants), wrap in a Python
+   long/float, and delegate to that object's nb_@name@ slot.
+   NOTE(review): the Py@KIND@_FromDouble result is used without a NULL
+   check — confirm it cannot fail here (e.g. on inf/overflow). */
+static PyObject *
+@char@longdoubletype_@name@(PyObject *self)
+{
+    double dval;
+    PyObject *obj, *ret;
+
+    dval = (double)(((Py@CHAR@LongDoubleScalarObject *)self)->obval)@POST@;
+    obj = Py@KIND@_FromDouble(dval);
+    ret = obj->ob_type->tp_as_number->nb_@name@(obj);
+    Py_DECREF(obj);
+    return ret;
+}
+/**end repeat**/
+
+
+static PyObject *gentype_copy(PyObject *, PyObject *);
+
+/* Number-protocol table for the generic array-scalar type.  Slot
+   order follows the Python 2 PyNumberMethods layout; all in-place
+   slots are 0 since scalars are immutable. */
+static PyNumberMethods gentype_as_number = {
+    (binaryfunc)gentype_add,               /*nb_add*/
+    (binaryfunc)gentype_subtract,          /*nb_subtract*/
+    (binaryfunc)gentype_multiply,          /*nb_multiply*/
+    (binaryfunc)gentype_divide,            /*nb_divide*/
+    (binaryfunc)gentype_remainder,         /*nb_remainder*/
+    (binaryfunc)gentype_divmod,            /*nb_divmod*/
+    (ternaryfunc)gentype_power,            /*nb_power*/
+    (unaryfunc)gentype_negative,
+    (unaryfunc)gentype_copy,               /*nb_pos -- +x returns a copy*/
+    (unaryfunc)gentype_absolute,           /*(unaryfunc)gentype_abs,*/
+    (inquiry)gentype_nonzero_number,       /*nb_nonzero*/
+    (unaryfunc)gentype_invert,             /*nb_invert*/
+    (binaryfunc)gentype_lshift,            /*nb_lshift*/
+    (binaryfunc)gentype_rshift,            /*nb_rshift*/
+    (binaryfunc)gentype_and,               /*nb_and*/
+    (binaryfunc)gentype_xor,               /*nb_xor*/
+    (binaryfunc)gentype_or,                /*nb_or*/
+    0,                                     /*nb_coerce*/
+    (unaryfunc)gentype_int,                /*nb_int*/
+    (unaryfunc)gentype_long,               /*nb_long*/
+    (unaryfunc)gentype_float,              /*nb_float*/
+    (unaryfunc)gentype_oct,                /*nb_oct*/
+    (unaryfunc)gentype_hex,                /*nb_hex*/
+    0,                                     /*inplace_add*/
+    0,                                     /*inplace_subtract*/
+    0,                                     /*inplace_multiply*/
+    0,                                     /*inplace_divide*/
+    0,                                     /*inplace_remainder*/
+    0,                                     /*inplace_power*/
+    0,                                     /*inplace_lshift*/
+    0,                                     /*inplace_rshift*/
+    0,                                     /*inplace_and*/
+    0,                                     /*inplace_xor*/
+    0,                                     /*inplace_or*/
+    (binaryfunc)gentype_floor_divide,      /*nb_floor_divide*/
+    (binaryfunc)gentype_true_divide,       /*nb_true_divide*/
+    0,                                     /*nb_inplace_floor_divide*/
+    0,                                     /*nb_inplace_true_divide*/
+
+};
+
+/* tp_richcompare: compare via the 0-d array's rich-compare slot. */
+static PyObject *
+gentype_richcompare(PyObject *self, PyObject *other, int cmp_op)
+{
+
+    PyObject *arr, *ret;
+
+    arr = PyArray_FromScalar(self, NULL);
+    if (arr == NULL) return NULL;
+    ret = arr->ob_type->tp_richcompare(arr, other, cmp_op);
+    Py_DECREF(arr);
+    return ret;
+}
+
+/* .ndim: a scalar always has zero dimensions. */
+static PyObject *
+gentype_ndim_get(PyObject *self)
+{
+    return PyInt_FromLong(0);
+}
+
+/* .flags for generic scalars: a fixed flags value (contiguous,
+   owndata, fortran, aligned — scalars are not writeable) wrapped in a
+   Python-level flagsobj from the _scipy_internal module. */
+static PyObject *
+gentype_flags_get(PyObject *self)
+{
+    static int flags=CONTIGUOUS | OWNDATA | FORTRAN | ALIGNED;
+
+    return PyObject_CallMethod(_scipy_internal, "flagsobj", "Oii",
+                               self, flags, 1);
+}
+
+/* .flags for void scalars: uses the per-instance flags (void scalars
+   can be writeable views into a record array). */
+static PyObject *
+voidtype_flags_get(PyVoidScalarObject *self)
+{
+    return PyObject_CallMethod(_scipy_internal, "flagsobj", "Oii",
+                               self, self->flags, 1);
+}
+
+/* .dtypedescr for void scalars: new reference to the stored descr. */
+static PyObject *
+voidtype_dtypedescr_get(PyVoidScalarObject *self)
+{
+    Py_INCREF(self->descr);
+    return (PyObject *)self->descr;
+}
+
+
+
+/* .shape (and .strides, via the getset tables): the empty tuple. */
+static PyObject *
+gentype_shape_get(PyObject *self)
+{
+    return PyTuple_New(0);
+}
+
+/*
+static int
+gentype_shape_set(PyObject *self, PyObject *val)
+{
+ if (!PyTuple_Check(val) || PyTuple_GET_SIZE(val) > 0) {
+ PyErr_SetString(PyExc_ValueError, \
+ "invalid shape for scalar");
+ return -1;
+ }
+ return 0;
+}
+*/
+
+/* __array_data__: ("", True) — an empty data string plus a read-only
+   flag, per the array protocol for objects exposing data by buffer. */
+static PyObject *
+gentype_dataptr_get(PyObject *self)
+{
+    return Py_BuildValue("NO",PyString_FromString(""),Py_True);
+}
+
+
+/* .data: a read-only buffer object over the scalar's bytes. */
+static PyObject *
+gentype_data_get(PyObject *self)
+{
+    PyArray_Descr *typecode;
+    PyObject *ret;
+
+    typecode = PyArray_DescrFromScalar(self);
+    ret = PyBuffer_FromObject(self, 0, typecode->elsize);
+    Py_DECREF(typecode);
+    return ret;
+}
+
+
+/* .itemsize (also .nbytes): element size of the scalar's descriptor. */
+static PyObject *
+gentype_itemsize_get(PyObject *self)
+{
+    PyArray_Descr *typecode;
+    PyObject *ret;
+
+    typecode = PyArray_DescrFromScalar(self);
+    ret = PyInt_FromLong((long) typecode->elsize);
+    Py_DECREF(typecode);
+    return ret;
+}
+
+/* .size: a scalar always contains exactly one element. */
+static PyObject *
+gentype_size_get(PyObject *self)
+{
+    return PyInt_FromLong(1);
+}
+
+
+/* .dtypechar: the single type character, with the element size
+   appended for flexible (string/unicode/void) scalars, e.g. "S8". */
+static PyObject *
+gentype_typechar_get(PyObject *self)
+{
+    PyArray_Descr *descr;
+    char type;
+    int elsize;
+
+    descr = PyArray_DescrFromScalar(self);
+    type = descr->type;
+    elsize = descr->elsize;
+    Py_DECREF(descr);
+    if (PyArray_IsScalar(self, Flexible))
+        return PyString_FromFormat("%c%d", (int)type, elsize);
+    else
+        return PyString_FromStringAndSize(&type, 1);
+}
+
+/* Destructor for the __array_struct__ CObject: release the 0-d array
+   kept alive as the interface's data owner, then free the struct. */
+static void
+gentype_struct_free(void *ptr, void *arr)
+{
+    Py_DECREF((PyObject *)arr);
+    _pya_free(ptr);
+}
+
+/* __array_struct__: build a version-2 PyArrayInterface describing the
+ * scalar as 0-d data.  The 0-d array created here is kept alive by the
+ * returned CObject and released in gentype_struct_free.
+ *
+ * Fix: the original dereferenced the results of PyArray_FromScalar and
+ * _pya_malloc without checking them, crashing on failure.
+ */
+static PyObject *
+gentype_struct_get(PyObject *self)
+{
+    PyArrayObject *arr;
+    PyArrayInterface *inter;
+
+    arr = (PyArrayObject *)PyArray_FromScalar(self, NULL);
+    if (arr == NULL) return NULL;
+    inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface));
+    if (inter == NULL) {
+        Py_DECREF(arr);
+        return PyErr_NoMemory();
+    }
+    inter->version = 2;
+    inter->nd = 0;
+    inter->flags = arr->flags;
+    inter->typekind = arr->descr->kind;
+    inter->itemsize = arr->descr->elsize;
+    inter->strides = NULL;
+    inter->shape = NULL;
+    inter->data = arr->data;
+    return PyCObject_FromVoidPtrAndDesc(inter, arr, gentype_struct_free);
+}
+
+/* .dtypestr / __array_typestr__: fetched from the 0-d array's
+   "dtypestr" attribute. */
+static PyObject *
+gentype_typestr_get(PyObject *self)
+{
+    PyArrayObject *arr;
+    PyObject *ret;
+
+    arr = (PyArrayObject *)PyArray_FromScalar(self, NULL);
+    ret = PyObject_GetAttrString((PyObject *)arr, "dtypestr");
+    Py_DECREF(arr);
+    return ret;
+}
+
+/* __array_descr__: fetched from the 0-d array's protocol attribute. */
+static PyObject *
+gentype_descr_get(PyObject *self)
+{
+    PyArrayObject *arr;
+    PyObject *ret;
+
+    arr = (PyArrayObject *)PyArray_FromScalar(self, NULL);
+    ret = PyObject_GetAttrString((PyObject *)arr, "__array_descr__");
+    Py_DECREF(arr);
+    return ret;
+}
+
+
+/* .dtype: here this returns the scalar's *type object* itself
+   (contrast with .dtypedescr below, which returns the descriptor). */
+static PyObject *
+gentype_type_get(PyObject *self)
+{
+    Py_INCREF(self->ob_type);
+    return (PyObject *)self->ob_type;
+}
+
+/* .dtypedescr: the data-type descriptor for this scalar. */
+static PyObject *
+gentype_typedescr_get(PyObject *self)
+{
+    return (PyObject *)PyArray_DescrFromScalar(self);
+}
+
+
+/* .base: scalars own their value, so base is always None. */
+static PyObject *
+gentype_base_get(PyObject *self)
+{
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/* For a complex scalar, store its complex typenum in *typenum and
+   return a new reference to the descriptor of the matching *real*
+   component type.  Returns NULL (no exception set) for non-complex
+   scalars. */
+static PyArray_Descr *
+_realdescr_fromcomplexscalar(PyObject *self, int *typenum)
+{
+    if PyArray_IsScalar(self, CDouble) {
+        *typenum = PyArray_CDOUBLE;
+        return PyArray_DescrFromType(PyArray_DOUBLE);
+    }
+    if PyArray_IsScalar(self, CFloat) {
+        *typenum = PyArray_CFLOAT;
+        return PyArray_DescrFromType(PyArray_FLOAT);
+    }
+    if PyArray_IsScalar(self, CLongDouble) {
+        *typenum = PyArray_CLONGDOUBLE;
+        return PyArray_DescrFromType(PyArray_LONGDOUBLE);
+    }
+    return NULL;
+}
+
+/* .real: for complex scalars, a real-typed scalar built from the real
+   half of the value; for object scalars, obj.real if it exists; for
+   everything else the scalar itself (a real number is its own real
+   part). */
+static PyObject *
+gentype_real_get(PyObject *self)
+{
+    PyArray_Descr *typecode;
+    PyObject *ret;
+    int typenum;
+
+    if (PyArray_IsScalar(self, ComplexFloating)) {
+        typecode = _realdescr_fromcomplexscalar(self, &typenum);
+        /* the real component is at the start of the value's storage */
+        ret = PyArray_Scalar(_SOFFSET_(self, typenum), typecode,
+                             NULL);
+        Py_DECREF(typecode);
+        return ret;
+    }
+    else if PyArray_IsScalar(self, Object) {
+        PyObject *obj = ((PyObjectScalarObject *)self)->obval;
+        ret = PyObject_GetAttrString(obj, "real");
+        if (ret != NULL) return ret;
+        PyErr_Clear();
+    }
+    Py_INCREF(self);
+    return (PyObject *)self;
+}
+
+/* .imag: for complex scalars, a real-typed scalar built from the
+ * imaginary half of the value; for object scalars, obj.imag (falling
+ * back to an object scalar holding int 0); for all other scalars, a
+ * zero of the same type.
+ *
+ * Fix: the original called _realdescr_fromcomplexscalar up front and
+ * then used the result unconditionally — for any non-complex scalar
+ * that function returns NULL, so the object branch crashed in
+ * Py_DECREF(NULL) and the real-number branch dereferenced
+ * typecode->elsize through NULL.  The descriptor is now obtained and
+ * released per branch.
+ */
+static PyObject *
+gentype_imag_get(PyObject *self)
+{
+    PyArray_Descr *typecode;
+    PyObject *ret;
+    int typenum;
+
+    if (PyArray_IsScalar(self, ComplexFloating)) {
+        typecode = _realdescr_fromcomplexscalar(self, &typenum);
+        /* imaginary half sits elsize bytes past the real half */
+        ret = PyArray_Scalar(_SOFFSET_(self, typenum) \
+                             + typecode->elsize, typecode, NULL);
+        Py_DECREF(typecode);
+    }
+    else if PyArray_IsScalar(self, Object) {
+        PyObject *obj = ((PyObjectScalarObject *)self)->obval;
+        PyArray_Descr *newtype;
+        ret = PyObject_GetAttrString(obj, "imag");
+        if (ret == NULL) {
+            PyErr_Clear();
+            obj = PyInt_FromLong(0);
+            newtype = PyArray_DescrFromType(PyArray_OBJECT);
+            ret = PyArray_Scalar((char *)&obj, newtype, NULL);
+            Py_DECREF(newtype);
+            Py_DECREF(obj);
+        }
+    }
+    else {
+        /* real scalar: imag is a zero of the same type */
+        char *temp;
+        typecode = PyArray_DescrFromScalar(self);
+        temp = PyDataMem_NEW(typecode->elsize);
+        memset(temp, '\0', typecode->elsize);
+        ret = PyArray_Scalar(temp, typecode, NULL);
+        PyDataMem_FREE(temp);
+        Py_DECREF(typecode);
+    }
+
+    return ret;
+}
+
+/* .flat: an iterator over the one-element 0-d view of the scalar. */
+static PyObject *
+gentype_flat_get(PyObject *self)
+{
+    PyObject *zerod, *iter;
+
+    zerod = PyArray_FromScalar(self, NULL);
+    if (zerod == NULL) {
+        return NULL;
+    }
+    iter = PyArray_IterNew(zerod);
+    Py_DECREF(zerod);
+    return iter;
+}
+
+/* Attribute table for the generic array-scalar type.  All attributes
+   are read-only; several deliberately share a getter (.strides and
+   .shape both yield (), .nbytes equals .itemsize for one element). */
+static PyGetSetDef gentype_getsets[] = {
+    {"ndim",
+     (getter)gentype_ndim_get,
+     (setter) 0,
+     "number of array dimensions"},
+    {"flags",
+     (getter)gentype_flags_get,
+     (setter)0,
+     "integer value of flags"},
+    {"shape",
+     (getter)gentype_shape_get,
+     (setter)0,
+     "tuple of array dimensions"},
+    {"strides",
+     (getter)gentype_shape_get,
+     (setter) 0,
+     "tuple of bytes steps in each dimension"},
+    {"data",
+     (getter)gentype_data_get,
+     (setter) 0,
+     "pointer to start of data"},
+    {"itemsize",
+     (getter)gentype_itemsize_get,
+     (setter)0,
+     "length of one element in bytes"},
+    {"size",
+     (getter)gentype_size_get,
+     (setter)0,
+     "number of elements in the gentype"},
+    {"nbytes",
+     (getter)gentype_itemsize_get,
+     (setter)0,
+     "length of item in bytes"},
+    {"base",
+     (getter)gentype_base_get,
+     (setter)0,
+     "base object"},
+    {"dtype",
+     (getter)gentype_type_get,
+     (setter)0,
+     "get gentype type class"},
+    {"dtypechar",
+     (getter)gentype_typechar_get,
+     (setter)0,
+     "get gentype type character code"},
+    {"dtypestr",
+     (getter)gentype_typestr_get,
+     NULL,
+     "get array type string"},
+    {"dtypedescr",
+     (getter)gentype_typedescr_get,
+     NULL,
+     "get array data-descriptor"},
+    {"real",
+     (getter)gentype_real_get,
+     (setter)0,
+     "real part of scalar"},
+    {"imag",
+     (getter)gentype_imag_get,
+     (setter)0,
+     "imaginary part of scalar"},
+    {"flat",
+     (getter)gentype_flat_get,
+     (setter)0,
+     "a 1-d view of scalar"},
+    {"__array_data__",
+     (getter)gentype_dataptr_get,
+     NULL,
+     "Array protocol: data"},
+    {"__array_typestr__",
+     (getter)gentype_typestr_get,
+     NULL,
+     "Array protocol: typestr"},
+    {"__array_descr__",
+     (getter)gentype_descr_get,
+     NULL,
+     "Array protocol: descr"},
+    {"__array_shape__",
+     (getter)gentype_shape_get,
+     NULL,
+     "Array protocol: shape"},
+    {"__array_strides__",
+     (getter)gentype_shape_get,
+     NULL,
+     "Array protocol: strides"},
+    {"__array_struct__",
+     (getter)gentype_struct_get,
+     NULL,
+     "Array protocol: struct"},
+    /* Does not have __array_priority__ because it is not a subtype.
+     */
+    {NULL, NULL, NULL, NULL}  /* Sentinel */
+};
+
+
+/* 0-dim array from scalar object */
+
+static char doc_getarray[] = "sc.__array__(|type) return 0-dim array";
+
+/* __array__([dtype]): convert the scalar to a 0-d array, optionally
+   cast to the requested type.  NOTE(review): PyArray_FromScalar
+   appears to consume the descriptor reference produced by the
+   converter — confirm against its definition. */
+static PyObject *
+gentype_getarray(PyObject *scalar, PyObject *args)
+{
+    PyArray_Descr *outcode=NULL;
+    PyObject *ret;
+
+    if (!PyArg_ParseTuple(args, "|O&", &PyArray_DescrConverter,
+                          &outcode)) return NULL;
+    ret = PyArray_FromScalar(scalar, outcode);
+    return ret;
+}
+
+static char doc_sc_wraparray[] = "sc.__array_wrap__(obj) return scalar from array";
+
+/* __array_wrap__(arr): build an array scalar from the first element
+   of the given ndarray.  Only the first positional argument is used;
+   it must be an ndarray. */
+static PyObject *
+gentype_wraparray(PyObject *scalar, PyObject *args)
+{
+    PyObject *arr;
+
+    if (PyTuple_Size(args) < 1) {
+        PyErr_SetString(PyExc_TypeError,
+                        "only accepts 1 argument.");
+        return NULL;
+    }
+    arr = PyTuple_GET_ITEM(args, 0);
+    if (!PyArray_Check(arr)) {
+        PyErr_SetString(PyExc_TypeError,
+                        "can only be called with ndarray object");
+        return NULL;
+    }
+
+    return PyArray_Scalar(PyArray_DATA(arr), PyArray_DESCR(arr), arr);
+}
+
+
+/**begin repeat
+
+#name=tolist, item, tostring, astype, copy, resize, __deepcopy__, choose, searchsorted, argmax, argmin, reshape, view, swapaxes, max, min, ptp, conj, conjugate, nonzero, all, any, flatten, ravel, fill, transpose, newbyteorder#
+*/
+
+/* Positional-args-only array methods, all forwarded to the ndarray
+   method of the same name via gentype_generic_method. */
+static PyObject *
+gentype_@name@(PyObject *self, PyObject *args)
+{
+    return gentype_generic_method(self, args, NULL, "@name@");
+}
+/**end repeat**/
+
+/* squeeze(): a scalar has no axes to remove, so return itself.
+   Accepts no arguments. */
+static PyObject *
+gentype_squeeze(PyObject *self, PyObject *args)
+{
+    if (!PyArg_ParseTuple(args, "")) {
+        return NULL;
+    }
+    Py_INCREF(self);
+    return self;
+}
+
+static int
+gentype_getreadbuf(PyObject *, int, void **);
+
+/* byteswap([inplace]): return a new scalar with swapped byte order.
+   In-place swapping is refused since scalars are immutable. */
+static PyObject *
+gentype_byteswap(PyObject *self, PyObject *args)
+{
+    Bool inplace=FALSE;
+
+    if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace))
+        return NULL;
+
+    if (inplace) {
+        PyErr_SetString(PyExc_ValueError,
+                        "cannot byteswap a scalar in-place");
+        return NULL;
+    }
+    else {
+        /* get the data, copyswap it and pass it to a new Array scalar
+         */
+        char *data;
+        int numbytes;
+        PyArray_Descr *descr;
+        PyObject *new;
+        char *newmem;
+
+        numbytes = gentype_getreadbuf(self, 0, (void **)&data);
+        descr = PyArray_DescrFromScalar(self);
+        newmem = _pya_malloc(descr->elsize);
+        if (newmem == NULL) {Py_DECREF(descr); return PyErr_NoMemory();}
+        else memcpy(newmem, data, descr->elsize);
+        /* swap the copy, never the original scalar's storage */
+        descr->f->copyswap(newmem, NULL, 1, descr->elsize);
+        new = PyArray_Scalar(newmem, descr, NULL);
+        _pya_free(newmem);
+        Py_DECREF(descr);
+        return new;
+    }
+}
+
+
+/**begin repeat
+
+#name=take, getfield, put, putmask, repeat, tofile, mean, trace, diagonal, clip, std, var, sum, cumsum, prod, cumprod, compress, sort, argsort#
+*/
+
+/* Keyword-accepting array methods, all forwarded to the ndarray
+   method of the same name via gentype_generic_method. */
+static PyObject *
+gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    return gentype_generic_method(self, args, kwds, "@name@");
+}
+/**end repeat**/
+
+/* getfield for void scalars: delegate to the generic method, then, if
+   the void scalar's data is not in native byte order and the result is
+   a non-void scalar, byte-swap the result's storage in place so the
+   returned value is correct. */
+static PyObject *
+voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds)
+{
+    PyObject *ret;
+
+    ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield");
+    if (!ret) return ret;
+    if (PyArray_IsScalar(ret, Generic) && \
+        (!PyArray_IsScalar(ret, Void))) {
+        PyArray_Descr *new;
+        if (!PyArray_ISNBO(self->descr->byteorder)) {
+            new = PyArray_DescrFromScalar(ret);
+            /* swap the freshly created scalar's own storage */
+            new->f->copyswap(_SOFFSET_(ret,
+                                       new->type_num),
+                             NULL, 1, new->elsize);
+            Py_DECREF(new);
+        }
+    }
+    return ret;
+}
+
+/* setfield for non-void scalars: always an error, since only void
+   scalars have mutable field storage. */
+static PyObject *
+gentype_setfield(PyObject *self, PyObject *args, PyObject *kwds)
+{
+
+    PyErr_SetString(PyExc_TypeError,
+                    "Can't set fields in a non-void array scalar.");
+    return NULL;
+}
+
+/* setfield(value, dtype, offset=0) for void scalars: convert value to
+   an array of the given dtype and copy its bytes into the scalar's
+   storage at the given offset, swapping if the scalar is not in
+   native byte order.  The offset must keep the written item fully
+   inside the scalar's buffer.
+   NOTE(review): PyArray_FromAny appears to consume the typecode
+   reference; on the offset-error path typecode is explicitly
+   released — confirm the converter/FromAny reference semantics. */
+static PyObject *
+voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds)
+{
+    PyArray_Descr *typecode;
+    int offset = 0;
+    PyObject *value, *src;
+    int mysize;
+    char *dptr;
+    static char *kwlist[] = {"value", "dtype", "offset", 0};
+
+    if ((self->flags & WRITEABLE) != WRITEABLE) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "Can't write to memory");
+        return NULL;
+    }
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist,
+                                     &value,
+                                     PyArray_DescrConverter,
+                                     &typecode, &offset)) return NULL;
+
+    mysize = self->ob_size;
+
+    if (offset < 0 || (offset + typecode->elsize) > mysize) {
+        PyErr_Format(PyExc_ValueError,
+                     "Need 0 <= offset <= %d for requested type " \
+                     "but received offset = %d",
+                     mysize-typecode->elsize, offset);
+        Py_DECREF(typecode);
+        return NULL;
+    }
+
+    dptr = self->obval + offset;
+
+    /* Copy data from value to correct place in dptr */
+    src = PyArray_FromAny(value, typecode, 0, 0, CARRAY_FLAGS);
+    if (src == NULL) return NULL;
+    typecode->f->copyswap(dptr, PyArray_DATA(src),
+                          !PyArray_ISNBO(self->descr->byteorder),
+                          PyArray_ITEMSIZE(src));
+    Py_DECREF(src);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/* __reduce__ for pickling: return
+ *     (scipy.base.multiarray.scalar, (dtypedescr, data))
+ * where data is the scalar's raw bytes (or, for object scalars, the
+ * wrapped object itself).
+ *
+ * Fix: the original leaked the partially-built result tuple when the
+ * module import, the "scalar" lookup, or the "dtypedescr" attribute
+ * access failed; those error paths now release `ret`.
+ */
+static PyObject *
+gentype_reduce(PyObject *self, PyObject *args)
+{
+    PyObject *ret=NULL, *obj=NULL, *mod=NULL;
+    const char *buffer;
+    int buflen;
+
+    /* Return a tuple of (callable object, arguments) */
+
+    ret = PyTuple_New(2);
+    if (ret == NULL) return NULL;
+    if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) {
+        Py_DECREF(ret); return NULL;
+    }
+    mod = PyImport_ImportModule("scipy.base.multiarray");
+    if (mod == NULL) {Py_DECREF(ret); return NULL;}
+    obj = PyObject_GetAttrString(mod, "scalar");
+    Py_DECREF(mod);
+    if (obj == NULL) {Py_DECREF(ret); return NULL;}
+    PyTuple_SET_ITEM(ret, 0, obj);
+    obj = PyObject_GetAttrString((PyObject *)self, "dtypedescr");
+    if (obj == NULL) {Py_DECREF(ret); return NULL;}
+    if PyArray_IsScalar(self, Object) {
+        /* object scalars pickle the contained object directly */
+        mod = ((PyObjectScalarObject *)self)->obval;
+        PyTuple_SET_ITEM(ret, 1,
+                         Py_BuildValue("NO", obj, mod));
+    }
+    else {
+        mod = PyString_FromStringAndSize(buffer, buflen);
+        PyTuple_SET_ITEM(ret, 1,
+                         Py_BuildValue("NN", obj, mod));
+    }
+    return ret;
+}
+
+/* ignores everything */
+/* __setstate__: scalars are fully reconstructed by __reduce__'s
+   callable, so restoring state is a no-op. */
+static PyObject *
+gentype_setstate(PyObject *self, PyObject *args)
+{
+    Py_INCREF(Py_None);
+    return (Py_None);
+}
+
+/* dump(file): pickle the scalar to the given file (protocol 2). */
+static PyObject *
+gentype_dump(PyObject *self, PyObject *args)
+{
+    PyObject *file = NULL;
+
+    if (!PyArg_ParseTuple(args, "O", &file)) {
+        return NULL;
+    }
+    if (PyArray_Dump(self, file, 2) < 0) {
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+/* dumps(): return the scalar's pickle string (protocol 2). */
+static PyObject *
+gentype_dumps(PyObject *self, PyObject *args)
+{
+    if (!PyArg_ParseTuple(args, "")) {
+        return NULL;
+    }
+    return PyArray_Dumps(self, 2);
+}
+
+
+/* setting flags cannot be done for scalars */
+static PyObject *
+gentype_setflags(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/* need to fill in doc-strings for these methods on import -- copy from
+   array docstrings
+*/
+/* Method table for the generic array-scalar type.  Most entries are
+   thin forwards to the identically-named ndarray method; flag value 1
+   is METH_VARARGS. */
+static PyMethodDef gentype_methods[] = {
+    {"tolist", (PyCFunction)gentype_tolist, 1, NULL},
+    {"item", (PyCFunction)gentype_item, METH_VARARGS, NULL},
+    {"tofile", (PyCFunction)gentype_tofile,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"tostring", (PyCFunction)gentype_tostring, METH_VARARGS, NULL},
+    {"byteswap", (PyCFunction)gentype_byteswap,1, NULL},
+    {"astype", (PyCFunction)gentype_astype, 1, NULL},
+    {"getfield", (PyCFunction)gentype_getfield,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"setfield", (PyCFunction)gentype_setfield,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"copy", (PyCFunction)gentype_copy, 1, NULL},
+    {"resize", (PyCFunction)gentype_resize, 1, NULL},
+
+    /* Array protocol entry points */
+    {"__array__", (PyCFunction)gentype_getarray, 1, doc_getarray},
+    {"__array_wrap__", (PyCFunction)gentype_wraparray, 1, doc_sc_wraparray},
+
+    /* for the copy module */
+    {"__copy__", (PyCFunction)gentype_copy, 1, NULL},
+    {"__deepcopy__", (PyCFunction)gentype___deepcopy__, 1, NULL},
+
+
+    {"__reduce__", (PyCFunction) gentype_reduce, 1, NULL},
+    /* For consistency does nothing */
+    {"__setstate__", (PyCFunction) gentype_setstate, 1, NULL},
+
+    {"dumps", (PyCFunction) gentype_dumps, 1, NULL},
+    {"dump", (PyCFunction) gentype_dump, 1, NULL},
+
+    /* Methods for array */
+    {"fill", (PyCFunction)gentype_fill,
+     METH_VARARGS, NULL},
+    {"transpose", (PyCFunction)gentype_transpose,
+     METH_VARARGS, NULL},
+    {"take", (PyCFunction)gentype_take,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"put", (PyCFunction)gentype_put,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"putmask", (PyCFunction)gentype_putmask,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"repeat", (PyCFunction)gentype_repeat,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"choose", (PyCFunction)gentype_choose,
+     METH_VARARGS, NULL},
+    {"sort", (PyCFunction)gentype_sort,
+     METH_VARARGS, NULL},
+    {"argsort", (PyCFunction)gentype_argsort,
+     METH_VARARGS, NULL},
+    {"searchsorted", (PyCFunction)gentype_searchsorted,
+     METH_VARARGS, NULL},
+    {"argmax", (PyCFunction)gentype_argmax,
+     METH_VARARGS, NULL},
+    {"argmin", (PyCFunction)gentype_argmin,
+     METH_VARARGS, NULL},
+    {"reshape", (PyCFunction)gentype_reshape,
+     METH_VARARGS, NULL},
+    {"squeeze", (PyCFunction)gentype_squeeze,
+     METH_VARARGS, NULL},
+    {"view", (PyCFunction)gentype_view,
+     METH_VARARGS, NULL},
+    {"swapaxes", (PyCFunction)gentype_swapaxes,
+     METH_VARARGS, NULL},
+    {"max", (PyCFunction)gentype_max,
+     METH_VARARGS, NULL},
+    {"min", (PyCFunction)gentype_min,
+     METH_VARARGS, NULL},
+    {"ptp", (PyCFunction)gentype_ptp,
+     METH_VARARGS, NULL},
+    {"mean", (PyCFunction)gentype_mean,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"trace", (PyCFunction)gentype_trace,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"diagonal", (PyCFunction)gentype_diagonal,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"clip", (PyCFunction)gentype_clip,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"conj", (PyCFunction)gentype_conj,
+     METH_VARARGS, NULL},
+    {"conjugate", (PyCFunction)gentype_conjugate,
+     METH_VARARGS, NULL},
+    {"nonzero", (PyCFunction)gentype_nonzero,
+     METH_VARARGS, NULL},
+    {"std", (PyCFunction)gentype_std,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"var", (PyCFunction)gentype_var,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"sum", (PyCFunction)gentype_sum,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"cumsum", (PyCFunction)gentype_cumsum,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"prod", (PyCFunction)gentype_prod,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"cumprod", (PyCFunction)gentype_cumprod,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"all", (PyCFunction)gentype_all,
+     METH_VARARGS, NULL},
+    {"any", (PyCFunction)gentype_any,
+     METH_VARARGS, NULL},
+    {"compress", (PyCFunction)gentype_compress,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"flatten", (PyCFunction)gentype_flatten,
+     METH_VARARGS, NULL},
+    {"ravel", (PyCFunction)gentype_ravel,
+     METH_VARARGS, NULL},
+    {"setflags", (PyCFunction)gentype_setflags,
+     METH_VARARGS|METH_KEYWORDS, NULL},
+    {"newbyteorder", (PyCFunction)gentype_newbyteorder,
+     METH_VARARGS, NULL},
+    {NULL, NULL}           /* sentinel */
+};
+
+
+/* Extra attributes for void scalars: flags come from the instance
+   (void scalars can be writeable), dtypedescr from the stored descr. */
+static PyGetSetDef voidtype_getsets[] = {
+    {"flags",
+     (getter)voidtype_flags_get,
+     (setter)0,
+     "integer value of flags"},
+    {"dtypedescr",
+     (getter)voidtype_dtypedescr_get,
+     (setter)0,
+     "dtypedescr object"},
+    {NULL, NULL}
+};
+
+/* Void scalars override getfield/setfield to handle byte order and
+   writeable field storage. */
+static PyMethodDef voidtype_methods[] = {
+    {"getfield", (PyCFunction)voidtype_getfield,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"setfield", (PyCFunction)voidtype_setfield,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {NULL, NULL}
+};
+
+/************* As_mapping functions for void array scalar ************/
+
+
+/* len() of a void scalar: the number of fields (0 when unstructured).
+   The ordered field-name list is stored in the fields dict under the
+   integer key -1. */
+static int
+voidtype_length(PyVoidScalarObject *self)
+{
+    if (!self->descr->fields || self->descr->fields == Py_None) {
+        return 0;
+    }
+    else { /* return the number of fields */
+        PyObject *key;
+        PyObject *flist;
+        key = PyInt_FromLong(-1);
+        flist = PyDict_GetItem(self->descr->fields, key);
+        Py_DECREF(key);
+        if (!flist) return 0;
+        return PyList_GET_SIZE(flist);
+    }
+}
+
+/* get field by name or number */
+/* mp_subscript: look a field up by name (string/unicode key) or by
+ * integer position in the ordered field list (dict key -1), with
+ * negative indices counted from the end.
+ *
+ * Fix: the original set m = 0 when the ordered field list was missing
+ * but then unconditionally called PyList_GET_SIZE(flist), dereferencing
+ * NULL; a missing list is now treated as an invalid index.
+ */
+static PyObject *
+voidtype_subscript(PyVoidScalarObject *self, PyObject *ind)
+{
+    int n, m;
+    char *msg = "invalid index";
+    PyObject *flist=NULL, *key, *fieldinfo;
+
+    if (!self->descr->fields || self->descr->fields == Py_None) {
+        PyErr_SetString(PyExc_IndexError,
+                        "can't index void scalar without fields");
+        return NULL;
+    }
+
+    if (PyString_Check(ind) || PyUnicode_Check(ind)) {
+        /* look up in fields */
+        fieldinfo = PyDict_GetItem(self->descr->fields, ind);
+        if (!fieldinfo) {
+            PyErr_SetString(PyExc_IndexError, msg);
+            return NULL;
+        }
+        return voidtype_getfield(self, fieldinfo, NULL);
+    }
+
+    /* try to convert it to a number */
+    n = PyArray_PyIntAsIntp(ind);
+    if (error_converting(n)) {
+        PyErr_Clear();
+        goto fail;
+    }
+    /* ordered field-name list lives under integer key -1 */
+    key = PyInt_FromLong(-1);
+    flist = PyDict_GetItem(self->descr->fields, key);
+    Py_DECREF(key);
+    if (!flist) goto fail;
+    m = PyList_GET_SIZE(flist);
+    if (n < 0) n += m;
+    if (n < 0 || n >= m) goto fail;
+    fieldinfo = PyDict_GetItem(self->descr->fields,
+                               PyList_GET_ITEM(flist, n));
+    return voidtype_getfield(self, fieldinfo, NULL);
+
+ fail:
+    PyErr_SetString(PyExc_IndexError, msg);
+    return NULL;
+}
+
+/* mp_ass_subscript: assign to a field selected by name or by integer
+ * position, routed through voidtype_setfield with a
+ * (value, dtype, offset) argument tuple.
+ *
+ * Fixes: (1) the numeric branch built `newtup` but then passed
+ * `fieldinfo` — the raw (dtype, offset) pair — to voidtype_setfield,
+ * so the assigned value never reached it; it now passes `newtup` like
+ * the name branch does.  (2) a missing ordered field list (dict key
+ * -1) previously fell through to PyList_GET_SIZE(NULL); it is now an
+ * invalid index.
+ */
+static int
+voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val)
+{
+    int n, m;
+    char *msg = "invalid index";
+    PyObject *flist=NULL, *key, *fieldinfo, *newtup;
+    PyObject *res;
+
+    if (!self->descr->fields || self->descr->fields == Py_None) {
+        PyErr_SetString(PyExc_IndexError,
+                        "can't index void scalar without fields");
+        return -1;
+    }
+
+    if (PyString_Check(ind) || PyUnicode_Check(ind)) {
+        /* look up in fields */
+        fieldinfo = PyDict_GetItem(self->descr->fields, ind);
+        if (!fieldinfo) {
+            PyErr_SetString(PyExc_IndexError, msg);
+            return -1;
+        }
+        newtup = Py_BuildValue("(OOO)", val,
+                               PyTuple_GET_ITEM(fieldinfo, 0),
+                               PyTuple_GET_ITEM(fieldinfo, 1));
+        res = voidtype_setfield(self, newtup, NULL);
+        Py_DECREF(newtup);
+        if (!res) return -1;
+        Py_DECREF(res);
+        return 0;
+    }
+
+    /* try to convert it to a number */
+    n = PyArray_PyIntAsIntp(ind);
+    if (error_converting(n)) {
+        PyErr_Clear();
+        goto fail;
+    }
+    key = PyInt_FromLong(-1);
+    flist = PyDict_GetItem(self->descr->fields, key);
+    Py_DECREF(key);
+    if (!flist) goto fail;
+    m = PyList_GET_SIZE(flist);
+    if (n < 0) n += m;
+    if (n < 0 || n >= m) goto fail;
+    fieldinfo = PyDict_GetItem(self->descr->fields,
+                               PyList_GET_ITEM(flist, n));
+    newtup = Py_BuildValue("(OOO)", val,
+                           PyTuple_GET_ITEM(fieldinfo, 0),
+                           PyTuple_GET_ITEM(fieldinfo, 1));
+    res = voidtype_setfield(self, newtup, NULL);
+    Py_DECREF(newtup);
+    if (!res) return -1;
+    Py_DECREF(res);
+    return 0;
+
+ fail:
+    PyErr_SetString(PyExc_IndexError, msg);
+    return -1;
+}
+
+/* Mapping protocol for void scalars: field access by name/number. */
+static PyMappingMethods voidtype_as_mapping = {
+    (inquiry)voidtype_length,              /*mp_length*/
+    (binaryfunc)voidtype_subscript,        /*mp_subscript*/
+    (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/
+};
+
+
+/* bf_getreadbuffer: expose the scalar's raw bytes.  For flexible
+   scalars the pointer comes from the underlying Python string/unicode
+   storage or the void buffer; for fixed-size scalars it points at the
+   value field located via _SOFFSET_.  Returns the byte count. */
+static int
+gentype_getreadbuf(PyObject *self, int segment, void **ptrptr)
+{
+    int numbytes;
+    PyArray_Descr *outcode;
+
+    if (segment != 0) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Accessing non-existent array segment");
+        return -1;
+    }
+
+    outcode = PyArray_DescrFromScalar(self);
+    numbytes = outcode->elsize;
+    if PyArray_IsScalar(self, Flexible) {
+        if PyArray_IsScalar(self, String)
+            *ptrptr = PyString_AS_STRING(self);
+        else if PyArray_IsScalar(self, Unicode)
+            *ptrptr = (char *)PyUnicode_AS_DATA(self);
+        else if PyArray_IsScalar(self, Void)
+            *ptrptr = ((PyVoidScalarObject *)self)->obval;
+    }
+    else
+        *ptrptr = (void *)_SOFFSET_(self, outcode->type_num);
+
+    Py_DECREF(outcode);
+    return numbytes;
+}
+
+/* bf_getsegcount: one segment whose length is the element size. */
+static int
+gentype_getsegcount(PyObject *self, int *lenp)
+{
+    PyArray_Descr *outcode;
+
+    outcode = PyArray_DescrFromScalar(self);
+    if (lenp)
+        *lenp = outcode->elsize;
+    Py_DECREF(outcode);
+    return 1;
+}
+
+/* bf_getcharbuffer: only string and unicode scalars may be read as a
+   character buffer. */
+static int
+gentype_getcharbuf(PyObject *self, int segment, const char **ptrptr)
+{
+    if (PyArray_IsScalar(self, String) || \
+        PyArray_IsScalar(self, Unicode))
+        return gentype_getreadbuf(self, segment, (void **)ptrptr);
+    else {
+        PyErr_SetString(PyExc_TypeError,
+                        "Non-character array cannot be interpreted "\
+                        "as character buffer.");
+        return -1;
+    }
+}
+
+
+/* Read-only buffer protocol table for array scalars. */
+static PyBufferProcs gentype_as_buffer = {
+    (getreadbufferproc)gentype_getreadbuf, /*bf_getreadbuffer*/
+    (getwritebufferproc)0,                 /*bf_getwritebuffer*/
+    (getsegcountproc)gentype_getsegcount,  /*bf_getsegcount*/
+    (getcharbufferproc)gentype_getcharbuf, /*bf_getcharbuffer*/
+};
+
+
+/* Type flags: base scalar types may be subclassed, leaf types not. */
+#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES
+#define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
+
+/* Root type of the array-scalar hierarchy; remaining slots are filled
+   in at module initialization. */
+static PyTypeObject PyGenericArrType_Type = {
+    PyObject_HEAD_INIT(NULL)
+    0,                                     /*ob_size*/
+    "generic_arrtype",                     /*tp_name*/
+    sizeof(PyObject),                      /*tp_basicsize*/
+};
+
+/* tp_dealloc for unicode scalars: free the separately allocated value
+   buffer, then the object.
+   NOTE(review): this casts to PyVoidScalarObject to reach obval even
+   though it handles unicode scalars — confirm the obval field offset
+   matches the unicode scalar layout. */
+static void
+unicode_dealloc(PyObject *v)
+{
+    PyDataMem_FREE(((PyVoidScalarObject *)v)->obval);
+    v->ob_type->tp_free(v);
+}
+
+/* tp_dealloc for void scalars: free owned data, drop the descr and
+   base references, then the object. */
+static void
+void_dealloc(PyVoidScalarObject *v)
+{
+    if (v->flags & OWNDATA)
+        PyDataMem_FREE(v->obval);
+    Py_XDECREF(v->descr);
+    Py_XDECREF(v->base);
+    v->ob_type->tp_free(v);
+}
+
+/* tp_dealloc for object scalars: release the wrapped object. */
+static void
+object_arrtype_dealloc(PyObject *v)
+{
+    Py_XDECREF(((PyObjectScalarObject *)v)->obval);
+    v->ob_type->tp_free(v);
+}
+
+/* string and unicode inherit from Python Type first and so GET_ITEM is different to
+   get to the Python Type.
+ */
+
+/**begin repeat
+#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, object#
+#TYPE=BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, OBJECT#
+#num=1*16,0,0,1#
+*/
+/* tp_new for @name@ scalars: when the type dual-inherits from a
+   Python builtin, let the builtin's tp_new try the conversion first
+   (the @num@ index selects which base is the Python type); otherwise
+   force-cast the single argument through PyArray_FromAny and return
+   the resulting scalar via PyArray_Return. */
+static PyObject *
+@name@_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+    PyObject *obj=NULL;
+    PyObject *arr;
+    PyArray_Descr *typecode;
+
+    if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) {
+        PyTypeObject *sup;
+        PyObject *ret;
+        /* We are inheriting from a Python type as well so
+           give it first dibs on conversion */
+        sup = (PyTypeObject *)PyTuple_GET_ITEM(type->tp_bases, @num@);
+        ret = sup->tp_new(type, args, kwds);
+        if (ret) return ret;
+        PyErr_Clear();
+        /* now do default conversion */
+    }
+
+    if (!PyArg_ParseTuple(args, "O", &obj)) return NULL;
+
+    typecode = PyArray_DescrFromType(PyArray_@TYPE@);
+    arr = PyArray_FromAny(obj, typecode, 0, 0, FORCECAST);
+    return PyArray_Return((PyArrayObject *)arr);
+}
+/**end repeat**/
+
+/* bool->tp_new only returns Py_True or Py_False */
+/* Coerce the single argument to a boolean 0-d array and return the
+   corresponding scalar (PyArray_Return collapses 0-d to a scalar). */
+static PyObject *
+bool_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyObject *obj=NULL;
+ PyObject *arr;
+
+ if (!PyArg_ParseTuple(args, "O", &obj)) return NULL;
+
+ arr = PyArray_FROM_OTF(obj, PyArray_BOOL, FORCECAST);
+ return PyArray_Return((PyArrayObject *)arr);
+}
+
+/* tp_new for void scalars.  An integer argument is treated as a size:
+   a zero-filled buffer of that many bytes becomes a new owning void
+   scalar.  Any other argument is coerced through PyArray_FROM_OTF.
+   NOTE(review): the nb_long call assumes every accepted integer-like
+   object provides tp_as_number->nb_long -- confirm for 0-d arrays. */
+static PyObject *
+void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyObject *obj, *arr;
+ ulonglong memu=1;
+ PyObject *new=NULL;
+ char *destptr;
+
+ if (!PyArg_ParseTuple(args, "O", &obj)) return NULL;
+ /* For a VOID scalar first see if obj is an integer or long
+ and create new memory of that size (filled with 0) for the scalar
+ */
+
+ if (PyLong_Check(obj) || PyInt_Check(obj) || \
+ PyArray_IsScalar(obj, Integer) ||
+ (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && \
+ PyArray_ISINTEGER(obj))) {
+ new = obj->ob_type->tp_as_number->nb_long(obj);
+ }
+ if (new && PyLong_Check(new)) {
+ PyObject *ret;
+ memu = PyLong_AsUnsignedLongLong(new);
+ Py_DECREF(new);
+ /* reject conversion errors and sizes too large for an int */
+ if (PyErr_Occurred() || (memu > MAX_INT)) {
+ PyErr_Clear();
+ PyErr_Format(PyExc_OverflowError,
+ "size must be smaller than %d",
+ (int) MAX_INT);
+ return NULL;
+ }
+ destptr = PyDataMem_NEW((int) memu);
+ if (destptr == NULL) return PyErr_NoMemory();
+ ret = type->tp_alloc(type, 0);
+ if (ret == NULL) {
+ PyDataMem_FREE(destptr);
+ return PyErr_NoMemory();
+ }
+ /* scalar owns destptr (OWNDATA) and a fresh VOID descr whose
+ elsize matches the requested byte count */
+ ((PyVoidScalarObject *)ret)->obval = destptr;
+ ((PyVoidScalarObject *)ret)->ob_size = (int) memu;
+ ((PyVoidScalarObject *)ret)->descr = \
+ PyArray_DescrNewFromType(PyArray_VOID);
+ ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu;
+ ((PyVoidScalarObject *)ret)->flags = BEHAVED_FLAGS | OWNDATA;
+ ((PyVoidScalarObject *)ret)->base = NULL;
+ memset(destptr, '\0', (size_t) memu);
+ return ret;
+ }
+
+ arr = PyArray_FROM_OTF(obj, PyArray_VOID, FORCECAST);
+ return PyArray_Return((PyArrayObject *)arr);
+}
+
+
+/**************** Define Hash functions ********************/
+
+/**begin repeat
+#lname=bool,ubyte,ushort#
+#name=Bool,UByte, UShort#
+ */
+/* Hash for small unsigned scalars: the value itself (can never be the
+   reserved error value -1 since it is non-negative and fits in long). */
+static long
+@lname@_arrtype_hash(PyObject *obj)
+{
+ return (long)(((Py@name@ScalarObject *)obj)->obval);
+}
+/**end repeat**/
+
+/**begin repeat
+#lname=byte,short,uint,ulong#
+#name=Byte,Short,UInt,ULong#
+ */
+/* Hash for scalars that may produce -1: CPython reserves -1 as the
+   hash-protocol error return, so remap it to -2. */
+static long
+@lname@_arrtype_hash(PyObject *obj)
+{
+ long x = (long)(((Py@name@ScalarObject *)obj)->obval);
+ if (x == -1) x=-2;
+ return x;
+}
+/**end repeat**/
+
+#if SIZEOF_INT != SIZEOF_LONG
+/* Needed only when int scalars do not inherit from Python int. */
+static long
+int_arrtype_hash(PyObject *obj)
+{
+ long x = (long)(((PyIntScalarObject *)obj)->obval);
+ if (x == -1) x=-2;
+ return x;
+}
+#endif
+
+/**begin repeat
+#char=,u#
+#Char=,U#
+#ext=&& (x >= LONG_MIN),#
+*/
+#if SIZEOF_LONG != SIZEOF_LONGLONG
+/* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */
+/* Hash for (u)longlong: use the value directly when it fits in a long;
+   otherwise mix the two long-sized halves as CPython does for tuples.
+   -1 is remapped to -2 (reserved error return). */
+static long
+@char@longlong_arrtype_hash(PyObject *obj)
+{
+ long y;
+ @char@longlong x = (((Py@Char@LongLongScalarObject *)obj)->obval);
+
+ if ((x <= LONG_MAX)@ext@) {
+ y = (long) x;
+ }
+ else {
+ union Mask {
+ long hashvals[2];
+ @char@longlong v;
+ } both;
+
+ both.v = x;
+ y = both.hashvals[0] + (1000003)*both.hashvals[1];
+ }
+ if (y == -1) y = -2;
+ return y;
+}
+#endif
+/**end repeat**/
+
+#if SIZEOF_LONG==SIZEOF_LONGLONG
+/* When long and longlong coincide, ulonglong still needs its own hash
+   (the signed variant inherits from Python int in that configuration). */
+static long
+ulonglong_arrtype_hash(PyObject *obj)
+{
+ long x = (long)(((PyULongLongScalarObject *)obj)->obval);
+ if (x == -1) x=-2;
+ return x;
+}
+#endif
+
+
+
+/* Wrong thing to do for longdouble, but....*/
+/* (the cast to double loses precision, so distinct longdoubles that
+   compare unequal may hash alike -- acceptable, never incorrect) */
+/**begin repeat
+#lname=float, longdouble#
+#name=Float, LongDouble#
+ */
+static long
+@lname@_arrtype_hash(PyObject *obj)
+{
+ return _Py_HashDouble((double) ((Py@name@ScalarObject *)obj)->obval);
+}
+
+/* borrowed from complex_hash */
+/* Combine real/imag hashes the same way CPython's complex type does,
+   propagating -1 as the error indicator. */
+static long
+c@lname@_arrtype_hash(PyObject *obj)
+{
+ long hashreal, hashimag, combined;
+ hashreal = _Py_HashDouble((double) \
+ (((PyC@name@ScalarObject *)obj)->obval).real);
+
+ if (hashreal == -1) return -1;
+ hashimag = _Py_HashDouble((double) \
+ (((PyC@name@ScalarObject *)obj)->obval).imag);
+ if (hashimag == -1) return -1;
+
+ combined = hashreal + 1000003 * hashimag;
+ if (combined == -1) combined = -2;
+ return combined;
+}
+/**end repeat**/
+
+/* Object scalars delegate to the wrapped object's own hash. */
+static long
+object_arrtype_hash(PyObject *obj)
+{
+ return PyObject_Hash(((PyObjectScalarObject *)obj)->obval);
+}
+
+/* just hash the pointer */
+static long
+void_arrtype_hash(PyObject *obj)
+{
+ return _Py_HashPointer((void *)(((PyVoidScalarObject *)obj)->obval));
+}
+
+
+/*object arrtype getattro and setattro */
+/* Attribute access on object scalars: try the scalar type itself first;
+   if that fails, clear the error and forward to the wrapped object. */
+static PyObject *
+object_getattro(PyObjectScalarObject *obj, PyObject *attr) {
+ PyObject *res;
+ /* first look in generic type and then hand off to actual object */
+
+ res = PyObject_GenericGetAttr((PyObject *)obj, attr);
+ if (res) return res;
+ PyErr_Clear();
+ return PyObject_GenericGetAttr(obj->obval, attr);
+}
+
+/* Attribute assignment: same two-step delegation as object_getattro. */
+static int
+object_setattro(PyObjectScalarObject *obj, PyObject *attr, PyObject *val) {
+ int res;
+ /* first look in generic type and then hand off to actual object */
+
+ res = PyObject_GenericSetAttr((PyObject *)obj, attr, val);
+ if (res >= 0) return res;
+ PyErr_Clear();
+ return PyObject_GenericSetAttr(obj->obval, attr, val);
+}
+
+
+/* Statically declared scalar type objects.  Only name and size are set
+   here; all behavior slots are filled in by initialize_numeric_types(). */
+/**begin repeat
+#name=bool, string, unicode, void, object#
+#NAME=Bool, String, Unicode, Void, Object#
+*/
+static PyTypeObject Py@NAME@ArrType_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "@name@_arrtype", /*tp_name*/
+ sizeof(Py@NAME@ScalarObject), /*tp_basicsize*/
+};
+/**end repeat**/
+
+/**begin repeat
+#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble#
+#name=int*5, uint*5, float*3, complex*3#
+#CNAME=(CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE#
+*/
+/* Numeric scalar types embed their bit width in the type name. */
+static PyTypeObject Py@NAME@ArrType_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "@name@" STRBITSOF_@CNAME@ "_arrtype", /*tp_name*/
+ sizeof(Py@NAME@ScalarObject), /*tp_basicsize*/
+};
+
+/**end repeat**/
+
+
+/* Filled in at init time; longdouble types need distinct number methods
+   because their getitem does not return a normal Python type. */
+static PyNumberMethods longdoubletype_as_number;
+static PyNumberMethods clongdoubletype_as_number;
+
+
+/* One-time setup of every scalar type object: installs dealloc, number/
+   buffer protocols, hash, tp_new, and flags that could not be declared
+   statically.  Must run before any scalar type is used or readied. */
+static void
+initialize_numeric_types(void)
+{
+ PyGenericArrType_Type.tp_dealloc = (destructor)gentype_dealloc;
+ PyGenericArrType_Type.tp_as_number = &gentype_as_number;
+ PyGenericArrType_Type.tp_as_buffer = &gentype_as_buffer;
+ PyGenericArrType_Type.tp_flags = BASEFLAGS;
+ PyGenericArrType_Type.tp_methods = gentype_methods;
+ PyGenericArrType_Type.tp_getset = gentype_getsets;
+ PyGenericArrType_Type.tp_new = NULL;
+ PyGenericArrType_Type.tp_alloc = gentype_alloc;
+ PyGenericArrType_Type.tp_free = _pya_free;
+ PyGenericArrType_Type.tp_repr = gentype_repr;
+ PyGenericArrType_Type.tp_str = gentype_str;
+ PyGenericArrType_Type.tp_richcompare = gentype_richcompare;
+
+ /* string scalars use the Python string type's alloc/free */
+ PyStringArrType_Type.tp_alloc = NULL;
+ PyStringArrType_Type.tp_free = NULL;
+
+ PyVoidArrType_Type.tp_methods = voidtype_methods;
+ PyVoidArrType_Type.tp_getset = voidtype_getsets;
+ PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping;
+
+ /* abstract category types are subclassable */
+ /**begin repeat
+#NAME=Numeric, Integer, SignedInteger, UnsignedInteger, Inexact, Floating,
+ComplexFloating, Flexible, Character#
+ */
+ Py@NAME@ArrType_Type.tp_flags = BASEFLAGS;
+ /**end repeat**/
+
+ /* concrete leaf types: no subclassing, per-type constructor */
+ /**begin repeat
+#name=bool, byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, void, object#
+#NAME=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, String, Unicode, Void, Object#
+ */
+ Py@NAME@ArrType_Type.tp_flags = LEAFFLAGS;
+ Py@NAME@ArrType_Type.tp_new = @name@_arrtype_new;
+ Py@NAME@ArrType_Type.tp_richcompare = gentype_richcompare;
+ /**end repeat**/
+ /* Allow the Void type to be subclassed -- for adding new types */
+ PyVoidArrType_Type.tp_flags = BASEFLAGS;
+
+ /* install the explicit hash functions defined above */
+ /**begin repeat
+#name=bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, float, longdouble, cfloat, clongdouble, void, object#
+#NAME=Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, Float, LongDouble, CFloat, CLongDouble, Void, Object#
+ */
+ Py@NAME@ArrType_Type.tp_hash = @name@_arrtype_hash;
+ /**end repeat**/
+
+#if SIZEOF_INT != SIZEOF_LONG
+ /* We won't be inheriting from Python Int type. */
+ PyIntArrType_Type.tp_hash = int_arrtype_hash;
+#endif
+
+#if SIZEOF_LONG != SIZEOF_LONGLONG
+ /* We won't be inheriting from Python Int type. */
+ PyLongLongArrType_Type.tp_hash = longlong_arrtype_hash;
+#endif
+
+ /* These need to be coded specially because getitem does not
+ return a normal Python type
+ */
+ PyLongDoubleArrType_Type.tp_as_number = &longdoubletype_as_number;
+ PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number;
+
+ /**begin repeat
+#name=int, long, hex, oct, float, repr, str#
+#kind=tp_as_number->nb*5, tp*2#
+ */
+ PyLongDoubleArrType_Type.@kind@_@name@ = longdoubletype_@name@;
+ PyCLongDoubleArrType_Type.@kind@_@name@ = clongdoubletype_@name@;
+ /**end repeat**/
+
+ PyStringArrType_Type.tp_itemsize = sizeof(char);
+ PyVoidArrType_Type.tp_dealloc = (destructor) void_dealloc;
+ PyUnicodeArrType_Type.tp_dealloc = unicode_dealloc;
+ PyObjectArrType_Type.tp_dealloc = object_arrtype_dealloc;
+ PyObjectArrType_Type.tp_getattro = (getattrofunc) object_getattro;
+ PyObjectArrType_Type.tp_setattro = (setattrofunc) object_setattro;
+
+
+
+ PyArrayIter_Type.tp_iter = PyObject_SelfIter;
+ PyArrayMapIter_Type.tp_iter = PyObject_SelfIter;
+
+ /* record the byte offset of obval within each scalar struct so generic
+ code can extract the value without knowing the concrete type */
+/**begin repeat
+#name=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, Object,#
+#num=BOOL, BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, NTYPES#
+**/
+ PyArrayScalar_Offset[PyArray_@num@] = (int) offsetof(Py@name@ScalarObject, obval);
+/**end repeat**/
+}
+
+
+/* the order of this table is important */
+/* (it must match the PyArray_* type-number enumeration, since
+ _typenum_fromtypeobj uses the index as the type number) */
+static PyTypeObject *typeobjects[] = {
+ &PyBoolArrType_Type,
+ &PyByteArrType_Type,
+ &PyUByteArrType_Type,
+ &PyShortArrType_Type,
+ &PyUShortArrType_Type,
+ &PyIntArrType_Type,
+ &PyUIntArrType_Type,
+ &PyLongArrType_Type,
+ &PyULongArrType_Type,
+ &PyLongLongArrType_Type,
+ &PyULongLongArrType_Type,
+ &PyFloatArrType_Type,
+ &PyDoubleArrType_Type,
+ &PyLongDoubleArrType_Type,
+ &PyCFloatArrType_Type,
+ &PyCDoubleArrType_Type,
+ &PyCLongDoubleArrType_Type,
+ &PyObjectArrType_Type,
+ &PyStringArrType_Type,
+ &PyUnicodeArrType_Type,
+ &PyVoidArrType_Type
+};
+
+/* Map a scalar type object to its type number.  Scans the builtin table
+   first; when 'user' is nonzero also scans registered user-defined types
+   (which win if matched).  Returns PyArray_NOTYPE when not found. */
+static int
+_typenum_fromtypeobj(PyObject *type, int user)
+{
+ int typenum, i;
+
+ typenum = PyArray_NOTYPE;
+ i = 0;
+ while(i < PyArray_NTYPES) {
+ if (type == (PyObject *)typeobjects[i]) {
+ typenum = i;
+ break;
+ }
+ i++;
+ }
+
+ if (!user) return typenum;
+
+ /* Search any registered types */
+ i = 0;
+ while (i < PyArray_NUMUSERTYPES) {
+ if (type == (PyObject *)(userdescrs[i]->typeobj)) {
+ typenum = i + PyArray_USERDEF;
+ break;
+ }
+ i++;
+ }
+ return typenum;
+}
+
+/* new reference */
+/* Build a descriptor from a scalar type object: exact builtin types map
+   straight to their type number; abstract category types map to a default
+   concrete type; anything else is treated as a VOID subclass whose layout
+   may be refined by a dtypedescr-style attribute on the type.
+   NOTE(review): on the 'finish' path new->fields is INCREF'd after being
+   copied from conv -- presumably conv->fields is never NULL there;
+   confirm against _arraydescr_fromobj. */
+static PyArray_Descr *
+PyArray_DescrFromTypeObject(PyObject *type)
+{
+ int typenum;
+ PyArray_Descr *new, *conv=NULL;
+
+ /* if it's a builtin type, then use the typenumber */
+ typenum = _typenum_fromtypeobj(type,1);
+ if (typenum != PyArray_NOTYPE) {
+ new = PyArray_DescrFromType(typenum);
+ if (PyTypeNum_ISUSERDEF(typenum)) goto finish;
+ return new;
+ }
+
+ /* Check the generic types */
+ if ((type == (PyObject *) &PyNumericArrType_Type) || \
+ (type == (PyObject *) &PyInexactArrType_Type) || \
+ (type == (PyObject *) &PyFloatingArrType_Type))
+ typenum = PyArray_DOUBLE;
+ else if (type == (PyObject *)&PyComplexFloatingArrType_Type)
+ typenum = PyArray_CDOUBLE;
+ else if ((type == (PyObject *)&PyIntegerArrType_Type) || \
+ (type == (PyObject *)&PySignedIntegerArrType_Type))
+ typenum = PyArray_LONG;
+ else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type)
+ typenum = PyArray_ULONG;
+ else if (type == (PyObject *) &PyCharacterArrType_Type)
+ typenum = PyArray_STRING;
+ else if ((type == (PyObject *) &PyGenericArrType_Type) || \
+ (type == (PyObject *) &PyFlexibleArrType_Type))
+ typenum = PyArray_VOID;
+
+ if (typenum != PyArray_NOTYPE) {
+ return PyArray_DescrFromType(typenum);
+ }
+
+ /* Otherwise --- type is a sub-type of an array scalar
+ currently only VOID allows it -- use it as the type-object.
+ */
+ /* look for a dtypedescr attribute */
+ new = PyArray_DescrNewFromType(PyArray_VOID);
+
+ finish:
+ /* copy layout (fields/elsize/subarray) from any descriptor the type
+ object itself can be converted to, then record the type object */
+ conv = _arraydescr_fromobj(type);
+ if (conv) {
+ new->fields = conv->fields;
+ Py_INCREF(new->fields);
+ new->elsize = conv->elsize;
+ new->subarray = conv->subarray;
+ conv->subarray = NULL;
+ Py_DECREF(conv);
+ }
+ Py_DECREF(new->typeobj);
+ new->typeobj = (PyTypeObject *)type;
+ Py_INCREF(type);
+ return new;
+}
+
+/* New reference */
+/*OBJECT_API
+ Return descr object from array scalar.
+*/
+/* Void scalars carry their own descriptor; all others derive one from
+   their type object.  Flexible types get elsize filled in from the
+   actual scalar (string/unicode length, or void size plus any fields). */
+static PyArray_Descr *
+PyArray_DescrFromScalar(PyObject *sc)
+{
+ int type_num;
+ PyArray_Descr *descr;
+
+ if PyArray_IsScalar(sc, Void) {
+ descr = ((PyVoidScalarObject *)sc)->descr;
+ Py_INCREF(descr);
+ return descr;
+ }
+ descr = PyArray_DescrFromTypeObject((PyObject *)sc->ob_type);
+ if (descr->elsize == 0) {
+ /* copy-on-write: get a private descr before mutating elsize */
+ PyArray_DESCR_REPLACE(descr);
+ type_num = descr->type_num;
+ if (type_num == PyArray_STRING)
+ descr->elsize = PyString_GET_SIZE(sc);
+ else if (type_num == PyArray_UNICODE)
+ descr->elsize = PyUnicode_GET_DATA_SIZE(sc);
+ else {
+ descr->elsize = \
+ ((PyVoidScalarObject *)sc)->ob_size;
+ descr->fields = PyObject_GetAttrString(sc, "fields");
+ /* keep fields only when it is a real dict */
+ if (!descr->fields || !PyDict_Check(descr->fields) || \
+ (descr->fields == Py_None)) {
+ Py_XDECREF(descr->fields);
+ descr->fields = NULL;
+ }
+ PyErr_Clear();
+ }
+ }
+ return descr;
+}
+
+/* New reference */
+/*OBJECT_API
+ Get a typeobject from a type-number
+*/
+/* Look up the descriptor for 'type' and return a new reference to its
+   scalar type object. */
+static PyObject *
+PyArray_TypeObjectFromType(int type)
+{
+ PyArray_Descr *descr;
+ PyObject *obj;
+
+ descr = PyArray_DescrFromType(type);
+ if (descr == NULL) return NULL;
+ Py_INCREF((PyObject *)descr->typeobj);
+ obj = (PyObject *)descr->typeobj;
+ Py_DECREF(descr);
+ return obj;
+}
+
diff --git a/numpy/base/src/ufuncobject.c b/numpy/base/src/ufuncobject.c
new file mode 100644
index 000000000..47aad4828
--- /dev/null
+++ b/numpy/base/src/ufuncobject.c
@@ -0,0 +1,3145 @@
+
+/*
+ Python Universal Functions Object -- Math for all types, plus fast
+ arrays math
+
+ Full description
+
+ This supports mathematical (and Boolean) functions on arrays and other python
+ objects. Math on large arrays of basic C types is rather efficient.
+
+ Travis E. Oliphant (2005)
+ Assistant Professor
+ Brigham Young University
+
+ based on the
+
+ Original Implementation:
+ Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu
+
+ with inspiration and code from
+ Numarray
+ Space Science Telescope Institute
+ J. Todd Miller
+ Perry Greenfield
+ Rick White
+
+*/
+
+
+/* Signatures of the raw C math kernels wrapped by the generic 1-d loops
+   below.  Real kernels return by value; complex kernels write *res. */
+typedef double (DoubleBinaryFunc)(double x, double y);
+typedef float (FloatBinaryFunc)(float x, float y);
+typedef longdouble (LongdoubleBinaryFunc)(longdouble x, longdouble y);
+
+typedef void (CdoubleBinaryFunc)(cdouble *x, cdouble *y, cdouble *res);
+typedef void (CfloatBinaryFunc)(cfloat *x, cfloat *y, cfloat *res);
+typedef void (ClongdoubleBinaryFunc)(clongdouble *x, clongdouble *y, \
+ clongdouble *res);
+
+/*UFUNC_API*/
+/* float,float->float loop implemented by promoting through a double
+   kernel (used when no native float kernel exists). */
+static void
+PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n=dimensions[0];
+ register intp is1=steps[0],is2=steps[1],os=steps[2];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ *(float *)op = (float)((DoubleBinaryFunc *)func) \
+ ((double)*(float *)ip1, (double)*(float *)ip2);
+ }
+}
+
+/*UFUNC_API*/
+/* float,float->float loop over a native float kernel. */
+static void
+PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n=dimensions[0];
+ register intp is1=steps[0],is2=steps[1],os=steps[2];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ *(float *)op = ((FloatBinaryFunc *)func)(*(float *)ip1,
+ *(float *)ip2);
+ }
+}
+
+/*UFUNC_API*/
+/* double,double->double loop. */
+static void
+PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n=dimensions[0];
+ register intp is1=steps[0],is2=steps[1],os=steps[2];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ *(double *)op = ((DoubleBinaryFunc *)func)\
+ (*(double *)ip1, *(double *)ip2);
+ }
+}
+
+/*UFUNC_API*/
+/* longdouble,longdouble->longdouble loop. */
+static void
+PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n=dimensions[0];
+ register intp is1=steps[0],is2=steps[1],os=steps[2];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ *(longdouble *)op = \
+ ((LongdoubleBinaryFunc *)func)(*(longdouble *)ip1,
+ *(longdouble *)ip2);
+ }
+}
+
+
+/*UFUNC_API*/
+/* cfloat,cfloat->cfloat loop promoted through a cdouble kernel: inputs
+   are stored as adjacent (real,imag) pairs, so elements are read/written
+   through [0]/[1] indexing. */
+static void
+PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i,n=dimensions[0],is1=steps[0],is2=steps[1],os=steps[2];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+ cdouble x, y, r;
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ x.real = ((float *)ip1)[0]; x.imag = ((float *)ip1)[1];
+ y.real = ((float *)ip2)[0]; y.imag = ((float *)ip2)[1];
+ ((CdoubleBinaryFunc *)func)(&x, &y, &r);
+ ((float *)op)[0] = (float)r.real;
+ ((float *)op)[1] = (float)r.imag;
+ }
+}
+
+/*UFUNC_API*/
+/* cdouble,cdouble->cdouble loop. */
+static void
+PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+ cdouble x,y,r;
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ x.real = ((double *)ip1)[0]; x.imag = ((double *)ip1)[1];
+ y.real = ((double *)ip2)[0]; y.imag = ((double *)ip2)[1];
+ ((CdoubleBinaryFunc *)func)(&x, &y, &r);
+ ((double *)op)[0] = r.real;
+ ((double *)op)[1] = r.imag;
+ }
+}
+
+/*UFUNC_API*/
+/* cfloat,cfloat->cfloat loop over a native cfloat kernel. */
+static void
+PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+ cfloat x,y,r;
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ x.real = ((float *)ip1)[0]; x.imag = ((float *)ip1)[1];
+ y.real = ((float *)ip2)[0]; y.imag = ((float *)ip2)[1];
+ ((CfloatBinaryFunc *)func)(&x, &y, &r);
+ ((float *)op)[0] = r.real;
+ ((float *)op)[1] = r.imag;
+ }
+}
+
+/*UFUNC_API*/
+/* clongdouble,clongdouble->clongdouble loop. */
+static void
+PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+ clongdouble x,y,r;
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ x.real = ((longdouble *)ip1)[0];
+ x.imag = ((longdouble *)ip1)[1];
+ y.real = ((longdouble *)ip2)[0];
+ y.imag = ((longdouble *)ip2)[1];
+ ((ClongdoubleBinaryFunc *)func)(&x, &y, &r);
+ ((longdouble *)op)[0] = r.real;
+ ((longdouble *)op)[1] = r.imag;
+ }
+}
+
+/*UFUNC_API*/
+/* object,object->object loop.  func is a binaryfunc, except PyNumber_Power
+   which needs a third (Py_None) argument.  Stops early (leaving remaining
+   outputs untouched) on NULL input or a raised exception; the old output
+   reference is dropped before storing the new one. */
+static void
+PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2], \
+ n=dimensions[0];
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+ PyObject *tmp;
+ PyObject *x1, *x2;
+
+ for(i=0; i<n; i++, ip1+=is1, ip2+=is2, op+=os) {
+ x1 = *((PyObject **)ip1);
+ x2 = *((PyObject **)ip2);
+ if ((x1 == NULL) || (x2 == NULL)) goto done;
+ if ( (void *) func == (void *) PyNumber_Power)
+ tmp = ((ternaryfunc)func)(x1, x2, Py_None);
+ else
+ tmp = ((binaryfunc)func)(x1, x2);
+ if (PyErr_Occurred()) goto done;
+ Py_XDECREF(*((PyObject **)op));
+ *((PyObject **)op) = tmp;
+ }
+ done:
+ return;
+}
+
+
+/* Unary kernel signatures matching the binary ones above. */
+typedef double DoubleUnaryFunc(double x);
+typedef float FloatUnaryFunc(float x);
+typedef longdouble LongdoubleUnaryFunc(longdouble x);
+typedef void CdoubleUnaryFunc(cdouble *x, cdouble *res);
+typedef void CfloatUnaryFunc(cfloat *x, cfloat *res);
+typedef void ClongdoubleUnaryFunc(clongdouble *x, clongdouble *res);
+
+/*UFUNC_API*/
+/* float->float loop promoted through a double kernel. */
+static void
+PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n=dimensions[0];
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<n; i++, ip1+=steps[0], op+=steps[1]) {
+ *(float *)op = (float)((DoubleUnaryFunc *)func)((double)*(float *)ip1);
+ }
+}
+
+/*UFUNC_API*/
+/* double->double loop. */
+static void
+PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i;
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
+ *(double *)op = ((DoubleUnaryFunc *)func)(*(double *)ip1);
+ }
+}
+
+/*UFUNC_API*/
+/* float->float loop over a native float kernel. */
+static void
+PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp n=dimensions[0];
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<n; i++, ip1+=steps[0], op+=steps[1]) {
+ *(float *)op = ((FloatUnaryFunc *)func)(*(float *)ip1);
+ }
+}
+
+/*UFUNC_API*/
+/* longdouble->longdouble loop. */
+static void
+PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp n=dimensions[0];
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<n; i++, ip1+=steps[0], op+=steps[1]) {
+ *(longdouble *)op = ((LongdoubleUnaryFunc *)func)\
+ (*(longdouble *)ip1);
+ }
+}
+
+
+/*UFUNC_API*/
+/* cfloat->cfloat loop promoted through a cdouble kernel; complex values
+   are stored as adjacent (real,imag) pairs. */
+static void
+PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i; cdouble x, res;
+ intp n=dimensions[0];
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<n; i++, ip1+=steps[0], op+=steps[1]) {
+ x.real = ((float *)ip1)[0]; x.imag = ((float *)ip1)[1];
+ ((CdoubleUnaryFunc *)func)(&x, &res);
+ ((float *)op)[0] = (float)res.real;
+ ((float *)op)[1] = (float)res.imag;
+ }
+}
+
+/*UFUNC_API*/
+/* cfloat->cfloat loop over a native cfloat kernel. */
+static void
+PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i; cfloat x, res;
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
+ x.real = ((float *)ip1)[0];
+ x.imag = ((float *)ip1)[1];
+ ((CfloatUnaryFunc *)func)(&x, &res);
+ ((float *)op)[0] = res.real;
+ ((float *)op)[1] = res.imag;
+ }
+}
+
+
+/*UFUNC_API*/
+/* cdouble->cdouble loop. */
+static void
+PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i; cdouble x, res;
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
+ x.real = ((double *)ip1)[0];
+ x.imag = ((double *)ip1)[1];
+ ((CdoubleUnaryFunc *)func)(&x, &res);
+ ((double *)op)[0] = res.real;
+ ((double *)op)[1] = res.imag;
+ }
+}
+
+
+/*UFUNC_API*/
+/* clongdouble->clongdouble loop: load each (real,imag) longdouble pair,
+   apply the kernel, and store the result.
+   BUG FIX: the result was stored through (double *) casts, writing
+   double-sized elements into the clongdouble output array (wrong element
+   size and corrupted imaginary part when sizeof(longdouble) !=
+   sizeof(double)); store through (longdouble *) to match the input side
+   and the sibling PyUFunc_GG_G loop. */
+static void
+PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i; clongdouble x, res;
+ char *ip1=args[0], *op=args[1];
+ for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
+ x.real = ((longdouble *)ip1)[0];
+ x.imag = ((longdouble *)ip1)[1];
+ ((ClongdoubleUnaryFunc *)func)(&x, &res);
+ ((longdouble *)op)[0] = res.real;
+ ((longdouble *)op)[1] = res.imag;
+ }
+}
+
+/*UFUNC_API*/
+/* object->object loop over a unaryfunc kernel.  Stops early on NULL
+   input, NULL result, or a raised exception; replaces the old output
+   reference with the new result. */
+static void
+PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i; PyObject *tmp, *x1;
+ char *ip1=args[0], *op=args[1];
+
+ for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
+ x1 = *(PyObject **)ip1;
+ if (x1 == NULL) goto done;
+ tmp = ((unaryfunc)func)(x1);
+ if ((tmp==NULL) || PyErr_Occurred()) goto done;
+ Py_XDECREF(*((PyObject **)op));
+ *((PyObject **)op) = tmp;
+ }
+ done:
+ return;
+}
+
+/*UFUNC_API*/
+/* object->object loop that calls the no-argument method named by 'func'
+   (a C string) on each input object and stores the result.
+   NOTE(review): when the attribute lookup fails the element is skipped
+   silently while the AttributeError stays set -- presumably intentional
+   best-effort behavior; confirm callers clear or propagate it. */
+static void
+PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i; PyObject *tmp, *meth, *arglist, *x1;
+ char *ip1=args[0], *op=args[1];
+
+ for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
+ x1 = *(PyObject **)ip1;
+ if (x1 == NULL) goto done;
+ meth = PyObject_GetAttrString(x1, (char *)func);
+ if (meth != NULL) {
+ arglist = PyTuple_New(0);
+ tmp = PyEval_CallObject(meth, arglist);
+ Py_DECREF(arglist);
+ Py_DECREF(meth);
+ if ((tmp==NULL) || PyErr_Occurred()) goto done;
+ Py_XDECREF(*((PyObject **)op));
+ *((PyObject **)op) = tmp;
+ }
+ }
+ done:
+ return;
+
+}
+
+
+
+/* a general-purpose ufunc that deals with general-purpose Python callable.
+ func is a structure with nin, nout, and a Python callable function
+*/
+
+/*UFUNC_API*/
+/* Generic nin->nout object loop: for each element, build a tuple of the
+   nin inputs, call the Python callable, and scatter the result(s) into
+   the nout outputs.  A tuple result must have exactly nout items; a
+   non-tuple result fills only the first output.  Returns early (without
+   setting a new error) on NULL input or a failed call. */
+static void
+PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func)
+{
+ intp i, j;
+ intp n=dimensions[0];
+ PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func;
+ int nin = data->nin, nout=data->nout;
+ int ntot;
+ PyObject *tocall = data->callable;
+ char *ptrs[MAX_ARGS];
+ PyObject *arglist, *result;
+ PyObject *in, **op;
+
+ ntot = nin+nout;
+
+ for (j=0; j < ntot; j++) ptrs[j] = args[j];
+ for(i=0; i<n; i++) {
+ arglist = PyTuple_New(nin);
+ if (arglist == NULL) return;
+ for (j=0; j < nin; j++) {
+ in = *((PyObject **)ptrs[j]);
+ if (in == NULL) {Py_DECREF(arglist); return;}
+ /* SET_ITEM steals a reference, so INCREF after storing */
+ PyTuple_SET_ITEM(arglist, j, in);
+ Py_INCREF(in);
+ }
+ result = PyEval_CallObject(tocall, arglist);
+ Py_DECREF(arglist);
+ if (result == NULL) return;
+ if PyTuple_Check(result) {
+ if (nout != PyTuple_Size(result)) {
+ Py_DECREF(result);
+ return;
+ }
+ for (j=0; j < nout; j++) {
+ op = (PyObject **)ptrs[j+nin];
+ Py_XDECREF(*op);
+ *op = PyTuple_GET_ITEM(result, j);
+ Py_INCREF(*op);
+ }
+ Py_DECREF(result);
+ }
+ else {
+ /* single (non-tuple) result: ownership moves to output */
+ op = (PyObject **)ptrs[nin];
+ Py_XDECREF(*op);
+ *op = result;
+ }
+ for (j=0; j < ntot; j++) ptrs[j] += steps[j];
+ }
+ return;
+}
+
+
+
+
+/* ---------------------------------------------------------------- */
+
+
+/* fpstatus is the ufunc_formatted hardware status
+ errmask is the handling mask specified by the user.
+ errobj is a Python object with (string, callable object or None)
+ or NULL
+*/
+
+/*
+ 2. for each of the flags
+ determine whether to ignore, warn, raise error, or call Python function.
+ If ignore, do nothing
+ If warn, print a warning and continue
+ If raise return an error
+ If call, call a user-defined function with string
+*/
+
+/* Dispatch one floating-point error according to 'method' (warn, raise,
+   or call the user callback stored in errobj[1]).  errobj is the
+   (name, callable-or-None) pair built by PyUFunc_GetPyValues; retstatus
+   is the raw hardware status word passed to the callback.  Returns 0 on
+   success, -1 with a Python error set on failure.  The ALLOW_C_API
+   bracketing reacquires the GIL around the Python calls. */
+static int
+_error_handler(int method, PyObject *errobj, char *errtype, int retstatus)
+{
+ PyObject *pyfunc, *ret, *args;
+ char *name=PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0));
+ char msg[100];
+
+ ALLOW_C_API_DEF
+
+ ALLOW_C_API
+
+ switch(method) {
+ case UFUNC_ERR_WARN:
+ snprintf(msg, 100, "%s encountered in %s", errtype, name);
+ if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) goto fail;
+ break;
+ case UFUNC_ERR_RAISE:
+ PyErr_Format(PyExc_FloatingPointError,
+ "%s encountered in %s",
+ errtype, name);
+ goto fail;
+ case UFUNC_ERR_CALL:
+ pyfunc = PyTuple_GET_ITEM(errobj, 1);
+
+ if (pyfunc == Py_None) {
+ PyErr_Format(PyExc_NameError,
+ "python callback specified for %s (in " \
+ " %s) but no function found.",
+ errtype, name);
+ goto fail;
+ }
+ /* "NN" steals both new references into the tuple */
+ args = Py_BuildValue("NN", PyString_FromString(errtype),
+ PyInt_FromLong((long) retstatus));
+ if (args == NULL) goto fail;
+ ret = PyObject_CallObject(pyfunc, args);
+ Py_DECREF(args);
+ if (ret == NULL) goto fail;
+ Py_DECREF(ret);
+
+ break;
+ }
+ DISABLE_C_API
+ return 0;
+
+ fail:
+ DISABLE_C_API
+ return -1;
+}
+
+
+/*UFUNC_API*/
+/* Read (and clear) the hardware floating-point status and route each
+   raised flag through _error_handler according to the 2-bit field for
+   that flag in errmask.  Returns 0, or -1 if a handler raised. */
+static int
+PyUFunc_checkfperr(int errmask, PyObject *errobj)
+{
+ int retstatus;
+ int handle;
+
+ /* 1. check hardware flag --- this is platform dependent code */
+
+ UFUNC_CHECK_STATUS(retstatus) /* no semicolon */
+
+ /* End platform dependent code */
+
+#define HANDLEIT(NAME, str) {if (retstatus & UFUNC_FPE_##NAME) { \
+ handle = errmask & UFUNC_MASK_##NAME;\
+ if (handle && \
+ _error_handler(handle >> UFUNC_SHIFT_##NAME, \
+ errobj, str, retstatus) < 0) \
+ return -1; \
+ }}
+
+ if (errmask && retstatus) {
+ HANDLEIT(DIVIDEBYZERO, "divide by zero");
+ HANDLEIT(OVERFLOW, "overflow");
+ HANDLEIT(UNDERFLOW, "underflow");
+ HANDLEIT(INVALID, "invalid");
+ }
+
+#undef HANDLEIT
+
+ return 0;
+}
+
+
+/* Checking the status flag clears it */
+/* Side-effect-only helper: reads the hardware FP status into a local,
+   which resets the flags; the value is deliberately discarded. */
+/*UFUNC_API*/
+static void
+PyUFunc_clearfperr()
+{
+ int retstatus;
+
+ UFUNC_CHECK_STATUS(retstatus)
+}
+
+
+/* Classification of scalar operands used during type-resolution: the
+   kind of a scalar decides how far it may coerce (see _cancoerce). */
+#define UFUNC_NOSCALAR 0
+#define UFUNC_BOOL_SCALAR 1
+#define UFUNC_INTPOS_SCALAR 2
+#define UFUNC_INTNEG_SCALAR 3
+#define UFUNC_FLOAT_SCALAR 4
+#define UFUNC_COMPLEX_SCALAR 5
+#define UFUNC_OBJECT_SCALAR 6
+
+/* Loop-strategy selectors (paired values: general ufunc / reduce). */
+#define NO_UFUNCLOOP 0
+#define ZERODIM_REDUCELOOP 0
+#define ONE_UFUNCLOOP 1
+#define ONEDIM_REDUCELOOP 1
+#define NOBUFFER_UFUNCLOOP 2
+#define NOBUFFER_REDUCELOOP 2
+#define BUFFER_UFUNCLOOP 3
+#define BUFFER_REDUCELOOP 3
+
+
+/* Map a type number to the smallest type in its kind (signed int,
+   unsigned int, float, complex); other types map to themselves.  Used so
+   scalar operands start the search at the bottom of their class. */
+static char
+_lowest_type(char intype)
+{
+ switch(intype) {
+ /* case PyArray_BYTE */
+ case PyArray_SHORT:
+ case PyArray_INT:
+ case PyArray_LONG:
+ case PyArray_LONGLONG:
+ return PyArray_BYTE;
+ /* case PyArray_UBYTE */
+ case PyArray_USHORT:
+ case PyArray_UINT:
+ case PyArray_ULONG:
+ case PyArray_ULONGLONG:
+ return PyArray_UBYTE;
+ /* case PyArray_FLOAT:*/
+ case PyArray_DOUBLE:
+ case PyArray_LONGDOUBLE:
+ return PyArray_FLOAT;
+ /* case PyArray_CFLOAT:*/
+ case PyArray_CDOUBLE:
+ case PyArray_CLONGDOUBLE:
+ return PyArray_CFLOAT;
+ default:
+ return intype;
+ }
+}
+
+/* Called to determine coercion
+ */
+/* Decide whether an operand of 'thistype' may be coerced to 'neededtype'
+   given its scalar kind: arrays use the safe-cast table, while scalars
+   are allowed any type at least as "large" as their kind (relying on the
+   type numbers being ordered within each kind). */
+static int
+_cancoerce(char thistype, char neededtype, char scalar)
+{
+
+ switch(scalar) {
+ case UFUNC_NOSCALAR:
+ case UFUNC_BOOL_SCALAR:
+ case UFUNC_OBJECT_SCALAR:
+ return PyArray_CanCastSafely(thistype, neededtype);
+ case UFUNC_INTPOS_SCALAR:
+ return (neededtype >= PyArray_UBYTE);
+ case UFUNC_INTNEG_SCALAR:
+ return (neededtype >= PyArray_BYTE) && \
+ !(PyTypeNum_ISUNSIGNED(neededtype));
+ case UFUNC_FLOAT_SCALAR:
+ return (neededtype >= PyArray_FLOAT);
+ case UFUNC_COMPLEX_SCALAR:
+ return (neededtype >= PyArray_CFLOAT);
+ }
+ fprintf(stderr, "\n**Error** coerce fall through: %d %d %d\n\n",
+ thistype, neededtype, scalar);
+ return 1; /* should never get here... */
+}
+
+
+/* Select the inner loop for a ufunc call.  For user-defined types all
+   arguments must share the one type and the loop comes from the
+   self->userloops registry; for builtin types the signature table is
+   scanned for the first row every input can coerce to, with scalar
+   operands allowed to start lower in their kind (_lowest_type).
+   On success fills arg_types (all nargs entries), *function and *data,
+   and returns 0; returns -1 with a TypeError set on failure.
+   BUG FIX: PyDict_GetItem returns a BORROWED reference, so the previous
+   Py_DECREF(obj) over-released the registered loop object each call,
+   eventually destroying it while still stored in userloops
+   (use-after-free).  The DECREF is removed. */
+static int
+select_types(PyUFuncObject *self, int *arg_types,
+ PyUFuncGenericFunction *function, void **data,
+ char *scalars)
+{
+
+ int i=0, j;
+ char start_type;
+
+ if (PyTypeNum_ISUSERDEF((arg_types[0]))) {
+ PyObject *key, *obj;
+ for (i=0; i<self->nin; i++) {
+ if (arg_types[i] != arg_types[0]) {
+ PyErr_SetString(PyExc_TypeError,
+ "ufuncs on user defined" \
+ " types don't support "\
+ "coercion");
+ return -1;
+ }
+ }
+ for (i=self->nin; i<self->nargs; i++) {
+ arg_types[i] = arg_types[0];
+ }
+
+ obj = NULL;
+ if (self->userloops) {
+ key = PyInt_FromLong((long) arg_types[0]);
+ if (key == NULL) return -1;
+ /* borrowed reference -- do NOT decref */
+ obj = PyDict_GetItem(self->userloops, key);
+ Py_DECREF(key);
+ }
+ if (obj == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "no registered loop for this " \
+ "user-defined type");
+ return -1;
+ }
+ /* registry entry is either (funcptr, data) or a bare funcptr */
+ if PyTuple_Check(obj) {
+ *function = (PyUFuncGenericFunction) \
+ PyCObject_AsVoidPtr(PyTuple_GET_ITEM(obj, 0));
+ *data = PyCObject_AsVoidPtr(PyTuple_GET_ITEM(obj, 1));
+ }
+ else {
+ *function = (PyUFuncGenericFunction) \
+ PyCObject_AsVoidPtr(obj);
+ *data = NULL;
+ }
+ return 0;
+ }
+
+
+ start_type = arg_types[0];
+ /* If the first argument is a scalar we need to place
+ the start type as the lowest type in the class
+ */
+ if (scalars[0] != UFUNC_NOSCALAR) {
+ start_type = _lowest_type(start_type);
+ }
+
+ /* skip signature rows below the starting type */
+ while (i<self->ntypes && start_type > self->types[i*self->nargs])
+ i++;
+
+ /* first row where every input can coerce wins */
+ for(;i<self->ntypes; i++) {
+ for(j=0; j<self->nin; j++) {
+ if (!_cancoerce(arg_types[j],
+ self->types[i*self->nargs+j],
+ scalars[j]))
+ break;
+ }
+ if (j == self->nin) break;
+ }
+ if(i>=self->ntypes) {
+ PyErr_SetString(PyExc_TypeError,
+ "function not supported for these types, "\
+ "and can't coerce safely to supported types");
+ return -1;
+ }
+ for(j=0; j<self->nargs; j++)
+ arg_types[j] = self->types[i*self->nargs+j];
+
+ if (self->data)
+ *data = self->data[i];
+ else
+ *data = NULL;
+ *function = self->functions[i];
+
+ return 0;
+}
+
+/* When nonzero, skip the scope lookup below and always use defaults. */
+static int PyUFunc_USEDEFAULTS=0;
+
+/*UFUNC_API*/
+/* Fetch the per-call error-handling settings: looks up the
+   UFUNC_PYVALS_NAME list ([bufsize, errmask, callback]) in the caller's
+   locals, then globals, then builtins, falling back to defaults when it
+   is absent or PyUFunc_USEDEFAULTS is set.  On success fills *bufsize,
+   *errmask, and *errobj (a new (name, callback) tuple) and returns 0;
+   returns -1 with an error set on invalid settings.
+   NOTE(review): PyEval_GetLocals can return NULL when no frame is
+   executing -- presumably a frame always exists here; confirm before
+   relying on PyDict_GetItem(thedict, ...). */
+static int
+PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject **errobj)
+{
+ PyObject *thedict;
+ PyObject *ref=NULL;
+ PyObject *retval;
+ static PyObject *thestring=NULL;
+
+ if (!PyUFunc_USEDEFAULTS) {
+ if (thestring == NULL) {
+ thestring = PyString_InternFromString(UFUNC_PYVALS_NAME);
+ }
+ thedict = PyEval_GetLocals();
+ ref = PyDict_GetItem(thedict, thestring);
+ if (ref == NULL) {
+ thedict = PyEval_GetGlobals();
+ ref = PyDict_GetItem(thedict, thestring);
+ }
+ if (ref == NULL) {
+ thedict = PyEval_GetBuiltins();
+ ref = PyDict_GetItem(thedict, thestring);
+ }
+ }
+ if (ref == NULL) {
+ /* no user settings anywhere: use the compiled-in defaults */
+ *errmask = UFUNC_ERR_DEFAULT;
+ *errobj = Py_BuildValue("NO",
+ PyString_FromString(name),
+ Py_None);
+ *bufsize = PyArray_BUFSIZE;
+ return 0;
+ }
+ *errobj = NULL;
+ if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) {
+ PyErr_Format(PyExc_TypeError, "%s must be a length 3 list.",
+ UFUNC_PYVALS_NAME);
+ return -1;
+ }
+
+ *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
+ if ((*bufsize == -1) && PyErr_Occurred()) return -1;
+ if ((*bufsize < PyArray_MIN_BUFSIZE) || \
+ (*bufsize > PyArray_MAX_BUFSIZE) || \
+ (*bufsize % 16 != 0)) {
+ PyErr_Format(PyExc_ValueError,
+ "buffer size (%d) is not " \
+ "in range (%d - %d) or not a multiple of 16",
+ *bufsize, PyArray_MIN_BUFSIZE,
+ PyArray_MAX_BUFSIZE);
+ return -1;
+ }
+
+ *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
+ if (*errmask < 0) {
+ if (PyErr_Occurred()) return -1;
+ PyErr_Format(PyExc_ValueError, \
+ "invalid error mask (%d)",
+ *errmask);
+ return -1;
+ }
+
+ retval = PyList_GET_ITEM(ref, 2);
+ if (retval != Py_None && !PyCallable_Check(retval)) {
+ PyErr_SetString(PyExc_TypeError,
+ "callback function must be callable");
+ return -1;
+ }
+
+ *errobj = Py_BuildValue("NO",
+ PyString_FromString(name),
+ retval);
+ if (*errobj == NULL) return -1;
+
+ return 0;
+}
+
+
+/* Classify the scalar "kind" of a type number for the coercion rules:
+   negative-capable integer, float, complex, non-negative integer,
+   boolean, or -- as the fallback -- object.  The `arr` argument is
+   unused here but kept for the callers' calling convention. */
+static char
+_scalar_kind(int typenum, PyArrayObject **arr)
+{
+    char kind;
+
+    if (PyTypeNum_ISSIGNED(typenum)) {
+        kind = UFUNC_INTNEG_SCALAR;
+    }
+    else if (PyTypeNum_ISFLOAT(typenum)) {
+        kind = UFUNC_FLOAT_SCALAR;
+    }
+    else if (PyTypeNum_ISCOMPLEX(typenum)) {
+        kind = UFUNC_COMPLEX_SCALAR;
+    }
+    else if (PyTypeNum_ISUNSIGNED(typenum)) {
+        kind = UFUNC_INTPOS_SCALAR;
+    }
+    else if (PyTypeNum_ISBOOL(typenum)) {
+        kind = UFUNC_BOOL_SCALAR;
+    }
+    else {
+        kind = UFUNC_OBJECT_SCALAR;
+    }
+    return kind;
+}
+
+
+/* Create copies for any arrays that are less than loop->bufsize
+   in total size and are mis-behaved or in need
+   of casting.
+
+   Small inputs are copied up front (aligned, force-cast) so they never
+   need per-chunk buffering in the inner loop.  Returns 0 on success,
+   -1 with an exception set on failure.
+*/
+
+static int
+_create_copies(PyUFuncLoopObject *loop, int *arg_types, PyArrayObject **mps)
+{
+    int nin = loop->ufunc->nin;
+    int i;
+    intp size;
+    PyObject *new;
+    PyArray_Descr *ntype;
+    PyArray_Descr *atype;
+
+    for (i=0; i<nin; i++) {
+        size = PyArray_SIZE(mps[i]);
+        /* if the type of mps[i] is equivalent to arg_types[i] */
+        /* then set arg_types[i] equal to type of
+           mps[i] for later checking....
+        */
+        if (PyArray_TYPE(mps[i]) != arg_types[i]) {
+            ntype = mps[i]->descr;          /* borrowed */
+            atype = PyArray_DescrFromType(arg_types[i]);
+            if (PyArray_EquivTypes(atype, ntype)) {
+                arg_types[i] = ntype->type_num;
+            }
+            Py_DECREF(atype);
+        }
+        if (size < loop->bufsize) {
+            if (!(PyArray_ISBEHAVED_RO(mps[i])) || \
+                PyArray_TYPE(mps[i]) != arg_types[i]) {
+                ntype = PyArray_DescrFromType(arg_types[i]);
+                new = PyArray_FromAny((PyObject *)mps[i],
+                                      ntype, 0, 0,
+                                      FORCECAST | ALIGNED);
+                if (new == NULL) return -1;
+                /* replace the input with the well-behaved copy */
+                Py_DECREF(mps[i]);
+                mps[i] = (PyArrayObject *)new;
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* Expands to: if this ufunc is named `str`, report whether op defines
+   the corresponding reflected special method __`rstr`__. */
+#define _GETATTR_(str, rstr) if (strcmp(name, #str) == 0) { \
+        return PyObject_HasAttrString(op, "__" #rstr "__");}
+
+/* Return non-zero if `op` has the reflected (swapped-operand) special
+   method matching the binary ufunc called `name`; 0 when the ufunc has
+   no reflected counterpart or op lacks the attribute. */
+static int
+_has_reflected_op(PyObject *op, char *name)
+{
+    _GETATTR_(add, radd)
+    _GETATTR_(subtract, rsub)
+    _GETATTR_(multiply, rmul)
+    _GETATTR_(divide, rdiv)
+    _GETATTR_(true_divide, rtruediv)
+    _GETATTR_(floor_divide, rfloordiv)
+    _GETATTR_(remainder, rmod)
+    _GETATTR_(power, rpow)
+    /* BUG FIX: was `rrlshift`, which tested the non-existent
+       __rrlshift__ attribute; the reflected method of << is
+       __rlshift__ per the Python data model. */
+    _GETATTR_(left_shift, rlshift)
+    _GETATTR_(right_shift, rrshift)
+    _GETATTR_(bitwise_and, rand)
+    _GETATTR_(bitwise_xor, rxor)
+    _GETATTR_(bitwise_or, ror)
+    return 0;
+}
+
+#undef _GETATTR_
+
+
+/*
+ * Parse the Python argument tuple for a ufunc call and fill in the
+ * loop object: convert the inputs to arrays, classify scalars, select
+ * the inner-loop function, broadcast, validate or create the output
+ * arrays, decide the looping strategy (ONE / NOBUFFER / BUFFER) and
+ * allocate any copy/cast buffers the buffered strategy needs.
+ *
+ * Returns the number of arguments parsed on success, -1 on error.
+ * Returning with loop->notimplemented set tells the caller to let
+ * Python's binary-op machinery try the reflected operation instead.
+ */
+static int
+construct_matrices(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps)
+{
+    int nargs, i;
+    int arg_types[MAX_ARGS];
+    char scalars[MAX_ARGS];
+    PyUFuncObject *self=loop->ufunc;
+    Bool allscalars=TRUE;
+    PyTypeObject *subtype=&PyArray_Type;
+
+    /* Check number of arguments */
+    nargs = PyTuple_Size(args);
+    if ((nargs != self->nin) && (nargs != self->nargs)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "invalid number of arguments");
+        return -1;
+    }
+
+    /* Get each input argument */
+    for (i=0; i<self->nin; i++) {
+        mps[i] = (PyArrayObject *)\
+            PyArray_FromAny(PyTuple_GET_ITEM(args,i),
+                            NULL, 0, 0, 0);
+        if (mps[i] == NULL) return -1;
+        arg_types[i] = PyArray_TYPE(mps[i]);
+        if (PyTypeNum_ISFLEXIBLE(arg_types[i])) {
+            /* generic loops cannot handle flexible (string/void)
+               types -- signal NotImplemented */
+            loop->notimplemented = 1;
+            return nargs;
+        }
+        /*
+          fprintf(stderr, "array %d has reference %d\n", i,
+          (mps[i])->ob_refcnt);
+        */
+
+        /* Scalars are 0-dimensional arrays
+           at this point
+        */
+        if (mps[i]->nd > 0) {
+            scalars[i] = UFUNC_NOSCALAR;
+            allscalars=FALSE;
+        }
+        else scalars[i] = _scalar_kind(arg_types[i], &(mps[i]));
+
+        /* If any input is a big-array */
+        if (!PyType_IsSubtype(mps[i]->ob_type, &PyArray_Type)) {
+            subtype = &PyBigArray_Type;
+        }
+    }
+
+    /* If everything is a scalar, then use normal coercion rules */
+    if (allscalars) {
+        for (i=0; i<self->nin; i++) {
+            scalars[i] = UFUNC_NOSCALAR;
+        }
+    }
+
+    /* Select an appropriate function for these argument types. */
+    if (select_types(loop->ufunc, arg_types, &(loop->function),
+                     &(loop->funcdata), scalars) == -1)
+        return -1;
+
+    /* FAIL with NotImplemented if the other object has
+       the __r<op>__ method and has __array_priority__ as
+       an attribute (signalling it can handle ndarray's)
+       and is not already an ndarray or bigndarray
+    */
+    /* BUG FIX: test nin/nout *before* reading arg_types[1] so the
+       entry is never read uninitialized for unary ufuncs. */
+    if ((loop->ufunc->nin==2) && (loop->ufunc->nout == 1) && \
+        (arg_types[1] == PyArray_OBJECT)) {
+        PyObject *_obj = PyTuple_GET_ITEM(args, 1);
+        if (!PyArray_CheckExact(_obj) && \
+            !PyBigArray_CheckExact(_obj) && \
+            PyObject_HasAttrString(_obj, "__array_priority__") && \
+            _has_reflected_op(_obj, loop->ufunc->name)) {
+            loop->notimplemented = 1;
+            return nargs;
+        }
+    }
+    loop->notimplemented=0;
+
+    /* Create copies for some of the arrays if appropriate */
+    if (_create_copies(loop, arg_types, mps) < 0) return -1;
+
+    /* Create Iterators for the Inputs */
+    for (i=0; i<self->nin; i++) {
+        loop->iters[i] = (PyArrayIterObject *) \
+            PyArray_IterNew((PyObject *)mps[i]);
+        if (loop->iters[i] == NULL) return -1;
+    }
+
+    /* Broadcast the result */
+    loop->numiter = self->nin;
+    if (PyArray_Broadcast((PyArrayMultiIterObject *)loop) < 0)
+        return -1;
+
+    /* Get any return arguments */
+    for (i=self->nin; i<nargs; i++) {
+        mps[i] = (PyArrayObject *)PyTuple_GET_ITEM(args, i);
+        if (((PyObject *)mps[i])==Py_None) {
+            mps[i] = NULL;
+            continue;
+        }
+        Py_INCREF(mps[i]);
+        if (!PyArray_Check((PyObject *)mps[i])) {
+            PyObject *new;
+            if (PyArrayIter_Check(mps[i])) {
+                new = PyObject_CallMethod((PyObject *)mps[i],
+                                          "__array__", NULL);
+                Py_DECREF(mps[i]);
+                mps[i] = (PyArrayObject *)new;
+            }
+            else {
+                PyErr_SetString(PyExc_TypeError,
+                                "return arrays must be "\
+                                "of ArrayType");
+                Py_DECREF(mps[i]);
+                mps[i] = NULL;
+                return -1;
+            }
+        }
+        /* output shape must match the broadcast shape exactly */
+        if (!PyArray_CompareLists(mps[i]->dimensions,
+                                  loop->dimensions, loop->nd)) {
+            PyErr_SetString(PyExc_ValueError,
+                            "invalid return array shape");
+            Py_DECREF(mps[i]);
+            mps[i] = NULL;
+            return -1;
+        }
+        if (!PyArray_ISWRITEABLE(mps[i])) {
+            PyErr_SetString(PyExc_ValueError,
+                            "return array is not writeable");
+            Py_DECREF(mps[i]);
+            mps[i] = NULL;
+            return -1;
+        }
+    }
+
+    /* construct any missing return arrays and make output iterators */
+
+    for (i=self->nin; i<self->nargs; i++) {
+        PyArray_Descr *ntype;
+
+        if (mps[i] == NULL) {
+            mps[i] = (PyArrayObject *)PyArray_New(subtype,
+                                                  loop->nd,
+                                                  loop->dimensions,
+                                                  arg_types[i],
+                                                  NULL, NULL,
+                                                  0, 0, NULL);
+            if (mps[i] == NULL) return -1;
+        }
+
+        /* reset types for outputs that are equivalent
+           -- no sense casting uselessly
+        */
+        else {
+            if (mps[i]->descr->type_num != arg_types[i]) {
+                PyArray_Descr *atype;
+                ntype = mps[i]->descr;
+                atype = PyArray_DescrFromType(arg_types[i]);
+                if (PyArray_EquivTypes(atype, ntype)) {
+                    arg_types[i] = ntype->type_num;
+                }
+                Py_DECREF(atype);
+            }
+
+            /* still not the same -- or will we have to use buffers?*/
+            if (mps[i]->descr->type_num != arg_types[i] ||
+                !PyArray_ISBEHAVED_RO(mps[i])) {
+                if (loop->size < loop->bufsize) {
+                    PyObject *new;
+                    /* Copy the array to a temporary copy
+                       and set the UPDATEIFCOPY flag
+                    */
+                    ntype = PyArray_DescrFromType(arg_types[i]);
+                    new = PyArray_FromAny((PyObject *)mps[i],
+                                          ntype, 0, 0,
+                                          FORCECAST | ALIGNED |
+                                          UPDATEIFCOPY);
+                    if (new == NULL) return -1;
+                    Py_DECREF(mps[i]);
+                    mps[i] = (PyArrayObject *)new;
+                }
+            }
+        }
+
+        loop->iters[i] = (PyArrayIterObject *) \
+            PyArray_IterNew((PyObject *)mps[i]);
+        if (loop->iters[i] == NULL) return -1;
+    }
+
+
+    /* If any of different type, or misaligned or swapped
+       then must use buffers */
+
+    loop->bufcnt = 0;
+
+    loop->obj = 0;
+
+    /* Determine looping method needed */
+    loop->meth = NO_UFUNCLOOP;
+
+    for (i=0; i<self->nargs; i++) {
+        loop->needbuffer[i] = 0;
+        if (arg_types[i] != mps[i]->descr->type_num ||
+            !PyArray_ISBEHAVED_RO(mps[i])) {
+            loop->meth = BUFFER_UFUNCLOOP;
+            loop->needbuffer[i] = 1;
+        }
+        if (!loop->obj && mps[i]->descr->type_num == PyArray_OBJECT) {
+            loop->obj = 1;
+        }
+    }
+
+    if (loop->meth == NO_UFUNCLOOP) {
+
+        loop->meth = ONE_UFUNCLOOP;
+
+        /* All correct type and BEHAVED */
+        /* Check for non-uniform stridedness */
+
+        for (i=0; i<self->nargs; i++) {
+            if (!(loop->iters[i]->contiguous)) {
+                /* may still have uniform stride
+                   if (broadcated result) <= 1-d */
+                if (mps[i]->nd != 0 && \
+                    (loop->iters[i]->nd_m1 > 0)) {
+                    loop->meth = NOBUFFER_UFUNCLOOP;
+                    break;
+                }
+            }
+        }
+        if (loop->meth == ONE_UFUNCLOOP) {
+            for (i=0; i<self->nargs; i++) {
+                loop->bufptr[i] = mps[i]->data;
+            }
+        }
+    }
+
+    loop->numiter = self->nargs;
+
+    /* Fill in steps */
+    if (loop->meth != ONE_UFUNCLOOP) {
+        int ldim = 0;
+        intp maxdim=-1;
+        PyArrayIterObject *it;
+
+        /* Fix iterators */
+
+        /* Find the **largest** dimension */
+
+        maxdim = -1;
+        for (i=loop->nd - 1; i>=0; i--) {
+            if (loop->dimensions[i] > maxdim) {
+                ldim = i;
+                maxdim = loop->dimensions[i];
+            }
+        }
+
+        loop->size /= maxdim;
+        loop->bufcnt = maxdim;
+        loop->lastdim = ldim;
+
+        /* Fix the iterators so the inner loop occurs over the
+           largest dimensions -- This can be done by
+           setting the size to 1 in that dimension
+           (just in the iterators)
+        */
+
+        for (i=0; i<loop->numiter; i++) {
+            it = loop->iters[i];
+            it->contiguous = 0;
+            it->size /= (it->dims_m1[ldim]+1);
+            it->dims_m1[ldim] = 0;
+            it->backstrides[ldim] = 0;
+
+            /* (won't fix factors because we
+               don't use PyArray_ITER_GOTO1D
+               so don't change them) */
+
+            /* Set the steps to the strides in that dimension */
+            loop->steps[i] = it->strides[ldim];
+        }
+
+        /* fix up steps where we will be copying data to
+           buffers and calculate the ninnerloops and leftover
+           values -- if step size is already zero that is not changed...
+        */
+        if (loop->meth == BUFFER_UFUNCLOOP) {
+            loop->leftover = maxdim % loop->bufsize;
+            loop->ninnerloops = (maxdim / loop->bufsize) + 1;
+            for (i=0; i<self->nargs; i++) {
+                if (loop->needbuffer[i] && loop->steps[i]) {
+                    loop->steps[i] = mps[i]->descr->elsize;
+                }
+                /* These are changed later if casting is needed */
+            }
+        }
+    }
+    else { /* uniformly-strided case ONE_UFUNCLOOP */
+        for (i=0; i<self->nargs; i++) {
+            if (PyArray_SIZE(mps[i]) == 1)
+                loop->steps[i] = 0;
+            else
+                loop->steps[i] = mps[i]->strides[mps[i]->nd-1];
+        }
+    }
+
+
+    /* Finally, create memory for buffers if we need them */
+
+    /* buffers for scalars are specially made small -- scalars are
+       not copied multiple times */
+    if (loop->meth == BUFFER_UFUNCLOOP) {
+        int cnt = 0, cntcast = 0; /* keeps track of bytes to allocate */
+        int scnt = 0, scntcast = 0;
+        char *castptr;
+        char *bufptr;
+        int last_was_scalar=0;
+        int last_cast_was_scalar=0;
+        int oldbufsize=0;
+        int oldsize=0;
+        int scbufsize = 4*sizeof(double);
+        int memsize;
+        PyArray_Descr *descr;
+
+        /* compute the element size */
+        for (i=0; i<self->nargs;i++) {
+            /* BUG FIX: was `!loop->needbuffer` -- the address of the
+               array, which is never NULL -- so unbuffered arguments
+               were wrongly included in the size computation.  Test
+               this argument's flag instead. */
+            if (!loop->needbuffer[i]) continue;
+            if (arg_types[i] != mps[i]->descr->type_num) {
+                descr = PyArray_DescrFromType(arg_types[i]);
+                if (loop->steps[i])
+                    cntcast += descr->elsize;
+                else
+                    scntcast += descr->elsize;
+                if (i < self->nin) {
+                    loop->cast[i] = \
+                        mps[i]->descr->f->cast[arg_types[i]];
+                }
+                else {
+                    loop->cast[i] = descr->f-> \
+                        cast[mps[i]->descr->type_num];
+                }
+                Py_DECREF(descr);
+            }
+            loop->swap[i] = !(PyArray_ISNOTSWAPPED(mps[i]));
+            if (loop->steps[i])
+                cnt += mps[i]->descr->elsize;
+            else
+                scnt += mps[i]->descr->elsize;
+        }
+        memsize = loop->bufsize*(cnt+cntcast) + scbufsize*(scnt+scntcast);
+        loop->buffer[0] = PyDataMem_NEW(memsize);
+
+        /* fprintf(stderr, "Allocated buffer at %p of size %d, cnt=%d, cntcast=%d\n", loop->buffer[0], loop->bufsize * (cnt + cntcast), cnt, cntcast); */
+
+        if (loop->buffer[0] == NULL) {PyErr_NoMemory(); return -1;}
+        castptr = loop->buffer[0] + loop->bufsize*cnt + scbufsize*scnt;
+        bufptr = loop->buffer[0];
+        /* carve the single allocation into per-argument copy and
+           cast buffers */
+        for (i=0; i<self->nargs; i++) {
+            if (!loop->needbuffer[i]) continue;
+            loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : \
+                                        loop->bufsize)*oldbufsize;
+            last_was_scalar = (loop->steps[i] == 0);
+            bufptr = loop->buffer[i];
+            oldbufsize = mps[i]->descr->elsize;
+            /* fprintf(stderr, "buffer[%d] = %p\n", i, loop->buffer[i]); */
+            if (loop->cast[i]) {
+                PyArray_Descr *descr;
+                loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : \
+                                              loop->bufsize)*oldsize;
+                last_cast_was_scalar = last_was_scalar;
+                /* fprintf(stderr, "castbuf[%d] = %p\n", i, loop->castbuf[i]); */
+                descr = PyArray_DescrFromType(arg_types[i]);
+                oldsize = descr->elsize;
+                Py_DECREF(descr);
+                loop->bufptr[i] = loop->castbuf[i];
+                castptr = loop->castbuf[i];
+                if (loop->steps[i])
+                    loop->steps[i] = oldsize;
+            }
+            else {
+                loop->bufptr[i] = loop->buffer[i];
+            }
+        }
+    }
+    return nargs;
+}
+
+/* Release everything owned by a reduce-loop object, then free it. */
+static void
+ufuncreduce_dealloc(PyUFuncReduceObject *self)
+{
+    /* self->ufunc is set first during construction; when it is NULL
+       none of the other members were ever initialized. */
+    if (self->ufunc != NULL) {
+        Py_XDECREF(self->it);
+        Py_XDECREF(self->rit);
+        Py_XDECREF(self->ret);
+        Py_XDECREF(self->errobj);
+        Py_XDECREF(self->decref);
+        if (self->buffer != NULL) {
+            PyDataMem_FREE(self->buffer);
+        }
+        Py_DECREF(self->ufunc);
+    }
+    _pya_free(self);
+}
+
+/* Drop all iterators, the shared buffer allocation, and held
+   references of a ufunc loop object, then free the structure. */
+static void
+ufuncloop_dealloc(PyUFuncLoopObject *self)
+{
+    if (self->ufunc != NULL) {
+        int k;
+
+        for (k = 0; k < self->ufunc->nargs; k++) {
+            Py_XDECREF(self->iters[k]);
+        }
+        /* buffer[0] is the base of the single allocation that all
+           per-argument buffers point into */
+        if (self->buffer[0] != NULL) {
+            PyDataMem_FREE(self->buffer[0]);
+        }
+        Py_XDECREF(self->errobj);
+        Py_DECREF(self->ufunc);
+    }
+    _pya_free(self);
+}
+
+/* Allocate and initialize a PyUFuncLoopObject for executing `self` on
+   `args`.  Reads the Python-level buffer/error settings, then parses
+   the arguments into mps[] via construct_matrices.  Returns NULL with
+   an exception set on failure. */
+static PyUFuncLoopObject *
+construct_loop(PyUFuncObject *self, PyObject *args, PyArrayObject **mps)
+{
+    PyUFuncLoopObject *loop;
+    int i;
+
+    if (self == NULL) {
+        PyErr_SetString(PyExc_ValueError, "function not supported");
+        return NULL;
+    }
+    if ((loop = _pya_malloc(sizeof(PyUFuncLoopObject)))==NULL) {
+        PyErr_NoMemory(); return loop;
+    }
+
+    loop->index = 0;
+    loop->ufunc = self;
+    Py_INCREF(self);
+    /* NULL out members so ufuncloop_dealloc is safe after a
+       partial initialization */
+    loop->buffer[0] = NULL;
+    for (i=0; i<self->nargs; i++) {
+        loop->iters[i] = NULL;
+        loop->cast[i] = NULL;
+    }
+    loop->errobj = NULL;
+
+    if (PyUFunc_GetPyValues((self->name ? self->name : ""),
+                            &(loop->bufsize), &(loop->errormask),
+                            &(loop->errobj)) < 0) goto fail;
+
+    /* Setup the matrices */
+    if (construct_matrices(loop, args, mps) < 0) goto fail;
+
+    /* clear floating-point status before the loop runs */
+    PyUFunc_clearfperr();
+
+    return loop;
+
+ fail:
+    ufuncloop_dealloc(loop);
+    return NULL;
+}
+
+
+/*
+static void
+_printbytebuf(PyUFuncLoopObject *loop, int bufnum)
+{
+ int i;
+
+ fprintf(stderr, "Printing byte buffer %d\n", bufnum);
+ for (i=0; i<loop->bufcnt; i++) {
+ fprintf(stderr, " %d\n", *(((byte *)(loop->buffer[bufnum]))+i));
+ }
+}
+
+static void
+_printlongbuf(PyUFuncLoopObject *loop, int bufnum)
+{
+ int i;
+
+ fprintf(stderr, "Printing long buffer %d\n", bufnum);
+ for (i=0; i<loop->bufcnt; i++) {
+ fprintf(stderr, " %ld\n", *(((long *)(loop->buffer[bufnum]))+i));
+ }
+}
+
+static void
+_printlongbufptr(PyUFuncLoopObject *loop, int bufnum)
+{
+ int i;
+
+ fprintf(stderr, "Printing long buffer %d\n", bufnum);
+ for (i=0; i<loop->bufcnt; i++) {
+ fprintf(stderr, " %ld\n", *(((long *)(loop->bufptr[bufnum]))+i));
+ }
+}
+
+
+
+static void
+_printcastbuf(PyUFuncLoopObject *loop, int bufnum)
+{
+ int i;
+
+ fprintf(stderr, "Printing long buffer %d\n", bufnum);
+ for (i=0; i<loop->bufcnt; i++) {
+ fprintf(stderr, " %ld\n", *(((long *)(loop->castbuf[bufnum]))+i));
+ }
+}
+
+*/
+
+
+
+
+/* currently generic ufuncs cannot be built for use on flexible arrays.
+
+ The cast functions in the generic loop would need to be fixed to pass
+ in something besides NULL, NULL.
+
+ Also the underlying ufunc loops would not know the element-size unless
+ that was passed in as data (which could be arranged).
+
+*/
+
+/* This generic function is called with the ufunc object, the arguments to it,
+ and an array of (pointers to) PyArrayObjects which are NULL. The
+ arguments are parsed and placed in mps in construct_loop (construct_matrices)
+*/
+
+/*UFUNC_API*/
+/*
+ * Execute ufunc `self` on the argument tuple `args`, filling mps[]
+ * with the (converted) input and output arrays.
+ *
+ * Returns 0 on success, -1 on error, and -2 when the operation should
+ * be deferred to Python (NotImplemented).  Dispatches on the loop
+ * strategy chosen by construct_matrices: ONE (single strided call),
+ * NOBUFFER (iterate over the largest dimension), or BUFFER (copy /
+ * byte-swap / cast chunks through temporary buffers).
+ */
+static int
+PyUFunc_GenericFunction(PyUFuncObject *self, PyObject *args,
+                        PyArrayObject **mps)
+{
+    PyUFuncLoopObject *loop;
+    int i;
+    BEGIN_THREADS_DEF
+
+    if (!(loop = construct_loop(self, args, mps))) return -1;
+    if (loop->notimplemented) {ufuncloop_dealloc(loop); return -2;}
+
+    LOOP_BEGIN_THREADS
+
+    switch(loop->meth) {
+    case ONE_UFUNCLOOP:
+        /* Everything is contiguous, notswapped, aligned,
+           and of the right type. -- Fastest.
+           Or if not contiguous, then a single-stride
+           increment moves through the entire array.
+        */
+        /*fprintf(stderr, "ONE...%d\n", loop->size);*/
+        loop->function((char **)loop->bufptr, &(loop->size),
+                       loop->steps, loop->funcdata);
+        UFUNC_CHECK_ERROR(loop);
+        break;
+    case NOBUFFER_UFUNCLOOP:
+        /* Everything is notswapped, aligned and of the
+           right type but not contiguous. -- Almost as fast.
+        */
+        /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/
+        while (loop->index < loop->size) {
+            for (i=0; i<self->nargs; i++)
+                loop->bufptr[i] = loop->iters[i]->dataptr;
+
+            loop->function((char **)loop->bufptr, &(loop->bufcnt),
+                           loop->steps, loop->funcdata);
+            UFUNC_CHECK_ERROR(loop);
+
+            for (i=0; i<self->nargs; i++) {
+                PyArray_ITER_NEXT(loop->iters[i]);
+            }
+            loop->index++;
+        }
+        break;
+    case BUFFER_UFUNCLOOP: {
+        /* cached per-argument properties, hoisted out of the loop */
+        PyArray_CopySwapNFunc *copyswapn[MAX_ARGS];
+        PyArrayIterObject **iters=loop->iters;
+        int *swap=loop->swap;
+        void **dptr=loop->dptr;
+        int mpselsize[MAX_ARGS];
+        intp laststrides[MAX_ARGS];
+        int fastmemcpy[MAX_ARGS];
+        int *needbuffer=loop->needbuffer;
+        intp index=loop->index, size=loop->size;
+        int bufsize;
+        intp bufcnt;
+        int copysizes[MAX_ARGS];
+        void **bufptr = loop->bufptr;
+        void **buffer = loop->buffer;
+        void **castbuf = loop->castbuf;
+        intp *steps = loop->steps;
+        char *tptr[MAX_ARGS];
+        int ninnerloops = loop->ninnerloops;
+        Bool pyobject[MAX_ARGS];
+        int datasize[MAX_ARGS];
+        int i, j, k, stopcondition;
+        char *myptr1, *myptr2;
+
+
+        for (i=0; i<self->nargs; i++) {
+            copyswapn[i] = mps[i]->descr->f->copyswapn;
+            mpselsize[i] = mps[i]->descr->elsize;
+            pyobject[i] = (loop->obj && \
+                           (mps[i]->descr->type_num == PyArray_OBJECT));
+            laststrides[i] = iters[i]->strides[loop->lastdim];
+            /* fastmemcpy: elements are adjacent, so a single memcpy
+               can move the whole chunk */
+            if (steps[i] && laststrides[i] != mpselsize[i]) fastmemcpy[i] = 0;
+            else fastmemcpy[i] = 1;
+        }
+        /* Do generic buffered looping here (works for any kind of
+           arrays -- some need buffers, some don't.
+        */
+
+        /* New algorithm: N is the largest dimension. B is the buffer-size.
+           quotient is loop->ninnerloops-1
+           remainder is loop->leftover
+
+           Compute N = quotient * B + remainder.
+           quotient = N / B  # integer math
+           (store quotient + 1) as the number of innerloops
+           remainder = N % B # integer remainder
+
+           On the inner-dimension we will have (quotient + 1) loops where
+           the size of the inner function is B for all but the last when the niter size is
+           remainder.
+
+           So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is
+           replaced with...
+
+           for(i=0; i<quotient+1; i++) {
+           if (i==quotient+1) make itersize remainder size
+           copy only needed items to buffer.
+           swap input buffers if needed
+           cast input buffers if needed
+           call loop_function()
+           cast outputs in buffers if needed
+           swap outputs in buffers if needed
+           copy only needed items back to output arrays.
+           update all data-pointers by strides*niter
+           }
+        */
+
+
+        /* fprintf(stderr, "BUFFER...%d,%d,%d\n", loop->size,
+           loop->ninnerloops, loop->leftover);
+        */
+        /*
+          for (i=0; i<self->nargs; i++) {
+          fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i,
+          iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao));
+          }
+        */
+
+        stopcondition = ninnerloops;
+        if (loop->leftover == 0) stopcondition--;
+        while (index < size) {
+            bufsize=loop->bufsize;
+            for (i=0; i<self->nargs; i++) {
+                tptr[i] = loop->iters[i]->dataptr;
+                if (needbuffer[i]) {
+                    dptr[i] = bufptr[i];
+                    /* scalar (step 0) arguments occupy one element */
+                    datasize[i] = (steps[i] ? bufsize : 1);
+                    copysizes[i] = datasize[i] * mpselsize[i];
+                }
+                else {
+                    dptr[i] = tptr[i];
+                }
+            }
+
+            /* This is the inner function over the last dimension */
+            for (k=1; k<=stopcondition; k++) {
+                if (k==ninnerloops) {
+                    /* last chunk: only `leftover` elements remain */
+                    bufsize = loop->leftover;
+                    for (i=0; i<self->nargs;i++) {
+                        if (!needbuffer[i]) continue;
+                        datasize[i] = (steps[i] ? bufsize : 1);
+                        copysizes[i] = datasize[i] * mpselsize[i];
+                    }
+                }
+
+                /* stage inputs: copy -> swap -> cast */
+                for (i=0; i<self->nin; i++) {
+                    if (!needbuffer[i]) continue;
+                    if (fastmemcpy[i])
+                        memcpy(buffer[i], tptr[i],
+                               copysizes[i]);
+                    else {
+                        myptr1 = buffer[i];
+                        myptr2 = tptr[i];
+                        for (j=0; j<bufsize; j++) {
+                            memcpy(myptr1, myptr2, mpselsize[i]);
+                            myptr1 += mpselsize[i];
+                            myptr2 += laststrides[i];
+                        }
+                    }
+
+                    /* swap the buffer if necessary */
+                    if (swap[i]) {
+                        /* fprintf(stderr, "swapping...\n");*/
+                        copyswapn[i](buffer[i], NULL,
+                                     (intp) datasize[i], 1,
+                                     mpselsize[i]);
+                    }
+                    /* cast to the other buffer if necessary */
+                    if (loop->cast[i]) {
+                        loop->cast[i](buffer[i],
+                                      castbuf[i],
+                                      (intp) datasize[i],
+                                      NULL, NULL);
+                    }
+                }
+
+                bufcnt = (intp) bufsize;
+                loop->function((char **)dptr, &bufcnt, steps, loop->funcdata);
+
+                /* drain outputs: cast -> swap -> copy back */
+                for (i=self->nin; i<self->nargs; i++) {
+                    if (!needbuffer[i]) continue;
+                    if (loop->cast[i]) {
+                        loop->cast[i](castbuf[i],
+                                      buffer[i],
+                                      (intp) datasize[i],
+                                      NULL, NULL);
+                    }
+                    if (swap[i]) {
+                        copyswapn[i](buffer[i], NULL,
+                                     (intp) datasize[i], 1,
+                                     mpselsize[i]);
+                    }
+                    /* copy back to output arrays */
+                    /* decref what's already there for object arrays */
+                    if (pyobject[i]) {
+                        myptr1 = tptr[i];
+                        for (j=0; j<datasize[i]; j++) {
+                            Py_XDECREF(*((PyObject **)myptr1));
+                            myptr1 += laststrides[i];
+                        }
+                    }
+                    if (fastmemcpy[i])
+                        memcpy(tptr[i], buffer[i], copysizes[i]);
+                    else {
+                        myptr2 = buffer[i];
+                        myptr1 = tptr[i];
+                        for (j=0; j<bufsize; j++) {
+                            memcpy(myptr1, myptr2,
+                                   mpselsize[i]);
+                            myptr1 += laststrides[i];
+                            myptr2 += mpselsize[i];
+                        }
+                    }
+                }
+                if (k == stopcondition) continue;
+                /* advance raw data pointers past the processed chunk */
+                for (i=0; i<self->nargs; i++) {
+                    tptr[i] += bufsize * laststrides[i];
+                    if (!needbuffer[i]) dptr[i] = tptr[i];
+                }
+            }
+
+            if (loop->obj) { /* DECREF castbuf for object arrays */
+                for (i=0; i<self->nargs; i++) {
+                    if (pyobject[i]) {
+                        if (steps[i] == 0) {
+                            Py_XDECREF(*((PyObject **)castbuf[i]));
+                        }
+                        else {
+                            int size = loop->bufsize;
+                            PyObject **objptr = castbuf[i];
+                            /* size is loop->bufsize unless there
+                               was only one loop */
+                            if (ninnerloops == 1) \
+                                size = loop->leftover;
+
+                            for (j=0; j<size; j++) {
+                                Py_XDECREF(*objptr);
+                                objptr += 1;
+                            }
+                        }
+                    }
+                }
+
+            }
+
+            UFUNC_CHECK_ERROR(loop);
+
+            for (i=0; i<self->nargs; i++) {
+                PyArray_ITER_NEXT(loop->iters[i]);
+            }
+            index++;
+        }
+    }
+    }
+
+    LOOP_END_THREADS
+
+    ufuncloop_dealloc(loop);
+    return 0;
+
+ fail:
+    LOOP_END_THREADS
+
+    if (loop) ufuncloop_dealloc(loop);
+    return -1;
+}
+
+/* Return a 0-d array of type `otype` holding the ufunc's identity
+   element.  Fails with ValueError -- naming the method via `str` in
+   the message -- when the ufunc has no identity. */
+static PyArrayObject *
+_getidentity(PyUFuncObject *self, int otype, char *str)
+{
+    PyObject *ival, *arr;
+    PyArray_Descr *typecode;
+
+    if (self->identity == PyUFunc_None) {
+        PyErr_Format(PyExc_ValueError,
+                     "zero-size array to ufunc.%s " \
+                     "without identity", str);
+        return NULL;
+    }
+
+    /* identity is either one or zero */
+    ival = PyInt_FromLong(self->identity == PyUFunc_One ? 1L : 0L);
+
+    typecode = PyArray_DescrFromType(otype);
+    arr = PyArray_FromAny(ival, typecode, 0, 0, CARRAY_FLAGS);
+    Py_DECREF(ival);
+    return (PyArrayObject *)arr;
+}
+
+/* For small (< bufsize elements) reduction inputs that are misbehaved
+   or not of type rtype, replace *arr with an aligned, force-cast copy.
+   The copy is recorded in loop->decref so loop teardown releases it;
+   the original pointer is deliberately not DECREF'd here because the
+   caller still owns that reference. */
+static int
+_create_reduce_copy(PyUFuncReduceObject *loop, PyArrayObject **arr, int rtype)
+{
+    PyObject *copy;
+    PyArray_Descr *ntype;
+    intp nelem;
+    int needs_copy;
+
+    nelem = PyArray_SIZE(*arr);
+    needs_copy = (nelem < loop->bufsize) &&
+        (!(PyArray_ISBEHAVED_RO(*arr)) || (PyArray_TYPE(*arr) != rtype));
+
+    if (needs_copy) {
+        ntype = PyArray_DescrFromType(rtype);
+        copy = PyArray_FromAny((PyObject *)(*arr), ntype, 0, 0,
+                               FORCECAST | ALIGNED);
+        if (copy == NULL) return -1;
+        *arr = (PyArrayObject *)copy;
+        loop->decref = copy;
+    }
+
+    return 0;
+}
+
+/*
+ * Build a PyUFuncReduceObject for reduce/accumulate/reduceat of `self`
+ * over `axis` of *arr with output type `otype`.  Selects the inner
+ * loop, chooses the looping strategy (ZERODIM / ONEDIM / NOBUFFER /
+ * BUFFER), creates the return array and iterators, and allocates any
+ * cast buffers.  Returns NULL with an exception set on failure.
+ */
+static PyUFuncReduceObject *
+construct_reduce(PyUFuncObject *self, PyArrayObject **arr, int axis,
+                 int otype, int operation, intp ind_size, char *str)
+{
+    PyUFuncReduceObject *loop;
+    PyArrayObject *idarr;
+    PyArrayObject *aar;
+    intp loop_i[MAX_DIMS];
+    int arg_types[3] = {otype, otype, otype};
+    char scalars[3] = {UFUNC_NOSCALAR, UFUNC_NOSCALAR, UFUNC_NOSCALAR};
+    int i, j;
+    int nd = (*arr)->nd;
+    /* Reduce type is the type requested of the input
+       during reduction */
+
+    if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject)))==NULL) {
+        PyErr_NoMemory(); return loop;
+    }
+
+    /* NULL/zero everything so ufuncreduce_dealloc is safe on any
+       failure path below */
+    loop->swap = 0;
+    loop->index = 0;
+    loop->ufunc = self;
+    Py_INCREF(self);
+    loop->cast = NULL;
+    loop->buffer = NULL;
+    loop->ret = NULL;
+    loop->it = NULL;
+    loop->rit = NULL;
+    loop->errobj = NULL;
+    loop->decref=NULL;
+    loop->N = (*arr)->dimensions[axis];
+    loop->instrides = (*arr)->strides[axis];
+
+    if (select_types(loop->ufunc, arg_types, &(loop->function),
+                     &(loop->funcdata), scalars) == -1) goto fail;
+
+    /* output type may change -- if it does
+       reduction is forced into that type
+       and we need to select the reduction function again
+    */
+    if (otype != arg_types[2]) {
+        otype = arg_types[2];
+        arg_types[0] = otype;
+        arg_types[1] = otype;
+        if (select_types(loop->ufunc, arg_types, &(loop->function),
+                         &(loop->funcdata), scalars) == -1)
+            goto fail;
+    }
+
+    /* get looping parameters from Python */
+    if (PyUFunc_GetPyValues(str, &(loop->bufsize), &(loop->errormask),
+                            &(loop->errobj)) < 0) goto fail;
+
+    /* Make copy if misbehaved or not otype for small arrays */
+    if (_create_reduce_copy(loop, arr, otype) < 0) goto fail;
+    aar = *arr;
+
+    if (loop->N == 0) {
+        loop->meth = ZERODIM_REDUCELOOP;
+    }
+    else if (PyArray_ISBEHAVED_RO(aar) && \
+             otype == (aar)->descr->type_num) {
+        if (loop->N == 1) {
+            loop->meth = ONEDIM_REDUCELOOP;
+        }
+        else {
+            loop->meth = NOBUFFER_UFUNCLOOP;
+            loop->steps[0] = (aar)->strides[axis];
+            /* first element is copied directly; N-1 remain to reduce */
+            loop->N -= 1;
+        }
+    }
+    else {
+        loop->meth = BUFFER_UFUNCLOOP;
+        loop->swap = !(PyArray_ISNOTSWAPPED(aar));
+    }
+
+    /* Determine if object arrays are involved */
+    if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT)
+        loop->obj = 1;
+    else
+        loop->obj = 0;
+
+    if (loop->meth == ZERODIM_REDUCELOOP) {
+        idarr = _getidentity(self, otype, str);
+        if (idarr == NULL) goto fail;
+        if (idarr->descr->elsize > UFUNC_MAXIDENTITY) {
+            PyErr_Format(PyExc_RuntimeError,
+                         "UFUNC_MAXIDENTITY (%d)" \
+                         " is too small (needs to be at least %d)",
+                         UFUNC_MAXIDENTITY, idarr->descr->elsize);
+            Py_DECREF(idarr);
+            goto fail;
+        }
+        memcpy(loop->idptr, idarr->data, idarr->descr->elsize);
+        Py_DECREF(idarr);
+    }
+
+    /* Construct return array */
+    switch(operation) {
+    case UFUNC_REDUCE:
+        /* reduced axis is removed from the output shape */
+        for (j=0, i=0; i<nd; i++) {
+            if (i != axis)
+                loop_i[j++] = (aar)->dimensions[i];
+
+        }
+        loop->ret = (PyArrayObject *) \
+            PyArray_New(aar->ob_type, aar->nd-1, loop_i, otype,
+                        NULL, NULL, 0, 0, (PyObject *)aar);
+        break;
+    case UFUNC_ACCUMULATE:
+        /* output has the full input shape */
+        loop->ret = (PyArrayObject *) \
+            PyArray_New(aar->ob_type, aar->nd, aar->dimensions,
+                        otype, NULL, NULL, 0, 0, (PyObject *)aar);
+        break;
+    case UFUNC_REDUCEAT:
+        memcpy(loop_i, aar->dimensions, nd*sizeof(intp));
+        /* Index is 1-d array */
+        loop_i[axis] = ind_size;
+        loop->ret = (PyArrayObject *)\
+            PyArray_New(aar->ob_type, aar->nd, loop_i, otype,
+                        NULL, NULL, 0, 0, (PyObject *)aar);
+        if (loop->ret == NULL) goto fail;
+        if (ind_size == 0) {
+            loop->meth = ZERODIM_REDUCELOOP;
+            return loop;
+        }
+        if (loop->meth == ONEDIM_REDUCELOOP)
+            loop->meth = NOBUFFER_REDUCELOOP;
+        break;
+    }
+    if (loop->ret == NULL) goto fail;
+    loop->insize = aar->descr->elsize;
+    loop->outsize = loop->ret->descr->elsize;
+    loop->bufptr[1] = loop->ret->data;
+
+    if (loop->meth == ZERODIM_REDUCELOOP) {
+        loop->size = PyArray_SIZE(loop->ret);
+        return loop;
+    }
+
+    loop->it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)aar);
+    /* BUG FIX: was `return NULL`, leaking loop, loop->ret and the
+       ufunc reference on iterator-creation failure */
+    if (loop->it == NULL) goto fail;
+
+    if (loop->meth == ONEDIM_REDUCELOOP) {
+        loop->size = loop->it->size;
+        return loop;
+    }
+
+    /* Fix iterator to loop over correct dimension */
+    /* Set size in axis dimension to 1 */
+
+    loop->it->contiguous = 0;
+    loop->it->size /= (loop->it->dims_m1[axis]+1);
+    loop->it->dims_m1[axis] = 0;
+    loop->it->backstrides[axis] = 0;
+
+
+    loop->size = loop->it->size;
+
+    if (operation == UFUNC_REDUCE) {
+        loop->steps[1] = 0;
+    }
+    else {
+        loop->rit = (PyArrayIterObject *) \
+            PyArray_IterNew((PyObject *)(loop->ret));
+        /* BUG FIX: was `return NULL` -- same leak as above */
+        if (loop->rit == NULL) goto fail;
+
+        /* Fix iterator to loop over correct dimension */
+        /* Set size in axis dimension to 1 */
+
+        loop->rit->contiguous = 0;
+        loop->rit->size /= (loop->rit->dims_m1[axis]+1);
+        loop->rit->dims_m1[axis] = 0;
+        loop->rit->backstrides[axis] = 0;
+
+        if (operation == UFUNC_ACCUMULATE)
+            loop->steps[1] = loop->ret->strides[axis];
+        else
+            loop->steps[1] = 0;
+    }
+    loop->steps[2] = loop->steps[1];
+    loop->bufptr[2] = loop->bufptr[1] + loop->steps[2];
+
+
+    if (loop->meth == BUFFER_UFUNCLOOP) {
+        int _size;
+        loop->steps[0] = loop->outsize;
+        if (otype != aar->descr->type_num) {
+            /* one allocation split into a copy buffer and a cast
+               buffer */
+            _size=loop->bufsize*(loop->outsize + \
+                                 aar->descr->elsize);
+            loop->buffer = PyDataMem_NEW(_size);
+            if (loop->buffer == NULL) goto fail;
+            if (loop->obj) memset(loop->buffer, 0, _size);
+            loop->castbuf = loop->buffer + \
+                loop->bufsize*aar->descr->elsize;
+            loop->bufptr[0] = loop->castbuf;
+            loop->cast = aar->descr->f->cast[otype];
+        }
+        else {
+            _size = loop->bufsize * loop->outsize;
+            loop->buffer = PyDataMem_NEW(_size);
+            if (loop->buffer == NULL) goto fail;
+            if (loop->obj) memset(loop->buffer, 0, _size);
+            loop->bufptr[0] = loop->buffer;
+        }
+    }
+
+
+    PyUFunc_clearfperr();
+    return loop;
+
+ fail:
+    ufuncreduce_dealloc(loop);
+    return NULL;
+}
+
+
+/* We have two basic kinds of loops */
+/* One is used when arr is not-swapped and aligned and output type
+ is the same as input type.
+ and another using buffers when one of these is not satisfied.
+
+ Zero-length and one-length axes-to-be-reduced are handled separately.
+*/
+
+/* Reduce `arr` along `axis`, producing an output array of type
+   `otype`.
+
+   Strategy is chosen by construct_reduce(): a no-buffer loop when the
+   input is behaved (aligned, not byte-swapped) and already of the
+   output type, and a buffered loop (copy/cast through loop->buffer)
+   otherwise.  Zero-length axes broadcast the ufunc identity into the
+   output; length-one axes are a plain copy.
+
+   Returns a new reference to the result array, or NULL with an
+   exception set. */
+static PyObject *
+PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, int axis, int otype)
+{
+    PyArrayObject *ret = NULL;
+    PyUFuncReduceObject *loop;
+    intp i, n;
+    char *dptr;
+    BEGIN_THREADS_DEF
+
+    /* Construct loop object */
+    loop = construct_reduce(self, &arr, axis, otype, UFUNC_REDUCE, 0,
+                            "reduce");
+    if (!loop) return NULL;
+
+    LOOP_BEGIN_THREADS
+    switch(loop->meth) {
+    case ZERODIM_REDUCELOOP:
+        /* Zero-length reduction axis: fill the output with the
+           ufunc identity (loop->idptr). */
+        for (i = 0; i < loop->size; i++) {
+            if (loop->obj) Py_INCREF(*((PyObject **)loop->idptr));
+            memmove(loop->bufptr[1], loop->idptr, loop->outsize);
+            loop->bufptr[1] += loop->outsize;
+        }
+        break;
+    case ONEDIM_REDUCELOOP:
+        /* Length-one axis: the reduction is a straight copy. */
+        while (loop->index < loop->size) {
+            if (loop->obj) Py_INCREF(*((PyObject **)loop->it->dataptr));
+            memmove(loop->bufptr[1], loop->it->dataptr,
+                    loop->outsize);
+            PyArray_ITER_NEXT(loop->it);
+            loop->bufptr[1] += loop->outsize;
+            loop->index++;
+        }
+        break;
+    case NOBUFFER_UFUNCLOOP:
+        while (loop->index < loop->size) {
+            /* Copy first element to output */
+            if (loop->obj)
+                Py_INCREF(*((PyObject **)loop->it->dataptr));
+            memmove(loop->bufptr[1], loop->it->dataptr,
+                    loop->outsize);
+            /* Adjust input pointer past the seeded element */
+            loop->bufptr[0] = loop->it->dataptr + loop->steps[0];
+            loop->function((char **)loop->bufptr,
+                           &(loop->N),
+                           loop->steps, loop->funcdata);
+            UFUNC_CHECK_ERROR(loop);
+
+            /* BUG FIX: the original invocation below was missing its
+               trailing semicolon. */
+            PyArray_ITER_NEXT(loop->it);
+            loop->bufptr[1] += loop->outsize;
+            loop->bufptr[2] = loop->bufptr[1];
+            loop->index++;
+        }
+        break;
+    case BUFFER_UFUNCLOOP:
+        /* use buffer for arr */
+        /*
+          For each row to reduce
+          1. copy first item over to output (casting if necessary)
+          2. Fill inner buffer
+          3. When buffer is filled or end of row
+             a. Cast input buffers if needed
+             b. Call inner function.
+          4. Repeat 2 until row is done.
+        */
+        while (loop->index < loop->size) {
+            loop->inptr = loop->it->dataptr;
+            /* Copy (cast) first term over to output */
+            if (loop->cast) {
+                /* A little tricky because we need to cast it first */
+                arr->descr->f->copyswap(loop->buffer,
+                                        loop->inptr,
+                                        loop->swap,
+                                        loop->insize);
+                loop->cast(loop->buffer, loop->castbuf,
+                           1, NULL, NULL);
+                if (loop->obj)
+                    Py_INCREF(*((PyObject **)loop->castbuf));
+                memcpy(loop->bufptr[1], loop->castbuf,
+                       loop->outsize);
+            }
+            else { /* Simple copy */
+                arr->descr->f->copyswap(loop->bufptr[1],
+                                        loop->inptr,
+                                        loop->swap,
+                                        loop->insize);
+            }
+            loop->inptr += loop->instrides;
+            n = 1;
+            while (n < loop->N) {
+                /* Copy up to loop->bufsize elements to buffer */
+                dptr = loop->buffer;
+                for (i = 0; i < loop->bufsize; i++, n++) {
+                    if (n == loop->N) break;
+                    arr->descr->f->copyswap(dptr,
+                                            loop->inptr,
+                                            loop->swap,
+                                            loop->insize);
+                    loop->inptr += loop->instrides;
+                    dptr += loop->insize;
+                }
+                if (loop->cast)
+                    loop->cast(loop->buffer,
+                               loop->castbuf,
+                               i, NULL, NULL);
+                loop->function((char **)loop->bufptr,
+                               &i,
+                               loop->steps, loop->funcdata);
+                UFUNC_CHECK_ERROR(loop);
+            }
+            PyArray_ITER_NEXT(loop->it);
+            loop->bufptr[1] += loop->outsize;
+            loop->bufptr[2] = loop->bufptr[1];
+            loop->index++;
+        }
+    }
+
+    LOOP_END_THREADS
+
+    ret = loop->ret;
+    /* Hang on to this reference -- will be decref'd with loop */
+    Py_INCREF(ret);
+    ufuncreduce_dealloc(loop);
+    return (PyObject *)ret;
+
+ fail:
+    LOOP_END_THREADS
+
+    if (loop) ufuncreduce_dealloc(loop);
+    return NULL;
+}
+
+
+/* Accumulate `arr` along `axis` into an output of type `otype`:
+   every partial reduction result is stored, so the output keeps the
+   input's length along `axis`.  Same four loop strategies as
+   PyUFunc_Reduce; the output walks its own iterator (loop->rit).
+   Returns a new reference or NULL with an exception set.
+   NOTE(review): unlike PyUFunc_Reduce there is no BEGIN_THREADS_DEF
+   declaration before LOOP_BEGIN_THREADS here -- confirm the thread
+   macros do not require it. */
+static PyObject *
+PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, int axis,
+                   int otype)
+{
+    PyArrayObject *ret=NULL;
+    PyUFuncReduceObject *loop;
+    intp i, n;
+    char *dptr;
+
+    /* Construct loop object */
+    loop = construct_reduce(self, &arr, axis, otype, UFUNC_ACCUMULATE, 0,
+                            "accumulate");
+    if (!loop) return NULL;
+
+    LOOP_BEGIN_THREADS
+    switch(loop->meth) {
+    case ZERODIM_REDUCELOOP: /* Accumulate */
+        /* Zero-length axis: fill output with the ufunc identity. */
+        /* fprintf(stderr, "ZERO..%d\n", loop->size); */
+        for(i=0; i<loop->size; i++) {
+            if (loop->obj)
+                Py_INCREF(*((PyObject **)loop->idptr));
+            memcpy(loop->bufptr[1], loop->idptr, loop->outsize);
+            loop->bufptr[1] += loop->outsize;
+        }
+        break;
+    case ONEDIM_REDUCELOOP: /* Accumulate */
+        /* Length-one axis: accumulation is a plain copy. */
+        /* fprintf(stderr, "ONEDIM..%d\n", loop->size); */
+        while(loop->index < loop->size) {
+            if (loop->obj)
+                Py_INCREF(*((PyObject **)loop->it->dataptr));
+            memcpy(loop->bufptr[1], loop->it->dataptr,
+                   loop->outsize);
+            PyArray_ITER_NEXT(loop->it);
+            loop->bufptr[1] += loop->outsize;
+            loop->index++;
+        }
+        break;
+    case NOBUFFER_UFUNCLOOP: /* Accumulate */
+        /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */
+        while(loop->index < loop->size) {
+            /* Copy first element to output */
+            if (loop->obj)
+                Py_INCREF(*((PyObject **)loop->it->dataptr));
+            memcpy(loop->bufptr[1], loop->it->dataptr,
+                   loop->outsize);
+            /* Adjust input pointer past the seeded element */
+            loop->bufptr[0] = loop->it->dataptr+loop->steps[0];
+            loop->function((char **)loop->bufptr,
+                           &(loop->N),
+                           loop->steps, loop->funcdata);
+            UFUNC_CHECK_ERROR(loop);
+
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            /* output pointers track the result iterator */
+            loop->bufptr[1] = loop->rit->dataptr;
+            loop->bufptr[2] = loop->bufptr[1] + loop->steps[1];
+            loop->index++;
+        }
+        break;
+    case BUFFER_UFUNCLOOP: /* Accumulate */
+        /* use buffer for arr */
+        /*
+          For each row to reduce
+          1. copy identity over to output (casting if necessary)
+          2. Fill inner buffer
+          3. When buffer is filled or end of row
+             a. Cast input buffers if needed
+             b. Call inner function.
+          4. Repeat 2 until row is done.
+        */
+        /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size,
+           loop->cast); */
+        while(loop->index < loop->size) {
+            loop->inptr = loop->it->dataptr;
+            /* Copy (cast) First term over to output */
+            if (loop->cast) {
+                /* A little tricky because we need to
+                   cast it first */
+                arr->descr->f->copyswap(loop->buffer,
+                                        loop->inptr,
+                                        loop->swap,
+                                        loop->insize);
+                loop->cast(loop->buffer, loop->castbuf,
+                           1, NULL, NULL);
+                if (loop->obj)
+                    Py_INCREF(*((PyObject **)loop->castbuf));
+                memcpy(loop->bufptr[1], loop->castbuf,
+                       loop->outsize);
+            }
+            else { /* Simple copy */
+                arr->descr->f->copyswap(loop->bufptr[1],
+                                        loop->inptr,
+                                        loop->swap,
+                                        loop->insize);
+            }
+            loop->inptr += loop->instrides;
+            n = 1;
+            while(n < loop->N) {
+                /* Copy up to loop->bufsize elements to
+                   buffer */
+                dptr = loop->buffer;
+                for (i=0; i<loop->bufsize; i++, n++) {
+                    if (n == loop->N) break;
+                    arr->descr->f->copyswap(dptr,
+                                            loop->inptr,
+                                            loop->swap,
+                                            loop->insize);
+                    loop->inptr += loop->instrides;
+                    dptr += loop->insize;
+                }
+                if (loop->cast)
+                    loop->cast(loop->buffer,
+                               loop->castbuf,
+                               i, NULL, NULL);
+                loop->function((char **)loop->bufptr,
+                               &i,
+                               loop->steps, loop->funcdata);
+                UFUNC_CHECK_ERROR(loop);
+            }
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            loop->bufptr[1] = loop->rit->dataptr;
+            loop->bufptr[2] = loop->bufptr[1] + loop->steps[1];
+            loop->index++;
+        }
+    }
+
+    LOOP_END_THREADS
+    ret = loop->ret;
+    /* Hang on to this reference -- will be decref'd with loop */
+    Py_INCREF(ret);
+    ufuncreduce_dealloc(loop);
+    return (PyObject *)ret;
+
+ fail:
+    LOOP_END_THREADS
+
+    if (loop) ufuncreduce_dealloc(loop);
+    return NULL;
+}
+
+/* Reduceat performs a reduce over an axis using the indices as a guide
+
+op.reduceat(array,indices) computes
+op.reduce(array[indices[i]:indices[i+1]])
+ for i=0..end with an implicit indices[i+1]=len(array)
+ assumed when i=end-1
+
+if indices[i+1] <= indices[i]+1
+ then the result is array[indices[i]] for that value
+
+op.accumulate(array) is the same as
+op.reduceat(array,indices)[::2]
+where indices is range(len(array)-1) with a zero placed in every other sample
+ indices = zeros(len(array)*2-1)
+ indices[1::2] = range(1,len(array))
+
+output shape is based on the size of indices
+ */
+
+/* Reduce `arr` over `axis` in slices delimited by the index array
+   `ind` (see the block comment above for the exact semantics).
+   Indices are bounds-checked up front against the axis length.
+   Returns a new reference or NULL with an exception set.
+   NOTE(review): like Accumulate, no BEGIN_THREADS_DEF precedes
+   LOOP_BEGIN_THREADS here -- confirm the macros don't need it. */
+static PyObject *
+PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind,
+                 int axis, int otype)
+{
+    PyArrayObject *ret;
+    PyUFuncReduceObject *loop;
+    intp *ptr=(intp *)ind->data;
+    intp nn=ind->dimensions[0];        /* number of slices */
+    intp mm=arr->dimensions[axis]-1;   /* largest legal index */
+    intp n, i, j;
+    char *dptr;
+
+    /* Check for out-of-bounds values in indices array */
+    for (i=0; i<nn; i++) {
+        if ((*ptr < 0) || (*ptr > mm)) {
+            PyErr_Format(PyExc_IndexError,
+                         "index out-of-bounds (0, %d)", (int) mm);
+            return NULL;
+        }
+        ptr++;
+    }
+
+    ptr = (intp *)ind->data;
+    /* Construct loop object */
+    loop = construct_reduce(self, &arr, axis, otype, UFUNC_REDUCEAT, nn,
+                            "reduceat");
+    if (!loop) return NULL;
+
+    LOOP_BEGIN_THREADS
+    switch(loop->meth) {
+    /* zero-length index -- return array immediately */
+    case ZERODIM_REDUCELOOP:
+        /* fprintf(stderr, "ZERO..\n"); */
+        break;
+    /* NOBUFFER -- behaved array and same type */
+    case NOBUFFER_UFUNCLOOP: /* Reduceat */
+        /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */
+        while(loop->index < loop->size) {
+            ptr = (intp *)ind->data;
+            for (i=0; i<nn; i++) {
+                /* seed output with the element at this index */
+                loop->bufptr[0] = loop->it->dataptr +
+                    (*ptr)*loop->instrides;
+                if (loop->obj)
+                    Py_INCREF(*((PyObject **)loop->bufptr[0]));
+                memcpy(loop->bufptr[1], loop->bufptr[0],
+                       loop->outsize);
+                /* mm = remaining elements in this slice; the last
+                   slice implicitly extends to the end of the axis */
+                mm = (i==nn-1 ? arr->dimensions[axis]-*ptr :
+                      *(ptr+1) - *ptr) - 1;
+                if (mm > 0) {
+                    loop->bufptr[0] += loop->instrides;
+                    loop->bufptr[2] = loop->bufptr[1];
+                    loop->function((char **)loop->bufptr,
+                                   &mm, loop->steps,
+                                   loop->funcdata);
+                    UFUNC_CHECK_ERROR(loop);
+                }
+                loop->bufptr[1] += loop->ret->strides[axis];
+                ptr++;
+            }
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            loop->bufptr[1] = loop->rit->dataptr;
+            loop->index++;
+        }
+        break;
+
+    /* BUFFER -- misbehaved array or different types */
+    case BUFFER_UFUNCLOOP: /* Reduceat */
+        /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */
+        while(loop->index < loop->size) {
+            ptr = (intp *)ind->data;
+            for (i=0; i<nn; i++) {
+                /* seed output with the ufunc identity */
+                if (loop->obj)
+                    Py_INCREF(*((PyObject **)loop->idptr));
+                memcpy(loop->bufptr[1], loop->idptr,
+                       loop->outsize);
+                n = 0;
+                mm = (i==nn-1 ? arr->dimensions[axis] - *ptr :
+                      *(ptr+1) - *ptr);
+                if (mm < 1) mm = 1;
+                loop->inptr = loop->it->dataptr +
+                    (*ptr)*loop->instrides;
+                while (n < mm) {
+                    /* Copy up to loop->bufsize elements
+                       to buffer */
+                    dptr = loop->buffer;
+                    for (j=0; j<loop->bufsize; j++, n++) {
+                        if (n == mm) break;
+                        arr->descr->f->copyswap(dptr,
+                                                loop->inptr,
+                                                loop->swap,
+                                                loop->insize);
+                        loop->inptr += loop->instrides;
+                        dptr += loop->insize;
+                    }
+                    if (loop->cast)
+                        loop->cast(loop->buffer,
+                                   loop->castbuf,
+                                   j, NULL, NULL);
+                    loop->bufptr[2] = loop->bufptr[1];
+                    loop->function((char **)loop->bufptr,
+                                   &j, loop->steps,
+                                   loop->funcdata);
+                    UFUNC_CHECK_ERROR(loop);
+                }
+                loop->bufptr[1] += loop->ret->strides[axis];
+                ptr++;
+            }
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            loop->bufptr[1] = loop->rit->dataptr;
+            loop->index++;
+        }
+        break;
+    }
+
+    LOOP_END_THREADS
+
+    ret = loop->ret;
+    /* Hang on to this reference -- will be decref'd with loop */
+    Py_INCREF(ret);
+    ufuncreduce_dealloc(loop);
+    return (PyObject *)ret;
+
+ fail:
+    LOOP_END_THREADS
+
+    if (loop) ufuncreduce_dealloc(loop);
+    return NULL;
+}
+
+
+/* This code handles reduce, reduceat, and accumulate
+ (accumulate and reduce are special cases of the more general reduceat
+ but they are handled separately for speed)
+*/
+
+/* Common argument parsing/validation driver for the reduce,
+   accumulate and reduceat methods.  `operation` selects which worker
+   (PyUFunc_Reduce / PyUFunc_Accumulate / PyUFunc_Reduceat) is
+   invoked.  Returns the (possibly __array_wrap__-ped) result, or
+   NULL with an exception set. */
+static PyObject *
+PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args,
+                         PyObject *kwds, int operation)
+{
+    int axis = 0;
+    PyArrayObject *mp = NULL, *ret = NULL;
+    PyObject *op, *res = NULL;
+    PyObject *obj_ind;
+    PyArrayObject *indices = NULL;
+    PyArray_Descr *otype = NULL;
+    static char *kwlist1[] = {"array", "axis", "dtype", NULL};
+    static char *kwlist2[] = {"array", "indices", "axis", "dtype", NULL};
+    static char *_reduce_type[] = {"reduce", "accumulate",
+                                   "reduceat", NULL};
+    if (self == NULL) {
+        PyErr_SetString(PyExc_ValueError, "function not supported");
+        return NULL;
+    }
+
+    if (self->nin != 2) {
+        PyErr_Format(PyExc_ValueError,
+                     "%s only supported for binary functions",
+                     _reduce_type[operation]);
+        return NULL;
+    }
+    if (self->nout != 1) {
+        PyErr_Format(PyExc_ValueError,
+                     "%s only supported for functions "
+                     "returning a single value",
+                     _reduce_type[operation]);
+        return NULL;
+    }
+
+    if (operation == UFUNC_REDUCEAT) {
+        PyArray_Descr *indtype;
+        indtype = PyArray_DescrFromType(PyArray_INTP);
+        if (indtype == NULL) return NULL;
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|iO&", kwlist2,
+                                         &op, &obj_ind, &axis,
+                                         PyArray_DescrConverter,
+                                         &otype)) {
+            /* BUG FIX: indtype (and a possibly-converted otype)
+               leaked on parse failure */
+            Py_DECREF(indtype);
+            Py_XDECREF(otype);
+            return NULL;
+        }
+        /* NOTE(review): if PyArray_FromAny steals the indtype
+           reference, the Py_DECREF(indtype) below over-releases it;
+           kept from the original -- confirm against the
+           PyArray_FromAny contract. */
+        indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype,
+                                                   1, 1, CARRAY_FLAGS);
+        if (indices == NULL) {
+            Py_XDECREF(otype);   /* BUG FIX: was leaked here */
+            return NULL;
+        }
+        Py_DECREF(indtype);
+    }
+    else {
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&", kwlist1,
+                                         &op, &axis,
+                                         PyArray_DescrConverter,
+                                         &otype)) {
+            Py_XDECREF(otype);
+            return NULL;
+        }
+    }
+
+    /* Ensure input is an array */
+    mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0);
+    if (mp == NULL) goto fail;
+
+    /* Check to see if input is zero-dimensional */
+    if (mp->nd == 0) {
+        PyErr_Format(PyExc_TypeError, "cannot %s on a scalar",
+                     _reduce_type[operation]);
+        goto fail;
+    }
+
+    /* Check to see that type (and otype) is not FLEXIBLE */
+    if (PyArray_ISFLEXIBLE(mp) ||
+        (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) {
+        PyErr_Format(PyExc_TypeError,
+                     "cannot perform %s with flexible type",
+                     _reduce_type[operation]);
+        goto fail;
+    }
+
+    if (axis < 0) axis += mp->nd;
+    if (axis < 0 || axis >= mp->nd) {
+        PyErr_SetString(PyExc_ValueError, "axis not in array");
+        goto fail;
+    }
+
+    /* Get default type to reduce over if not given */
+    if (otype == NULL) {
+        /* For integer types --- makes sure at least a long is used */
+        int typenum = PyArray_TYPE(mp);
+        if (PyTypeNum_ISINTEGER(typenum) &&
+            (mp->descr->elsize < sizeof(long))) {
+            if (PyTypeNum_ISUNSIGNED(typenum))
+                typenum = PyArray_ULONG;
+            else
+                typenum = PyArray_LONG;
+        }
+        else if (PyTypeNum_ISBOOL(typenum) &&
+                 ((strcmp(self->name, "add") == 0) ||
+                  (strcmp(self->name, "multiply") == 0))) {
+            /* add/multiply of booleans count/product, so widen */
+            typenum = PyArray_LONG;
+        }
+        otype = PyArray_DescrFromType(typenum);
+    }
+
+    switch(operation) {
+    case UFUNC_REDUCE:
+        ret = (PyArrayObject *)PyUFunc_Reduce(self, mp, axis,
+                                              otype->type_num);
+        break;
+    case UFUNC_ACCUMULATE:
+        ret = (PyArrayObject *)PyUFunc_Accumulate(self, mp, axis,
+                                                  otype->type_num);
+        break;
+    case UFUNC_REDUCEAT:
+        ret = (PyArrayObject *)PyUFunc_Reduceat(self, mp, indices,
+                                                axis, otype->type_num);
+        Py_DECREF(indices);
+        indices = NULL;
+        break;
+    }
+    Py_DECREF(mp);
+    Py_DECREF(otype);
+    if (ret == NULL) return NULL;
+
+    /* give a non-array input a chance to wrap the result */
+    if (op->ob_type != ret->ob_type) {
+        res = PyObject_CallMethod(op, "__array_wrap__", "O", ret);
+        if (res == NULL) PyErr_Clear();
+        else if (res == Py_None) Py_DECREF(res);
+        else {
+            Py_DECREF(ret);
+            return res;
+        }
+    }
+    return PyArray_Return(ret);
+
+ fail:
+    /* BUG FIX: these error paths previously leaked otype and, for
+       reduceat, the indices array */
+    Py_XDECREF(otype);
+    Py_XDECREF(indices);
+    Py_XDECREF(mp);
+    return NULL;
+}
+
+
+
+/* ---------- */
+
+/* Scan the argument tuple for the object whose __array_wrap__ should
+   wrap the outputs: among non-array, non-scalar arguments exposing a
+   callable __array_wrap__, pick the one with the largest
+   __array_priority__ (a missing or unparseable priority falls back
+   to PyArray_SUBTYPE_PRIORITY; ties go to the earliest argument).
+   Returns a BORROWED reference, or NULL when no candidate exists. */
+static PyObject *
+_find_array_wrap(PyObject *args)
+{
+    int nargs, i;
+    int np = 0;          /* number of wrap candidates found */
+    int argmax = 0;      /* index of highest-priority candidate */
+    int val;
+    double priority[MAX_ARGS];
+    double maxpriority = PyArray_SUBTYPE_PRIORITY;
+    PyObject *with_wrap[MAX_ARGS];
+    PyObject *attr;
+    PyObject *obj;
+
+    nargs = PyTuple_Size(args);
+    for (i=0; i<nargs; i++) {
+        obj = PyTuple_GET_ITEM(args, i);
+        /* exact arrays and scalars never provide a wrap */
+        if (PyArray_CheckExact(obj) || PyBigArray_CheckExact(obj) ||
+            PyArray_IsAnyScalar(obj))
+            continue;
+        attr = PyObject_GetAttrString(obj, "__array_wrap__");
+        if (attr != NULL) {
+            val = PyCallable_Check(attr);
+            Py_DECREF(attr);
+            if (val) {
+                attr = PyObject_GetAttrString(obj,
+                                              "__array_priority__");
+                if (attr == NULL)
+                    priority[np] = PyArray_SUBTYPE_PRIORITY;
+                else {
+                    priority[np] = PyFloat_AsDouble(attr);
+                    if (PyErr_Occurred()) {
+                        PyErr_Clear();
+                        priority[np] = PyArray_SUBTYPE_PRIORITY;
+                    }
+                    Py_DECREF(attr);
+                }
+                with_wrap[np] = obj;
+                np += 1;
+            }
+        }
+        /* swallow AttributeError from the failed lookups */
+        PyErr_Clear();
+    }
+
+    if (np == 0) return NULL;
+
+    for (i=0; i<np; i++) {
+        if (priority[i] > maxpriority) {
+            maxpriority = priority[i];
+            argmax = i;
+        }
+    }
+
+    return with_wrap[argmax];
+}
+
+/* tp_call implementation: run the ufunc on `args`, then wrap each
+   output with the winning input's __array_wrap__ (if any).  Returns
+   a single object for one output, a tuple for several, NULL on
+   error, or Py_NotImplemented so Python can try the reflected op. */
+static PyObject *
+ufunc_generic_call(PyUFuncObject *self, PyObject *args)
+{
+    int i;
+    PyTupleObject *ret;
+    PyArrayObject *mps[MAX_ARGS];
+    PyObject *retobj[MAX_ARGS];
+    PyObject *res;
+    PyObject *obj;
+    int errval;
+
+    /* Initialize all array objects to NULL to make cleanup easier
+       if something goes wrong. */
+    for (i = 0; i < self->nargs; i++) mps[i] = NULL;
+
+    errval = PyUFunc_GenericFunction(self, args, mps);
+    if (errval < 0) {
+        for (i = 0; i < self->nargs; i++) Py_XDECREF(mps[i]);
+        if (errval == -1)
+            return NULL;          /* exception already set */
+        else {
+            Py_INCREF(Py_NotImplemented);
+            return Py_NotImplemented;
+        }
+    }
+
+    /* input references are no longer needed */
+    for (i = 0; i < self->nin; i++) Py_DECREF(mps[i]);
+
+    /* Use __array_wrap__ on all outputs
+       if present on one of the input arguments.
+       If present for multiple inputs:
+       use __array_wrap__ of input object with largest
+       __array_priority__ (default = 0.0)
+    */
+    obj = _find_array_wrap(args);   /* borrowed reference, may be NULL */
+
+    /* wrap outputs */
+    for (i = 0; i < self->nout; i++) {
+        int j = self->nin + i;
+        /* check to see if any UPDATEIFCOPY flags are set
+           which meant that a temporary output was generated */
+        if (mps[j]->flags & UPDATEIFCOPY) {
+            PyObject *old = mps[j]->base;
+            Py_INCREF(old);     /* we want to hang on to this */
+            Py_DECREF(mps[j]);  /* should trigger the copy back into old */
+            mps[j] = (PyArrayObject *)old;
+        }
+        if (obj != NULL) {
+            res = PyObject_CallMethod(obj, "__array_wrap__",
+                                      "O", mps[j]);
+            if (res == NULL) PyErr_Clear();
+            else if (res == Py_None) Py_DECREF(res);
+            else {
+                Py_DECREF(mps[j]);
+                retobj[i] = res;
+                continue;
+            }
+        }
+        retobj[i] = PyArray_Return(mps[j]);
+    }
+
+    if (self->nout == 1) {
+        return retobj[0];
+    }
+    else {
+        ret = (PyTupleObject *)PyTuple_New(self->nout);
+        /* BUG FIX: a NULL result from PyTuple_New was previously
+           handed straight to PyTuple_SET_ITEM (crash on memory
+           error).  Release the wrapped outputs and fail cleanly. */
+        if (ret == NULL) {
+            for (i = 0; i < self->nout; i++) Py_XDECREF(retobj[i]);
+            return NULL;
+        }
+        for (i = 0; i < self->nout; i++) {
+            PyTuple_SET_ITEM(ret, i, retobj[i]);
+        }
+        return (PyObject *)ret;
+    }
+}
+
+/* Re-derive the PyUFunc_USEDEFAULTS fast-path flag after the
+   error-mode state changes: the flag is set only when the current
+   error mask, buffer size and error callback all equal the defaults.
+   Takes no arguments; returns None.
+   NOTE(review): errobj returned by PyUFunc_GetPyValues looks like a
+   new reference that is never released here -- possible leak;
+   confirm against PyUFunc_GetPyValues' contract. */
+static PyObject *
+ufunc_update_use_defaults(PyObject *dummy, PyObject *args)
+{
+    PyObject *errobj;
+    int errmask, bufsize;
+
+    /* no arguments accepted */
+    if (!PyArg_ParseTuple(args, "")) return NULL;
+
+    PyUFunc_USEDEFAULTS = 0;
+    if (PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj) < 0) return NULL;
+
+    if ((errmask == UFUNC_ERR_DEFAULT) &&
+        (bufsize == PyArray_BUFSIZE) &&
+        (PyTuple_GET_ITEM(errobj, 1) == Py_None)) {
+        PyUFunc_USEDEFAULTS = 1;
+    }
+
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+/* Single generic object-loop used by every frompyfunc-created ufunc. */
+static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om};
+
+/* docstring for the module-level frompyfunc() factory below */
+static char
+doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays";
+
+/* Build a ufunc from an arbitrary Python callable with nin inputs
+   and nout outputs.  All argument types are OBJECT; the name/types
+   tables live in one self->ptr allocation owned by the ufunc.
+   Returns the new ufunc or NULL with an exception set. */
+static PyObject *
+ufunc_frompyfunc(PyObject *dummy, PyObject *args, PyObject *kwds) {
+    /* Keywords are ignored for now */
+
+    PyObject *function, *pyname = NULL;
+    int nin, nout, i;
+    PyUFunc_PyFuncData *fdata;
+    PyUFuncObject *self;
+    char *fname, *str;
+    int fname_len = -1;
+    int offset[2];
+
+    if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) return NULL;
+
+    if (!PyCallable_Check(function)) {
+        PyErr_SetString(PyExc_TypeError, "function must be callable");
+        return NULL;
+    }
+
+    self = _pya_malloc(sizeof(PyUFuncObject));
+    if (self == NULL) return NULL;
+    PyObject_Init((PyObject *)self, &PyUFunc_Type);
+
+    self->userloops = NULL;
+    self->nin = nin;
+    self->nout = nout;
+    self->nargs = nin + nout;
+    self->identity = PyUFunc_None;
+    self->functions = pyfunc_functions;
+
+    self->ntypes = 1;
+    self->check_return = 0;
+
+    /* fname points into pyname's internal buffer, so pyname must stay
+       alive until the name is copied below.
+       BUG FIX: the original DECREF'd pyname here and later read the
+       dangling fname pointer in the memcpy. */
+    pyname = PyObject_GetAttrString(function, "__name__");
+    if (pyname)
+        (void) PyString_AsStringAndSize(pyname, &fname, &fname_len);
+
+    if (PyErr_Occurred()) {
+        fname = "?";
+        fname_len = 1;
+        PyErr_Clear();
+    }
+
+    /* self->ptr holds a pointer for enough memory for
+       self->data[0] (fdata)
+       self->data
+       self->name
+       self->types
+
+       To be safest, all of these need their memory aligned on void *
+       pointers.  Therefore, we may need to allocate extra space. */
+    offset[0] = sizeof(PyUFunc_PyFuncData);
+    i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *));
+    if (i) offset[0] += (sizeof(void *) - i);
+    offset[1] = self->nargs;
+    i = (self->nargs % sizeof(void *));
+    if (i) offset[1] += (sizeof(void *) - i);
+
+    self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) +
+                            (fname_len + 14));
+    if (self->ptr == NULL) {
+        /* BUG FIX: the half-built ufunc object was leaked here */
+        Py_XDECREF(pyname);
+        _pya_free(self);
+        return PyErr_NoMemory();
+    }
+    Py_INCREF(function);
+    self->obj = function;
+    fdata = (PyUFunc_PyFuncData *)(self->ptr);
+    fdata->nin = nin;
+    fdata->nout = nout;
+    fdata->callable = function;
+
+    self->data = (void **)(self->ptr + offset[0]);
+    self->data[0] = (void *)fdata;
+
+    self->types = (char *)self->data + sizeof(void *);
+    for (i = 0; i < self->nargs; i++) self->types[i] = PyArray_OBJECT;
+
+    /* " (vectorized)" is 13 chars + NUL = the 14 extra bytes above */
+    str = self->types + offset[1];
+    memcpy(str, fname, fname_len);
+    memcpy(str + fname_len, " (vectorized)", 14);
+    Py_XDECREF(pyname);   /* fname no longer needed */
+
+    self->name = str;
+
+    /* Do a better job someday */
+    self->doc = "dynamic ufunc based on a python function";
+
+    return (PyObject *)self;
+}
+
+
+/*UFUNC_API*/
+/* Construct a ufunc object from caller-supplied inner-loop tables.
+   The caller retains ownership of func/data/types/name/doc; the new
+   object merely stores the pointers (self->ptr stays NULL so nothing
+   is freed by dealloc).  Returns the new ufunc or NULL on failure. */
+static PyObject *
+PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data,
+                        char *types, int ntypes,
+                        int nin, int nout, int identity,
+                        char *name, char *doc, int check_return)
+{
+    PyUFuncObject *self;
+
+    self = _pya_malloc(sizeof(PyUFuncObject));
+    if (self == NULL) return NULL;
+    PyObject_Init((PyObject *)self, &PyUFunc_Type);
+
+    /* argument bookkeeping */
+    self->nin = nin;
+    self->nout = nout;
+    self->nargs = nin + nout;
+    self->identity = identity;
+
+    /* caller-owned loop tables */
+    self->functions = func;
+    self->data = data;
+    self->types = types;
+    self->ntypes = ntypes;
+    self->check_return = check_return;
+
+    /* no dynamically-owned memory or user loops yet */
+    self->ptr = NULL;
+    self->obj = NULL;
+    self->userloops = NULL;
+
+    self->name = (name == NULL) ? "?" : name;
+    self->doc = (doc == NULL) ? "NULL" : doc;
+
+    return (PyObject *)self;
+}
+
+/*UFUNC_API*/
+/* Register a 1-d inner loop for a user-defined type.  `data` may be
+   NULL; when supplied it is stored alongside the function pointer.
+   Loops are kept in ufunc->userloops keyed by the user type number.
+   Returns 0 on success, -1 with an exception set on failure. */
+static int
+PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
+                            int usertype,
+                            PyUFuncGenericFunction function,
+                            void *data)
+{
+    PyArray_Descr *descr;
+    PyObject *key, *cobj;
+    int ret;
+
+    descr = PyArray_DescrFromType(usertype);
+    if ((usertype < PyArray_USERDEF) || (descr == NULL)) {
+        PyErr_SetString(PyExc_TypeError,
+                        "unknown type");
+        return -1;
+    }
+    Py_DECREF(descr);
+
+    if (ufunc->userloops == NULL) {
+        ufunc->userloops = PyDict_New();
+        /* BUG FIX: a failed dict allocation was previously passed
+           on to PyDict_SetItem */
+        if (ufunc->userloops == NULL) return -1;
+    }
+    key = PyInt_FromLong((long) usertype);
+    if (key == NULL) return -1;
+    cobj = PyCObject_FromVoidPtr((void *)function, NULL);
+    if (cobj == NULL) {Py_DECREF(key); return -1;}
+    if (data == NULL) {
+        ret = PyDict_SetItem(ufunc->userloops, key, cobj);
+        Py_DECREF(cobj);
+        Py_DECREF(key);
+        return ret;
+    }
+    else {
+        PyObject *cobj2, *tmp;
+        cobj2 = PyCObject_FromVoidPtr(data, NULL);
+        if (cobj2 == NULL) {
+            Py_DECREF(cobj);
+            Py_DECREF(key);
+            return -1;
+        }
+        /* "NN" steals the cobj and cobj2 references */
+        tmp = Py_BuildValue("NN", cobj, cobj2);
+        /* BUG FIX: a NULL from Py_BuildValue was previously passed
+           straight into PyDict_SetItem */
+        if (tmp == NULL) {
+            Py_DECREF(key);
+            return -1;
+        }
+        ret = PyDict_SetItem(ufunc->userloops, key, tmp);
+        Py_DECREF(tmp);
+        Py_DECREF(key);
+        return ret;
+    }
+}
+
+/* Deallocate a ufunc: free the owned scratch block (self->ptr, used
+   by frompyfunc-created ufuncs), release the user-loop dict and the
+   wrapped callable, then free the object.  _pya_free is used because
+   the object was allocated with _pya_malloc, not the type's
+   tp_alloc. */
+static void
+ufunc_dealloc(PyUFuncObject *self)
+{
+    if (self->ptr) _pya_free(self->ptr);
+    Py_XDECREF(self->userloops);
+    Py_XDECREF(self->obj);
+    _pya_free(self);
+}
+
+/* repr/str slot: returns "<ufunc 'name'>" with the name capped at
+   50 characters so the fixed buffer can never overflow. */
+static PyObject *
+ufunc_repr(PyUFuncObject *self)
+{
+    char buffer[100];
+
+    snprintf(buffer, sizeof(buffer), "<ufunc '%.50s'>", self->name);
+    return PyString_FromString(buffer);
+}
+
+
+/* -------------------------------------------------------- */
+
+/* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b)
+ where a has b.ndim NewAxis terms appended.
+
+ The result has dimensions a.ndim + b.ndim
+ */
+
+/* Implement op.outer(a, b): reshape `a` to have b.ndim trailing
+   length-1 axes and apply the binary ufunc so broadcasting produces
+   the (a.ndim + b.ndim)-dimensional outer result.  Returns the ufunc
+   call's result or NULL with an exception set. */
+static PyObject *
+ufunc_outer(PyUFuncObject *self, PyObject *args)
+{
+    int i;
+    PyObject *ret;
+    PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL;
+    PyObject *new_args, *tmp;
+    PyObject *shape1, *shape2, *newshape;
+
+    if (self->nin != 2) {
+        PyErr_SetString(PyExc_ValueError,
+                        "outer product only supported "
+                        "for binary functions");
+        return NULL;
+    }
+
+    if (PySequence_Length(args) != 2) {
+        PyErr_SetString(PyExc_TypeError,
+                        "exactly two arguments expected");
+        return NULL;
+    }
+
+    tmp = PySequence_GetItem(args, 0);
+    if (tmp == NULL) return NULL;
+    ap1 = (PyArrayObject *)
+        PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0);
+    Py_DECREF(tmp);
+    if (ap1 == NULL) return NULL;
+
+    tmp = PySequence_GetItem(args, 1);
+    if (tmp == NULL) return NULL;
+    ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0);
+    Py_DECREF(tmp);
+    if (ap2 == NULL) {Py_DECREF(ap1); return NULL;}
+
+    /* Construct new shape tuple: a's dims followed by b.ndim ones */
+    shape1 = PyTuple_New(ap1->nd);
+    if (shape1 == NULL) goto fail;
+    for (i = 0; i < ap1->nd; i++)
+        PyTuple_SET_ITEM(shape1, i,
+                         PyLong_FromLongLong((longlong)ap1->
+                                             dimensions[i]));
+
+    shape2 = PyTuple_New(ap2->nd);
+    /* BUG FIX: the NULL check previously came after the fill loop,
+       so an allocation failure crashed in PyTuple_SET_ITEM */
+    if (shape2 == NULL) {Py_DECREF(shape1); goto fail;}
+    for (i = 0; i < ap2->nd; i++)
+        PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1));
+
+    newshape = PyNumber_Add(shape1, shape2);
+    Py_DECREF(shape1);
+    Py_DECREF(shape2);
+    if (newshape == NULL) goto fail;
+
+    ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape);
+    Py_DECREF(newshape);
+    if (ap_new == NULL) goto fail;
+
+    new_args = Py_BuildValue("(OO)", ap_new, ap2);
+    /* BUG FIX: new_args was previously used without a NULL check */
+    if (new_args == NULL) goto fail;
+    Py_DECREF(ap1);
+    Py_DECREF(ap2);
+    Py_DECREF(ap_new);
+    ret = ufunc_generic_call(self, new_args);
+    Py_DECREF(new_args);
+    return ret;
+
+ fail:
+    Py_XDECREF(ap1);
+    Py_XDECREF(ap2);
+    Py_XDECREF(ap_new);
+    return NULL;
+}
+
+
+/* ufunc.reduce(array[, axis, dtype]) -- thunk onto the generic
+   reduction driver. */
+static PyObject *
+ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds)
+{
+
+    return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE);
+}
+
+/* ufunc.accumulate(array[, axis, dtype]) -- thunk onto the generic
+   reduction driver. */
+static PyObject *
+ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds)
+{
+
+    return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE);
+}
+
+/* ufunc.reduceat(array, indices[, axis, dtype]) -- thunk onto the
+   generic reduction driver. */
+static PyObject *
+ufunc_reduceat(PyUFuncObject *self, PyObject *args, PyObject *kwds)
+{
+    return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCEAT);
+}
+
+
+/* Method table for ufunc instances, exposed through ufunc_getattr
+   via Py_FindMethod. */
+static struct PyMethodDef ufunc_methods[] = {
+    {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS},
+    {"accumulate", (PyCFunction)ufunc_accumulate,
+     METH_VARARGS | METH_KEYWORDS},
+    {"reduceat", (PyCFunction)ufunc_reduceat,
+     METH_VARARGS | METH_KEYWORDS},
+    {"outer", (PyCFunction)ufunc_outer, METH_VARARGS},
+    {NULL, NULL}  /* sentinel */
+};
+
+
+
+/* construct the string
+ y1,y2,...,yn
+*/
+
+/* Write "y1,y2,...,yn" (or just "y" when num == 1) into str using
+   `ltr` as the letter.  str must hold at least 3*num bytes; num must
+   not exceed the 14 entries of the digits table (MAX_ARGS keeps it
+   well below that in practice). */
+static void
+_makeargs(int num, char ltr, char *str)
+{
+    int ind = 0;
+    int k;
+    static char *digits = "123456789ABCDE";
+
+    /* BUG FIX: num == 0 previously computed ind = -1 and wrote
+       str[-1] (buffer underwrite).  Emit the empty string instead. */
+    if (num <= 0) {
+        str[0] = '\0';
+        return;
+    }
+    if (num == 1) {
+        str[0] = ltr;
+        ind = 1;
+    }
+    else {
+        for (k = 0; k < num; k++) {
+            str[3*k] = ltr;
+            str[3*k+1] = digits[k];
+            str[3*k+2] = ',';
+        }
+        /* overwrite last comma */
+        ind = 3*k - 1;
+    }
+
+    str[ind] = '\0';
+    return;
+}
+
+/* Map an array type number to its one-character type code. */
+static char
+_typecharfromnum(int num) {
+    PyArray_Descr *d = PyArray_DescrFromType(num);
+    char code = d->type;
+
+    Py_DECREF(d);
+    return code;
+}
+
+/* tp_getattr slot: synthesizes __doc__, dispatches to the method
+   table, then serves the read-only introspection attributes (nin,
+   nout, nargs, ntypes, types, __name__, identity).  Returns a new
+   reference or NULL with AttributeError set. */
+static PyObject *
+ufunc_getattr(PyUFuncObject *self, char *name)
+{
+    PyObject *obj;
+    /* Put docstring first or FindMethod finds it...*/
+    /* could do some introspection on name and nin + nout
+       to automate the first part of it;
+       the doc string shouldn't need the calling convention */
+    if (strcmp(name, "__doc__") == 0) {
+        static char doc[256];
+        static char tmp1[3*MAX_ARGS+2];
+        static char tmp2[3*MAX_ARGS+2];
+        /* construct
+           y1,y2,... = name(x1,x2,...) __doc__ */
+        _makeargs(self->nout, 'y', tmp1);
+        _makeargs(self->nin, 'x', tmp2);
+        snprintf(doc, 256, "%s = %s(%s) %s", tmp1, self->name,
+                 tmp2, self->doc);
+        return PyString_FromString(doc);
+    }
+    obj = Py_FindMethod(ufunc_methods, (PyObject *)self, name);
+    if (obj != NULL) return obj;
+    PyErr_Clear();
+    if (strcmp(name, "nin") == 0) {
+        return PyInt_FromLong(self->nin);
+    }
+    else if (strcmp(name, "nout") == 0) {
+        return PyInt_FromLong(self->nout);
+    }
+    else if (strcmp(name, "nargs") == 0) {
+        return PyInt_FromLong(self->nargs);
+    }
+    else if (strcmp(name, "ntypes") == 0) {
+        return PyInt_FromLong(self->ntypes);
+    }
+    else if (strcmp(name, "types") == 0) {
+        /* return a list with types grouped input->output,
+           one "ii->i"-style string per registered loop */
+        PyObject *list;
+        PyObject *str;
+        int k, j, n, nt = self->ntypes;
+        int ni = self->nin;
+        int no = self->nout;
+        char *t;
+        list = PyList_New(nt);
+        if (list == NULL) return NULL;
+        t = _pya_malloc(no+ni+2);
+        /* BUG FIX: a failed allocation was previously dereferenced */
+        if (t == NULL) {
+            Py_DECREF(list);
+            return PyErr_NoMemory();
+        }
+        n = 0;
+        for (k = 0; k < nt; k++) {
+            for (j = 0; j < ni; j++) {
+                t[j] = _typecharfromnum(self->types[n]);
+                n++;
+            }
+            t[ni] = '-';
+            t[ni+1] = '>';
+            for (j = 0; j < no; j++) {
+                t[ni+2+j] =
+                    _typecharfromnum(self->types[n]);
+                n++;
+            }
+            str = PyString_FromStringAndSize(t, no+ni+2);
+            PyList_SET_ITEM(list, k, str);
+        }
+        _pya_free(t);
+        return list;
+
+    }
+    else if (strcmp(name, "__name__") == 0) {
+        return PyString_FromString(self->name);
+    }
+    else if (strcmp(name, "identity") == 0) {
+        switch(self->identity) {
+        case PyUFunc_One:
+            return PyInt_FromLong(1);
+        case PyUFunc_Zero:
+            return PyInt_FromLong(0);
+        default:
+            Py_INCREF(Py_None);
+            return Py_None;
+        }
+    }
+    PyErr_SetString(PyExc_AttributeError, name);
+    return NULL;
+}
+
+#undef _typecharfromnum
+
+/* tp_setattr slot: ufunc attributes are read-only.
+   BUG FIX: returning -1 without an exception set violates the
+   tp_setattr contract and produces a SystemError-style failure in
+   the interpreter; raise AttributeError explicitly. */
+static int
+ufunc_setattr(PyUFuncObject *self, char *name, PyObject *v)
+{
+    PyErr_SetString(PyExc_AttributeError,
+                    "attributes of ufunc objects are not settable");
+    return -1;
+}
+
+static char Ufunctype__doc__[] =
+    "Optimized functions make it possible to implement arithmetic "\
+    "with arrays efficiently";
+
+/* The ufunc type object.
+   NOTE(review): PyObject_HEAD_INIT(0) leaves the type pointer NULL;
+   presumably it is patched during module initialization -- confirm.
+   Only the slots through tp_str are initialized; everything after
+   is implicitly zero, and attribute access goes through the
+   old-style (char *) getattr/setattr slots. */
+static PyTypeObject PyUFunc_Type = {
+    PyObject_HEAD_INIT(0)
+    0,                                  /*ob_size*/
+    "scipy.ufunc",                      /*tp_name*/
+    sizeof(PyUFuncObject),              /*tp_basicsize*/
+    0,                                  /*tp_itemsize*/
+    /* methods */
+    (destructor)ufunc_dealloc,          /*tp_dealloc*/
+    (printfunc)0,                       /*tp_print*/
+    (getattrfunc)ufunc_getattr,         /*tp_getattr*/
+    (setattrfunc)ufunc_setattr,         /*tp_setattr*/
+    (cmpfunc)0,                         /*tp_compare*/
+    (reprfunc)ufunc_repr,               /*tp_repr*/
+    0,                                  /*tp_as_number*/
+    0,                                  /*tp_as_sequence*/
+    0,                                  /*tp_as_mapping*/
+    (hashfunc)0,                        /*tp_hash*/
+    (ternaryfunc)ufunc_generic_call,    /*tp_call*/
+    (reprfunc)ufunc_repr,               /*tp_str -- same as repr*/
+
+    /* Space for future expansion */
+    0L,0L,0L,0L,
+    Ufunctype__doc__                    /* Documentation string */
+};
+
+/* End of code for ufunc objects */
+/* -------------------------------------------------------- */
diff --git a/numpy/base/src/umathmodule.c.src b/numpy/base/src/umathmodule.c.src
new file mode 100644
index 000000000..5096f3361
--- /dev/null
+++ b/numpy/base/src/umathmodule.c.src
@@ -0,0 +1,1847 @@
+/* -*- c -*- */
+
+#include "Python.h"
+#include "scipy/arrayobject.h"
+#define _UMATHMODULE
+#include "scipy/ufuncobject.h"
+#include "abstract.h"
+#include <math.h>
+
+
+/* A whole slew of basic math functions are provided originally by Konrad Hinsen. */
+
+#if !defined(__STDC__) && !defined(_MSC_VER)
+extern double fmod (double, double);
+extern double frexp (double, int *);
+extern double ldexp (double, int);
+extern double modf (double, double *);
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+#ifndef HAVE_INVERSE_HYPERBOLIC
+/* Portable fallbacks used when the C library lacks the inverse
+   hyperbolic functions.  Standard identities:
+     acosh(x) = log(x + sqrt(x^2 - 1))   (factored as (x-1)*(x+1))
+     asinh(x) = sign(x) * log(|x| + sqrt(x^2 + 1))
+     atanh(x) = log((1+x)/(1-x)) / 2
+*/
+static double acosh(double x)
+{
+    return log(x + sqrt((x - 1.0)*(x + 1.0)));
+}
+
+static double asinh(double xx)
+{
+    double ax = fabs(xx);
+    double res = log(ax + sqrt(ax*ax + 1.0));
+
+    /* asinh is odd: restore the sign of the input. */
+    return (xx < 0.0) ? -res : res;
+}
+
+static double atanh(double x)
+{
+    return 0.5*log((1.0 + x)/(1.0 - x));
+}
+#endif
+
+/* hypot(x,y) = sqrt(x*x + y*y).  Use the C library's version when
+   configure found one (declaring it ourselves except on platforms
+   whose headers already provide the prototype); otherwise supply a
+   fallback definition. */
+#ifdef HAVE_HYPOT
+#if !defined(NeXT) && !defined(_MSC_VER)
+extern double hypot(double, double);
+#endif
+#else
+/* Fallback: factor out the larger magnitude and compute
+   x*sqrt(1 + (y/x)^2); unlike the naive x*x + y*y this cannot
+   overflow/underflow in the intermediate squares. */
+double hypot(double x, double y)
+{
+ double yx;
+
+ x = fabs(x);
+ y = fabs(y);
+ if (x < y) {
+ /* ensure x holds the larger magnitude */
+ double temp = x;
+ x = y;
+ y = temp;
+ }
+ if (x == 0.)
+ return 0.;
+ else {
+ yx = y/x;
+ return x*sqrt(1.+yx*yx);
+ }
+}
+#endif
+
+
+
+/* Define isnan, isinf, isfinite, signbit if needed */
+/* Use fpclassify if possible */
+/* isnan, isinf --
+ these will use macros and then fpclassify if available before
+ defaulting to a dumb convert-to-double version...
+
+ isfinite -- define a macro if not already available
+ signbit -- if macro available use it, otherwise define a function
+ and a dumb convert-to-double version for other types.
+*/
+
+#if defined(fpclassify)
+
+#if !defined(isnan)
+#define isnan(x) (fpclassify(x) == FP_NAN)
+#endif
+#if !defined(isinf)
+#define isinf(x) (fpclassify(x) == FP_INFINITE)
+#endif
+
+#else /* check to see if already have a function like this */
+
+#if !defined(HAVE_ISNAN)
+
+#if !defined(isnan)
+#include "_isnan.c"
+#endif
+#endif /* HAVE_ISNAN */
+
+#if !defined(HAVE_ISINF)
+#if !defined(isinf)
+#define isinf(x) (!isnan((x)) && isnan((x)-(x)))
+#endif
+#endif /* HAVE_ISINF */
+
+#endif /* defined(fpclassify) */
+
+
+/* Define signbit if needed */
+#if !defined(signbit)
+#include "_signbit.c"
+#endif
+
+
+/* Now define the extended type macros */
+
+#if !defined(isnan)
+
+#if !defined(HAVE_LONGDOUBLE_FUNCS) || !defined(HAVE_ISNAN)
+#define isnanl(x) isnan((double)(x))
+#endif
+
+#if !defined(HAVE_FLOAT_FUNCS) || !defined(HAVE_ISNAN)
+#define isnanf(x) isnan((double)(x))
+#endif
+
+#else /* !defined(isnan) */
+
+#define isnanl(x) isnan((x))
+#define isnanf(x) isnan((x))
+
+#endif /* !defined(isnan) */
+
+
+#if !defined(isinf)
+
+#if !defined(HAVE_LONGDOUBLE_FUNCS) || !defined(HAVE_ISINF)
+#define isinfl(x) (!isnanl((x)) && isnanl((x)-(x)))
+#endif
+
+#if !defined(HAVE_FLOAT_FUNCS) || !defined(HAVE_ISINF)
+#define isinff(x) (!isnanf((x)) && isnanf((x)-(x)))
+#endif
+
+#else /* !defined(isinf) */
+
+#define isinfl(x) isinf((x))
+#define isinff(x) isinf((x))
+
+#endif /* !defined(isinf) */
+
+
+#if !defined(signbit)
+#define signbitl(x) ((longdouble) signbit((double)(x)))
+#define signbitf(x) ((float) signbit((double) (x)))
+#else
+#define signbitl(x) signbit((x))
+#define signbitf(x) signbit((x))
+#endif
+
+#if !defined(isfinite)
+#define isfinite(x) (!(isinf((x)) || isnan((x))))
+#endif
+#define isfinitef(x) (!(isinff((x)) || isnanf((x))))
+#define isfinitel(x) (!(isinfl((x)) || isnanl((x))))
+
+
+/* First, the C functions that do the real work */
+
+/* if C99 extensions not available
+
+then define dummy functions that use the double versions for
+
+sin, cos, tan
+sinh, cosh, tanh,
+fabs, floor, ceil, fmod, sqrt, log10, log, exp, fabs
+asin, acos, atan,
+asinh, acosh, atanh
+
+hypot, atan2, pow
+
+*/
+
+/**begin repeat
+
+#kind=(sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,sqrt,log10,log,exp,asin,acos,atan)*2#
+#typ=longdouble*16, float*16#
+#c=l*16,f*16#
+#TYPE=LONGDOUBLE*16, FLOAT*16#
+*/
+#ifndef HAVE_@TYPE@_FUNCS
+@typ@ @kind@@c@(@typ@ x) {
+ return (@typ@) @kind@((double)x);
+}
+#endif
+/**end repeat**/
+
+/**begin repeat
+
+#kind=(atan2,hypot,pow,fmod)*2#
+#typ=longdouble*4, float*4#
+#c=l*4,f*4#
+#TYPE=LONGDOUBLE*4,FLOAT*4#
+*/
+#ifndef HAVE_@TYPE@_FUNCS
+@typ@ @kind@@c@(@typ@ x, @typ@ y) {
+ return (@typ@) @kind@((double)x, (double) y);
+}
+#endif
+/**end repeat**/
+
+/**begin repeat
+#kind=modf*2#
+#typ=longdouble, float#
+#c=l,f#
+#TYPE=LONGDOUBLE, FLOAT#
+*/
+/* modfl / modff fallbacks: when the C library lacks the long double
+   or float variant, round-trip through double modf().
+   NOTE(review): for long double inputs this silently loses any
+   precision beyond double -- inherent to this fallback strategy. */
+#ifndef HAVE_@TYPE@_FUNCS
+@typ@ modf@c@(@typ@ x, @typ@ *iptr) {
+ double nx, niptr, y;
+ nx = (double) x;
+ y = modf(nx, &niptr);
+ *iptr = (@typ@) niptr;
+ return (@typ@) y;
+}
+#endif
+/**end repeat**/
+
+
+#if !defined(HAVE_INVERSE_HYPERBOLIC_FLOAT)
+#ifdef HAVE_FLOAT_FUNCS
+static float acoshf(float x)
+{
+ return logf(x + sqrtf((x-1.0)*(x+1.0)));
+}
+
+static float asinhf(float xx)
+{
+ float x;
+ int sign;
+ if (xx < 0.0) {
+ sign = -1;
+ x = -xx;
+ }
+ else {
+ sign = 1;
+ x = xx;
+ }
+ return sign*logf(x + sqrtf(x*x+1.0));
+}
+
+static float atanhf(float x)
+{
+ return 0.5*logf((1.0+x)/(1.0-x));
+}
+#else
+static float acoshf(float x)
+{
+ return (float)acosh((double)(x));
+}
+
+static float asinhf(float x)
+{
+ return (float)asinh((double)(x));
+}
+
+static float atanhf(float x)
+{
+ return (float)atanh((double)(x));
+}
+#endif
+#endif
+
+
+#if !defined(HAVE_INVERSE_HYPERBOLIC_LONGDOUBLE)
+#ifdef HAVE_LONGDOUBLE_FUNCS
+static longdouble acoshl(longdouble x)
+{
+ return logl(x + sqrtl((x-1.0)*(x+1.0)));
+}
+
+static longdouble asinhl(longdouble xx)
+{
+ longdouble x;
+ int sign;
+ if (xx < 0.0) {
+ sign = -1;
+ x = -xx;
+ }
+ else {
+ sign = 1;
+ x = xx;
+ }
+ return sign*logl(x + sqrtl(x*x+1.0));
+}
+
+static longdouble atanhl(longdouble x)
+{
+ return 0.5*logl((1.0+x)/(1.0-x));
+}
+#else
+static longdouble acoshl(longdouble x)
+{
+ return (longdouble)acosh((double)(x));
+}
+
+static longdouble asinhl(longdouble x)
+{
+ return (longdouble)asinh((double)(x));
+}
+
+static longdouble atanhl(longdouble x)
+{
+ return (longdouble)atanh((double)(x));
+}
+#endif
+#endif
+
+
+
+
+/* Don't pass structures between functions (only pointers) because how
+ structures are passed is compiler dependent and could cause
+ segfaults if ufuncobject.c is compiled with a different compiler
+ than an extension that makes use of the UFUNC API
+*/
+
+/**begin repeat
+
+#typ=float, double, longdouble#
+#c=f,,l#
+*/
+
+/* constants */
+static c@typ@ nc_1@c@ = {1., 0.};
+static c@typ@ nc_half@c@ = {0.5, 0.};
+static c@typ@ nc_i@c@ = {0., 1.};
+static c@typ@ nc_i2@c@ = {0., 0.5};
+/*
+static c@typ@ nc_mi@c@ = {0., -1.};
+static c@typ@ nc_pi2@c@ = {M_PI/2., 0.};
+*/
+
+static void
+nc_sum@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r)
+{
+ r->real = a->real + b->real;
+ r->imag = a->imag + b->imag;
+ return;
+}
+
+static void
+nc_diff@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r)
+{
+ r->real = a->real - b->real;
+ r->imag = a->imag - b->imag;
+ return;
+}
+
+static void
+nc_neg@c@(c@typ@ *a, c@typ@ *r)
+{
+ r->real = -a->real;
+ r->imag = -a->imag;
+ return;
+}
+
+static void
+nc_prod@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r)
+{
+ @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag;
+ r->real = ar*br - ai*bi;
+ r->imag = ar*bi + ai*br;
+ return;
+}
+
+static void
+nc_quot@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r)
+{
+
+ @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag;
+ @typ@ d = br*br + bi*bi;
+ r->real = (ar*br + ai*bi)/d;
+ r->imag = (ai*br - ar*bi)/d;
+ return;
+}
+
+static void
+nc_floor_quot@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r)
+{
+ @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag;
+ @typ@ d = br*br + bi*bi;
+ r->real = floor@c@((ar*br + ai*bi)/d);
+ r->imag = 0;
+ return;
+}
+
+static void
+nc_sqrt@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ s,d;
+ if (x->real == 0. && x->imag == 0.)
+ *r = *x;
+ else {
+ s = sqrt@c@(0.5*(fabs@c@(x->real) + hypot@c@(x->real,x->imag)));
+ d = 0.5*x->imag/s;
+ if (x->real > 0.) {
+ r->real = s;
+ r->imag = d;
+ }
+ else if (x->imag >= 0.) {
+ r->real = d;
+ r->imag = s;
+ }
+ else {
+ r->real = -d;
+ r->imag = -s;
+ }
+ }
+ return;
+}
+
+static void
+nc_log@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ l = hypot@c@(x->real,x->imag);
+ r->imag = atan2@c@(x->imag, x->real);
+ r->real = log@c@(l);
+ return;
+}
+
+static void
+nc_exp@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ a = exp@c@(x->real);
+ r->real = a*cos@c@(x->imag);
+ r->imag = a*sin@c@(x->imag);
+ return;
+}
+
+static void
+nc_pow@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r)
+{
+ @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag;
+
+ if (br == 0. && bi == 0.) {
+ r->real = 1.;
+ r->imag = 0.;
+ }
+ else if (ar == 0. && ai == 0.) {
+ r->real = 0.;
+ r->imag = 0.;
+ }
+ else {
+ nc_log@c@(a, r);
+ nc_prod@c@(r, b, r);
+ nc_exp@c@(r, r);
+ }
+ return;
+}
+
+
+static void
+nc_prodi@c@(c@typ@ *x, c@typ@ *r)
+{
+ r->real = -x->imag;
+ r->imag = x->real;
+ return;
+}
+
+
+static void
+nc_acos@c@(c@typ@ *x, c@typ@ *r)
+{
+ nc_prod@c@(x,x,r);
+ nc_diff@c@(&nc_1@c@, r, r);
+ nc_sqrt@c@(r, r);
+ nc_prodi@c@(r, r);
+ nc_sum@c@(x, r, r);
+ nc_log@c@(r, r);
+ nc_prodi@c@(r, r);
+ nc_neg@c@(r, r);
+ return;
+ /* return nc_neg(nc_prodi(nc_log(nc_sum(x,nc_prod(nc_i,
+ nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))))));
+ */
+}
+
+static void
+nc_acosh@c@(c@typ@ *x, c@typ@ *r)
+{
+ nc_prod@c@(x, x, r);
+ nc_diff@c@(&nc_1@c@, r, r);
+ nc_sqrt@c@(r, r);
+ nc_prodi@c@(r, r);
+ nc_sum@c@(x, r, r);
+ nc_log@c@(r, r);
+ return;
+ /*
+ return nc_log(nc_sum(x,nc_prod(nc_i,
+ nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))));
+ */
+}
+
+static void
+nc_asin@c@(c@typ@ *x, c@typ@ *r)
+{
+ c@typ@ a, *pa=&a;
+ nc_prod@c@(x, x, r);
+ nc_diff@c@(&nc_1@c@, r, r);
+ nc_sqrt@c@(r, r);
+ nc_prodi@c@(x, pa);
+ nc_sum@c@(pa, r, r);
+ nc_log@c@(r, r);
+ nc_prodi@c@(r, r);
+ nc_neg@c@(r, r);
+ return;
+ /*
+ return nc_neg(nc_prodi(nc_log(nc_sum(nc_prod(nc_i,x),
+ nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))));
+ */
+}
+
+
+static void
+nc_asinh@c@(c@typ@ *x, c@typ@ *r)
+{
+ nc_prod@c@(x, x, r);
+ nc_sum@c@(&nc_1@c@, r, r);
+ nc_sqrt@c@(r, r);
+ nc_diff@c@(r, x, r);
+ nc_log@c@(r, r);
+ nc_neg@c@(r, r);
+ return;
+ /*
+ return nc_neg(nc_log(nc_diff(nc_sqrt(nc_sum(nc_1,nc_prod(x,x))),x)));
+ */
+}
+
+static void
+nc_atan@c@(c@typ@ *x, c@typ@ *r)
+{
+ c@typ@ a, *pa=&a;
+ nc_diff@c@(&nc_i@c@, x, pa);
+ nc_sum@c@(&nc_i@c@, x, r);
+ nc_quot@c@(r, pa, r);
+ nc_log@c@(r,r);
+ nc_prod@c@(&nc_i2@c@, r, r);
+ return;
+ /*
+ return nc_prod(nc_i2,nc_log(nc_quot(nc_sum(nc_i,x),nc_diff(nc_i,x))));
+ */
+}
+
+static void
+nc_atanh@c@(c@typ@ *x, c@typ@ *r)
+{
+ c@typ@ a, *pa=&a;
+ nc_diff@c@(&nc_1@c@, x, r);
+ nc_sum@c@(&nc_1@c@, x, pa);
+ nc_quot@c@(pa, r, r);
+ nc_log@c@(r, r);
+ nc_prod@c@(&nc_half@c@, r, r);
+ return;
+ /*
+ return nc_prod(nc_half,nc_log(nc_quot(nc_sum(nc_1,x),nc_diff(nc_1,x))));
+ */
+}
+
+static void
+nc_cos@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ xr=x->real, xi=x->imag;
+ r->real = cos@c@(xr)*cosh@c@(xi);
+ r->imag = -sin@c@(xr)*sinh@c@(xi);
+ return;
+}
+
+/* Complex hyperbolic cosine:
+   cosh(xr + i*xi) = cosh(xr)*cos(xi) + i*sinh(xr)*sin(xi).
+   Bug fix: call the precision-matched @c@-suffixed math functions,
+   consistent with nc_sinh@c@ and nc_cos@c@; the unsuffixed double
+   calls silently degraded the longdouble variant and forced
+   double-rounding in the float variant. */
+static void
+nc_cosh@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ xr=x->real, xi=x->imag;
+ r->real = cos@c@(xi)*cosh@c@(xr);
+ r->imag = sin@c@(xi)*sinh@c@(xr);
+ return;
+}
+
+
+#define M_LOG10_E 0.434294481903251827651128918916605082294397
+
+static void
+nc_log10@c@(c@typ@ *x, c@typ@ *r)
+{
+ nc_log@c@(x, r);
+ r->real *= M_LOG10_E;
+ r->imag *= M_LOG10_E;
+ return;
+}
+
+static void
+nc_sin@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ xr=x->real, xi=x->imag;
+ r->real = sin@c@(xr)*cosh@c@(xi);
+ r->imag = cos@c@(xr)*sinh@c@(xi);
+ return;
+}
+
+static void
+nc_sinh@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ xr=x->real, xi=x->imag;
+ r->real = cos@c@(xi)*sinh@c@(xr);
+ r->imag = sin@c@(xi)*cosh@c@(xr);
+ return;
+}
+
+/* Complex tangent: tan(z) = sin(z)/cos(z), expanded into real
+   arithmetic (rs+i*is = sin(z), rc+i*ic = cos(z), then a complex
+   divide).  Bug fix: sinh/cosh of the imaginary part now use the
+   precision-matched @c@-suffixed functions -- the unsuffixed double
+   calls degraded the longdouble variant and were inconsistent with
+   the sin@c@/cos@c@ calls just above. */
+static void
+nc_tan@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ sr,cr,shi,chi;
+ @typ@ rs,is,rc,ic;
+ @typ@ d;
+ @typ@ xr=x->real, xi=x->imag;
+ sr = sin@c@(xr);
+ cr = cos@c@(xr);
+ shi = sinh@c@(xi);
+ chi = cosh@c@(xi);
+ rs = sr*chi;
+ is = cr*shi;
+ rc = cr*chi;
+ ic = -sr*shi;
+ d = rc*rc + ic*ic;
+ r->real = (rs*rc+is*ic)/d;
+ r->imag = (is*rc-rs*ic)/d;
+ return;
+}
+
+static void
+nc_tanh@c@(c@typ@ *x, c@typ@ *r)
+{
+ @typ@ si,ci,shr,chr;
+ @typ@ rs,is,rc,ic;
+ @typ@ d;
+ @typ@ xr=x->real, xi=x->imag;
+ si = sin@c@(xi);
+ ci = cos@c@(xi);
+ shr = sinh@c@(xr);
+ chr = cosh@c@(xr);
+ rs = ci*shr;
+ is = si*chr;
+ rc = ci*chr;
+ ic = si*shr;
+ d = rc*rc + ic*ic;
+ r->real = (rs*rc+is*ic)/d;
+ r->imag = (is*rc-rs*ic)/d;
+ return;
+}
+
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYPE=(BOOL, BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*2#
+#OP=||, +*13, ^, -*13#
+#kind=add*14, subtract*14#
+#typ=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*2#
+*/
+
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((@typ@ *)op)=*((@typ@ *)i1) @OP@ *((@typ@ *)i2);
+ }
+}
+
+/**end repeat**/
+
+/**begin repeat
+
+#TYPE=(CFLOAT, CDOUBLE, CLONGDOUBLE)*2#
+#OP=+*3,-*3#
+#kind=add*3,subtract*3#
+#typ=(float, double, longdouble)*2#
+
+*/
+
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ ((@typ@ *)op)[0]=((@typ@ *)i1)[0] @OP@ ((@typ@ *)i2)[0];
+ ((@typ@ *)op)[1]=((@typ@ *)i1)[1] @OP@ ((@typ@ *)i2)[1];
+ }
+}
+
+/**end repeat**/
+
+
+/** Routines borrowed from numarray **/
+
+/* The following routine is used in the event of a detected integer *
+** divide by zero so that a floating divide by zero is generated. *
+** This is done since Numeric uses the floating point exception *
+** sticky bits to detect errors. The last bit is an attempt to *
+** prevent optimization of the divide by zero away, the output value *
+** should always be 0 *
+*/
+
+/* These should really be altered to just set the corresponding bit
+ in the floating point status flag. Need to figure out how to do that
+ on all the platforms...
+*/
+
+/* Zero used to deliberately raise the IEEE divide-by-zero sticky
+   flag.  Bug fix: declared double (was "static int ... = 0.0", a
+   type mismatch -- the 0.0 initializer was truncated to int and the
+   divides below had to re-promote it).  Kept at file scope so the
+   compiler cannot prove it is zero and fold the division away. */
+static double numeric_zero = 0.0;
+
+#if !defined(generate_divbyzero_error)
+/* Force a floating divide-by-zero; callers detect errors by polling
+   the FPU's sticky status bits afterwards. */
+static void generate_divbyzero_error(void) {
+ double dummy;
+ dummy = 1./numeric_zero;
+ return;
+}
+#endif
+
+#if !defined(generate_overflow_error)
+static double numeric_two = 2.0;
+static void generate_overflow_error(void) {
+ double dummy;
+ dummy = pow(numeric_two,1000);
+ return;
+}
+#endif
+
+
+/* Return nonzero if a*b overflows an unsigned long long.  Splits each
+   operand into high/low halves, forms the four partial products of
+   the full-width result, and reports overflow if any contribution
+   lands above the top word.
+   NOTE(review): the preprocessor tests compare SIZEOF_LONGLONG to
+   64/128, i.e. they assume the macro is expressed in BITS.  Python's
+   config headers traditionally define SIZEOF_* in BYTES (8/16); if
+   that convention applies here, both branches are dead and this
+   function always returns 0 -- verify against the build config. */
+static int ulonglong_overflow(ulonglong a, ulonglong b)
+{
+ ulonglong ah, al, bh, bl, w, x, y, z;
+
+#if SIZEOF_LONGLONG == 64
+ ah = (a >> 32);
+ al = (a & 0xFFFFFFFFL);
+ bh = (b >> 32);
+ bl = (b & 0xFFFFFFFFL);
+#elif SIZEOF_LONGLONG == 128
+ ah = (a >> 64);
+ al = (a & 0xFFFFFFFFFFFFFFFFL);
+ bh = (b >> 64);
+ bl = (b & 0xFFFFFFFFFFFFFFFFL);
+#else
+ ah = al = bh = bl = 0;
+#endif
+
+ /* 128-bit product: z*2**64 + (x+y)*2**32 + w */
+ w = al*bl;
+ x = bh*al;
+ y = ah*bl;
+ z = ah*bh;
+
+ /* *c = ((x + y)<<32) + w; */
+#if SIZEOF_LONGLONG == 64
+ return z || (x>>32) || (y>>32) ||
+ (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 32);
+#elif SIZEOF_LONGLONG == 128
+ return z || (x>>64) || (y>>64) ||
+ (((x & 0xFFFFFFFFFFFFFFFFL) + (y & 0xFFFFFFFFFFFFFFFFL) + (w >> 64)) >> 64);
+#else
+ return 0;
+#endif
+
+}
+
+static int slonglong_overflow(longlong a0, longlong b0)
+{
+ ulonglong a, b;
+ ulonglong ah, al, bh, bl, w, x, y, z;
+
+ /* Convert to non-negative quantities */
+ if (a0 < 0) { a = -a0; } else { a = a0; }
+ if (b0 < 0) { b = -b0; } else { b = b0; }
+
+
+#if SIZEOF_LONGLONG == 64
+ ah = (a >> 32);
+ al = (a & 0xFFFFFFFFL);
+ bh = (b >> 32);
+ bl = (b & 0xFFFFFFFFL);
+#elif SIZEOF_LONGLONG == 128
+ ah = (a >> 64);
+ al = (a & 0xFFFFFFFFFFFFFFFFL);
+ bh = (b >> 64);
+ bl = (b & 0xFFFFFFFFFFFFFFFFL);
+#else
+ ah = al = bh = bl = 0;
+#endif
+
+ w = al*bl;
+ x = bh*al;
+ y = ah*bl;
+ z = ah*bh;
+
+ /*
+ ulonglong c = ((x + y)<<32) + w;
+ if ((a0 < 0) ^ (b0 < 0))
+ *c = -c;
+ else
+ *c = c
+ */
+
+#if SIZEOF_LONGLONG == 64
+ return z || (x>>31) || (y>>31) ||
+ (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 31);
+#elif SIZEOF_LONGLONG == 128
+ return z || (x>>63) || (y>>63) ||
+ (((x & 0xFFFFFFFFFFFFFFFFL) + (y & 0xFFFFFFFFFFFFFFFFL) + (w >> 64)) >> 63);
+#else
+ return 0;
+#endif
+}
+
+/** end direct numarray code **/
+
+static void
+BOOL_multiply(char **args, intp *dimensions, intp *steps, void *func) {
+ register intp i;
+ intp is1=steps[0], is2=steps[1], os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for (i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((Bool *)op) = *((Bool *)i1) && *((Bool *)i2);
+ }
+}
+
+/**begin repeat
+
+#TYP= UBYTE,USHORT,UINT, ULONG#
+#typ= ubyte, ushort, uint, ulong#
+#bigtyp= int, int, double, double#
+*/
+
+static void
+@TYP@_multiply(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0], is2=steps[1], os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ @bigtyp@ temp;
+ for (i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ temp = (@bigtyp@)(*((@typ@ *)i1)) * (@bigtyp@)(*((@typ@ *)i2));
+ if (temp > MAX_@TYP@)
+ generate_overflow_error();
+ *((@typ@ *)op) = temp;
+ }
+}
+
+/**end repeat**/
+
+static void
+ULONGLONG_multiply(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0], is2=steps[1], os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ ulonglong temp;
+ for (i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ temp = *((ulonglong *)i1) * *((ulonglong *)i2);
+ if (ulonglong_overflow(*((ulonglong *)i1), *((ulonglong *)i2)))
+ generate_overflow_error();
+ *((ulonglong *)op) = temp;
+ }
+}
+
+/**begin repeat
+
+#TYP= BYTE,SHORT,INT, LONG#
+#typ= byte, short, int, long#
+#bigtyp= int, int, double, double#
+*/
+
+static void
+@TYP@_multiply(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0], is2=steps[1], os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ @bigtyp@ temp;
+ for (i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ temp = (@bigtyp@)*((@typ@ *)i1) * (@bigtyp@)*((@typ@ *)i2);
+ if (temp > MAX_@TYP@)
+ generate_overflow_error();
+ else if (temp < MIN_@TYP@)
+ generate_overflow_error();
+ *((@typ@ *)op) = temp;
+ }
+}
+
+/**end repeat**/
+
+static void
+LONGLONG_multiply(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0], is2=steps[1], os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ longlong temp;
+ for (i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ temp = *((longlong *)i1) * *((longlong *)i2);
+ if (slonglong_overflow(*((longlong *)i1), *((longlong *)i2)))
+ generate_overflow_error();
+ *((longlong *)op) = temp;
+ }
+}
+
+
+/**begin repeat
+
+#TYP=FLOAT,DOUBLE,LONGDOUBLE#
+#typ=float,double,longdouble#
+*/
+static void
+@TYP@_multiply(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ /* fprintf(stderr, "Multiplying %d elements of type @typ@\n", n);
+ fprintf(stderr, "args= %p, %p, %p\n", i1, i2, op);
+ fprintf(stderr, "steps=%d, %d, %d\n", is1, is2, os); */
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((@typ@ *)op)=*((@typ@ *)i1) * *((@typ@ *)i2);
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYP=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG#
+#typ=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong#
+#otyp=float*4, double*6#
+*/
+/* Integer division: a zero divisor sets the floating divide-by-zero
+   sticky flag (via generate_divbyzero_error) and yields 0 instead of
+   trapping.  Bug fix: BYTE now maps to the signed "byte" typedef
+   instead of plain "char", whose signedness is implementation-
+   defined; every other integer repeat block in this file already
+   uses "byte". */
+static void
+@TYP@_divide(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ if (*((@typ@ *)i2)==0) {
+ generate_divbyzero_error();
+ *((@typ@ *)op)=0;
+ }
+ else {
+ *((@typ@ *)op)= *((@typ@ *)i1) / *((@typ@ *)i2);
+ }
+ }
+}
+/* True division of integers always produces a floating result
+   (@otyp@: float for the 8/16-bit types, double otherwise). */
+static void
+@TYP@_true_divide(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ if (*((@typ@ *)i2)==0) {
+ generate_divbyzero_error();
+ *((@otyp@ *)op)=0;
+ }
+ else {
+ *((@otyp@ *)op)= \
+ *((@typ@ *)i1) / (double)*((@typ@ *)i2);
+ }
+ }
+}
+/* NOTE(review): C division truncates toward zero, so aliasing
+   floor_divide to divide differs from Python floor semantics for
+   mixed-sign operands -- confirm this matches the intended Numeric
+   compatibility behavior. */
+#define @TYP@_floor_divide @TYP@_divide
+/**end repeat**/
+
+/**begin repeat
+
+#TYP=(FLOAT,DOUBLE,LONGDOUBLE)*2#
+#typ=(float,double,longdouble)*2#
+#kind=divide*3, true_divide*3#
+*/
+static void
+@TYP@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((@typ@ *)op)=*((@typ@ *)i1) / *((@typ@ *)i2);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYP=FLOAT,DOUBLE,LONGDOUBLE#
+#typ=float,double,longdouble#
+#c=f,,l#
+*/
+static void
+@TYP@_floor_divide(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((@typ@ *)op)=floor@c@(*((@typ@ *)i1) / *((@typ@ *)i2));
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYP=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG#
+#typ=char, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong#
+#btyp=float*4, double*6#
+*/
+static void
+@TYP@_power(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0],is2=steps[1];
+ register intp os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ @btyp@ x, y, v;
+ @typ@ z;
+
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ x = *((@typ@ *)i1);
+ y = *((@typ@ *)i2);
+ z = (@typ@) y;
+ if ((x < 0.0) && (y != z)) v = 1.0/numeric_zero;
+ else v = pow(x,y);
+ *((@typ@ *)op) = (@typ@) v;
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYP=UBYTE, BYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE#
+#typ=ubyte, char, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+static void
+@TYP@_conjugate(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, is1=steps[0], os=steps[1], n=dimensions[0];
+ char *i1=args[0], *op=args[1];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((@typ@ *)op)=*((@typ@ *)i1);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYP=CFLOAT, CDOUBLE, CLONGDOUBLE#
+#typ=float, double, longdouble#
+*/
+static void
+@TYP@_conjugate(char **args, intp *dimensions, intp *steps, void *func) {
+ register intp i, is1=steps[0], os=steps[1], n=dimensions[0];
+ char *i1=args[0], *op=args[1];
+
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ ((@typ@ *)op)[0]=((@typ@ *)i1)[0];
+ ((@typ@ *)op)[1]=-(((@typ@ *)i1)[1]);
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYPE=BOOL,UBYTE,USHORT,UINT,ULONG,ULONGLONG#
+#typ=Bool, ubyte, ushort, uint, ulong, ulonglong#
+*/
+static void
+@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n;
+ intp is1=steps[0], os=steps[1];
+ char *i1=args[0], *op=args[1];
+
+ n=dimensions[0];
+
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((@typ@ *)op) = *((@typ@*)i1);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYPE=BYTE,SHORT,INT,LONG,LONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#typ=byte, short, int, long, longlong, float, double, longdouble#
+*/
+static void
+@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n;
+ intp is1=steps[0], os=steps[1];
+ char *i1=args[0], *op=args[1];
+
+ n=dimensions[0];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((@typ@ *)op) = *((@typ@ *)i1) < 0 ? -*((@typ@ *)i1) : *((@typ@ *)i1);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+ #TYPE=CFLOAT,CDOUBLE,CLONGDOUBLE#
+ #typ= float, double, longdouble#
+ #c= f,,l#
+*/
+static void
+@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i, n;
+ register intp is1=steps[0], os=steps[1];
+ char *i1=args[0], *op=args[1];
+ n=dimensions[0];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((@typ@ *)op) = (@typ@)sqrt@c@(((@typ@ *)i1)[0]*((@typ@ *)i1)[0] + ((@typ@ *)i1)[1]*((@typ@ *)i1)[1]);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#kind=greater, greater_equal, less, less_equal, equal, not_equal, logical_and, logical_or, bitwise_and, bitwise_or, bitwise_xor#
+#OP=>, >=, <, <=, ==, !=, &&, ||, &, |, ^#
+**/
+static void
+BOOL_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ Bool in1, in2;
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ in1 = (*((Bool *)i1) != 0);
+ in2 = (*((Bool *)i2) != 0);
+ *((Bool *)op)= in1 @OP@ in2;
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYPE=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*4#
+#OP= >*13, >=*13, <*13, <=*13#
+#typ=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*4#
+#kind= greater*13, greater_equal*13, less*13, less_equal*13#
+*/
+
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((Bool *)op)=*((@typ@ *)i1) @OP@ *((@typ@ *)i2);
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+#TYPE=(CFLOAT,CDOUBLE,CLONGDOUBLE)*4#
+#OP= >*3, >=*3, <*3, <=*3#
+#typ=(cfloat, cdouble, clongdouble)*4#
+#kind= greater*3, greater_equal*3, less*3, less_equal*3#
+*/
+
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ if (((@typ@ *)i1)->real == ((@typ@ *)i2)->real)
+ *((Bool *)op)=((@typ@ *)i1)->imag @OP@ \
+ ((@typ@ *)i2)->imag;
+ else
+ *((Bool *)op)=((@typ@ *)i1)->real @OP@ \
+ ((@typ@ *)i2)->real;
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+#TYPE=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*4#
+#typ=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*4#
+#OP= ==*13, !=*13, &&*13, ||*13#
+#kind=equal*13, not_equal*13, logical_and*13, logical_or*13#
+*/
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((Bool *)op) = *((@typ@ *)i1) @OP@ *((@typ@ *)i2);
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYPE=(CFLOAT, CDOUBLE, CLONGDOUBLE)*4#
+#typ=(float, double, longdouble)*4#
+#OP= ==*3, !=*3, &&*3, ||*3#
+#OP2= &&*3, ||*3, &&*3, ||*3#
+#kind=equal*3, not_equal*3, logical_and*3, logical_or*3#
+*/
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((Bool *)op) = (*((@typ@ *)i1) @OP@ *((@typ@ *)i2)) @OP2@ (*((@typ@ *)i1+1) @OP@ *((@typ@ *)i2+1));
+ }
+}
+/**end repeat**/
+
+
+/** OBJECT comparison for OBJECT arrays **/
+
+/**begin repeat
+
+#kind=greater, greater_equal, less, less_equal, equal, not_equal#
+#op=GT, GE, LT, LE, EQ, NE#
+*/
+static void
+OBJECT_@kind@(char **args, intp *dimensions, intp *steps, void *func) {
+ register intp i, is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((Bool *)op)=PyObject_RichCompareBool(*((PyObject **)i1),
+ *((PyObject **)i2),
+ Py_@op@);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYPE=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#typ=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+static void
+@TYPE@_negative(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],os=steps[1], n=dimensions[0];
+ char *i1=args[0], *op=args[1];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((@typ@ *)op) = - *((@typ@ *)i1);
+ }
+}
+/**end repeat**/
+
+#define BOOL_negative BOOL_logical_not
+
+
+/**begin repeat
+#TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#typ=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+*/
+static void
+@TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],os=steps[1], n=dimensions[0];
+ char *i1=args[0], *op=args[1];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((Bool *)op) = ! *((@typ@ *)i1);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+#TYPE=CFLOAT,CDOUBLE,CLONGDOUBLE#
+#typ=cfloat, cdouble, clongdouble#
+*/
+static void
+@TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],os=steps[1], n=dimensions[0];
+ char *i1=args[0], *op=args[1];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((Bool *)op) = ! (((@typ@ *)i1)->real || \
+ ((@typ@ *)i1)->imag);
+ }
+}
+/**end repeat**/
+
+
+
+
+/**begin repeat
+#TYPE=BYTE,SHORT,INT,LONG,LONGLONG#
+#typ=byte, short, int, long, longlong#
+#ftyp=float*2,double*2,longdouble*1#
+#c=f*2,,,l*1#
+*/
+static void
+@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ register @typ@ ix,iy, tmp;
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ ix = *((@typ@ *)i1);
+ iy = *((@typ@ *)i2);
+ if (iy == 0 || ix == 0) {
+ if (iy == 0) generate_divbyzero_error();
+ *((@typ@ *)op) = 0;
+ }
+ else if ((ix > 0) == (iy > 0)) {
+ *((@typ@ *)op) = ix % iy;
+ }
+ else { /* handle mixed case the way Python does */
+ tmp = ix % iy;
+ if (tmp) tmp += iy;
+ *((@typ@ *)op)= tmp;
+ }
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+#TYPE=UBYTE,USHORT,UINT,ULONG,ULONGLONG#
+#typ=ubyte, ushort, uint, ulong, ulonglong#
+*/
+/* Unsigned remainder; no sign adjustment is needed since both
+   operands are non-negative.  Bug fix: the original fell through
+   after the iy == 0 branch and executed "ix % iy" anyway, performing
+   a real integer division by zero (undefined behavior / SIGFPE on
+   most platforms).  The modulo is now evaluated only when iy != 0,
+   matching the signed @TYPE@_remainder loops above. */
+static void
+@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ register @typ@ ix,iy;
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ ix = *((@typ@ *)i1);
+ iy = *((@typ@ *)i2);
+ if (iy == 0) {
+ generate_divbyzero_error();
+ *((@typ@ *)op) = 0;
+ }
+ else {
+ *((@typ@ *)op) = ix % iy;
+ }
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+#TYPE=FLOAT,DOUBLE,LONGDOUBLE#
+#typ=float,double,longdouble#
+#c=f,,l#
+*/
+static void
+@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ @typ@ x, y, res;
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ x = *((@typ@ *)i1);
+ y = *((@typ@ *)i2);
+ res = x - floor@c@(x/y)*y;
+ *((@typ@ *)op)= res;
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+
+#TYPE=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG)*6#
+#typ=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong)*6#
+#OP= %*10, &*10, |*10, ^*10, <<*10, >>*10#
+#kind=fmod*10, bitwise_and*10, bitwise_or*10, bitwise_xor*10, left_shift*10, right_shift*10#
+
+*/
+/* Generic element-wise binary-operator loop for the 10 integer types.
+ One expansion per (type, operator) pair; note integer fmod is plain
+ C %, i.e. the result takes the sign of the dividend. No divide-by-zero
+ check is done here. */
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((@typ@ *)op)=*((@typ@ *)i1) @OP@ *((@typ@ *)i2);
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG#
+ #typ=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong#
+*/
+/* Bitwise NOT (~) inner loop for the integer types; unary, so only one
+ input stride and one output stride. */
+static void
+@TYPE@_invert(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0], os=steps[1], n=dimensions[0];
+ char *i1=args[0], *op=args[1];
+ for(i=0; i<n; i++, i1+=is1, op+=os) {
+ *((@typ@ *)op) = ~ *((@typ@*)i1);
+ }
+}
+/**end repeat**/
+
+static void
+BOOL_invert(char **args, intp *dimensions, intp *steps, void *func)
+{
+ /* Logical NOT for booleans. A separate loop is needed because the
+ integer template's ~ on a 0/1 byte would yield 0xFE/0xFF, which is
+ not a valid Bool; map nonzero -> FALSE and zero -> TRUE instead. */
+ intp n = dimensions[0];
+ intp istride = steps[0], ostride = steps[1];
+ char *in = args[0], *out = args[1];
+ register intp k;
+ for (k = 0; k < n; k++, in += istride, out += ostride) {
+ *((Bool *)out) = (*((Bool *)in)) ? FALSE : TRUE;
+ }
+}
+
+
+/**begin repeat
+#TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
+#typ=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble#
+
+*/
+/* Logical XOR for the real/bool types: true when exactly one operand is
+ truthy, expressed as (a || b) && !(a && b). Output is always Bool. */
+static void
+@TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((Bool *)op)=(*((@typ@ *)i1) || *((@typ@ *)i2)) && !(*((@typ@ *)i1) && *((@typ@ *)i2));
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+#TYPE=CFLOAT,CDOUBLE,CLONGDOUBLE#
+#typ=cfloat, cdouble, clongdouble#
+*/
+/* Logical XOR for complex types. A complex value is truthy when either
+ its real or imaginary part is nonzero; reduce each operand to a Bool
+ first, then apply the same exactly-one-true test as the real loop. */
+static void
+@TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *func)
+{
+ Bool p1, p2;
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ p1 = ((@typ@ *)i1)->real || ((@typ@ *)i1)->imag;
+ p2 = ((@typ@ *)i2)->real || ((@typ@ *)i2)->imag;
+ *((Bool *)op)= (p1 || p2) && !(p1 && p2);
+ }
+}
+/**end repeat**/
+
+
+
+/**begin repeat
+
+#TYPE=(BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*2#
+#OP= >*14, <*14#
+#typ=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*2#
+#kind= maximum*14, minimum*14#
+*/
+/* maximum/minimum inner loops for the real/bool types: a simple
+ compare-and-select. @OP@ is > for maximum, < for minimum; on a tie the
+ second operand is returned. */
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ *((@typ@ *)op)=*((@typ@ *)i1) @OP@ *((@typ@ *)i2) ? *((@typ@ *)i1) : *((@typ@ *)i2);
+ }
+}
+/**end repeat**/
+
+/**begin repeat
+
+#TYPE=(CFLOAT,CDOUBLE,CLONGDOUBLE)*2#
+#OP= >*3, <*3#
+#typ=(cfloat, cdouble, clongdouble)*2#
+#kind= maximum*3, minimum*3#
+*/
+/* maximum/minimum for complex types using lexicographic order: compare
+ real parts first, fall back to imaginary parts on a real-part tie.
+ The winning element is copied whole via memcpy. */
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2], n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ @typ@ *i1c, *i2c;
+ for(i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ i1c = (@typ@ *)i1;
+ i2c = (@typ@ *)i2;
+ if ((i1c->real @OP@ i2c->real) || \
+ ((i1c->real==i2c->real) && (i1c->imag @OP@ i2c->imag)))
+ memcpy(op, i1, sizeof(@typ@));
+ else
+ memcpy(op, i2, sizeof(@typ@));
+ }
+}
+/**end repeat**/
+
+
+
+/*** isnan, isinf, isfinite, signbit ***/
+/**begin repeat
+#kind=isnan*3, isinf*3, isfinite*3, signbit*3#
+#TYPE=(FLOAT, DOUBLE, LONGDOUBLE)*4#
+#typ=(float, double, longdouble)*4#
+#c=(f,,l)*4#
+*/
+/* Unary floating-point predicate loops. @kind@@c@ expands to the C
+ classification call (e.g. isnanf, isnan, isnanl); the != 0 normalizes
+ any nonzero result to a proper Bool. */
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is=steps[0], os=steps[1], n=dimensions[0];
+ char *ip=args[0], *op=args[1];
+ for(i=0; i<n; i++, ip+=is, op+=os) {
+ *((Bool *)op) = (Bool) (@kind@@c@(*((@typ@ *)ip)) != 0);
+ }
+}
+/**end repeat**/
+
+
+/**begin repeat
+#kind=isnan*3, isinf*3, isfinite*3#
+#TYPE=(CFLOAT, CDOUBLE, CLONGDOUBLE)*3#
+#typ=(float, double, longdouble)*3#
+#c=(f,,l)*3#
+#OP=||*6,&&*3#
+*/
+/* Complex predicate loops: the complex value is treated as two adjacent
+ reals (index [0] = real, [1] = imag). isnan/isinf combine the parts
+ with || (either part suffices), isfinite with && (both must be). */
+static void
+@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is=steps[0], os=steps[1], n=dimensions[0];
+ char *ip=args[0], *op=args[1];
+ for(i=0; i<n; i++, ip+=is, op+=os) {
+ *((Bool *)op) = @kind@@c@(((@typ@ *)ip)[0]) @OP@ \
+ @kind@@c@(((@typ@ *)ip)[1]);
+ }
+}
+/**end repeat**/
+
+
+
+
+/****** modf ****/
+
+/**begin repeat
+#TYPE=FLOAT, DOUBLE, LONGDOUBLE#
+#typ=float, double, longdouble#
+#c=f,,l#
+*/
+/* modf inner loop: one input, two outputs -- op1 gets the fractional
+ part (modf's return value), op2 the integral part (written through the
+ pointer argument). */
+static void
+@TYPE@_modf(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],os1=steps[1],os2=steps[2],n=dimensions[0];
+ char *i1=args[0], *op1=args[1], *op2=args[2];
+ @typ@ x1, y1, y2;
+ for (i=0; i<n; i++, i1+=is1, op1+=os1, op2+=os2) {
+ x1 = *((@typ@ *)i1);
+ y1 = modf@c@(x1, &y2);
+ *((@typ@ *)op1) = y1;
+ *((@typ@ *)op2) = y2;
+ }
+}
+/**end repeat**/
+
+/* HAVE_DOUBLE_FUNCS is force-defined here so the DOUBLE expansions below
+ always compile; FLOAT/LONGDOUBLE variants are only built when the
+ configure-time HAVE_FLOAT_FUNCS / HAVE_LONGDOUBLE_FUNCS macros exist. */
+#define HAVE_DOUBLE_FUNCS
+/**begin repeat
+#TYPE=FLOAT, DOUBLE, LONGDOUBLE#
+#typ=float, double, longdouble#
+#c=f,,l#
+*/
+#ifdef HAVE_@TYPE@_FUNCS
+/* frexp: split x into mantissa (op1, in [0.5, 1)) and int exponent (op2),
+ such that x == mantissa * 2**exponent. Note op2 is always C int. */
+static void
+@TYPE@_frexp(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],os1=steps[1],os2=steps[2],n=dimensions[0];
+ char *i1=args[0], *op1=args[1], *op2=args[2];
+ @typ@ x1, y1;
+ int y2;
+ for (i=0; i<n; i++, i1+=is1, op1+=os1, op2+=os2) {
+ x1 = *((@typ@ *)i1);
+ y1 = frexp@c@(x1, &y2);
+ *((@typ@ *)op1) = y1;
+ *((int *) op2) = y2;
+ }
+}
+
+/* ldexp: inverse of frexp, computes x1 * 2**x2 where x2 is a C int. */
+static void
+@TYPE@_ldexp(char **args, intp *dimensions, intp *steps, void *func)
+{
+ register intp i;
+ intp is1=steps[0],is2=steps[1],os=steps[2],n=dimensions[0];
+ char *i1=args[0], *i2=args[1], *op=args[2];
+ @typ@ x1, y1;
+ int x2;
+ for (i=0; i<n; i++, i1+=is1, i2+=is2, op+=os) {
+ x1 = *((@typ@ *)i1);
+ x2 = *((int *)i2);
+ y1 = ldexp@c@(x1, x2);
+ *((@typ@ *)op) = y1;
+ }
+}
+#endif
+/**end repeat**/
+#undef HAVE_DOUBLE_FUNCS
+
+
+/* Loop/signature tables for the hand-registered frexp and ldexp ufuncs.
+ Each table lists the same conditional type set in the same order
+ (FLOAT if available, then DOUBLE, then LONGDOUBLE if available) -- the
+ function table and its signature table must stay in lockstep. */
+static PyUFuncGenericFunction frexp_functions[] = {
+#ifdef HAVE_FLOAT_FUNCS
+ FLOAT_frexp,
+#endif
+ DOUBLE_frexp
+#ifdef HAVE_LONGDOUBLE_FUNCS
+ ,LONGDOUBLE_frexp
+#endif
+};
+
+/* Per-loop user data: none of these loops use the func pointer. */
+static void * blank3_data[] = { (void *)NULL, (void *)NULL, (void *)NULL};
+/* frexp signature per loop: in, out1 (mantissa), out2 (int exponent). */
+static char frexp_signatures[] = {
+#ifdef HAVE_FLOAT_FUNCS
+ PyArray_FLOAT, PyArray_FLOAT, PyArray_INT,
+#endif
+ PyArray_DOUBLE, PyArray_DOUBLE, PyArray_INT
+#ifdef HAVE_LONGDOUBLE_FUNCS
+ ,PyArray_LONGDOUBLE, PyArray_LONGDOUBLE, PyArray_INT
+#endif
+};
+
+
+static PyUFuncGenericFunction ldexp_functions[] = {
+#ifdef HAVE_FLOAT_FUNCS
+ FLOAT_ldexp,
+#endif
+ DOUBLE_ldexp
+#ifdef HAVE_LONGDOUBLE_FUNCS
+ ,LONGDOUBLE_ldexp
+#endif
+};
+
+/* ldexp signature per loop: in1 (float type), in2 (int exponent), out. */
+static char ldexp_signatures[] = {
+#ifdef HAVE_FLOAT_FUNCS
+ PyArray_FLOAT, PyArray_INT, PyArray_FLOAT,
+#endif
+ PyArray_DOUBLE, PyArray_INT, PyArray_DOUBLE
+#ifdef HAVE_LONGDOUBLE_FUNCS
+ ,PyArray_LONGDOUBLE, PyArray_INT, PyArray_LONGDOUBLE
+#endif
+};
+
+
+
+#include "__umath_generated.c"
+
+
+#include "ufuncobject.c"
+
+#include "__ufunc_api.c"
+
+/* Compute positive infinity portably at runtime: keep multiplying by a
+ large factor until the value saturates (stops changing), which on IEEE
+ hardware is +inf. Avoids depending on an INFINITY macro. */
+static double
+pinf_init(void)
+{
+ double factor = 1e10;
+ double prev = 0.0;
+ double val = factor;
+
+ while (val != prev) {
+ prev = val;
+ val *= factor;
+ }
+ return val;
+}
+
+/* Compute positive zero portably at runtime: divide repeatedly until the
+ quotient underflows and stops changing, i.e. reaches +0.0. (The local
+ was misleadingly named `pinf` before; it holds a shrinking value.) */
+static double
+pzero_init(void)
+{
+ double divisor = 1e10;
+ double prev = 0.0;
+ double pzero = divisor;
+
+ for (;;) {
+ pzero /= divisor;
+ if (pzero == prev) break;
+ prev = pzero;
+ }
+ return pzero;
+}
+
+/* Less automated additions to the ufuncs */
+
+/* Register the hand-built frexp and ldexp ufuncs into the module dict.
+ num counts how many type loops the tables above actually contain
+ (DOUBLE always; FLOAT/LONGDOUBLE only when their HAVE_* macro is set),
+ and must match the conditional table layout exactly. */
+static void
+InitOtherOperators(PyObject *dictionary) {
+ PyObject *f;
+ int num=1;
+
+#ifdef HAVE_LONGDOUBLE_FUNCS
+ num += 1;
+#endif
+#ifdef HAVE_FLOAT_FUNCS
+ num += 1;
+#endif
+ /* 1 input, 2 outputs (mantissa, exponent). NOTE(review): f is not
+ checked for NULL before PyDict_SetItemString -- confirm acceptable. */
+ f = PyUFunc_FromFuncAndData(frexp_functions, blank3_data,
+ frexp_signatures, num,
+ 1, 2, PyUFunc_None, "frexp",
+ "Split the number, x, into a normalized"\
+ " fraction (y1) and exponent (y2)",0);
+ PyDict_SetItemString(dictionary, "frexp", f);
+ Py_DECREF(f);
+
+ /* 2 inputs, 1 output; blank3_data is reused since neither ufunc needs
+ per-loop user data. */
+ f = PyUFunc_FromFuncAndData(ldexp_functions, blank3_data, ldexp_signatures, num,
+ 2, 1, PyUFunc_None, "ldexp",
+ "Compute y = x1 * 2**x2.",0);
+ PyDict_SetItemString(dictionary, "ldexp", f);
+ Py_DECREF(f);
+ return;
+}
+
+/* Module-level method table for the umath extension module. */
+static struct PyMethodDef methods[] = {
+ {"frompyfunc", (PyCFunction) ufunc_frompyfunc,
+ METH_VARARGS | METH_KEYWORDS, doc_frompyfunc},
+ {"update_use_defaults", (PyCFunction) ufunc_update_use_defaults,
+ METH_VARARGS , NULL},
+ {NULL, NULL, 0} /* sentinel */
+};
+
+/* Module init for umath: creates the module, exposes the C API capsule,
+ registers all generated and hand-built ufuncs, and adds the error-mode
+ constants plus the special float values (inf/zero/nan). */
+DL_EXPORT(void) initumath(void) {
+ PyObject *m, *d, *s, *s2, *c_api;
+ double pinf, pzero, mynan;
+
+ /* Create the module and add the functions */
+ m = Py_InitModule("umath", methods);
+
+ /* Import the array */
+ if (import_array() < 0) return;
+
+ /* Initialize the types */
+ if (PyType_Ready(&PyUFunc_Type) < 0)
+ return;
+
+ /* Add some symbolic constants to the module */
+ d = PyModule_GetDict(m);
+
+ /* Export the ufunc C API table for other extension modules. */
+ c_api = PyCObject_FromVoidPtr((void *)PyUFunc_API, NULL);
+ if (PyErr_Occurred()) goto err;
+ PyDict_SetItemString(d, "_UFUNC_API", c_api);
+ Py_DECREF(c_api);
+ if (PyErr_Occurred()) goto err;
+
+ s = PyString_FromString("0.4.0");
+ PyDict_SetItemString(d, "__version__", s);
+ Py_DECREF(s);
+
+ /* Load the ufunc operators into the array module's namespace */
+ InitOperators(d);
+
+ InitOtherOperators(d);
+
+ PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(M_PI));
+ Py_DECREF(s);
+ PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(exp(1.0)));
+ Py_DECREF(s);
+
+#define ADDCONST(str) PyModule_AddIntConstant(m, #str, UFUNC_##str)
+#define ADDSCONST(str) PyModule_AddStringConstant(m, "UFUNC_" #str, UFUNC_##str)
+
+ ADDCONST(ERR_IGNORE);
+ ADDCONST(ERR_WARN);
+ ADDCONST(ERR_CALL);
+ ADDCONST(ERR_RAISE);
+ ADDCONST(ERR_DEFAULT);
+
+ ADDCONST(SHIFT_DIVIDEBYZERO);
+ ADDCONST(SHIFT_OVERFLOW);
+ ADDCONST(SHIFT_UNDERFLOW);
+ ADDCONST(SHIFT_INVALID);
+
+ ADDCONST(FPE_DIVIDEBYZERO);
+ ADDCONST(FPE_OVERFLOW);
+ ADDCONST(FPE_UNDERFLOW);
+ ADDCONST(FPE_INVALID);
+
+ ADDSCONST(PYVALS_NAME);
+
+#undef ADDCONST
+#undef ADDSCONST
+ PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)PyArray_BUFSIZE);
+
+ /* Special float values, computed at runtime to stay portable. */
+ pinf = pinf_init();
+ pzero = pzero_init();
+ mynan = pinf / pinf; /* inf/inf is the canonical quiet-nan recipe */
+
+ PyModule_AddObject(m, "PINF", PyFloat_FromDouble(pinf));
+ PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-pinf));
+ PyModule_AddObject(m, "PZERO", PyFloat_FromDouble(pzero));
+ PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(-pzero));
+ PyModule_AddObject(m, "NAN", PyFloat_FromDouble(mynan));
+
+ /* Borrowed references; used below to publish aliases. */
+ s = PyDict_GetItemString(d, "conjugate");
+ s2 = PyDict_GetItemString(d, "remainder");
+ /* Setup the array object's numerical structures with appropriate
+ ufuncs in d*/
+ PyArray_SetNumericOps(d);
+
+ PyDict_SetItemString(d, "conj", s);
+ PyDict_SetItemString(d, "mod", s2);
+
+ /* The success path also falls through to err: -- the check below
+ doubles as the final sanity check on a clean init. */
+ err:
+ /* Check for errors */
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module umath");
+}
diff --git a/numpy/base/tests/test_function_base.py b/numpy/base/tests/test_function_base.py
new file mode 100644
index 000000000..fafd75eef
--- /dev/null
+++ b/numpy/base/tests/test_function_base.py
@@ -0,0 +1,338 @@
+
+import sys
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;reload(scipy.base)
+from scipy.base import *
+del sys.path[0]
+
+class test_any(ScipyTestCase):
+ """Tests for any()/sometrue() truth reduction."""
+ def check_basic(self):
+ y1 = [0,0,1,0]
+ y2 = [0,0,0,0]
+ y3 = [1,0,1,0]
+ assert(any(y1))
+ assert(any(y3))
+ assert(not any(y2))
+
+ def check_nd(self):
+ # sometrue reduces over axis 0 by default here.
+ y1 = [[0,0,0],[0,1,0],[1,1,0]]
+ assert(any(y1))
+ assert_array_equal(sometrue(y1),[1,1,0])
+ assert_array_equal(sometrue(y1,axis=1),[0,1,1])
+
+class test_all(ScipyTestCase):
+ """Tests for all()/alltrue() truth reduction."""
+ def check_basic(self):
+ y1 = [0,1,1,0]
+ y2 = [0,0,0,0]
+ y3 = [1,1,1,1]
+ assert(not all(y1))
+ assert(all(y3))
+ assert(not all(y2))
+ assert(all(~array(y2)))
+
+ def check_nd(self):
+ y1 = [[0,0,1],[0,1,1],[1,1,1]]
+ assert(not all(y1))
+ assert_array_equal(alltrue(y1),[0,0,1])
+ assert_array_equal(alltrue(y1,axis=1),[0,0,1])
+
+class test_average(ScipyTestCase):
+ """Tests that average() matches ndarray.mean along both axes."""
+ def check_basic(self):
+ y1 = array([1,2,3])
+ assert(average(y1) == 2.)
+ y2 = array([1.,2.,3.])
+ assert(average(y2) == 2.)
+ y3 = [0.,0.,0.]
+ assert(average(y3) == 0.)
+
+ y4 = ones((4,4))
+ y4[0,1] = 0
+ y4[1,0] = 2
+ assert_array_equal(y4.mean(0), average(y4, 0))
+ assert_array_equal(y4.mean(1), average(y4, 1))
+
+ y5 = rand(5,5)
+ assert_array_equal(y5.mean(0), average(y5, 0))
+ assert_array_equal(y5.mean(1), average(y5, 1))
+
+class test_logspace(ScipyTestCase):
+ """Tests for logspace(): default length, num, endpoint, known values."""
+ def check_basic(self):
+ y = logspace(0,6)
+ assert(len(y)==50)
+ y = logspace(0,6,num=100)
+ assert(y[-1] == 10**6)
+ y = logspace(0,6,endpoint=0)
+ assert(y[-1] < 10**6)
+ y = logspace(0,6,num=7)
+ assert_array_equal(y,[1,10,100,1e3,1e4,1e5,1e6])
+
+class test_linspace(ScipyTestCase):
+ """Tests for linspace(): length, endpoint, retstep, corner cases."""
+ def check_basic(self):
+ y = linspace(0,10)
+ assert(len(y)==50)
+ y = linspace(2,10,num=100)
+ assert(y[-1] == 10)
+ y = linspace(2,10,endpoint=0)
+ assert(y[-1] < 10)
+ y,st = linspace(2,10,retstep=1)
+ assert_almost_equal(st,8/49.0)
+ assert_array_almost_equal(y,mgrid[2:10:50j],13)
+
+ def check_corner(self):
+ y = list(linspace(0,1,1))
+ assert y == [0.0], y
+ # NOTE(review): num=2.5 is a float -- exercises (historical)
+ # acceptance of non-integer counts; presumably truncates to 2.
+ y = list(linspace(0,1,2.5))
+ assert y == [0.0, 1.0]
+
+class test_amax(ScipyTestCase):
+ """Tests for amax() on 1-D and per-axis on 2-D input."""
+ def check_basic(self):
+ a = [3,4,5,10,-3,-5,6.0]
+ assert_equal(amax(a),10.0)
+ b = [[3,6.0, 9.0],
+ [4,10.0,5.0],
+ [8,3.0,2.0]]
+ assert_equal(amax(b,axis=0),[8.0,10.0,9.0])
+ assert_equal(amax(b,axis=1),[9.0,10.0,8.0])
+
+class test_amin(ScipyTestCase):
+ """Tests for amin() on 1-D and per-axis on 2-D input."""
+ def check_basic(self):
+ a = [3,4,5,10,-3,-5,6.0]
+ assert_equal(amin(a),-5.0)
+ b = [[3,6.0, 9.0],
+ [4,10.0,5.0],
+ [8,3.0,2.0]]
+ assert_equal(amin(b,axis=0),[3.0,3.0,2.0])
+ assert_equal(amin(b,axis=1),[3.0,4.0,2.0])
+
+class test_ptp(ScipyTestCase):
+ """Tests for ptp() (peak-to-peak, max - min) overall and per axis."""
+ def check_basic(self):
+ a = [3,4,5,10,-3,-5,6.0]
+ assert_equal(ptp(a),15.0)
+ b = [[3,6.0, 9.0],
+ [4,10.0,5.0],
+ [8,3.0,2.0]]
+ assert_equal(ptp(b,axis=0),[5.0,7.0,7.0])
+ assert_equal(ptp(b,axis=-1),[6.0,6.0,6.0])
+
+class test_cumsum(ScipyTestCase):
+ """Tests for cumsum() across integer, float and complex dtypes."""
+ def check_basic(self):
+ ba = [1,2,10,11,6,5,4]
+ ba2 = [[1,2,3,4],[5,6,7,9],[10,3,4,5]]
+ for ctype in [int8,uint8,int16,uint16,int32,uint32,
+ float32,float64,complex64,complex128]:
+ a = array(ba,ctype)
+ a2 = array(ba2,ctype)
+ assert_array_equal(cumsum(a), array([1,3,13,24,30,35,39],ctype))
+ assert_array_equal(cumsum(a2,axis=0), array([[1,2,3,4],[6,8,10,13],
+ [16,11,14,18]],ctype))
+ assert_array_equal(cumsum(a2,axis=1),
+ array([[1,3,6,10],
+ [5,11,18,27],
+ [10,13,17,22]],ctype))
+
+class test_prod(ScipyTestCase):
+ """Tests for prod() across numeric dtypes."""
+ def check_basic(self):
+ ba = [1,2,10,11,6,5,4]
+ ba2 = [[1,2,3,4],[5,6,7,9],[10,3,4,5]]
+ for ctype in [int16,uint16,int32,uint32,
+ float32,float64,complex64,complex128]:
+ a = array(ba,ctype)
+ a2 = array(ba2,ctype)
+ # NOTE(review): ctype is a dtype object, never the char codes
+ # '1'/'b', so this branch is dead (leftover from Numeric-style
+ # typecodes) -- only the else branch ever runs.
+ if ctype in ['1', 'b']:
+ self.failUnlessRaises(ArithmeticError, prod, a)
+ self.failUnlessRaises(ArithmeticError, prod, a2, 1)
+ self.failUnlessRaises(ArithmeticError, prod, a)
+ else:
+ assert_equal(prod(a),26400)
+ assert_array_equal(prod(a2,axis=0),
+ array([50,36,84,180],ctype))
+ assert_array_equal(prod(a2,axis=-1),array([24, 1890, 600],ctype))
+
+class test_cumprod(ScipyTestCase):
+ """Tests for cumprod() across numeric dtypes."""
+ def check_basic(self):
+ ba = [1,2,10,11,6,5,4]
+ ba2 = [[1,2,3,4],[5,6,7,9],[10,3,4,5]]
+ for ctype in [int16,uint16,int32,uint32,
+ float32,float64,complex64,complex128]:
+ a = array(ba,ctype)
+ a2 = array(ba2,ctype)
+ # NOTE(review): dead branch -- ctype is a dtype object, never a
+ # char code, so only the else branch ever runs (see test_prod).
+ if ctype in ['1', 'b']:
+ self.failUnlessRaises(ArithmeticError, cumprod, a)
+ self.failUnlessRaises(ArithmeticError, cumprod, a2, 1)
+ self.failUnlessRaises(ArithmeticError, cumprod, a)
+ else:
+ assert_array_equal(cumprod(a,axis=-1),
+ array([1, 2, 20, 220,
+ 1320, 6600, 26400],ctype))
+ assert_array_equal(cumprod(a2,axis=0),
+ array([[ 1, 2, 3, 4],
+ [ 5, 12, 21, 36],
+ [50, 36, 84, 180]],ctype))
+ assert_array_equal(cumprod(a2,axis=-1),
+ array([[ 1, 2, 6, 24],
+ [ 5, 30, 210, 1890],
+ [10, 30, 120, 600]],ctype))
+
+class test_diff(ScipyTestCase):
+ """Tests for diff(): first and higher-order differences, axis arg."""
+ def check_basic(self):
+ x = [1,4,6,7,12]
+ out = array([3,2,1,5])
+ out2 = array([-1,-1,4])
+ out3 = array([0,5])
+ assert_array_equal(diff(x),out)
+ assert_array_equal(diff(x,n=2),out2)
+ assert_array_equal(diff(x,n=3),out3)
+
+ def check_nd(self):
+ # Expected values built directly from slicing identities.
+ x = 20*rand(10,20,30)
+ out1 = x[:,:,1:] - x[:,:,:-1]
+ out2 = out1[:,:,1:] - out1[:,:,:-1]
+ out3 = x[1:,:,:] - x[:-1,:,:]
+ out4 = out3[1:,:,:] - out3[:-1,:,:]
+ assert_array_equal(diff(x),out1)
+ assert_array_equal(diff(x,n=2),out2)
+ assert_array_equal(diff(x,axis=0),out3)
+ assert_array_equal(diff(x,n=2,axis=0),out4)
+
+class test_angle(ScipyTestCase):
+ """Tests for angle(): radians by default, degrees with deg=1."""
+ def check_basic(self):
+ x = [1+3j,sqrt(2)/2.0+1j*sqrt(2)/2,1,1j,-1,-1j,1-3j,-1+3j]
+ y = angle(x)
+ yo = [arctan(3.0/1.0),arctan(1.0),0,pi/2,pi,-pi/2.0,
+ -arctan(3.0/1.0),pi-arctan(3.0/1.0)]
+ z = angle(x,deg=1)
+ zo = array(yo)*180/pi
+ assert_array_almost_equal(y,yo,11)
+ assert_array_almost_equal(z,zo,11)
+
+class test_trim_zeros(ScipyTestCase):
+ """ only testing for integer splits.
+
+ trim_zeros must strip zeros at the edges but keep interior zeros.
+ """
+ def check_basic(self):
+ a= array([0,0,1,2,3,4,0])
+ res = trim_zeros(a)
+ assert_array_equal(res,array([1,2,3,4]))
+ def check_leading_skip(self):
+ a= array([0,0,1,0,2,3,4,0])
+ res = trim_zeros(a)
+ assert_array_equal(res,array([1,0,2,3,4]))
+ def check_trailing_skip(self):
+ a= array([0,0,1,0,2,3,0,4,0])
+ res = trim_zeros(a)
+ assert_array_equal(res,array([1,0,2,3,0,4]))
+
+
+class test_extins(ScipyTestCase):
+ """Tests for extract() and insert() as inverse mask operations."""
+ def check_basic(self):
+ a = array([1,3,2,1,2,3,3])
+ b = extract(a>1,a)
+ assert_array_equal(b,[3,2,2,3,3])
+ def check_insert(self):
+ # insert mutates a in place at the mask positions.
+ a = array([1,4,3,2,5,8,7])
+ insert(a,[0,1,0,1,0,1,0],[2,4,6])
+ assert_array_equal(a,[1,2,3,4,5,6,7])
+ def check_both(self):
+ # extract then insert with the same mask must round-trip.
+ a = rand(10)
+ mask = a > 0.5
+ ac = a.copy()
+ c = extract(mask, a)
+ insert(a,mask,0)
+ insert(a,mask,c)
+ assert_array_equal(a,ac)
+
+class test_vectorize(ScipyTestCase):
+ """Tests for vectorize() on a branching scalar function."""
+ def check_simple(self):
+ def addsubtract(a,b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+ f = vectorize(addsubtract)
+ r = f([0,3,6,9],[1,3,5,7])
+ assert_array_equal(r,[1,6,1,2])
+ def check_scalar(self):
+ # Same function as above; checks broadcasting of a scalar arg.
+ def addsubtract(a,b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+ f = vectorize(addsubtract)
+ r = f([0,3,6,9],5)
+ assert_array_equal(r,[5,8,1,4])
+
+
+
+class test_unwrap(ScipyTestCase):
+ """Tests for unwrap() phase unwrapping."""
+ def check_simple(self):
+ #check that unwrap removes jumps greater than 2*pi
+ assert_array_equal(unwrap([1,1+2*pi]),[1,1])
+ #check that unwrap maintains continuity
+ assert(all(diff(unwrap(rand(10)*100))<pi))
+
+
+class test_filterwindows(ScipyTestCase):
+ """Window functions: each must be symmetric and sum to a known value."""
+ def check_hanning(self):
+ #check symmetry
+ w=hanning(10)
+ assert_array_almost_equal(w,flipud(w),7)
+ #check known value
+ assert_almost_equal(sum(w),4.500,4)
+
+ def check_hamming(self):
+ #check symmetry
+ w=hamming(10)
+ assert_array_almost_equal(w,flipud(w),7)
+ #check known value
+ assert_almost_equal(sum(w),4.9400,4)
+
+ def check_bartlett(self):
+ #check symmetry
+ w=bartlett(10)
+ assert_array_almost_equal(w,flipud(w),7)
+ #check known value
+ assert_almost_equal(sum(w),4.4444,4)
+
+ def check_blackman(self):
+ #check symmetry
+ w=blackman(10)
+ assert_array_almost_equal(w,flipud(w),7)
+ #check known value
+ assert_almost_equal(sum(w),3.7800,4)
+
+
+class test_trapz(ScipyTestCase):
+ """Tests trapz() by integrating a standard normal density."""
+ def check_simple(self):
+ r=trapz(exp(-1.0/2*(arange(-10,10,.1))**2)/sqrt(2*pi),dx=0.1)
+ #check integral of normal equals 1
+ assert_almost_equal(sum(r),1,7)
+
+class test_sinc(ScipyTestCase):
+ """Tests sinc(): value at 0 and symmetry about the origin."""
+ def check_simple(self):
+ assert(sinc(0)==1)
+ w=sinc(linspace(-1,1,100))
+ #check symmetry
+ assert_array_almost_equal(w,flipud(w),7)
+
+class test_histogram(ScipyTestCase):
+ """Tests histogram(): counts sum to n; uniform data fills bins evenly."""
+ def check_simple(self):
+ n=100
+ v=rand(n)
+ (a,b)=histogram(v)
+ #check if the sum of the bins equals the number of samples
+ assert(sum(a)==n)
+ #check that the bin counts are evenly spaced when the data is from a linear function
+ (a,b)=histogram(linspace(0,10,100))
+ assert(all(a==10))
+
+
+
+
+
+def compare_results(res,desired):
+ for i in range(len(desired)):
+ assert_array_equal(res[i],desired[i])
+
+if __name__ == "__main__":
+ ScipyTest('scipy.base.function_base').run()
diff --git a/numpy/base/tests/test_getlimits.py b/numpy/base/tests/test_getlimits.py
new file mode 100644
index 000000000..99a6f5160
--- /dev/null
+++ b/numpy/base/tests/test_getlimits.py
@@ -0,0 +1,38 @@
+""" Test functions for limits module.
+"""
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;reload(scipy.base)
+from scipy.base.getlimits import finfo
+from scipy import single,double,longdouble
+restore_path()
+
+##################################################
+
+class test_python_float(ScipyTestCase):
+ """finfo(float) must be a cached singleton."""
+ def check_singleton(self):
+ ftype = finfo(float)
+ ftype2 = finfo(float)
+ assert_equal(id(ftype),id(ftype2))
+
+class test_single(ScipyTestCase):
+ """finfo(single) must be a cached singleton."""
+ def check_singleton(self):
+ ftype = finfo(single)
+ ftype2 = finfo(single)
+ assert_equal(id(ftype),id(ftype2))
+
+class test_double(ScipyTestCase):
+ """finfo(double) must be a cached singleton."""
+ def check_singleton(self):
+ ftype = finfo(double)
+ ftype2 = finfo(double)
+ assert_equal(id(ftype),id(ftype2))
+
+class test_longdouble(ScipyTestCase):
+ """finfo(longdouble) must be a cached singleton (level-2 test)."""
+ def check_singleton(self,level=2):
+ ftype = finfo(longdouble)
+ ftype2 = finfo(longdouble)
+ assert_equal(id(ftype),id(ftype2))
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/base/tests/test_index_tricks.py b/numpy/base/tests/test_index_tricks.py
new file mode 100644
index 000000000..96e9dff84
--- /dev/null
+++ b/numpy/base/tests/test_index_tricks.py
@@ -0,0 +1,53 @@
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;reload(scipy.base)
+from scipy.base import *
+restore_path()
+
+class test_grid(ScipyTestCase):
+ """Tests mgrid: complex step = inclusive point count, real step = spacing."""
+ def check_basic(self):
+ a = mgrid[-1:1:10j]
+ b = mgrid[-1:1:0.1]
+ assert(a.shape == (10,))
+ assert(b.shape == (20,))
+ assert(a[0] == -1)
+ assert_almost_equal(a[-1],1)
+ assert(b[0] == -1)
+ assert_almost_equal(b[1]-b[0],0.1,11)
+ assert_almost_equal(b[-1],b[0]+19*0.1,11)
+ assert_almost_equal(a[1]-a[0],2.0/9.0,11)
+
+ def check_nd(self):
+ # First axis of the result indexes the coordinate arrays.
+ c = mgrid[-1:1:10j,-2:2:10j]
+ d = mgrid[-1:1:0.1,-2:2:0.2]
+ assert(c.shape == (2,10,10))
+ assert(d.shape == (2,20,20))
+ assert_array_equal(c[0][0,:],-ones(10,'d'))
+ assert_array_equal(c[1][:,0],-2*ones(10,'d'))
+ assert_array_almost_equal(c[0][-1,:],ones(10,'d'),11)
+ assert_array_almost_equal(c[1][:,-1],2*ones(10,'d'),11)
+ assert_array_almost_equal(d[0,1,:]-d[0,0,:], 0.1*ones(20,'d'),11)
+ assert_array_almost_equal(d[1,:,1]-d[1,:,0], 0.2*ones(20,'d'),11)
+
+class test_concatenator(ScipyTestCase):
+ """Tests the r_ concatenator in 1-D and 2-D (row vs column stacking)."""
+ def check_1d(self):
+ assert_array_equal(r_[1,2,3,4,5,6],array([1,2,3,4,5,6]))
+ b = ones(5)
+ c = r_[b,0,0,b]
+ assert_array_equal(c,[1,1,1,1,1,0,0,1,1,1,1,1])
+
+ def check_2d(self):
+ b = rand(5,5)
+ c = rand(5,5)
+ d = r_[b,c,'1'] # append columns
+ assert(d.shape == (5,10))
+ assert_array_equal(d[:,:5],b)
+ assert_array_equal(d[:,5:],c)
+ # default (no '1' flag) stacks along the first axis instead
+ d = r_[b,c]
+ assert(d.shape == (10,5))
+ assert_array_equal(d[:5,:],b)
+ assert_array_equal(d[5:,:],c)
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/base/tests/test_ma.py b/numpy/base/tests/test_ma.py
new file mode 100644
index 000000000..884a4a277
--- /dev/null
+++ b/numpy/base/tests/test_ma.py
@@ -0,0 +1,637 @@
+import scipy
+import types, time
+from scipy.base.ma import *
+from scipy.testing import ScipyTestCase, ScipyTest
+def eq(v,w):
+ """Return allclose(v, w); on mismatch print both values for debugging."""
+ result = allclose(v,w)
+ if not result:
+ print """Not eq:
+%s
+----
+%s"""% (str(v), str(w))
+ return result
+
+class test_ma(ScipyTestCase):
+ def __init__(self, *args, **kwds):
+ ScipyTestCase.__init__(self, *args, **kwds)
+ self.setUp()
+
+ def setUp (self):
+ x=scipy.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y=scipy.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ a10 = 10.
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1]
+ xm = array(x, mask=m1)
+ ym = array(y, mask=m2)
+ z = scipy.array([-.5, 0., .5, .8])
+ zm = array(z, mask=[0,1,0,0])
+ xf = scipy.where(m1, 1.e+20, x)
+ s = x.shape
+ xm.set_fill_value(1.e+20)
+ self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
+
+ def check_testBasic1d(self):
+ "Test of basic array creation and properties in 1 dimension."
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ self.failIf(isMaskedArray(x))
+ self.failUnless(isMaskedArray(xm))
+ self.assertEqual(shape(xm), s)
+ self.assertEqual(xm.shape, s)
+ self.assertEqual(xm.dtype, x.dtype)
+ self.assertEqual(xm.dtypechar, x.dtypechar)
+ self.assertEqual( xm.size , reduce(lambda x,y:x*y, s))
+ self.assertEqual(count(xm) , len(m1) - reduce(lambda x,y:x+y, m1))
+ self.failUnless(eq(xm, xf))
+ self.failUnless(eq(filled(xm, 1.e20), xf))
+ self.failUnless(eq(x, xm))
+
+ def check_testBasic2d(self):
+ "Test of basic array creation and properties in 2 dimensions."
+ for s in [(4,3), (6,2)]:
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ x.shape = s
+ y.shape = s
+ xm.shape = s
+ ym.shape = s
+ xf.shape = s
+
+ self.failIf(isMaskedArray(x))
+ self.failUnless(isMaskedArray(xm))
+ self.assertEqual(shape(xm), s)
+ self.assertEqual(xm.shape, s)
+ self.assertEqual( xm.size , reduce(lambda x,y:x*y, s))
+ self.assertEqual( count(xm) , len(m1) - reduce(lambda x,y:x+y, m1))
+ self.failUnless(eq(xm, xf))
+ self.failUnless(eq(filled(xm, 1.e20), xf))
+ self.failUnless(eq(x, xm))
+ self.setUp()
+
+ def check_testArithmetic (self):
+ "Test of basic arithmetic."
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ a2d = array([[1,2],[0,4]])
+ a2dm = masked_array(a2d, [[0,0],[1,0]])
+ self.failUnless(eq (a2d * a2d, a2d * a2dm))
+ self.failUnless(eq (a2d + a2d, a2d + a2dm))
+ self.failUnless(eq (a2d - a2d, a2d - a2dm))
+ for s in [(12,), (4,3), (2,6)]:
+ x = x.reshape(s)
+ y = y.reshape(s)
+ xm = xm.reshape(s)
+ ym = ym.reshape(s)
+ xf = xf.reshape(s)
+ self.failUnless(eq(-x, -xm))
+ self.failUnless(eq(x + y, xm + ym))
+ self.failUnless(eq(x - y, xm - ym))
+ self.failUnless(eq(x * y, xm * ym))
+ self.failUnless(eq(x / y, xm / ym))
+ self.failUnless(eq(a10 + y, a10 + ym))
+ self.failUnless(eq(a10 - y, a10 - ym))
+ self.failUnless(eq(a10 * y, a10 * ym))
+ self.failUnless(eq(a10 / y, a10 / ym))
+ self.failUnless(eq(x + a10, xm + a10))
+ self.failUnless(eq(x - a10, xm - a10))
+ self.failUnless(eq(x * a10, xm * a10))
+ self.failUnless(eq(x / a10, xm / a10))
+ self.failUnless(eq(x**2, xm**2))
+ self.failUnless(eq(abs(x)**2.5, abs(xm) **2.5))
+ self.failUnless(eq(x**y, xm**ym))
+ self.failUnless(eq(scipy.add(x,y), add(xm, ym)))
+ self.failUnless(eq(scipy.subtract(x,y), subtract(xm, ym)))
+ self.failUnless(eq(scipy.multiply(x,y), multiply(xm, ym)))
+ self.failUnless(eq(scipy.divide(x,y), divide(xm, ym)))
+
+
+ def check_testUfuncs1 (self):
+ "Test various functions such as sin, cos."
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ self.failUnless (eq(scipy.cos(x), cos(xm)))
+ self.failUnless (eq(scipy.cosh(x), cosh(xm)))
+ self.failUnless (eq(scipy.sin(x), sin(xm)))
+ self.failUnless (eq(scipy.sinh(x), sinh(xm)))
+ self.failUnless (eq(scipy.tan(x), tan(xm)))
+ self.failUnless (eq(scipy.tanh(x), tanh(xm)))
+ self.failUnless (eq(scipy.sqrt(abs(x)), sqrt(xm)))
+ self.failUnless (eq(scipy.log(abs(x)), log(xm)))
+ self.failUnless (eq(scipy.log10(abs(x)), log10(xm)))
+ self.failUnless (eq(scipy.exp(x), exp(xm)))
+ self.failUnless (eq(scipy.arcsin(z), arcsin(zm)))
+ self.failUnless (eq(scipy.arccos(z), arccos(zm)))
+ self.failUnless (eq(scipy.arctan(z), arctan(zm)))
+ self.failUnless (eq(scipy.arctan2(x, y), arctan2(xm, ym)))
+ self.failUnless (eq(scipy.absolute(x), absolute(xm)))
+ self.failUnless (eq(scipy.equal(x,y), equal(xm, ym)))
+ self.failUnless (eq(scipy.not_equal(x,y), not_equal(xm, ym)))
+ self.failUnless (eq(scipy.less(x,y), less(xm, ym)))
+ self.failUnless (eq(scipy.greater(x,y), greater(xm, ym)))
+ self.failUnless (eq(scipy.less_equal(x,y), less_equal(xm, ym)))
+ self.failUnless (eq(scipy.greater_equal(x,y), greater_equal(xm, ym)))
+ self.failUnless (eq(scipy.conjugate(x), conjugate(xm)))
+ self.failUnless (eq(scipy.concatenate((x,y)), concatenate((xm,ym))))
+ self.failUnless (eq(scipy.concatenate((x,y)), concatenate((x,y))))
+ self.failUnless (eq(scipy.concatenate((x,y)), concatenate((xm,y))))
+ self.failUnless (eq(scipy.concatenate((x,y,x)), concatenate((x,ym,x))))
+
+ def check_xtestCount (self):
+ "Test count"
+ ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
+ self.failUnless( isinstance(count(ott), types.IntType))
+ self.assertEqual(3, count(ott))
+ self.assertEqual(1, count(1))
+ self.failUnless (eq(0, array(1,mask=[1])))
+ ott=ott.reshape((2,2))
+ assert isMaskedArray(count(ott,0))
+ assert isinstance(count(ott), types.IntType)
+ self.failUnless (eq(3, count(ott)))
+ assert getmask(count(ott,0)) is None
+ self.failUnless (eq([1,2],count(ott,0)))
+
+ def check_testMinMax (self):
+ "Test minimum and maximum."
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ xr = scipy.ravel(x) #max doesn't work if shaped
+ xmr = ravel(xm)
+ self.failUnless (eq(max(xr), maximum(xmr))) #true because of careful selection of data
+ self.failUnless (eq(min(xr), minimum(xmr))) #true because of careful selection of data
+
+ def check_testAddSumProd (self):
+ "Test add, sum, product."
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ self.failUnless (eq(scipy.add.reduce(x), add.reduce(x)))
+ self.failUnless (eq(scipy.add.accumulate(x), add.accumulate(x)))
+ self.failUnless (eq(4, sum(array(4))))
+ self.failUnless (eq(4, sum(array(4), axis=0)))
+ self.failUnless (eq(scipy.sum(x), sum(x)))
+ self.failUnless (eq(scipy.sum(filled(xm,0)), sum(xm)))
+ self.failUnless (eq(scipy.sum(x,0), sum(x,0)))
+ self.failUnless (eq(scipy.product(x), product(x)))
+ self.failUnless (eq(scipy.product(x,0), product(x,0)))
+ self.failUnless (eq(scipy.product(filled(xm,1)), product(xm)))
+ if len(s) > 1:
+ self.failUnless (eq(scipy.concatenate((x,y),1), concatenate((xm,ym),1)))
+ self.failUnless (eq(scipy.add.reduce(x,1), add.reduce(x,1)))
+ self.failUnless (eq(scipy.sum(x,1), sum(x,1)))
+ self.failUnless (eq(scipy.product(x,1), product(x,1)))
+
+
+ def check_testCI(self):
+ "Test of conversions and indexing"
+ x1 = scipy.array([1,2,4,3])
+ x2 = array(x1, mask = [1,0,0,0])
+ x3 = array(x1, mask = [0,1,0,1])
+ x4 = array(x1)
+ # test conversion to strings
+ junk, garbage = str(x2), repr(x2)
+ assert eq(scipy.sort(x1),sort(x2, fill_value=0))
+ # tests of indexing
+ assert type(x2[1]) is type(x1[1])
+ assert x1[1] == x2[1]
+ assert x2[0] is masked
+ assert eq(x1[2],x2[2])
+ assert eq(x1[2:5],x2[2:5])
+ assert eq(x1[:],x2[:])
+ assert eq(x1[1:], x3[1:])
+ x1[2]=9
+ x2[2]=9
+ assert eq(x1,x2)
+ x1[1:3] = 99
+ x2[1:3] = 99
+ assert eq(x1,x2)
+ x2[1] = masked
+ assert eq(x1,x2)
+ x2[1:3]=masked
+ assert eq(x1,x2)
+ x2[:] = x1
+ x2[1] = masked
+ assert allequal(getmask(x2),array([0,1,0,0]))
+ x3[:] = masked_array([1,2,3,4],[0,1,1,0])
+ assert allequal(getmask(x3), array([0,1,1,0]))
+ x4[:] = masked_array([1,2,3,4],[0,1,1,0])
+ assert allequal(getmask(x4), array([0,1,1,0]))
+ assert allequal(x4, array([1,2,3,4]))
+ x1 = scipy.arange(5)*1.0
+ x2 = masked_values(x1, 3.0)
+ assert eq(x1,x2)
+ assert allequal(array([0,0,0,1,0],MaskType), x2.mask)
+ assert eq(3.0, x2.fill_value())
+ x1 = array([1,'hello',2,3],object)
+ x2 = scipy.array([1,'hello',2,3],object)
+ s1 = x1[1].item()
+ s2 = x2[1].item()
+ self.assertEqual(type(s2), str)
+ self.assertEqual(type(s1), str)
+ self.assertEqual(s1, s2)
+ assert x1[1:1].shape == (0,)
+
+ def check_testCopySize(self):
+ "Tests of some subtle points of copying and sizing."
+ n = [0,0,1,0,0]
+ m = make_mask(n)
+ m2 = make_mask(m)
+ self.failUnless(m is m2)
+ m3 = make_mask(m, copy=1)
+ self.failUnless(m is not m3)
+
+ x1 = scipy.arange(5)
+ y1 = array(x1, mask=m)
+ self.failUnless( y1.raw_data() is not x1)
+ self.failUnless( allequal(x1,y1.raw_data()))
+ self.failUnless( y1.mask is m)
+
+ y1a = array(y1, copy=0)
+ self.failUnless( y1a.raw_data() is y1.raw_data())
+ self.failUnless( y1a.mask is y1.mask)
+
+ y2 = array(x1, mask=m, copy=0)
+ self.failUnless( y2.raw_data() is x1)
+ self.failUnless( y2.mask is m)
+ self.failUnless( y2[2] is masked)
+ y2[2]=9
+ self.failUnless( y2[2] is not masked)
+ self.failUnless( y2.mask is not m)
+ self.failUnless( allequal(y2.mask, 0))
+
+ y3 = array(x1*1.0, mask=m)
+ self.failUnless(filled(y3).dtype is (x1*1.0).dtype)
+
+ x4 = arange(4)
+ x4[2] = masked
+ y4 = resize(x4, (8,))
+ self.failUnless( eq(concatenate([x4,x4]), y4))
+ self.failUnless( eq(getmask(y4),[0,0,1,0,0,0,1,0]))
+ y5 = repeat(x4, (2,2,2,2))
+ self.failUnless( eq(y5, [0,0,1,1,2,2,3,3]))
+ y6 = repeat(x4, 2)
+ self.failUnless( eq(y5, y6))
+
+ def check_testPut(self):
+ "Test of put"
+ d = arange(5)
+ n = [0,0,0,1,1]
+ m = make_mask(n)
+ x = array(d, mask = m)
+ self.failUnless( x[3] is masked)
+ self.failUnless( x[4] is masked)
+ x[[1,4]] = [10,40]
+ self.failUnless( x.mask is not m)
+ self.failUnless( x[3] is masked)
+ self.failUnless( x[4] is not masked)
+ self.failUnless( eq(x, [0,10,2,-1,40]))
+
+ x = array(d, mask = m)
+ x.put([-1,100,200])
+ self.failUnless( eq(x, [-1,100,200,0,0]))
+ self.failUnless( x[3] is masked)
+ self.failUnless( x[4] is masked)
+
+ x = array(d, mask = m)
+ x.putmask([30,40])
+ self.failUnless( eq(x, [0,1,2,30,40]))
+ self.failUnless( x.mask is None)
+
+ x = array(d, mask = m)
+ y = x.compressed()
+ z = array(x, mask = m)
+ z.put(y)
+ assert eq (x, z)
+
+ def check_testMaPut(self):
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
+ i = scipy.nonzero(m)
+ putmask(xm, m, z)
+ assert take(xm, i) == z
+ put(ym, i, zm)
+ assert take(ym, i) == zm
+
+ def check_testOddFeatures(self):
+ "Test of other odd features"
+ x = arange(20); x=x.reshape(4,5)
+ x.flat[5] = 12
+ assert x[1,0] == 12
+ z = x + 10j * x
+ assert eq(z.real, x)
+ assert eq(z.imag, 10*x)
+ assert eq((z*conjugate(z)).real, 101*x*x)
+ z.imag[...] = 0.0
+
+ x = arange(10)
+ x[3] = masked
+ assert str(x[3]) == str(masked)
+ c = x >= 8
+ assert count(where(c,masked,masked)) == 0
+ assert shape(where(c,masked,masked)) == c.shape
+ z = where(c , x, masked)
+ assert z.dtype is x.dtype
+ assert z[3] is masked
+ assert z[4] is masked
+ assert z[7] is masked
+ assert z[8] is not masked
+ assert z[9] is not masked
+ assert eq(x,z)
+ z = where(c , masked, x)
+ assert z.dtype is x.dtype
+ assert z[3] is masked
+ assert z[4] is not masked
+ assert z[7] is not masked
+ assert z[8] is masked
+ assert z[9] is masked
+ z = masked_where(c, x)
+ assert z.dtype is x.dtype
+ assert z[3] is masked
+ assert z[4] is not masked
+ assert z[7] is not masked
+ assert z[8] is masked
+ assert z[9] is masked
+ assert eq(x,z)
+ x = array([1.,2.,3.,4.,5.])
+ c = array([1,1,1,0,0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert eq(z, [1.,2.,0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert eq(z, [1.,2.,0., -4., -5])
+ assert z[0] is masked
+ assert z[1] is not masked
+ assert z[2] is masked
+ assert eq(masked_where(greater(x, 2), x), masked_greater(x,2))
+ assert eq(masked_where(greater_equal(x, 2), x), masked_greater_equal(x,2))
+ assert eq(masked_where(less(x, 2), x), masked_less(x,2))
+ assert eq(masked_where(less_equal(x, 2), x), masked_less_equal(x,2))
+ assert eq(masked_where(not_equal(x, 2), x), masked_not_equal(x,2))
+ assert eq(masked_where(equal(x, 2), x), masked_equal(x,2))
+ assert eq(masked_where(not_equal(x,2), x), masked_not_equal(x,2))
+ assert eq(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4])
+ assert eq(masked_outside(range(5), 1, 3),[199,1,2,3,199])
+ assert eq(masked_inside(array(range(5), mask=[1,0,0,0,0]), 1, 3).mask, [1,1,1,1,0])
+ assert eq(masked_outside(array(range(5), mask=[0,1,0,0,0]), 1, 3).mask, [1,1,0,0,1])
+ assert eq(masked_equal(array(range(5), mask=[1,0,0,0,0]), 2).mask, [1,0,1,0,0])
+ assert eq(masked_not_equal(array([2,2,1,2,1], mask=[1,0,0,0,0]), 2).mask, [1,0,1,0,1])
+ assert eq(masked_where([1,1,0,0,0], [1,2,3,4,5]), [99,99,3,4,5])
+ atest = ones((10,10,10), dtype=float32)
+ btest = zeros(atest.shape, MaskType)
+ ctest = masked_where(btest,atest)
+ assert eq(atest,ctest)
+ z = choose(c, (-x, x))
+ assert eq(z, [1.,2.,0., -4., -5])
+ assert z[0] is masked
+ assert z[1] is not masked
+ assert z[2] is masked
+ x = arange(6)
+ x[5] = masked
+ y = arange(6)*10
+ y[2]= masked
+ c = array([1,1,1,0,0,0], mask=[1,0,0,0,0,0])
+ cm = c.filled(1)
+ z = where(c,x,y)
+ zm = where(cm,x,y)
+ assert eq(z, zm)
+ assert getmask(zm) is None
+ assert eq(zm, [0,1,2,30,40,50])
+ z = where(c, masked, 1)
+ assert eq(z, [99,99,99,1,1,1])
+ z = where(c, 1, masked)
+ assert eq(z, [99, 1, 1, 99, 99, 99])
+
+ def check_testMinMax(self):
+ "Test of minumum, maximum."
+ assert eq(minimum([1,2,3],[4,0,9]), [1,0,3])
+ assert eq(maximum([1,2,3],[4,0,9]), [4,2,9])
+ x = arange(5)
+ y = arange(5) - 2
+ x[3] = masked
+ y[0] = masked
+ assert eq(minimum(x,y), where(less(x,y), x, y))
+ assert eq(maximum(x,y), where(greater(x,y), x, y))
+ assert minimum(x) == 0
+ assert maximum(x) == 4
+
+ def check_testTakeTransposeInnerOuter(self):
+ "Test of take, transpose, inner, outer products"
+ x = arange(24)
+ y = scipy.arange(24)
+ x[5:6] = masked
+ x=x.reshape(2,3,4)
+ y=y.reshape(2,3,4)
+ assert eq(scipy.transpose(y,(2,0,1)), transpose(x,(2,0,1)))
+ assert eq(scipy.take(y, (2,0,1), 1), take(x, (2,0,1), 1))
+ assert eq(scipy.innerproduct(filled(x,0),filled(y,0)),
+ innerproduct(x, y))
+ assert eq(scipy.outerproduct(filled(x,0),filled(y,0)),
+ outerproduct(x, y))
+ y = array(['abc', 1, 'def', 2, 3], object)
+ y[2] = masked
+ t = take(y,[0,3,4])
+ assert t[0].item() == 'abc'
+ assert t[1].item() == 2
+ assert t[2].item() == 3
+
+ def check_testInplace(self):
+ """Test of inplace operations and rich comparisons"""
+ y = arange(10)
+
+ x = arange(10)
+ xm = arange(10)
+ xm[2] = masked
+ x += 1
+ assert eq(x, y+1)
+ xm += 1
+ assert eq(x, y+1)
+
+ x = arange(10)
+ xm = arange(10)
+ xm[2] = masked
+ x -= 1
+ assert eq(x, y-1)
+ xm -= 1
+ assert eq(xm, y-1)
+
+ x = arange(10)*1.0
+ xm = arange(10)*1.0
+ xm[2] = masked
+ x *= 2.0
+ assert eq(x, y*2)
+ xm *= 2.0
+ assert eq(xm, y*2)
+
+ x = arange(10)*2
+ xm = arange(10)
+ xm[2] = masked
+ x /= 2
+ assert eq(x, y)
+ xm /= 2
+ assert eq(x, y)
+
+ x = arange(10)*1.0
+ xm = arange(10)*1.0
+ xm[2] = masked
+ x /= 2.0
+ assert eq(x, y/2.0)
+ xm /= arange(10)
+ assert eq(xm, ones((10,)))
+
+ x = arange(10).astype(float32)
+ xm = arange(10)
+ xm[2] = masked
+ id1 = id(x.raw_data())
+ x += 1.
+ assert id1 == id(x.raw_data())
+ assert eq(x, y+1.)
+
+ def check_testPickle(self):
+ "Test of pickling"
+ x = arange(12)
+ x[4:10:2] = masked
+ x=x.reshape(4,3)
+ f = open('test9.pik','wb')
+ import pickle
+ pickle.dump(x, f)
+ f.close()
+ f = open('test9.pik', 'rb')
+ y = pickle.load(f)
+ assert eq(x,y)
+
+ def check_testMasked(self):
+ "Test of masked element"
+ xx=arange(6)
+ xx[1] = masked
+ self.failUnless(xx[1] is masked)
+ self.failUnlessRaises(Exception, lambda x,y: x+y, masked, masked)
+ self.failUnlessRaises(Exception, lambda x,y: x+y, masked, 2)
+ self.failUnlessRaises(Exception, lambda x,y: x+y, masked, xx)
+ self.failUnlessRaises(Exception, lambda x,y: x+y, xx, masked)
+
+ def check_testAverage1(self):
+ "Test of average."
+ ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
+ self.failUnless(eq(2.0, average(ott)))
+ self.failUnless(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
+ result, wts = average(ott, weights=[1.,1.,2.,1.], returned=1)
+ self.failUnless(eq(2.0, result))
+ self.failUnless(wts == 4.0)
+ ott[:] = masked
+ self.failUnless(average(ott) is masked)
+ ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
+ ott=ott.reshape(2,2)
+ ott[:,1] = masked
+ self.failUnless(eq(average(ott), [2.0, 0.0]))
+ self.failUnless(average(ott,axis=1)[0] is masked)
+ self.failUnless(eq([2.,0.], average(ott)))
+ result, wts = average(ott, returned=1)
+ self.failUnless(eq(wts, [1., 0.]))
+
+ def check_testAverage2(self):
+ "More tests of average."
+ w1 = [0,1,1,1,1,0]
+ w2 = [[0,1,1,1,1,0],[1,0,0,0,0,1]]
+ x=arange(6)
+ self.failUnless(allclose(average(x), 2.5))
+ self.failUnless(allclose(average(x, weights=w1), 2.5))
+ y=array([arange(6), 2.0*arange(6)])
+ self.failUnless(allclose(average(y, None), scipy.add.reduce(scipy.arange(6))*3./12.))
+ self.failUnless(allclose(average(y, axis=0), scipy.arange(6) * 3./2.))
+ self.failUnless(allclose(average(y, axis=1), [average(x), average(x) * 2.0]))
+ self.failUnless(allclose(average(y, None, weights=w2), 20./6.))
+ self.failUnless(allclose(average(y, axis=0, weights=w2), [0.,1.,2.,3.,4.,10.]))
+ self.failUnless(allclose(average(y, axis=1), [average(x), average(x) * 2.0]))
+ m1 = zeros(6)
+ m2 = [0,0,1,1,0,0]
+ m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]]
+ m4 = ones(6)
+ m5 = [0, 1, 1, 1, 1, 1]
+ self.failUnless(allclose(average(masked_array(x, m1)), 2.5))
+ self.failUnless(allclose(average(masked_array(x, m2)), 2.5))
+ self.failUnless(average(masked_array(x, m4)) is masked)
+ self.assertEqual(average(masked_array(x, m5)), 0.0)
+ self.assertEqual(count(average(masked_array(x, m4))), 0)
+ z = masked_array(y, m3)
+ self.failUnless(allclose(average(z, None), 20./6.))
+ self.failUnless(allclose(average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5]))
+ self.failUnless(allclose(average(z, axis=1), [2.5, 5.0]))
+ self.failUnless(allclose( average(z,weights=w2), [0.,1., 99., 99., 4.0, 10.0]))
+
+ a = arange(6)
+ b = arange(6) * 3
+ r1, w1 = average([[a,b],[b,a]], axis=1, returned=1)
+ self.assertEqual(shape(r1) , shape(w1))
+ self.assertEqual(r1.shape , w1.shape)
+ r2, w2 = average(ones((2,2,3)), axis=0, weights=[3,1], returned=1)
+ self.assertEqual(shape(w2) , shape(r2))
+ r2, w2 = average(ones((2,2,3)), returned=1)
+ self.assertEqual(shape(w2) , shape(r2))
+ r2, w2 = average(ones((2,2,3)), weights=ones((2,2,3)), returned=1)
+ self.failUnless(shape(w2) == shape(r2))
+ a2d = array([[1,2],[0,4]], float)
+ a2dm = masked_array(a2d, [[0,0],[1,0]])
+ a2da = average(a2d)
+ self.failUnless(eq (a2da, [0.5, 3.0]))
+ a2dma = average(a2dm)
+ self.failUnless(eq( a2dma, [1.0, 3.0]))
+ a2dma = average(a2dm, axis=None)
+ self.failUnless(eq(a2dma, 7./3.))
+ a2dma = average(a2dm, axis=1)
+ self.failUnless(eq(a2dma, [1.5, 4.0]))
+
+ def check_testToPython(self):
+ self.assertEqual(1, int(array(1)))
+ self.assertEqual(1.0, float(array(1)))
+ self.assertEqual(1, int(array([[[1]]])))
+ self.assertEqual(1.0, float(array([[1]])))
+ self.failUnlessRaises(ValueError, float, array([1,1]))
+ self.failUnlessRaises(MAError, float, array([1],mask=[1]))
+
+def timingTest():
+ for f in [testf, testinplace]:
+ for n in [1000,10000,50000]:
+ t = testta(n, f)
+ t1 = testtb(n, f)
+ t2 = testtc(n, f)
+ print f.test_name
+ print """\
+n = %7d
+scipy time (ms) %6.1f
+MA maskless ratio %6.1f
+MA masked ratio %6.1f
+""" % (n, t*1000.0, t1/t, t2/t)
+
+def testta(n, f):
+ x=scipy.arange(n) + 1.0
+ tn0 = time.time()
+ z = f(x)
+ return time.time() - tn0
+
+def testtb(n, f):
+ x=arange(n) + 1.0
+ tn0 = time.time()
+ z = f(x)
+ return time.time() - tn0
+
+def testtc(n, f):
+ x=arange(n) + 1.0
+ x[0] = masked
+ tn0 = time.time()
+ z = f(x)
+ return time.time() - tn0
+
+def testf(x):
+ for i in range(25):
+ y = x **2 + 2.0 * x - 1.0
+ w = x **2 + 1.0
+ z = (y / w) ** 2
+ return z
+testf.test_name = 'Simple arithmetic'
+
+def testinplace(x):
+ for i in range(25):
+ y = x**2
+ y += 2.0*x
+ y -= 1.0
+ y /= x
+ return y
+testinplace.test_name = 'Inplace operations'
+
+if __name__ == "__main__":
+ ScipyTest('scipy.base.ma').run()
+ #timingTest()
diff --git a/numpy/base/tests/test_matrix.py b/numpy/base/tests/test_matrix.py
new file mode 100644
index 000000000..59b0a131e
--- /dev/null
+++ b/numpy/base/tests/test_matrix.py
@@ -0,0 +1,117 @@
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;reload(scipy.base)
+from scipy.base import *
+restore_path()
+
+class test_ctor(ScipyTestCase):
+ def test_basic(self):
+ A = array([[1,2],[3,4]])
+ mA = matrix(A)
+ assert all(mA.A == A)
+
+ B = bmat("A,A;A,A")
+ C = bmat([[A,A], [A,A]])
+ D = array([[1,2,1,2],
+ [3,4,3,4],
+ [1,2,1,2],
+ [3,4,3,4]])
+ assert all(B.A == D)
+ assert all(C.A == D)
+
+ vec = arange(5)
+ mvec = matrix(vec)
+ assert mvec.shape == (1,5)
+
+class test_properties(ScipyTestCase):
+ def test_basic(self):
+ import scipy.corelinalg as linalg
+
+ A = array([[1., 2.],
+ [3., 4.]])
+ mA = matrix(A)
+ assert allclose(linalg.inv(A), mA.I)
+ assert all(array(transpose(A) == mA.T))
+ assert all(array(transpose(A) == mA.H))
+ assert all(A == mA.A)
+
+ B = A + 2j*A
+ mB = matrix(B)
+ assert allclose(linalg.inv(B), mB.I)
+ assert all(array(transpose(B) == mB.T))
+ assert all(array(conjugate(transpose(B)) == mB.H))
+
+ def test_comparisons(self):
+ A = arange(100).reshape(10,10)
+ mA = matrix(A)
+ mB = matrix(A) + 0.1
+ assert all(mB == A+0.1)
+ assert all(mB == matrix(A+0.1))
+ assert not any(mB == matrix(A-0.1))
+ assert all(mA < mB)
+ assert all(mA <= mB)
+ assert all(mA <= mA)
+ assert not any(mA < mA)
+
+ assert not any(mB < mA)
+ assert all(mB >= mA)
+ assert all(mB >= mB)
+ assert not any(mB > mB)
+
+ assert all(mA == mA)
+ assert not any(mA == mB)
+ assert all(mB != mA)
+
+ assert not all(abs(mA) > 0)
+ assert all(abs(mB > 0))
+
+ def test_asmatrix(self):
+ A = arange(100).reshape(10,10)
+ mA = asmatrix(A)
+ mB = matrix(A)
+ A[0,0] = -10
+ assert A[0,0] == mA[0,0]
+ assert A[0,0] != mB[0,0]
+
+class test_autocasting(ScipyTestCase):
+ def test_basic(self):
+ A = arange(100).reshape(10,10)
+ mA = matrix(A)
+
+ mB = mA.copy()
+ O = ones((10,10), float64) * 0.1
+ mB = mB + O
+ assert mB.dtype == float64
+ assert all(mA != mB)
+ assert all(mB == mA+0.1)
+
+ mC = mA.copy()
+ O = ones((10,10), complex128)
+ mC = mC * O
+ assert mC.dtype == complex128
+ assert all(mA != mB)
+
+class test_algebra(ScipyTestCase):
+ def test_basic(self):
+ import scipy.corelinalg as linalg
+
+ A = array([[1., 2.],
+ [3., 4.]])
+ mA = matrix(A)
+
+ B = identity(2)
+ for i in xrange(6):
+ assert allclose((mA ** i).A, B)
+ B = dot(B, A)
+
+ Ainv = linalg.inv(A)
+ B = identity(2)
+ for i in xrange(6):
+ assert allclose((mA ** -i).A, B)
+ B = dot(B, Ainv)
+
+ assert allclose((mA * mA).A, dot(A, A))
+ assert allclose((mA + mA).A, (A + A))
+ assert allclose((3*mA).A, (3*A))
+
diff --git a/numpy/base/tests/test_polynomial.py b/numpy/base/tests/test_polynomial.py
new file mode 100644
index 000000000..51d4b5707
--- /dev/null
+++ b/numpy/base/tests/test_polynomial.py
@@ -0,0 +1,83 @@
+"""
+>>> import scipy.base as nx
+>>> from scipy.base.polynomial import poly1d, polydiv
+
+>>> p = poly1d([1.,2,3])
+>>> p
+poly1d([ 1., 2., 3.])
+>>> print p
+ 2
+1 x + 2 x + 3
+>>> q = poly1d([3.,2,1])
+>>> q
+poly1d([ 3., 2., 1.])
+>>> print q
+ 2
+3 x + 2 x + 1
+
+>>> p(0)
+3.0
+>>> p(5)
+38.0
+>>> q(0)
+1.0
+>>> q(5)
+86.0
+
+>>> p * q
+poly1d([ 3., 8., 14., 8., 3.])
+>>> p / q
+(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
+>>> p + q
+poly1d([ 4., 4., 4.])
+>>> p - q
+poly1d([-2., 0., 2.])
+>>> p ** 4
+poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
+
+>>> p(q)
+poly1d([ 9., 12., 16., 8., 6.])
+>>> q(p)
+poly1d([ 3., 12., 32., 40., 34.])
+
+>>> nx.asarray(p)
+array([ 1., 2., 3.])
+>>> len(p)
+2
+
+>>> p[0], p[1], p[2], p[3]
+(3.0, 2.0, 1.0, 0)
+
+>>> p.integ()
+poly1d([ 0.33333333, 1. , 3. , 0. ])
+>>> p.integ(1)
+poly1d([ 0.33333333, 1. , 3. , 0. ])
+>>> p.integ(5)
+poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
+ 0. , 0. , 0. ])
+>>> p.deriv()
+poly1d([ 2., 2.])
+>>> p.deriv(2)
+poly1d([ 2.])
+
+>>> q = poly1d([1.,2,3], variable='y')
+>>> print q
+ 2
+1 y + 2 y + 3
+>>> q = poly1d([1.,2,3], variable='lambda')
+>>> print q
+ 2
+1 lambda + 2 lambda + 3
+
+>>> polydiv(poly1d([1,0,-1]), poly1d([1,1]))
+(poly1d([ 1., -1.]), poly1d([ 0.]))
+"""
+
+from scipy.testing import *
+
+import doctest
+def test_suite(level=1):
+ return doctest.DocTestSuite()
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/base/tests/test_records.py b/numpy/base/tests/test_records.py
new file mode 100644
index 000000000..8135a55a8
--- /dev/null
+++ b/numpy/base/tests/test_records.py
@@ -0,0 +1,44 @@
+
+from scipy.testing import *
+set_package_path()
+import os as _os
+import scipy.base;reload(scipy.base)
+from scipy.base import *
+from scipy.base import records as rec
+restore_path()
+
+class test_fromrecords(ScipyTestCase):
+ def check_fromrecords(self):
+ r = rec.fromrecords([[456,'dbe',1.2],[2,'de',1.3]],names='col1,col2,col3')
+ assert_equal(r[0].item(),(456, 'dbe', 1.2))
+
+ def check_method_array(self):
+ r = rec.array('abcdefg'*100,formats='i2,a3,i4',shape=3,byteorder='big')
+ assert_equal(r[1].item(),(25444, 'efg', 1633837924))
+
+ def check_method_array2(self):
+ r=rec.array([(1,11,'a'),(2,22,'b'),(3,33,'c'),(4,44,'d'),(5,55,'ex'),(6,66,'f'),(7,77,'g')],formats='u1,f4,a1')
+ assert_equal(r[1].item(),(2, 22.0, 'b'))
+
+ def check_recarray_slices(self):
+ r=rec.array([(1,11,'a'),(2,22,'b'),(3,33,'c'),(4,44,'d'),(5,55,'ex'),(6,66,'f'),(7,77,'g')],formats='u1,f4,a1')
+ assert_equal(r[1::2][1].item(),(4, 44.0, 'd'))
+
+ def check_recarray_fromarrays(self):
+ x1 = array([1,2,3,4])
+ x2 = array(['a','dd','xyz','12'])
+ x3 = array([1.1,2,3,4])
+ r = rec.fromarrays([x1,x2,x3],names='a,b,c')
+ assert_equal(r[1].item(),(2,'dd',2.0))
+ x1[1] = 34
+ assert_equal(r.a,array([1,2,3,4]))
+
+ def check_recarray_fromfile(self):
+ __path__ = _os.path.split(__file__)
+ filename = _os.path.join(__path__[0], "testdata.fits")
+ fd = open(filename)
+ fd.seek(2880*2)
+ r = rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/base/tests/test_shape_base.py b/numpy/base/tests/test_shape_base.py
new file mode 100644
index 000000000..005868e96
--- /dev/null
+++ b/numpy/base/tests/test_shape_base.py
@@ -0,0 +1,364 @@
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;
+from scipy.base import *
+restore_path()
+
+class test_apply_along_axis(ScipyTestCase):
+ def check_simple(self):
+ a = ones((20,10),'d')
+ assert_array_equal(apply_along_axis(len,0,a),len(a)*ones(shape(a)[1]))
+ def check_simple101(self,level=11):
+ # This test causes segmentation fault (Numeric 23.3,23.6,Python 2.3.4)
+ # when enabled and shape(a)[1]>100. See Issue 202.
+ a = ones((10,101),'d')
+ assert_array_equal(apply_along_axis(len,0,a),len(a)*ones(shape(a)[1]))
+
+class test_array_split(ScipyTestCase):
+ def check_integer_0_split(self):
+ a = arange(10)
+ try:
+ res = array_split(a,0)
+ assert(0) # it should have thrown a value error
+ except ValueError:
+ pass
+ def check_integer_split(self):
+ a = arange(10)
+ res = array_split(a,1)
+ desired = [arange(10)]
+ compare_results(res,desired)
+
+ res = array_split(a,2)
+ desired = [arange(5),arange(5,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,3)
+ desired = [arange(4),arange(4,7),arange(7,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,4)
+ desired = [arange(3),arange(3,6),arange(6,8),arange(8,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,5)
+ desired = [arange(2),arange(2,4),arange(4,6),arange(6,8),arange(8,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,6)
+ desired = [arange(2),arange(2,4),arange(4,6),arange(6,8),arange(8,9),
+ arange(9,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,7)
+ desired = [arange(2),arange(2,4),arange(4,6),arange(6,7),arange(7,8),
+ arange(8,9), arange(9,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,8)
+ desired = [arange(2),arange(2,4),arange(4,5),arange(5,6),arange(6,7),
+ arange(7,8), arange(8,9), arange(9,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,9)
+ desired = [arange(2),arange(2,3),arange(3,4),arange(4,5),arange(5,6),
+ arange(6,7), arange(7,8), arange(8,9), arange(9,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,10)
+ desired = [arange(1),arange(1,2),arange(2,3),arange(3,4),
+ arange(4,5),arange(5,6), arange(6,7), arange(7,8),
+ arange(8,9), arange(9,10)]
+ compare_results(res,desired)
+
+ res = array_split(a,11)
+ desired = [arange(1),arange(1,2),arange(2,3),arange(3,4),
+ arange(4,5),arange(5,6), arange(6,7), arange(7,8),
+ arange(8,9), arange(9,10),array([])]
+ compare_results(res,desired)
+ def check_integer_split_2D_rows(self):
+ a = array([arange(10),arange(10)])
+ res = array_split(a,3,axis=0)
+ desired = [array([arange(10)]),array([arange(10)]),array([])]
+ compare_results(res,desired)
+ def check_integer_split_2D_cols(self):
+ a = array([arange(10),arange(10)])
+ res = array_split(a,3,axis=-1)
+ desired = [array([arange(4),arange(4)]),
+ array([arange(4,7),arange(4,7)]),
+ array([arange(7,10),arange(7,10)])]
+ compare_results(res,desired)
+ def check_integer_split_2D_default(self):
+ """ This will fail if we change default axis
+ """
+ a = array([arange(10),arange(10)])
+ res = array_split(a,3)
+ desired = [array([arange(10)]),array([arange(10)]),array([])]
+ compare_results(res,desired)
+ #perhaps should check higher dimensions
+
+ def check_index_split_simple(self):
+ a = arange(10)
+ indices = [1,5,7]
+ res = array_split(a,indices,axis=-1)
+ desired = [arange(0,1),arange(1,5),arange(5,7),arange(7,10)]
+ compare_results(res,desired)
+
+ def check_index_split_low_bound(self):
+ a = arange(10)
+ indices = [0,5,7]
+ res = array_split(a,indices,axis=-1)
+ desired = [array([]),arange(0,5),arange(5,7),arange(7,10)]
+ compare_results(res,desired)
+ def check_index_split_high_bound(self):
+ a = arange(10)
+ indices = [0,5,7,10,12]
+ res = array_split(a,indices,axis=-1)
+ desired = [array([]),arange(0,5),arange(5,7),arange(7,10),
+ array([]),array([])]
+ compare_results(res,desired)
+
+class test_split(ScipyTestCase):
+ """* This function is essentially the same as array_split,
+ except that it test if splitting will result in an
+ equal split. Only test for this case.
+ *"""
+ def check_equal_split(self):
+ a = arange(10)
+ res = split(a,2)
+ desired = [arange(5),arange(5,10)]
+ compare_results(res,desired)
+
+ def check_unequal_split(self):
+ a = arange(10)
+ try:
+ res = split(a,3)
+ assert(0) # should raise an error
+ except ValueError:
+ pass
+
+class test_atleast_1d(ScipyTestCase):
+ def check_0D_array(self):
+ a = array(1); b = array(2);
+ res=map(atleast_1d,[a,b])
+ desired = [array([1]),array([2])]
+ assert_array_equal(res,desired)
+ def check_1D_array(self):
+ a = array([1,2]); b = array([2,3]);
+ res=map(atleast_1d,[a,b])
+ desired = [array([1,2]),array([2,3])]
+ assert_array_equal(res,desired)
+ def check_2D_array(self):
+ a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
+ res=map(atleast_1d,[a,b])
+ desired = [a,b]
+ assert_array_equal(res,desired)
+ def check_3D_array(self):
+ a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
+ a = array([a,a]);b = array([b,b]);
+ res=map(atleast_1d,[a,b])
+ desired = [a,b]
+ assert_array_equal(res,desired)
+ def check_r1array(self):
+ """ Test to make sure equivalent Travis O's r1array function
+ """
+ assert(atleast_1d(3).shape == (1,))
+ assert(atleast_1d(3j).shape == (1,))
+ assert(atleast_1d(3L).shape == (1,))
+ assert(atleast_1d(3.0).shape == (1,))
+ assert(atleast_1d([[2,3],[4,5]]).shape == (2,2))
+
+class test_atleast_2d(ScipyTestCase):
+ def check_0D_array(self):
+ a = array(1); b = array(2);
+ res=map(atleast_2d,[a,b])
+ desired = [array([[1]]),array([[2]])]
+ assert_array_equal(res,desired)
+ def check_1D_array(self):
+ a = array([1,2]); b = array([2,3]);
+ res=map(atleast_2d,[a,b])
+ desired = [array([[1,2]]),array([[2,3]])]
+ assert_array_equal(res,desired)
+ def check_2D_array(self):
+ a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
+ res=map(atleast_2d,[a,b])
+ desired = [a,b]
+ assert_array_equal(res,desired)
+ def check_3D_array(self):
+ a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
+ a = array([a,a]);b = array([b,b]);
+ res=map(atleast_2d,[a,b])
+ desired = [a,b]
+ assert_array_equal(res,desired)
+ def check_r2array(self):
+ """ Test to make sure equivalent Travis O's r2array function
+ """
+ assert(atleast_2d(3).shape == (1,1))
+ assert(atleast_2d([3j,1]).shape == (1,2))
+ assert(atleast_2d([[[3,1],[4,5]],[[3,5],[1,2]]]).shape == (2,2,2))
+
+class test_atleast_3d(ScipyTestCase):
+ def check_0D_array(self):
+ a = array(1); b = array(2);
+ res=map(atleast_3d,[a,b])
+ desired = [array([[[1]]]),array([[[2]]])]
+ assert_array_equal(res,desired)
+ def check_1D_array(self):
+ a = array([1,2]); b = array([2,3]);
+ res=map(atleast_3d,[a,b])
+ desired = [array([[[1],[2]]]),array([[[2],[3]]])]
+ assert_array_equal(res,desired)
+ def check_2D_array(self):
+ a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
+ res=map(atleast_3d,[a,b])
+ desired = [a[:,:,NewAxis],b[:,:,NewAxis]]
+ assert_array_equal(res,desired)
+ def check_3D_array(self):
+ a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
+ a = array([a,a]);b = array([b,b]);
+ res=map(atleast_3d,[a,b])
+ desired = [a,b]
+ assert_array_equal(res,desired)
+
+class test_hstack(ScipyTestCase):
+ def check_0D_array(self):
+ a = array(1); b = array(2);
+ res=hstack([a,b])
+ desired = array([1,2])
+ assert_array_equal(res,desired)
+ def check_1D_array(self):
+ a = array([1]); b = array([2]);
+ res=hstack([a,b])
+ desired = array([1,2])
+ assert_array_equal(res,desired)
+ def check_2D_array(self):
+ a = array([[1],[2]]); b = array([[1],[2]]);
+ res=hstack([a,b])
+ desired = array([[1,1],[2,2]])
+ assert_array_equal(res,desired)
+
+class test_vstack(ScipyTestCase):
+ def check_0D_array(self):
+ a = array(1); b = array(2);
+ res=vstack([a,b])
+ desired = array([[1],[2]])
+ assert_array_equal(res,desired)
+ def check_1D_array(self):
+ a = array([1]); b = array([2]);
+ res=vstack([a,b])
+ desired = array([[1],[2]])
+ assert_array_equal(res,desired)
+ def check_2D_array(self):
+ a = array([[1],[2]]); b = array([[1],[2]]);
+ res=vstack([a,b])
+ desired = array([[1],[2],[1],[2]])
+ assert_array_equal(res,desired)
+ def check_2D_array2(self):
+ a = array([1,2]); b = array([1,2]);
+ res=vstack([a,b])
+ desired = array([[1,2],[1,2]])
+ assert_array_equal(res,desired)
+
+class test_dstack(ScipyTestCase):
+ def check_0D_array(self):
+ a = array(1); b = array(2);
+ res=dstack([a,b])
+ desired = array([[[1,2]]])
+ assert_array_equal(res,desired)
+ def check_1D_array(self):
+ a = array([1]); b = array([2]);
+ res=dstack([a,b])
+ desired = array([[[1,2]]])
+ assert_array_equal(res,desired)
+ def check_2D_array(self):
+ a = array([[1],[2]]); b = array([[1],[2]]);
+ res=dstack([a,b])
+ desired = array([[[1,1]],[[2,2,]]])
+ assert_array_equal(res,desired)
+ def check_2D_array2(self):
+ a = array([1,2]); b = array([1,2]);
+ res=dstack([a,b])
+ desired = array([[[1,1],[2,2]]])
+ assert_array_equal(res,desired)
+
+""" array_split has more comprehensive test of splitting.
+ only do simple test on hsplit, vsplit, and dsplit
+"""
+class test_hsplit(ScipyTestCase):
+ """ only testing for integer splits.
+ """
+ def check_0D_array(self):
+ a= array(1)
+ try:
+ hsplit(a,2)
+ assert(0)
+ except ValueError:
+ pass
+ def check_1D_array(self):
+ a= array([1,2,3,4])
+ res = hsplit(a,2)
+ desired = [array([1,2]),array([3,4])]
+ compare_results(res,desired)
+ def check_2D_array(self):
+ a= array([[1,2,3,4],
+ [1,2,3,4]])
+ res = hsplit(a,2)
+ desired = [array([[1,2],[1,2]]),array([[3,4],[3,4]])]
+ compare_results(res,desired)
+
+class test_vsplit(ScipyTestCase):
+ """ only testing for integer splits.
+ """
+ def check_1D_array(self):
+ a= array([1,2,3,4])
+ try:
+ vsplit(a,2)
+ assert(0)
+ except ValueError:
+ pass
+ def check_2D_array(self):
+ a= array([[1,2,3,4],
+ [1,2,3,4]])
+ res = vsplit(a,2)
+ desired = [array([[1,2,3,4]]),array([[1,2,3,4]])]
+ compare_results(res,desired)
+
+class test_dsplit(ScipyTestCase):
+ """ only testing for integer splits.
+ """
+ def check_2D_array(self):
+ a= array([[1,2,3,4],
+ [1,2,3,4]])
+ try:
+ dsplit(a,2)
+ assert(0)
+ except ValueError:
+ pass
+ def check_3D_array(self):
+ a= array([[[1,2,3,4],
+ [1,2,3,4]],
+ [[1,2,3,4],
+ [1,2,3,4]]])
+ res = dsplit(a,2)
+ desired = [array([[[1,2],[1,2]],[[1,2],[1,2]]]),
+ array([[[3,4],[3,4]],[[3,4],[3,4]]])]
+ compare_results(res,desired)
+
+class test_squeeze(ScipyTestCase):
+ def check_basic(self):
+ a = rand(20,10,10,1,1)
+ b = rand(20,1,10,1,20)
+ c = rand(1,1,20,10)
+ assert_array_equal(squeeze(a),reshape(a,(20,10,10)))
+ assert_array_equal(squeeze(b),reshape(b,(20,10,20)))
+ assert_array_equal(squeeze(c),reshape(c,(20,10)))
+
+# Utility
+
+def compare_results(res,desired):
+ for i in range(len(desired)):
+ assert_array_equal(res[i],desired[i])
+
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/base/tests/test_twodim_base.py b/numpy/base/tests/test_twodim_base.py
new file mode 100644
index 000000000..b061d4a5d
--- /dev/null
+++ b/numpy/base/tests/test_twodim_base.py
@@ -0,0 +1,134 @@
+""" Test functions for matrix module
+
+"""
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;reload(scipy.base)
+from scipy.base import *
+restore_path()
+
+##################################################
+
+
+def get_mat(n):
+ data = arange(n)
+ data = add.outer(data,data)
+ return data
+
+class test_eye(ScipyTestCase):
+ def check_basic(self):
+ assert_equal(eye(4),array([[1,0,0,0],
+ [0,1,0,0],
+ [0,0,1,0],
+ [0,0,0,1]]))
+ assert_equal(eye(4,dtype='f'),array([[1,0,0,0],
+ [0,1,0,0],
+ [0,0,1,0],
+ [0,0,0,1]],'f'))
+ def check_diag(self):
+ assert_equal(eye(4,k=1),array([[0,1,0,0],
+ [0,0,1,0],
+ [0,0,0,1],
+ [0,0,0,0]]))
+ assert_equal(eye(4,k=-1),array([[0,0,0,0],
+ [1,0,0,0],
+ [0,1,0,0],
+ [0,0,1,0]]))
+ def check_2d(self):
+ assert_equal(eye(4,3),array([[1,0,0],
+ [0,1,0],
+ [0,0,1],
+ [0,0,0]]))
+ assert_equal(eye(3,4),array([[1,0,0,0],
+ [0,1,0,0],
+ [0,0,1,0]]))
+ def check_diag2d(self):
+ assert_equal(eye(3,4,k=2),array([[0,0,1,0],
+ [0,0,0,1],
+ [0,0,0,0]]))
+ assert_equal(eye(4,3,k=-2),array([[0,0,0],
+ [0,0,0],
+ [1,0,0],
+ [0,1,0]]))
+
+class test_diag(ScipyTestCase):
+ def check_vector(self):
+ vals = (100*arange(5)).astype('l')
+ b = zeros((5,5))
+ for k in range(5):
+ b[k,k] = vals[k]
+ assert_equal(diag(vals),b)
+ b = zeros((7,7))
+ c = b.copy()
+ for k in range(5):
+ b[k,k+2] = vals[k]
+ c[k+2,k] = vals[k]
+ assert_equal(diag(vals,k=2), b)
+ assert_equal(diag(vals,k=-2), c)
+
+ def check_matrix(self):
+ vals = (100*get_mat(5)+1).astype('l')
+ b = zeros((5,))
+ for k in range(5):
+ b[k] = vals[k,k]
+ assert_equal(diag(vals),b)
+ b = b*0
+ for k in range(3):
+ b[k] = vals[k,k+2]
+ assert_equal(diag(vals,2),b[:3])
+ for k in range(3):
+ b[k] = vals[k+2,k]
+ assert_equal(diag(vals,-2),b[:3])
+
+class test_fliplr(ScipyTestCase):
+ def check_basic(self):
+ self.failUnlessRaises(ValueError, fliplr, ones(4))
+ a = get_mat(4)
+ b = a[:,::-1]
+ assert_equal(fliplr(a),b)
+ a = [[0,1,2],
+ [3,4,5]]
+ b = [[2,1,0],
+ [5,4,3]]
+ assert_equal(fliplr(a),b)
+
+class test_flipud(ScipyTestCase):
+ def check_basic(self):
+ a = get_mat(4)
+ b = a[::-1,:]
+ assert_equal(flipud(a),b)
+ a = [[0,1,2],
+ [3,4,5]]
+ b = [[3,4,5],
+ [0,1,2]]
+ assert_equal(flipud(a),b)
+
+class test_rot90(ScipyTestCase):
+ def check_basic(self):
+ self.failUnlessRaises(ValueError, rot90, ones(4))
+
+ a = [[0,1,2],
+ [3,4,5]]
+ b1 = [[2,5],
+ [1,4],
+ [0,3]]
+ b2 = [[5,4,3],
+ [2,1,0]]
+ b3 = [[3,0],
+ [4,1],
+ [5,2]]
+ b4 = [[0,1,2],
+ [3,4,5]]
+
+ for k in range(-3,13,4):
+ assert_equal(rot90(a,k=k),b1)
+ for k in range(-2,13,4):
+ assert_equal(rot90(a,k=k),b2)
+ for k in range(-1,13,4):
+ assert_equal(rot90(a,k=k),b3)
+ for k in range(0,13,4):
+ assert_equal(rot90(a,k=k),b4)
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/base/tests/test_type_check.py b/numpy/base/tests/test_type_check.py
new file mode 100644
index 000000000..aac24bd6e
--- /dev/null
+++ b/numpy/base/tests/test_type_check.py
@@ -0,0 +1,238 @@
+
+import sys
+
+from scipy.testing import *
+set_package_path()
+import scipy.base;reload(scipy.base);reload(scipy.base.type_check)
+from scipy.base import *
+restore_path()
+
+def assert_all(x):
+    # Fail (with x as the message) unless every element of x is true.
+    assert(all(x)), x
+
+class test_mintypecode(ScipyTestCase):
+    # mintypecode() picks the smallest typecode from 'GDFgdf' able to hold
+    # every input code; anything outside that set falls back to 'd'.
+
+    def check_default_1(self):
+        # single non-float codes all map to the default 'd'
+        for itype in '1bcsuwil':
+            assert_equal(mintypecode(itype),'d')
+        assert_equal(mintypecode('f'),'f')
+        assert_equal(mintypecode('d'),'d')
+        assert_equal(mintypecode('F'),'F')
+        assert_equal(mintypecode('D'),'D')
+
+    def check_default_2(self):
+        # pairs: the non-float member is ignored; mixing single-precision
+        # complex 'F' with double 'd' promotes to double complex 'D'
+        for itype in '1bcsuwil':
+            assert_equal(mintypecode(itype+'f'),'f')
+            assert_equal(mintypecode(itype+'d'),'d')
+            assert_equal(mintypecode(itype+'F'),'F')
+            assert_equal(mintypecode(itype+'D'),'D')
+        assert_equal(mintypecode('ff'),'f')
+        assert_equal(mintypecode('fd'),'d')
+        assert_equal(mintypecode('fF'),'F')
+        assert_equal(mintypecode('fD'),'D')
+        assert_equal(mintypecode('df'),'d')
+        assert_equal(mintypecode('dd'),'d')
+        #assert_equal(mintypecode('dF',savespace=1),'F')
+        assert_equal(mintypecode('dF'),'D')
+        assert_equal(mintypecode('dD'),'D')
+        assert_equal(mintypecode('Ff'),'F')
+        #assert_equal(mintypecode('Fd',savespace=1),'F')
+        assert_equal(mintypecode('Fd'),'D')
+        assert_equal(mintypecode('FF'),'F')
+        assert_equal(mintypecode('FD'),'D')
+        assert_equal(mintypecode('Df'),'D')
+        assert_equal(mintypecode('Dd'),'D')
+        assert_equal(mintypecode('DF'),'D')
+        assert_equal(mintypecode('DD'),'D')
+
+    def check_default_3(self):
+        assert_equal(mintypecode('fdF'),'D')
+        #assert_equal(mintypecode('fdF',savespace=1),'F')
+        assert_equal(mintypecode('fdD'),'D')
+        assert_equal(mintypecode('fFD'),'D')
+        assert_equal(mintypecode('dFD'),'D')
+
+        assert_equal(mintypecode('ifd'),'d')
+        assert_equal(mintypecode('ifF'),'F')
+        assert_equal(mintypecode('ifD'),'D')
+        assert_equal(mintypecode('idF'),'D')
+        #assert_equal(mintypecode('idF',savespace=1),'F')
+        assert_equal(mintypecode('idD'),'D')
+
+class test_isscalar(ScipyTestCase):
+    # Scalars: numbers (including complex and Python 2 long); containers
+    # such as lists and tuples are not scalar.
+    def check_basic(self):
+        assert(isscalar(3))
+        assert(not isscalar([3]))
+        assert(not isscalar((3,)))
+        assert(isscalar(3j))
+        assert(isscalar(10L))  # Python 2 long literal
+        assert(isscalar(4.0))
+
+class test_real(ScipyTestCase):
+    # real() is the identity on real input and .real on complex input.
+    def check_real(self):
+        y = rand(10,)
+        assert_array_equal(y,real(y))
+
+    def check_cmplx(self):
+        y = rand(10,)+1j*rand(10,)
+        assert_array_equal(y.real,real(y))
+
+class test_imag(ScipyTestCase):
+    # imag() is zero on real input and .imag on complex input.
+    def check_real(self):
+        y = rand(10,)
+        assert_array_equal(0,imag(y))
+
+    def check_cmplx(self):
+        y = rand(10,)+1j*rand(10,)
+        assert_array_equal(y.imag,imag(y))
+
+class test_iscomplex(ScipyTestCase):
+    # iscomplex() is elementwise: true only where the imaginary part is
+    # nonzero (so a complex dtype with zero imag parts tests false).
+    def check_fail(self):
+        z = array([-1,0,1])
+        res = iscomplex(z)
+        assert(not sometrue(res))
+    def check_pass(self):
+        z = array([-1j,1,0])
+        res = iscomplex(z)
+        assert_array_equal(res,[1,0,0])
+
+class test_isreal(ScipyTestCase):
+    # isreal() is the elementwise complement of iscomplex().
+    def check_pass(self):
+        z = array([-1,0,1j])
+        res = isreal(z)
+        assert_array_equal(res,[1,1,0])
+    def check_fail(self):
+        z = array([-1j,1,0])
+        res = isreal(z)
+        assert_array_equal(res,[0,1,1])
+
+class test_iscomplexobj(ScipyTestCase):
+    # iscomplexobj() checks the dtype, not the values.
+    def check_basic(self):
+        z = array([-1,0,1])
+        assert(not iscomplexobj(z))
+        z = array([-1j,0,-1])
+        assert(iscomplexobj(z))
+
+class test_isrealobj(ScipyTestCase):
+    # isrealobj() is the dtype-level complement of iscomplexobj().
+    def check_basic(self):
+        z = array([-1,0,1])
+        assert(isrealobj(z))
+        z = array([-1j,0,-1])
+        assert(not isrealobj(z))
+
+class test_isnan(ScipyTestCase):
+    # isnan(): true only for NaN (0/0 here), false for finite values,
+    # infinities, and integer/complex non-NaN inputs.
+    def check_goodvalues(self):
+        z = array((-1.,0.,1.))
+        res = isnan(z) == 0
+        assert_all(alltrue(res))
+    def check_posinf(self):
+        assert_all(isnan(array((1.,))/0.) == 0)
+    def check_neginf(self):
+        assert_all(isnan(array((-1.,))/0.) == 0)
+    def check_ind(self):
+        assert_all(isnan(array((0.,))/0.) == 1)
+    #def check_qnan(self):  log(-1) return pi*j now
+    #    assert_all(isnan(log(-1.)) == 1)
+    def check_integer(self):
+        assert_all(isnan(1) == 0)
+    def check_complex(self):
+        assert_all(isnan(1+1j) == 0)
+    def check_complex1(self):
+        assert_all(isnan(array(0+0j)/0.) == 1)
+
+class test_isfinite(ScipyTestCase):
+    # isfinite(): false for +/-Inf and NaN, true otherwise.
+    def check_goodvalues(self):
+        z = array((-1.,0.,1.))
+        res = isfinite(z) == 1
+        assert_all(alltrue(res))
+    def check_posinf(self):
+        assert_all(isfinite(array((1.,))/0.) == 0)
+    def check_neginf(self):
+        assert_all(isfinite(array((-1.,))/0.) == 0)
+    def check_ind(self):
+        assert_all(isfinite(array((0.,))/0.) == 0)
+    #def check_qnan(self):
+    #    assert_all(isfinite(log(-1.)) == 0)
+    def check_integer(self):
+        assert_all(isfinite(1) == 1)
+    def check_complex(self):
+        assert_all(isfinite(1+1j) == 1)
+    def check_complex1(self):
+        assert_all(isfinite(array(1+1j)/0.) == 0)
+
+class test_isinf(ScipyTestCase):
+    # isinf(): true for +/-Inf (arrays and 0-d scalars), false for finite
+    # values and for NaN (0/0).
+    def check_goodvalues(self):
+        z = array((-1.,0.,1.))
+        res = isinf(z) == 0
+        assert_all(alltrue(res))
+    def check_posinf(self):
+        assert_all(isinf(array((1.,))/0.) == 1)
+    def check_posinf_scalar(self):
+        assert_all(isinf(array(1.,)/0.) == 1)
+    def check_neginf(self):
+        assert_all(isinf(array((-1.,))/0.) == 1)
+    def check_neginf_scalar(self):
+        assert_all(isinf(array(-1.)/0.) == 1)
+    def check_ind(self):
+        assert_all(isinf(array((0.,))/0.) == 0)
+    #def check_qnan(self):
+    #    assert_all(isinf(log(-1.)) == 0)
+    #    assert_all(isnan(log(-1.)) == 1)
+
+class test_isposinf(ScipyTestCase):
+    # isposinf(): only +Inf is flagged; -Inf and NaN are not.
+    def check_generic(self):
+        vals = isposinf(array((-1.,0,1))/0.)
+        assert(vals[0] == 0)
+        assert(vals[1] == 0)
+        assert(vals[2] == 1)
+
+class test_isneginf(ScipyTestCase):
+    # isneginf(): only -Inf is flagged; +Inf and NaN are not.
+    def check_generic(self):
+        vals = isneginf(array((-1.,0,1))/0.)
+        assert(vals[0] == 1)
+        assert(vals[1] == 0)
+        assert(vals[2] == 0)
+
+class test_nan_to_num(ScipyTestCase):
+ def check_generic(self):
+ vals = nan_to_num(array((-1.,0,1))/0.)
+ assert_all(vals[0] < -1e10) and assert_all(isfinite(vals[0]))
+ assert(vals[1] == 0)
+ assert_all(vals[2] > 1e10) and assert_all(isfinite(vals[2]))
+ def check_integer(self):
+ vals = nan_to_num(1)
+ assert_all(vals == 1)
+ def check_complex_good(self):
+ vals = nan_to_num(1+1j)
+ assert_all(vals == 1+1j)
+ def check_complex_bad(self):
+ v = 1+1j
+ v += array(0+1.j)/0.
+ vals = nan_to_num(v)
+ # !! This is actually (unexpectedly) zero
+ assert_all(isfinite(vals))
+ def check_complex_bad2(self):
+ v = 1+1j
+ v += array(-1+1.j)/0.
+ vals = nan_to_num(v)
+ assert_all(isfinite(vals))
+ #assert_all(vals.imag > 1e10) and assert_all(isfinite(vals))
+ # !! This is actually (unexpectedly) positive
+ # !! inf. Comment out for now, and see if it
+ # !! changes
+ #assert_all(vals.real < -1e10) and assert_all(isfinite(vals))
+
+
+class test_real_if_close(ScipyTestCase):
+    # real_if_close(): drop a tiny imaginary part (within tol*eps); keep
+    # the complex dtype when the imaginary part exceeds the tolerance.
+    def check_basic(self):
+        a = rand(10)
+        b = real_if_close(a+1e-15j)
+        assert_all(isrealobj(b))
+        assert_array_equal(a,b)
+        b = real_if_close(a+1e-7j)
+        assert_all(iscomplexobj(b))
+        b = real_if_close(a+1e-7j,tol=1e-6)
+        assert_all(isrealobj(b))
+
+# Allow running this test module directly.
+if __name__ == "__main__":
+    ScipyTest().run()
diff --git a/numpy/base/tests/test_ufunclike.py b/numpy/base/tests/test_ufunclike.py
new file mode 100644
index 000000000..ca06140c7
--- /dev/null
+++ b/numpy/base/tests/test_ufunclike.py
@@ -0,0 +1,63 @@
+"""
+>>> import scipy.base as nx
+>>> import scipy.base.ufunclike as U
+
+Test fix:
+>>> a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
+>>> U.fix(a)
+array([[ 1., 1., 1., 1.],
+ [ 0., -1., -1., -1.]])
+>>> y = nx.zeros(a.shape, float)
+>>> U.fix(a, y)
+array([[ 1., 1., 1., 1.],
+ [ 0., -1., -1., -1.]])
+>>> y
+array([[ 1., 1., 1., 1.],
+ [ 0., -1., -1., -1.]])
+
+Test isposinf, isneginf, sign
+>>> a = nx.array([nx.Inf, -nx.Inf, nx.NaN, 0.0, 3.0, -3.0])
+>>> U.isposinf(a)
+array([True, False, False, False, False, False], dtype=bool)
+>>> U.isneginf(a)
+array([False, True, False, False, False, False], dtype=bool)
+>>> U.sign(a)
+array([ 1, -1, 0, 0, 1, -1])
+
+Same thing with an output array:
+>>> y = nx.zeros(a.shape, bool)
+>>> U.isposinf(a, y)
+array([True, False, False, False, False, False], dtype=bool)
+>>> y
+array([True, False, False, False, False, False], dtype=bool)
+>>> U.isneginf(a, y)
+array([False, True, False, False, False, False], dtype=bool)
+>>> y
+array([False, True, False, False, False, False], dtype=bool)
+>>> U.sign(a, y)
+array([True, True, False, False, True, True], dtype=bool)
+>>> y
+array([True, True, False, False, True, True], dtype=bool)
+
+Now log2:
+>>> a = nx.array([4.5, 2.3, 6.5])
+>>> U.log2(a)
+array([ 2.169925 , 1.20163386, 2.70043972])
+>>> 2**_
+array([ 4.5, 2.3, 6.5])
+>>> y = nx.zeros(a.shape, float)
+>>> U.log2(a, y)
+array([ 2.169925 , 1.20163386, 2.70043972])
+>>> y
+array([ 2.169925 , 1.20163386, 2.70043972])
+
+"""
+
+from scipy.testing import *
+
+import doctest
+def test_suite(level=1):
+    # Collect the doctests in this module's docstring as the test suite;
+    # `level` is accepted for the scipy.testing interface but unused here.
+    return doctest.DocTestSuite()
+
+# Allow running this test module directly.
+if __name__ == "__main__":
+    ScipyTest().run()
diff --git a/numpy/base/tests/test_umath.py b/numpy/base/tests/test_umath.py
new file mode 100644
index 000000000..9cd99f7e1
--- /dev/null
+++ b/numpy/base/tests/test_umath.py
@@ -0,0 +1,18 @@
+
+from scipy.testing import *
+set_package_path()
+from scipy.base.umath import minimum, maximum
+restore_path()
+
+
+class test_maximum(ScipyTestCase):
+    # maximum.reduce over complex operands: 1 > 2j and 1+3j > 2j here,
+    # pinning the comparison ordering used for complex numbers.
+    def check_reduce_complex(self):
+        assert_equal(maximum.reduce([1,2j]),1)
+        assert_equal(maximum.reduce([1+3j,2j]),1+3j)
+
+class test_minimum(ScipyTestCase):
+    # minimum.reduce over complex operands: 2j < 1 under the same ordering.
+    def check_reduce_complex(self):
+        assert_equal(minimum.reduce([1,2j]),2j)
+
+# Allow running this test module directly.
+if __name__ == "__main__":
+    ScipyTest().run()
diff --git a/numpy/base/tests/testdata.fits b/numpy/base/tests/testdata.fits
new file mode 100644
index 000000000..ca48ee851
--- /dev/null
+++ b/numpy/base/tests/testdata.fits
Binary files differ
diff --git a/numpy/base/twodim_base.py b/numpy/base/twodim_base.py
new file mode 100644
index 000000000..b21532ea6
--- /dev/null
+++ b/numpy/base/twodim_base.py
@@ -0,0 +1,123 @@
+""" Basic functions for manipulating 2d arrays
+
+"""
+
+__all__ = ['diag','eye','fliplr','flipud','rot90','tri','triu','tril',
+ 'vander']
+
+from numeric import *
+import sys
+
+def fliplr(m):
+    """ returns an array m with the rows preserved and columns flipped
+        in the left/right direction.  Works on the first two dimensions of m.
+
+        Raises ValueError for input with fewer than 2 dimensions.
+    """
+    m = asarray(m)
+    if m.ndim < 2:
+        raise ValueError, "Input must be >= 2-d."
+    # reversed slice along axis 1 -- no data is copied
+    return m[:, ::-1]
+
+def flipud(m):
+    """ returns an array with the columns preserved and rows flipped in
+        the up/down direction.  Works on the first dimension of m.
+
+        Raises ValueError for 0-d input.
+    """
+    m = asarray(m)
+    if m.ndim < 1:
+        raise ValueError, "Input must be >= 1-d."
+    # reversed slice along axis 0 -- no data is copied
+    return m[::-1]
+
+def rot90(m, k=1):
+    """ returns the array found by rotating m by k*90
+    degrees in the counterclockwise direction.  Works on the first two
+    dimensions of m.
+    """
+    m = asarray(m)
+    if m.ndim < 2:
+        raise ValueError, "Input must >= 2-d."
+    k = k % 4     # normalize to one of the four distinct rotations
+    if k == 0: return m
+    # NOTE(review): .transpose() with no arguments reverses *all* axes, so
+    # for ndim > 2 the k==1 and k==3 cases touch more than the first two
+    # dimensions, contradicting the docstring -- confirm intended.
+    elif k == 1: return fliplr(m).transpose()
+    elif k == 2: return fliplr(flipud(m))
+    else: return fliplr(m.transpose())  # k==3
+
+def eye(N, M=None, k=0, dtype=int_):
+    """ eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
+        and everything else is zeros.  k > 0 selects a superdiagonal,
+        k < 0 a subdiagonal.  M defaults to N (square result).
+    """
+    if M is None: M = N
+    # element (i,j) is on diagonal k exactly when i - j == -k
+    m = equal(subtract.outer(arange(N), arange(M)),-k)
+    return m.astype(dtype)
+
+def diag(v, k=0):
+    """ returns the k-th diagonal if v is a 2-d array, or returns a 2-d
+        array with v as the k-th diagonal if v is a 1-d array.
+
+        Raises ValueError for input that is neither 1- nor 2-d.
+    """
+    v = asarray(v)
+    s = v.shape
+    if len(s)==1:
+        # build an n x n matrix large enough to hold v on diagonal k
+        n = s[0]+abs(k)
+        res = zeros((n,n), v.dtype)
+        if (k>=0):
+            # flat (row-major) index of element (i, i+k)
+            i = arange(0,n-k)
+            fi = i+k+i*n
+        else:
+            # flat index of element (i-k, i)
+            i = arange(0,n+k)
+            fi = i+(i-k)*n
+        res.flat[fi] = v
+        return res
+    elif len(s)==2:
+        # extract diagonal k; M is its length for an N1 x N2 matrix
+        N1,N2 = s
+        if k >= 0:
+            M = min(N1,N2-k)
+            i = arange(0,M)
+            fi = i+k+i*N2
+        else:
+            M = min(N1+k,N2)
+            i = arange(0,M)
+            fi = i + (i-k)*N2
+        return v.flat[fi]
+    else:
+        raise ValueError, "Input must be 1- or 2-d."
+
+
+def tri(N, M=None, k=0, dtype=int_):
+    """ returns a N-by-M array where all the diagonals starting from
+        lower left corner up to the k-th are all ones.  M defaults to N.
+    """
+    if M is None: M = N
+    # element (i,j) is on or below diagonal k exactly when i - j >= -k
+    m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
+    return m.astype(dtype)
+
+def tril(m, k=0):
+    """ returns the elements on and below the k-th diagonal of m.  k=0 is the
+        main diagonal, k > 0 is above and k < 0 is below the main diagonal.
+    """
+    m = asarray(m)
+    # multiply by a lower-triangular mask of ones; entries above diagonal
+    # k become zero
+    out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype),m)
+    return out
+
+def triu(m, k=0):
+    """ returns the elements on and above the k-th diagonal of m.  k=0 is the
+        main diagonal, k > 0 is above and k < 0 is below the main diagonal.
+    """
+    m = asarray(m)
+    # complement of the tril mask shifted down by one (k-1): keeps entries
+    # on and above diagonal k
+    out = multiply((1-tri(m.shape[0], m.shape[1], k-1, m.dtype)),m)
+    return out
+
+
+# borrowed from John Hunter and matplotlib
+def vander(x, N=None):
+ """
+ X = vander(x,N=None)
+
+ The Vandermonde matrix of vector x. The i-th column of X is the
+ the i-th power of x. N is the maximum power to compute; if N is
+ None it defaults to len(x).
+
+ """
+ x = asarray(x)
+ if N is None: N=len(x)
+ X = ones( (len(x),N), x.dtypechar)
+ for i in range(N-1):
+ X[:,i] = x**(N-i-1)
+ return X
diff --git a/numpy/base/type_check.py b/numpy/base/type_check.py
new file mode 100644
index 000000000..4c802ca86
--- /dev/null
+++ b/numpy/base/type_check.py
@@ -0,0 +1,180 @@
+## Automatically adapted for scipy Sep 19, 2005 by convertcode.py
+
+__all__ = ['iscomplexobj','isrealobj','imag','iscomplex',
+ 'isscalar',
+ 'isreal','nan_to_num','real','real_if_close',
+ 'typename','asfarray','mintypecode','asscalar',
+ 'common_type']
+
+import numeric as _nx
+from numeric import ndarray, asarray, array, isinf, isnan, isfinite, signbit, \
+ ufunc, ScalarType, obj2dtype
+from ufunclike import isneginf, isposinf
+import umath
+
+_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
+
+def mintypecode(typechars,typeset='GDFgdf',default='d'):
+    """ Return a minimum data type character from typeset that
+    handles all typechars given
+
+    The returned type character must be the smallest size such that
+    an array of the returned type can handle the data from an array of
+    type t for each t in typechars (or if typechars is an array,
+    then its dtypechar).
+
+    If the typechars does not intersect with the typeset, then default
+    is returned.
+
+    If t in typechars is not a string then t=asarray(t).dtypechar is
+    applied.
+    """
+    # normalize each entry to a single typecode character
+    typecodes = [(type(t) is type('') and t) or asarray(t).dtypechar\
+                 for t in typechars]
+    intersection = [t for t in typecodes if t in typeset]
+    if not intersection:
+        return default
+    # single complex + double real cannot fit in either: promote to 'D'
+    if 'F' in intersection and 'd' in intersection:
+        return 'D'
+    l = []
+    for t in intersection:
+        i = _typecodes_by_elsize.index(t)
+        l.append((i,t))
+    l.sort()
+    # _typecodes_by_elsize is ordered largest element first, so the
+    # smallest index -- l[0] -- is the widest type present
+    return l[0][1]
+
+def asfarray(a, dtype=_nx.float_):
+    """asfarray(a,dtype=None) returns a as a float array.
+
+    Non-inexact dtype requests (e.g. integers) are coerced to float_.
+    """
+    dtype = _nx.obj2dtype(dtype)
+    if not issubclass(dtype, _nx.inexact):
+        dtype = _nx.float_
+    a = asarray(a,dtype=dtype)
+    return a
+
+def isscalar(num):
+    # True for array scalars (generic subclasses) and for the Python
+    # scalar types listed in ScalarType.
+    if isinstance(num, _nx.generic):
+        return True
+    else:
+        return type(num) in ScalarType
+
+def real(val):
+    # Real part of val (array-likes accepted via asarray).
+    return asarray(val).real
+
+def imag(val):
+    # Imaginary part of val (zero for real dtypes).
+    return asarray(val).imag
+
+def iscomplex(x):
+    # Elementwise: true where the imaginary part is nonzero.
+    return imag(x) != _nx.zeros_like(x)
+
+def isreal(x):
+    # Elementwise: true where the imaginary part is zero.
+    return imag(x) == _nx.zeros_like(x)
+
+def iscomplexobj(x):
+    # Dtype-level test: does x have a complex floating dtype?
+    return issubclass( asarray(x).dtype, _nx.complexfloating)
+
+def isrealobj(x):
+    # Dtype-level complement of iscomplexobj.
+    return not issubclass( asarray(x).dtype, _nx.complexfloating)
+
+#-----------------------------------------------------------------------------
+
+def _getmaxmin(t):
+    # Largest and smallest representable finite values for float type t.
+    import getlimits
+    f = getlimits.finfo(t)
+    return f.max, f.min
+
+def nan_to_num(x):
+    """Replace non-finite values: NaN -> 0, +Inf -> largest finite value,
+    -Inf -> smallest finite value of the array's float type.  Scalars in,
+    scalars out."""
+    # mapping:
+    #    NaN -> 0
+    #    Inf -> limits.double_max
+    #   -Inf -> limits.double_min
+    try:
+        t = x.dtype
+    except AttributeError:
+        t = obj2dtype(type(x))
+    if issubclass(t, _nx.complexfloating):
+        # handle real and imaginary parts independently
+        y = nan_to_num(x.real) + 1j * nan_to_num(x.imag)
+        # NOTE(review): execution falls through to the isposinf/isneginf
+        # pass below on this already-cleaned complex array -- confirm that
+        # is intended (signbit-based tests on complex data).
+    elif issubclass(t, _nx.integer):
+        # integers cannot hold NaN/Inf; copy is returned unchanged below
+        y = array(x)
+    else:
+        y = array(x)
+    if not y.shape:
+        # 0-d input: promote to 1-d so boolean indexing works, unwrap at end
+        y = array([x])
+        scalar = True
+    else:
+        scalar = False
+    are_inf = isposinf(y)
+    are_neg_inf = isneginf(y)
+    are_nan = isnan(y)
+    maxf, minf = _getmaxmin(y.dtype)
+    y[are_nan] = 0
+    y[are_inf] = maxf
+    y[are_neg_inf] = minf
+    if scalar:
+        y = y[0]
+    return y
+
+#-----------------------------------------------------------------------------
+
+def real_if_close(a,tol=100):
+    """Return a.real when a is complex and all imaginary parts are within
+    tolerance of zero; otherwise return a unchanged.  A tol > 1 is taken
+    as a multiple of the float type's machine epsilon."""
+    a = asarray(a)
+    if a.dtypechar not in 'FDG':
+        return a          # already real
+    if tol > 1:
+        import getlimits
+        f = getlimits.finfo(a.dtype)
+        tol = f.eps * tol
+    if _nx.allclose(a.imag, 0, atol=tol):
+        a = a.real
+    return a
+
+
+def asscalar(a):
+    # Convert a size-1 array to the equivalent Python scalar.
+    return a.item()
+
+#-----------------------------------------------------------------------------
+
+# Map from single-character dtype codes to human-readable English names,
+# used by typename() below.
+_namefromtype = {'S1' : 'character',
+                 '?' : 'bool',
+                 'b' : 'signed char',
+                 'B' : 'unsigned char',
+                 'h' : 'short',
+                 'H' : 'unsigned short',
+                 'i' : 'integer',
+                 'I' : 'unsigned integer',
+                 'l' : 'long integer',
+                 'L' : 'unsigned long integer',
+                 'q' : 'long long integer',
+                 'Q' : 'unsigned long long integer',
+                 'f' : 'single precision',
+                 'd' : 'double precision',
+                 'g' : 'long precision',
+                 'F' : 'complex single precision',
+                 'D' : 'complex double precision',
+                 'G' : 'complex long double precision',
+                 'S' : 'string',
+                 'U' : 'unicode',
+                 'V' : 'void',
+                 'O' : 'object'
+                 }
+
+def typename(char):
+    """Return an english description for the given data type character.
+
+    Raises KeyError for an unknown character code.
+    """
+    return _namefromtype[char]
+
+#-----------------------------------------------------------------------------
+
+#determine the "minimum common type code" for a group of arrays.
+# kind: 0 = real, 1 = complex; precision: 0 = single, 1 = double, 2 = long
+array_kind = {'i':0, 'l': 0, 'f': 0, 'd': 0, 'g':0, 'F': 1, 'D': 1, 'G':1}
+array_precision = {'i': 1, 'l': 1,
+                   'f': 0, 'd': 1, 'g':2,
+                   'F': 0, 'D': 1, 'G':2}
+array_type = [['f', 'd', 'g'], ['F', 'D', 'G']]
+def common_type(*arrays):
+    """Return the typecode able to represent every input array: complex
+    if any input is complex, at the highest precision present."""
+    kind = 0
+    precision = 0
+    for a in arrays:
+        t = a.dtypechar
+        kind = max(kind, array_kind[t])
+        precision = max(precision, array_precision[t])
+    return array_type[kind][precision]
diff --git a/numpy/base/ufunclike.py b/numpy/base/ufunclike.py
new file mode 100644
index 000000000..7e8d44c7d
--- /dev/null
+++ b/numpy/base/ufunclike.py
@@ -0,0 +1,77 @@
+"""
+Module of functions that are like ufuncs in acting on arrays and optionally
+storing results in an output array.
+"""
+__all__ = ['fix', 'isneginf', 'isposinf', 'sign', 'log2']
+
+import numeric as nx
+from numeric import asarray, empty, empty_like, isinf, signbit, zeros
+import umath
+
+def fix(x, y=None):
+    """ Round x to nearest integer towards zero.
+
+    If y is an array, the result replaces the contents of y.
+    """
+    # NOTE(review): the floor-then-increment below adds 1 to *every*
+    # negative entry, including exact negative integers, so fix(-1.0)
+    # yields 0.0 rather than -1.0.  The doctest in test_ufunclike.py
+    # encodes this result -- confirm whether it is intended.
+    x = asarray(x)
+    if y is None:
+        y = nx.floor(x)
+    else:
+        nx.floor(x, y)
+    if x.ndim == 0:
+        if (x<0):
+            y += 1
+    else:
+        y[x<0] = y[x<0]+1
+    return y
+
+def isposinf(x, y=None):
+ """Return a boolean array y with y[i] True for x[i] = +Inf.
+
+ If y is an array, the result replaces the contents of y.
+ """
+ if y is None:
+ y = empty(x.shape, dtype=nx.bool_)
+ umath.logical_and(isinf(x), ~signbit(x), y)
+ return y
+
+def isneginf(x, y=None):
+ """Return a boolean array y with y[i] True for x[i] = -Inf.
+
+ If y is an array, the result replaces the contents of y.
+ """
+ if y is None:
+ y = empty(x.shape, dtype=nx.bool_)
+ umath.logical_and(isinf(x), signbit(x), y)
+ return y
+
+def sign(x, y=None):
+    """sign(x) gives an array with shape of x with elements defined by the
+    sign function: -1 where x < 0, +1 where x > 0, and 0 elsewhere.
+
+    If y is an array, the result replaces the contents of y.
+    """
+    x = asarray(x)
+    if y is None:
+        y = zeros(x.shape, dtype=nx.int_)
+    if x.ndim == 0:
+        # NOTE(review): for 0-d x with a caller-supplied y, this adjusts
+        # y's existing contents rather than overwriting them -- confirm
+        # that is intended.
+        if x < 0:
+            y -= 1
+        elif x > 0:
+            y += 1
+    else:
+        y[x<0] = -1
+        y[x>0] = 1
+    return y
+
+# natural log of 2, cached for the change-of-base below
+_log2 = umath.log(2)
+def log2(x, y=None):
+    """Returns the base 2 logarithm of x
+
+    If y is an array, the result replaces the contents of y.
+    """
+    x = asarray(x)
+    if y is None:
+        y = umath.log(x)
+    else:
+        umath.log(x, y)
+    # change of base: log2(x) = ln(x) / ln(2)
+    y /= _log2
+    return y
+
diff --git a/numpy/base/utils.py b/numpy/base/utils.py
new file mode 100644
index 000000000..19fb18d4d
--- /dev/null
+++ b/numpy/base/utils.py
@@ -0,0 +1,28 @@
+from numerictypes import obj2dtype
+
+__all__ = ['issubclass_', 'get_scipy_include', 'issubdtype']
+
+def issubclass_(arg1, arg2):
+    # Like issubclass, but returns False (instead of raising TypeError)
+    # when arg1 is not a class.
+    try:
+        return issubclass(arg1, arg2)
+    except TypeError:
+        return False
+
+def issubdtype(arg1, arg2):
+    # Subclass test after coercing both arguments to dtype objects.
+    # NOTE(review): uses the raising builtin issubclass, not the guarded
+    # issubclass_ above -- non-class results propagate TypeError.
+    return issubclass(obj2dtype(arg1), obj2dtype(arg2))
+
+def get_scipy_include():
+    """Return the directory in the package that contains the scipy/*.h header
+    files.
+
+    Extension modules that need to compile against scipy.base should use this
+    function to locate the appropriate include directory.  Using distutils:
+
+      import scipy
+      Extension('extension_name', ...
+                include_dirs=[scipy.get_scipy_include()])
+    """
+    from scipy.distutils.misc_util import get_scipy_include_dirs
+    include_dirs = get_scipy_include_dirs()
+    # exactly one include dir is expected; backticks are Python 2 repr()
+    assert len(include_dirs)==1,`include_dirs`
+    return include_dirs[0]
diff --git a/numpy/core_version.py b/numpy/core_version.py
new file mode 100644
index 000000000..bb9fdfaa4
--- /dev/null
+++ b/numpy/core_version.py
@@ -0,0 +1,12 @@
+version='0.9.2'
+
+# Append the subversion revision (written at build time into
+# base/__svn_version__.py) to the release version, when present.
+import os
+svn_version_file = os.path.join(os.path.dirname(__file__),
+                                'base','__svn_version__.py')
+if os.path.isfile(svn_version_file):
+    import imp
+    # NOTE(review): the file object passed to imp.load_module is never
+    # closed -- confirm acceptable for this one-shot import.
+    svn = imp.load_module('scipy.base.__svn_version__',
+                          open(svn_version_file),
+                          svn_version_file,
+                          ('.py','U',1))
+    version += '.'+svn.version
diff --git a/numpy/corefft/__init__.py b/numpy/corefft/__init__.py
new file mode 100644
index 000000000..0a72b86bd
--- /dev/null
+++ b/numpy/corefft/__init__.py
@@ -0,0 +1,22 @@
+# To get sub-modules
+from info import __doc__
+
+from fftpack import *
+from helper import *
+
+# re-define duplicated functions if full scipy installed.
+try:
+ import scipy.fftpack
+except ImportError:
+ pass
+else:
+ fft = scipy.fftpack.fft
+ ifft = scipy.fftpack.ifft
+ fftn = scipy.fftpack.fftn
+ ifftn = scipy.fftpack.ifftn
+ fft2 = scipy.fftpack.fft2
+ ifft2 = scipy.fftpack.ifft2
+
+
+from scipy.testing import ScipyTest
+test = ScipyTest().test
diff --git a/numpy/corefft/fftpack.c b/numpy/corefft/fftpack.c
new file mode 100644
index 000000000..3e5e7d2ed
--- /dev/null
+++ b/numpy/corefft/fftpack.c
@@ -0,0 +1,1501 @@
+/*
+fftpack.c : A set of FFT routines in C.
+Algorithmically based on Fortran-77 FFTPACK by Paul N. Swarztrauber (Version 4, 1985).
+
+*/
+
+/* isign is +1 for backward and -1 for forward transforms */
+
+#include <math.h>
+#include <stdio.h>
+#define DOUBLE
+
+#ifdef DOUBLE
+#define Treal double
+#else
+#define Treal float
+#endif
+
+
+#define ref(u,a) u[a]
+
+#define MAXFAC 13 /* maximum number of factors in factorization of n */
+#define NSPECIAL 4 /* number of factors for which we have special-case routines */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* ----------------------------------------------------------------------
+ passf2, passf3, passf4, passf5, passf. Complex FFT passes fwd and bwd.
+---------------------------------------------------------------------- */
+
+/* Radix-2 complex FFT pass: combines pairs of length-l1 sub-transforms
+   from cc into ch, applying twiddle factors wa1 in the ido > 2 case.
+   isign selects direction (+1 backward, -1 forward). */
+static void passf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[], int isign)
+  /* isign==+1 for backward transform */
+  {
+    int i, k, ah, ac;
+    Treal ti2, tr2;
+    if (ido <= 2) {
+      /* no twiddle multiplies needed: plain butterflies */
+      for (k=0; k<l1; k++) {
+        ah = k*ido;
+        ac = 2*k*ido;
+        ch[ah] = ref(cc,ac) + ref(cc,ac + ido);
+        ch[ah + ido*l1] = ref(cc,ac) - ref(cc,ac + ido);
+        ch[ah+1] = ref(cc,ac+1) + ref(cc,ac + ido + 1);
+        ch[ah + ido*l1 + 1] = ref(cc,ac+1) - ref(cc,ac + ido + 1);
+      }
+    } else {
+      for (k=0; k<l1; k++) {
+        for (i=0; i<ido-1; i+=2) {
+          ah = i + k*ido;
+          ac = i + 2*k*ido;
+          ch[ah] = ref(cc,ac) + ref(cc,ac + ido);
+          tr2 = ref(cc,ac) - ref(cc,ac + ido);
+          ch[ah+1] = ref(cc,ac+1) + ref(cc,ac + 1 + ido);
+          ti2 = ref(cc,ac+1) - ref(cc,ac + 1 + ido);
+          /* complex multiply of (tr2,ti2) by conjugated/unconjugated
+             twiddle depending on isign */
+          ch[ah+l1*ido+1] = wa1[i]*ti2 + isign*wa1[i+1]*tr2;
+          ch[ah+l1*ido] = wa1[i]*tr2 - isign*wa1[i+1]*ti2;
+        }
+      }
+    }
+  } /* passf2 */
+
+
+/* Radix-3 complex FFT pass.  taur/taui are cos(2*pi/3) and sin(2*pi/3);
+   isign selects direction (+1 backward, -1 forward). */
+static void passf3(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], int isign)
+  /* isign==+1 for backward transform */
+  {
+    static const Treal taur = -0.5;
+    static const Treal taui = 0.866025403784439;
+    int i, k, ac, ah;
+    Treal ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
+    if (ido == 2) {
+      /* single complex element per sub-transform: no twiddles */
+      for (k=1; k<=l1; k++) {
+        ac = (3*k - 2)*ido;
+        tr2 = ref(cc,ac) + ref(cc,ac + ido);
+        cr2 = ref(cc,ac - ido) + taur*tr2;
+        ah = (k - 1)*ido;
+        ch[ah] = ref(cc,ac - ido) + tr2;
+
+        ti2 = ref(cc,ac + 1) + ref(cc,ac + ido + 1);
+        ci2 = ref(cc,ac - ido + 1) + taur*ti2;
+        ch[ah + 1] = ref(cc,ac - ido + 1) + ti2;
+
+        cr3 = isign*taui*(ref(cc,ac) - ref(cc,ac + ido));
+        ci3 = isign*taui*(ref(cc,ac + 1) - ref(cc,ac + ido + 1));
+        ch[ah + l1*ido] = cr2 - ci3;
+        ch[ah + 2*l1*ido] = cr2 + ci3;
+        ch[ah + l1*ido + 1] = ci2 + cr3;
+        ch[ah + 2*l1*ido + 1] = ci2 - cr3;
+      }
+    } else {
+      for (k=1; k<=l1; k++) {
+        for (i=0; i<ido-1; i+=2) {
+          ac = i + (3*k - 2)*ido;
+          tr2 = ref(cc,ac) + ref(cc,ac + ido);
+          cr2 = ref(cc,ac - ido) + taur*tr2;
+          ah = i + (k-1)*ido;
+          ch[ah] = ref(cc,ac - ido) + tr2;
+          ti2 = ref(cc,ac + 1) + ref(cc,ac + ido + 1);
+          ci2 = ref(cc,ac - ido + 1) + taur*ti2;
+          ch[ah + 1] = ref(cc,ac - ido + 1) + ti2;
+          cr3 = isign*taui*(ref(cc,ac) - ref(cc,ac + ido));
+          ci3 = isign*taui*(ref(cc,ac + 1) - ref(cc,ac + ido + 1));
+          dr2 = cr2 - ci3;
+          dr3 = cr2 + ci3;
+          di2 = ci2 + cr3;
+          di3 = ci2 - cr3;
+          /* twiddle the two non-trivial outputs by wa1 and wa2 */
+          ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2;
+          ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2;
+          ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3;
+          ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3;
+        }
+      }
+    }
+  } /* passf3 */
+
+
+/* Radix-4 complex FFT pass; isign selects direction. */
+static void passf4(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], const Treal wa3[], int isign)
+  /* isign == -1 for forward transform and +1 for backward transform */
+  {
+    int i, k, ac, ah;
+    Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+    if (ido == 2) {
+      /* single complex element per sub-transform: no twiddles */
+      for (k=0; k<l1; k++) {
+        ac = 4*k*ido + 1;
+        ti1 = ref(cc,ac) - ref(cc,ac + 2*ido);
+        ti2 = ref(cc,ac) + ref(cc,ac + 2*ido);
+        tr4 = ref(cc,ac + 3*ido) - ref(cc,ac + ido);
+        ti3 = ref(cc,ac + ido) + ref(cc,ac + 3*ido);
+        tr1 = ref(cc,ac - 1) - ref(cc,ac + 2*ido - 1);
+        tr2 = ref(cc,ac - 1) + ref(cc,ac + 2*ido - 1);
+        ti4 = ref(cc,ac + ido - 1) - ref(cc,ac + 3*ido - 1);
+        tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 3*ido - 1);
+        ah = k*ido;
+        ch[ah] = tr2 + tr3;
+        ch[ah + 2*l1*ido] = tr2 - tr3;
+        ch[ah + 1] = ti2 + ti3;
+        ch[ah + 2*l1*ido + 1] = ti2 - ti3;
+        ch[ah + l1*ido] = tr1 + isign*tr4;
+        ch[ah + 3*l1*ido] = tr1 - isign*tr4;
+        ch[ah + l1*ido + 1] = ti1 + isign*ti4;
+        ch[ah + 3*l1*ido + 1] = ti1 - isign*ti4;
+      }
+    } else {
+      for (k=0; k<l1; k++) {
+        for (i=0; i<ido-1; i+=2) {
+          ac = i + 1 + 4*k*ido;
+          ti1 = ref(cc,ac) - ref(cc,ac + 2*ido);
+          ti2 = ref(cc,ac) + ref(cc,ac + 2*ido);
+          ti3 = ref(cc,ac + ido) + ref(cc,ac + 3*ido);
+          tr4 = ref(cc,ac + 3*ido) - ref(cc,ac + ido);
+          tr1 = ref(cc,ac - 1) - ref(cc,ac + 2*ido - 1);
+          tr2 = ref(cc,ac - 1) + ref(cc,ac + 2*ido - 1);
+          ti4 = ref(cc,ac + ido - 1) - ref(cc,ac + 3*ido - 1);
+          tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 3*ido - 1);
+          ah = i + k*ido;
+          ch[ah] = tr2 + tr3;
+          cr3 = tr2 - tr3;
+          ch[ah + 1] = ti2 + ti3;
+          ci3 = ti2 - ti3;
+          cr2 = tr1 + isign*tr4;
+          cr4 = tr1 - isign*tr4;
+          ci2 = ti1 + isign*ti4;
+          ci4 = ti1 - isign*ti4;
+          /* twiddle the three non-trivial outputs by wa1, wa2, wa3 */
+          ch[ah + l1*ido] = wa1[i]*cr2 - isign*wa1[i + 1]*ci2;
+          ch[ah + l1*ido + 1] = wa1[i]*ci2 + isign*wa1[i + 1]*cr2;
+          ch[ah + 2*l1*ido] = wa2[i]*cr3 - isign*wa2[i + 1]*ci3;
+          ch[ah + 2*l1*ido + 1] = wa2[i]*ci3 + isign*wa2[i + 1]*cr3;
+          ch[ah + 3*l1*ido] = wa3[i]*cr4 -isign*wa3[i + 1]*ci4;
+          ch[ah + 3*l1*ido + 1] = wa3[i]*ci4 + isign*wa3[i + 1]*cr4;
+        }
+      }
+    }
+  } /* passf4 */
+
+
+/* Radix-5 complex FFT pass.  tr11/ti11 and tr12/ti12 are cos/sin of
+   2*pi/5 and 4*pi/5; isign selects direction. */
+static void passf5(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[], int isign)
+  /* isign == -1 for forward transform and +1 for backward transform */
+  {
+    static const Treal tr11 = 0.309016994374947;
+    static const Treal ti11 = 0.951056516295154;
+    static const Treal tr12 = -0.809016994374947;
+    static const Treal ti12 = 0.587785252292473;
+    int i, k, ac, ah;
+    Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
+        ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
+    if (ido == 2) {
+      /* single complex element per sub-transform: no twiddles */
+      for (k = 1; k <= l1; ++k) {
+        ac = (5*k - 4)*ido + 1;
+        ti5 = ref(cc,ac) - ref(cc,ac + 3*ido);
+        ti2 = ref(cc,ac) + ref(cc,ac + 3*ido);
+        ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido);
+        ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido);
+        tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1);
+        tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1);
+        tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1);
+        tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1);
+        ah = (k - 1)*ido;
+        ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3;
+        ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3;
+        cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3;
+        ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3;
+        cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3;
+        ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3;
+        cr5 = isign*(ti11*tr5 + ti12*tr4);
+        ci5 = isign*(ti11*ti5 + ti12*ti4);
+        cr4 = isign*(ti12*tr5 - ti11*tr4);
+        ci4 = isign*(ti12*ti5 - ti11*ti4);
+        ch[ah + l1*ido] = cr2 - ci5;
+        ch[ah + 4*l1*ido] = cr2 + ci5;
+        ch[ah + l1*ido + 1] = ci2 + cr5;
+        ch[ah + 2*l1*ido + 1] = ci3 + cr4;
+        ch[ah + 2*l1*ido] = cr3 - ci4;
+        ch[ah + 3*l1*ido] = cr3 + ci4;
+        ch[ah + 3*l1*ido + 1] = ci3 - cr4;
+        ch[ah + 4*l1*ido + 1] = ci2 - cr5;
+      }
+    } else {
+      for (k=1; k<=l1; k++) {
+        for (i=0; i<ido-1; i+=2) {
+          ac = i + 1 + (k*5 - 4)*ido;
+          ti5 = ref(cc,ac) - ref(cc,ac + 3*ido);
+          ti2 = ref(cc,ac) + ref(cc,ac + 3*ido);
+          ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido);
+          ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido);
+          tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1);
+          tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1);
+          tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1);
+          tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1);
+          ah = i + (k - 1)*ido;
+          ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3;
+          ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3;
+          cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3;
+
+          ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3;
+          cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3;
+
+          ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3;
+          cr5 = isign*(ti11*tr5 + ti12*tr4);
+          ci5 = isign*(ti11*ti5 + ti12*ti4);
+          cr4 = isign*(ti12*tr5 - ti11*tr4);
+          ci4 = isign*(ti12*ti5 - ti11*ti4);
+          dr3 = cr3 - ci4;
+          dr4 = cr3 + ci4;
+          di3 = ci3 + cr4;
+          di4 = ci3 - cr4;
+          dr5 = cr2 + ci5;
+          dr2 = cr2 - ci5;
+          di5 = ci2 - cr5;
+          di2 = ci2 + cr5;
+          /* twiddle the four non-trivial outputs by wa1..wa4 */
+          ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2;
+          ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2;
+          ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3;
+          ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3;
+          ch[ah + 3*l1*ido] = wa3[i]*dr4 - isign*wa3[i+1]*di4;
+          ch[ah + 3*l1*ido + 1] = wa3[i]*di4 + isign*wa3[i+1]*dr4;
+          ch[ah + 4*l1*ido] = wa4[i]*dr5 - isign*wa4[i+1]*di5;
+          ch[ah + 4*l1*ido + 1] = wa4[i]*di5 + isign*wa4[i+1]*dr5;
+        }
+      }
+    }
+  } /* passf5 */
+
+
+/* General-radix complex FFT pass, used for factors other than the
+   special-cased 2..5.  NOTE(review): *nac is set to 1 only when ido==2,
+   else 0 -- presumably it tells the caller which of cc/ch holds the
+   result; confirm against the dispatching routine (outside this view). */
+static void passf(int *nac, int ido, int ip, int l1, int idl1,
+      Treal cc[], Treal ch[],
+      const Treal wa[], int isign)
+  /* isign is -1 for forward transform and +1 for backward transform */
+  {
+    int idij, idlj, idot, ipph, i, j, k, l, jc, lc, ik, nt, idj, idl, inc,idp;
+    Treal wai, war;
+
+    idot = ido / 2;
+    nt = ip*idl1;
+    ipph = (ip + 1) / 2;
+    idp = ip*ido;
+    /* first stage: symmetric sums/differences of the j-th and (ip-j)-th
+       groups; loop order chosen by whichever of ido/l1 is larger */
+    if (ido >= l1) {
+      for (j=1; j<ipph; j++) {
+        jc = ip - j;
+        for (k=0; k<l1; k++) {
+          for (i=0; i<ido; i++) {
+            ch[i + (k + j*l1)*ido] =
+                ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*ip)*ido);
+            ch[i + (k + jc*l1)*ido] =
+                ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*ip)*ido);
+          }
+        }
+      }
+      for (k=0; k<l1; k++)
+        for (i=0; i<ido; i++)
+          ch[i + k*ido] = ref(cc,i + k*ip*ido);
+    } else {
+      for (j=1; j<ipph; j++) {
+        jc = ip - j;
+        for (i=0; i<ido; i++) {
+          for (k=0; k<l1; k++) {
+            ch[i + (k + j*l1)*ido] = ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*
+ip)*ido);
+            ch[i + (k + jc*l1)*ido] = ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*
+ip)*ido);
+          }
+        }
+      }
+      for (i=0; i<ido; i++)
+        for (k=0; k<l1; k++)
+          ch[i + k*ido] = ref(cc,i + k*ip*ido);
+    }
+
+    /* second stage: accumulate twiddled combinations into cc */
+    idl = 2 - ido;
+    inc = 0;
+    for (l=1; l<ipph; l++) {
+      lc = ip - l;
+      idl += ido;
+      for (ik=0; ik<idl1; ik++) {
+        cc[ik + l*idl1] = ch[ik] + wa[idl - 2]*ch[ik + idl1];
+        cc[ik + lc*idl1] = isign*wa[idl-1]*ch[ik + (ip-1)*idl1];
+      }
+      idlj = idl;
+      inc += ido;
+      for (j=2; j<ipph; j++) {
+        jc = ip - j;
+        idlj += inc;
+        if (idlj > idp) idlj -= idp;    /* wrap twiddle index */
+        war = wa[idlj - 2];
+        wai = wa[idlj-1];
+        for (ik=0; ik<idl1; ik++) {
+          cc[ik + l*idl1] += war*ch[ik + j*idl1];
+          cc[ik + lc*idl1] += isign*wai*ch[ik + jc*idl1];
+        }
+      }
+    }
+    for (j=1; j<ipph; j++)
+      for (ik=0; ik<idl1; ik++)
+        ch[ik] += ch[ik + j*idl1];
+    for (j=1; j<ipph; j++) {
+      jc = ip - j;
+      for (ik=1; ik<idl1; ik+=2) {
+        ch[ik - 1 + j*idl1] = cc[ik - 1 + j*idl1] - cc[ik + jc*idl1];
+        ch[ik - 1 + jc*idl1] = cc[ik - 1 + j*idl1] + cc[ik + jc*idl1];
+        ch[ik + j*idl1] = cc[ik + j*idl1] + cc[ik - 1 + jc*idl1];
+        ch[ik + jc*idl1] = cc[ik + j*idl1] - cc[ik - 1 + jc*idl1];
+      }
+    }
+    *nac = 1;
+    if (ido == 2) return;
+    *nac = 0;
+    /* final stage: apply per-element twiddles, writing back into cc */
+    for (ik=0; ik<idl1; ik++)
+      cc[ik] = ch[ik];
+    for (j=1; j<ip; j++) {
+      for (k=0; k<l1; k++) {
+        cc[(k + j*l1)*ido + 0] = ch[(k + j*l1)*ido + 0];
+        cc[(k + j*l1)*ido + 1] = ch[(k + j*l1)*ido + 1];
+      }
+    }
+    if (idot <= l1) {
+      idij = 0;
+      for (j=1; j<ip; j++) {
+        idij += 2;
+        for (i=3; i<ido; i+=2) {
+          idij += 2;
+          for (k=0; k<l1; k++) {
+            cc[i - 1 + (k + j*l1)*ido] =
+                wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] -
+                isign*wa[idij-1]*ch[i + (k + j*l1)*ido];
+            cc[i + (k + j*l1)*ido] =
+                wa[idij - 2]*ch[i + (k + j*l1)*ido] +
+                isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido];
+          }
+        }
+      }
+    } else {
+      idj = 2 - ido;
+      for (j=1; j<ip; j++) {
+        idj += ido;
+        for (k = 0; k < l1; k++) {
+          idij = idj;
+          for (i=3; i<ido; i+=2) {
+            idij += 2;
+            cc[i - 1 + (k + j*l1)*ido] =
+                wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] -
+                isign*wa[idij-1]*ch[i + (k + j*l1)*ido];
+            cc[i + (k + j*l1)*ido] =
+                wa[idij - 2]*ch[i + (k + j*l1)*ido] +
+                isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido];
+          }
+        }
+      }
+    }
+  } /* passf */
+
+
+ /* ----------------------------------------------------------------------
+radf2,radb2, radf3,radb3, radf4,radb4, radf5,radb5, radfg,radbg.
+Treal FFT passes fwd and bwd.
+---------------------------------------------------------------------- */
+
+/* radf2: forward real-FFT pass for factor 2.  Combines the two length
+   ido*l1 halves of cc into the packed factor-2 stage output in ch,
+   applying the twiddle table wa1 to the interior elements.  The tail
+   loop handles the extra element present when ido is even. */
+static void radf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[])
+  {
+    int i, k, ic;
+    Treal ti2, tr2;
+    for (k=0; k<l1; k++) {
+      ch[2*k*ido] =
+          ref(cc,k*ido) + ref(cc,(k + l1)*ido);
+      ch[(2*k+1)*ido + ido-1] =
+          ref(cc,k*ido) - ref(cc,(k + l1)*ido);
+    }
+    if (ido < 2) return;
+    if (ido != 2) {
+      for (k=0; k<l1; k++) {
+        for (i=2; i<ido; i+=2) {
+          ic = ido - i;
+          tr2 = wa1[i - 2]*ref(cc, i-1 + (k + l1)*ido) + wa1[i - 1]*ref(cc, i + (k + l1)*ido);
+          ti2 = wa1[i - 2]*ref(cc, i + (k + l1)*ido) - wa1[i - 1]*ref(cc, i-1 + (k + l1)*ido);
+          ch[i + 2*k*ido] = ref(cc,i + k*ido) + ti2;
+          ch[ic + (2*k+1)*ido] = ti2 - ref(cc,i + k*ido);
+          ch[i - 1 + 2*k*ido] = ref(cc,i - 1 + k*ido) + tr2;
+          ch[ic - 1 + (2*k+1)*ido] = ref(cc,i - 1 + k*ido) - tr2;
+        }
+      }
+      if (ido % 2 == 1) return;
+    }
+    /* ido even: the Nyquist-position element gets no twiddle. */
+    for (k=0; k<l1; k++) {
+      ch[(2*k+1)*ido] = -ref(cc,ido-1 + (k + l1)*ido);
+      ch[ido-1 + 2*k*ido] = ref(cc,ido-1 + k*ido);
+    }
+  } /* radf2 */
+
+
+/* radb2: backward (inverse) real-FFT pass for factor 2 — the inverse of
+   radf2.  Unpacks the factor-2 stage in cc back into two half-sequences
+   in ch, applying the twiddle table wa1 to the interior elements. */
+static void radb2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[])
+  {
+    int i, k, ic;
+    Treal ti2, tr2;
+    for (k=0; k<l1; k++) {
+      ch[k*ido] =
+          ref(cc,2*k*ido) + ref(cc,ido-1 + (2*k+1)*ido);
+      ch[(k + l1)*ido] =
+          ref(cc,2*k*ido) - ref(cc,ido-1 + (2*k+1)*ido);
+    }
+    if (ido < 2) return;
+    if (ido != 2) {
+      for (k = 0; k < l1; ++k) {
+        for (i = 2; i < ido; i += 2) {
+          ic = ido - i;
+          ch[i-1 + k*ido] =
+              ref(cc,i-1 + 2*k*ido) + ref(cc,ic-1 + (2*k+1)*ido);
+          tr2 = ref(cc,i-1 + 2*k*ido) - ref(cc,ic-1 + (2*k+1)*ido);
+          ch[i + k*ido] =
+              ref(cc,i + 2*k*ido) - ref(cc,ic + (2*k+1)*ido);
+          ti2 = ref(cc,i + (2*k)*ido) + ref(cc,ic + (2*k+1)*ido);
+          ch[i-1 + (k + l1)*ido] =
+              wa1[i - 2]*tr2 - wa1[i - 1]*ti2;
+          ch[i + (k + l1)*ido] =
+              wa1[i - 2]*ti2 + wa1[i - 1]*tr2;
+        }
+      }
+      if (ido % 2 == 1) return;
+    }
+    /* ido even: recover the untwiddled Nyquist-position element. */
+    for (k = 0; k < l1; k++) {
+      ch[ido-1 + k*ido] = 2*ref(cc,ido-1 + 2*k*ido);
+      ch[ido-1 + (k + l1)*ido] = -2*ref(cc,(2*k+1)*ido);
+    }
+  } /* radb2 */
+
+
+/* radf3: forward real-FFT pass for factor 3.  taur/taui are the real and
+   imaginary parts of exp(2*pi*i/3) (cos(2pi/3) = -1/2, sin(2pi/3) =
+   sqrt(3)/2 ~= 0.8660254).  wa1, wa2 are the twiddle tables for the
+   second and third sub-sequences. */
+static void radf3(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[])
+  {
+    static const Treal taur = -0.5;
+    static const Treal taui = 0.866025403784439;
+    int i, k, ic;
+    Treal ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3;
+    for (k=0; k<l1; k++) {
+      cr2 = ref(cc,(k + l1)*ido) + ref(cc,(k + 2*l1)*ido);
+      ch[3*k*ido] = ref(cc,k*ido) + cr2;
+      ch[(3*k+2)*ido] = taui*(ref(cc,(k + l1*2)*ido) - ref(cc,(k + l1)*ido));
+      ch[ido-1 + (3*k + 1)*ido] = ref(cc,k*ido) + taur*cr2;
+    }
+    if (ido == 1) return;
+    for (k=0; k<l1; k++) {
+      for (i=2; i<ido; i+=2) {
+        ic = ido - i;
+        dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) +
+            wa1[i - 1]*ref(cc,i + (k + l1)*ido);
+        di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
+        dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + l1*2)*ido) + wa2[i - 1]*ref(cc,i + (k + l1*2)*ido);
+        di3 = wa2[i - 2]*ref(cc,i + (k + l1*2)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + l1*2)*ido);
+        cr2 = dr2 + dr3;
+        ci2 = di2 + di3;
+        ch[i - 1 + 3*k*ido] = ref(cc,i - 1 + k*ido) + cr2;
+        ch[i + 3*k*ido] = ref(cc,i + k*ido) + ci2;
+        tr2 = ref(cc,i - 1 + k*ido) + taur*cr2;
+        ti2 = ref(cc,i + k*ido) + taur*ci2;
+        tr3 = taui*(di2 - di3);
+        ti3 = taui*(dr3 - dr2);
+        ch[i - 1 + (3*k + 2)*ido] = tr2 + tr3;
+        ch[ic - 1 + (3*k + 1)*ido] = tr2 - tr3;
+        ch[i + (3*k + 2)*ido] = ti2 + ti3;
+        ch[ic + (3*k + 1)*ido] = ti3 - ti2;
+      }
+    }
+  } /* radf3 */
+
+
+/* radb3: backward (inverse) real-FFT pass for factor 3 — the inverse of
+   radf3.  Same taur/taui constants (components of exp(2*pi*i/3)). */
+static void radb3(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[])
+  {
+    static const Treal taur = -0.5;
+    static const Treal taui = 0.866025403784439;
+    int i, k, ic;
+    Treal ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
+    for (k=0; k<l1; k++) {
+      tr2 = 2*ref(cc,ido-1 + (3*k + 1)*ido);
+      cr2 = ref(cc,3*k*ido) + taur*tr2;
+      ch[k*ido] = ref(cc,3*k*ido) + tr2;
+      ci3 = 2*taui*ref(cc,(3*k + 2)*ido);
+      ch[(k + l1)*ido] = cr2 - ci3;
+      ch[(k + 2*l1)*ido] = cr2 + ci3;
+    }
+    if (ido == 1) return;
+    for (k=0; k<l1; k++) {
+      for (i=2; i<ido; i+=2) {
+        ic = ido - i;
+        tr2 = ref(cc,i - 1 + (3*k + 2)*ido) + ref(cc,ic - 1 + (3*k + 1)*ido);
+        cr2 = ref(cc,i - 1 + 3*k*ido) + taur*tr2;
+        ch[i - 1 + k*ido] = ref(cc,i - 1 + 3*k*ido) + tr2;
+        ti2 = ref(cc,i + (3*k + 2)*ido) - ref(cc,ic + (3*k + 1)*ido);
+        ci2 = ref(cc,i + 3*k*ido) + taur*ti2;
+        ch[i + k*ido] = ref(cc,i + 3*k*ido) + ti2;
+        cr3 = taui*(ref(cc,i - 1 + (3*k + 2)*ido) - ref(cc,ic - 1 + (3*k + 1)*ido));
+        ci3 = taui*(ref(cc,i + (3*k + 2)*ido) + ref(cc,ic + (3*k + 1)*ido));
+        dr2 = cr2 - ci3;
+        dr3 = cr2 + ci3;
+        di2 = ci2 + cr3;
+        di3 = ci2 - cr3;
+        ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*dr2 - wa1[i - 1]*di2;
+        ch[i + (k + l1)*ido] = wa1[i - 2]*di2 + wa1[i - 1]*dr2;
+        ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*dr3 - wa2[i - 1]*di3;
+        ch[i + (k + 2*l1)*ido] = wa2[i - 2]*di3 + wa2[i - 1]*dr3;
+      }
+    }
+  } /* radb3 */
+
+
+/* radf4: forward real-FFT pass for factor 4.  hsqt2 = sqrt(2)/2, used for
+   the even-ido tail elements.  wa1..wa3 are the twiddle tables for the
+   three non-trivial sub-sequences. */
+static void radf4(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], const Treal wa3[])
+  {
+    static const Treal hsqt2 = 0.7071067811865475;
+    int i, k, ic;
+    Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+    for (k=0; k<l1; k++) {
+      tr1 = ref(cc,(k + l1)*ido) + ref(cc,(k + 3*l1)*ido);
+      tr2 = ref(cc,k*ido) + ref(cc,(k + 2*l1)*ido);
+      ch[4*k*ido] = tr1 + tr2;
+      ch[ido-1 + (4*k + 3)*ido] = tr2 - tr1;
+      ch[ido-1 + (4*k + 1)*ido] = ref(cc,k*ido) - ref(cc,(k + 2*l1)*ido);
+      ch[(4*k + 2)*ido] = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + l1)*ido);
+    }
+    if (ido < 2) return;
+    if (ido != 2) {
+      for (k=0; k<l1; k++) {
+        for (i=2; i<ido; i += 2) {
+          ic = ido - i;
+          cr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido);
+          ci2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
+          cr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)*
+              ido);
+          ci3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)*
+              ido);
+          cr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)*
+              ido);
+          ci4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)*
+              ido);
+          tr1 = cr2 + cr4;
+          tr4 = cr4 - cr2;
+          ti1 = ci2 + ci4;
+          ti4 = ci2 - ci4;
+          ti2 = ref(cc,i + k*ido) + ci3;
+          ti3 = ref(cc,i + k*ido) - ci3;
+          tr2 = ref(cc,i - 1 + k*ido) + cr3;
+          tr3 = ref(cc,i - 1 + k*ido) - cr3;
+          ch[i - 1 + 4*k*ido] = tr1 + tr2;
+          ch[ic - 1 + (4*k + 3)*ido] = tr2 - tr1;
+          ch[i + 4*k*ido] = ti1 + ti2;
+          ch[ic + (4*k + 3)*ido] = ti1 - ti2;
+          ch[i - 1 + (4*k + 2)*ido] = ti4 + tr3;
+          ch[ic - 1 + (4*k + 1)*ido] = tr3 - ti4;
+          ch[i + (4*k + 2)*ido] = tr4 + ti3;
+          ch[ic + (4*k + 1)*ido] = tr4 - ti3;
+        }
+      }
+      if (ido % 2 == 1) return;
+    }
+    /* ido even: Nyquist-position elements use the fixed hsqt2 rotation. */
+    for (k=0; k<l1; k++) {
+      ti1 = -hsqt2*(ref(cc,ido-1 + (k + l1)*ido) + ref(cc,ido-1 + (k + 3*l1)*ido));
+      tr1 = hsqt2*(ref(cc,ido-1 + (k + l1)*ido) - ref(cc,ido-1 + (k + 3*l1)*ido));
+      ch[ido-1 + 4*k*ido] = tr1 + ref(cc,ido-1 + k*ido);
+      ch[ido-1 + (4*k + 2)*ido] = ref(cc,ido-1 + k*ido) - tr1;
+      ch[(4*k + 1)*ido] = ti1 - ref(cc,ido-1 + (k + 2*l1)*ido);
+      ch[(4*k + 3)*ido] = ti1 + ref(cc,ido-1 + (k + 2*l1)*ido);
+    }
+  } /* radf4 */
+
+
+/* radb4: backward (inverse) real-FFT pass for factor 4 — the inverse of
+   radf4.  sqrt2 is used for the even-ido tail elements. */
+static void radb4(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], const Treal wa3[])
+  {
+    static const Treal sqrt2 = 1.414213562373095;
+    int i, k, ic;
+    Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+    for (k = 0; k < l1; k++) {
+      tr1 = ref(cc,4*k*ido) - ref(cc,ido-1 + (4*k + 3)*ido);
+      tr2 = ref(cc,4*k*ido) + ref(cc,ido-1 + (4*k + 3)*ido);
+      tr3 = ref(cc,ido-1 + (4*k + 1)*ido) + ref(cc,ido-1 + (4*k + 1)*ido);
+      tr4 = ref(cc,(4*k + 2)*ido) + ref(cc,(4*k + 2)*ido);
+      ch[k*ido] = tr2 + tr3;
+      ch[(k + l1)*ido] = tr1 - tr4;
+      ch[(k + 2*l1)*ido] = tr2 - tr3;
+      ch[(k + 3*l1)*ido] = tr1 + tr4;
+    }
+    if (ido < 2) return;
+    if (ido != 2) {
+      for (k = 0; k < l1; ++k) {
+        for (i = 2; i < ido; i += 2) {
+          ic = ido - i;
+          ti1 = ref(cc,i + 4*k*ido) + ref(cc,ic + (4*k + 3)*ido);
+          ti2 = ref(cc,i + 4*k*ido) - ref(cc,ic + (4*k + 3)*ido);
+          ti3 = ref(cc,i + (4*k + 2)*ido) - ref(cc,ic + (4*k + 1)*ido);
+          tr4 = ref(cc,i + (4*k + 2)*ido) + ref(cc,ic + (4*k + 1)*ido);
+          tr1 = ref(cc,i - 1 + 4*k*ido) - ref(cc,ic - 1 + (4*k + 3)*ido);
+          tr2 = ref(cc,i - 1 + 4*k*ido) + ref(cc,ic - 1 + (4*k + 3)*ido);
+          ti4 = ref(cc,i - 1 + (4*k + 2)*ido) - ref(cc,ic - 1 + (4*k + 1)*ido);
+          tr3 = ref(cc,i - 1 + (4*k + 2)*ido) + ref(cc,ic - 1 + (4*k + 1)*ido);
+          ch[i - 1 + k*ido] = tr2 + tr3;
+          cr3 = tr2 - tr3;
+          ch[i + k*ido] = ti2 + ti3;
+          ci3 = ti2 - ti3;
+          cr2 = tr1 - tr4;
+          cr4 = tr1 + tr4;
+          ci2 = ti1 + ti4;
+          ci4 = ti1 - ti4;
+          ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*cr2 - wa1[i - 1]*ci2;
+          ch[i + (k + l1)*ido] = wa1[i - 2]*ci2 + wa1[i - 1]*cr2;
+          ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*cr3 - wa2[i - 1]*ci3;
+          ch[i + (k + 2*l1)*ido] = wa2[i - 2]*ci3 + wa2[i - 1]*cr3;
+          ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*cr4 - wa3[i - 1]*ci4;
+          ch[i + (k + 3*l1)*ido] = wa3[i - 2]*ci4 + wa3[i - 1]*cr4;
+        }
+      }
+      if (ido % 2 == 1) return;
+    }
+    /* ido even: recover Nyquist-position elements with the sqrt2 rotation. */
+    for (k = 0; k < l1; k++) {
+      ti1 = ref(cc,(4*k + 1)*ido) + ref(cc,(4*k + 3)*ido);
+      ti2 = ref(cc,(4*k + 3)*ido) - ref(cc,(4*k + 1)*ido);
+      tr1 = ref(cc,ido-1 + 4*k*ido) - ref(cc,ido-1 + (4*k + 2)*ido);
+      tr2 = ref(cc,ido-1 + 4*k*ido) + ref(cc,ido-1 + (4*k + 2)*ido);
+      ch[ido-1 + k*ido] = tr2 + tr2;
+      ch[ido-1 + (k + l1)*ido] = sqrt2*(tr1 - ti1);
+      ch[ido-1 + (k + 2*l1)*ido] = ti2 + ti2;
+      ch[ido-1 + (k + 3*l1)*ido] = -sqrt2*(tr1 + ti1);
+    }
+  } /* radb4 */
+
+
+/* radf5: forward real-FFT pass for factor 5.  tr11/ti11 and tr12/ti12 are
+   the real/imaginary components of exp(2*pi*i/5) and exp(4*pi*i/5)
+   (cos/sin of 72 and 144 degrees). */
+static void radf5(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[])
+  {
+    static const Treal tr11 = 0.309016994374947;
+    static const Treal ti11 = 0.951056516295154;
+    static const Treal tr12 = -0.809016994374947;
+    static const Treal ti12 = 0.587785252292473;
+    int i, k, ic;
+    Treal ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5,
+        cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5;
+    for (k = 0; k < l1; k++) {
+      cr2 = ref(cc,(k + 4*l1)*ido) + ref(cc,(k + l1)*ido);
+      ci5 = ref(cc,(k + 4*l1)*ido) - ref(cc,(k + l1)*ido);
+      cr3 = ref(cc,(k + 3*l1)*ido) + ref(cc,(k + 2*l1)*ido);
+      ci4 = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + 2*l1)*ido);
+      ch[5*k*ido] = ref(cc,k*ido) + cr2 + cr3;
+      ch[ido-1 + (5*k + 1)*ido] = ref(cc,k*ido) + tr11*cr2 + tr12*cr3;
+      ch[(5*k + 2)*ido] = ti11*ci5 + ti12*ci4;
+      ch[ido-1 + (5*k + 3)*ido] = ref(cc,k*ido) + tr12*cr2 + tr11*cr3;
+      ch[(5*k + 4)*ido] = ti12*ci5 - ti11*ci4;
+    }
+    if (ido == 1) return;
+    for (k = 0; k < l1; ++k) {
+      for (i = 2; i < ido; i += 2) {
+        ic = ido - i;
+        dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido);
+        di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
+        dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)*ido);
+        di3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)*ido);
+        dr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)*ido);
+        di4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)*ido);
+        dr5 = wa4[i - 2]*ref(cc,i - 1 + (k + 4*l1)*ido) + wa4[i - 1]*ref(cc,i + (k + 4*l1)*ido);
+        di5 = wa4[i - 2]*ref(cc,i + (k + 4*l1)*ido) - wa4[i - 1]*ref(cc,i - 1 + (k + 4*l1)*ido);
+        cr2 = dr2 + dr5;
+        ci5 = dr5 - dr2;
+        cr5 = di2 - di5;
+        ci2 = di2 + di5;
+        cr3 = dr3 + dr4;
+        ci4 = dr4 - dr3;
+        cr4 = di3 - di4;
+        ci3 = di3 + di4;
+        ch[i - 1 + 5*k*ido] = ref(cc,i - 1 + k*ido) + cr2 + cr3;
+        ch[i + 5*k*ido] = ref(cc,i + k*ido) + ci2 + ci3;
+        tr2 = ref(cc,i - 1 + k*ido) + tr11*cr2 + tr12*cr3;
+        ti2 = ref(cc,i + k*ido) + tr11*ci2 + tr12*ci3;
+        tr3 = ref(cc,i - 1 + k*ido) + tr12*cr2 + tr11*cr3;
+        ti3 = ref(cc,i + k*ido) + tr12*ci2 + tr11*ci3;
+        tr5 = ti11*cr5 + ti12*cr4;
+        ti5 = ti11*ci5 + ti12*ci4;
+        tr4 = ti12*cr5 - ti11*cr4;
+        ti4 = ti12*ci5 - ti11*ci4;
+        ch[i - 1 + (5*k + 2)*ido] = tr2 + tr5;
+        ch[ic - 1 + (5*k + 1)*ido] = tr2 - tr5;
+        ch[i + (5*k + 2)*ido] = ti2 + ti5;
+        ch[ic + (5*k + 1)*ido] = ti5 - ti2;
+        ch[i - 1 + (5*k + 4)*ido] = tr3 + tr4;
+        ch[ic - 1 + (5*k + 3)*ido] = tr3 - tr4;
+        ch[i + (5*k + 4)*ido] = ti3 + ti4;
+        ch[ic + (5*k + 3)*ido] = ti4 - ti3;
+      }
+    }
+  } /* radf5 */
+
+
+/* radb5: backward (inverse) real-FFT pass for factor 5 — the inverse of
+   radf5.  Same tr11/ti11/tr12/ti12 constants (components of
+   exp(2*pi*i/5) and exp(4*pi*i/5)). */
+static void radb5(int ido, int l1, const Treal cc[], Treal ch[],
+      const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[])
+  {
+    static const Treal tr11 = 0.309016994374947;
+    static const Treal ti11 = 0.951056516295154;
+    static const Treal tr12 = -0.809016994374947;
+    static const Treal ti12 = 0.587785252292473;
+    int i, k, ic;
+    Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
+        ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
+    for (k = 0; k < l1; k++) {
+      ti5 = 2*ref(cc,(5*k + 2)*ido);
+      ti4 = 2*ref(cc,(5*k + 4)*ido);
+      tr2 = 2*ref(cc,ido-1 + (5*k + 1)*ido);
+      tr3 = 2*ref(cc,ido-1 + (5*k + 3)*ido);
+      ch[k*ido] = ref(cc,5*k*ido) + tr2 + tr3;
+      cr2 = ref(cc,5*k*ido) + tr11*tr2 + tr12*tr3;
+      cr3 = ref(cc,5*k*ido) + tr12*tr2 + tr11*tr3;
+      ci5 = ti11*ti5 + ti12*ti4;
+      ci4 = ti12*ti5 - ti11*ti4;
+      ch[(k + l1)*ido] = cr2 - ci5;
+      ch[(k + 2*l1)*ido] = cr3 - ci4;
+      ch[(k + 3*l1)*ido] = cr3 + ci4;
+      ch[(k + 4*l1)*ido] = cr2 + ci5;
+    }
+    if (ido == 1) return;
+    for (k = 0; k < l1; ++k) {
+      for (i = 2; i < ido; i += 2) {
+        ic = ido - i;
+        ti5 = ref(cc,i + (5*k + 2)*ido) + ref(cc,ic + (5*k + 1)*ido);
+        ti2 = ref(cc,i + (5*k + 2)*ido) - ref(cc,ic + (5*k + 1)*ido);
+        ti4 = ref(cc,i + (5*k + 4)*ido) + ref(cc,ic + (5*k + 3)*ido);
+        ti3 = ref(cc,i + (5*k + 4)*ido) - ref(cc,ic + (5*k + 3)*ido);
+        tr5 = ref(cc,i - 1 + (5*k + 2)*ido) - ref(cc,ic - 1 + (5*k + 1)*ido);
+        tr2 = ref(cc,i - 1 + (5*k + 2)*ido) + ref(cc,ic - 1 + (5*k + 1)*ido);
+        tr4 = ref(cc,i - 1 + (5*k + 4)*ido) - ref(cc,ic - 1 + (5*k + 3)*ido);
+        tr3 = ref(cc,i - 1 + (5*k + 4)*ido) + ref(cc,ic - 1 + (5*k + 3)*ido);
+        ch[i - 1 + k*ido] = ref(cc,i - 1 + 5*k*ido) + tr2 + tr3;
+        ch[i + k*ido] = ref(cc,i + 5*k*ido) + ti2 + ti3;
+        cr2 = ref(cc,i - 1 + 5*k*ido) + tr11*tr2 + tr12*tr3;
+
+        ci2 = ref(cc,i + 5*k*ido) + tr11*ti2 + tr12*ti3;
+        cr3 = ref(cc,i - 1 + 5*k*ido) + tr12*tr2 + tr11*tr3;
+
+        ci3 = ref(cc,i + 5*k*ido) + tr12*ti2 + tr11*ti3;
+        cr5 = ti11*tr5 + ti12*tr4;
+        ci5 = ti11*ti5 + ti12*ti4;
+        cr4 = ti12*tr5 - ti11*tr4;
+        ci4 = ti12*ti5 - ti11*ti4;
+        dr3 = cr3 - ci4;
+        dr4 = cr3 + ci4;
+        di3 = ci3 + cr4;
+        di4 = ci3 - cr4;
+        dr5 = cr2 + ci5;
+        dr2 = cr2 - ci5;
+        di5 = ci2 - cr5;
+        di2 = ci2 + cr5;
+        ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*dr2 - wa1[i - 1]*di2;
+        ch[i + (k + l1)*ido] = wa1[i - 2]*di2 + wa1[i - 1]*dr2;
+        ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*dr3 - wa2[i - 1]*di3;
+        ch[i + (k + 2*l1)*ido] = wa2[i - 2]*di3 + wa2[i - 1]*dr3;
+        ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*dr4 - wa3[i - 1]*di4;
+        ch[i + (k + 3*l1)*ido] = wa3[i - 2]*di4 + wa3[i - 1]*dr4;
+        ch[i - 1 + (k + 4*l1)*ido] = wa4[i - 2]*dr5 - wa4[i - 1]*di5;
+        ch[i + (k + 4*l1)*ido] = wa4[i - 2]*di5 + wa4[i - 1]*dr5;
+      }
+    }
+  } /* radb5 */
+
+
+/* radfg: forward real-FFT pass for a general factor ip (used when ip is
+   not 2,3,4 or 5).  The rotation dcp/dsp = cos/sin(2*pi/ip) is applied
+   recursively (ar1/ai1, ar2/ai2) instead of reading every twiddle from
+   the table.  cc and ch are both used as scratch; the result ends in cc.
+   Several loop pairs below are duplicated with swapped loop order,
+   selected by nbd vs l1 (and ido vs l1) for memory locality. */
+static void radfg(int ido, int ip, int l1, int idl1,
+      Treal cc[], Treal ch[], const Treal wa[])
+  {
+    static const Treal twopi = 6.28318530717959;
+    int idij, ipph, i, j, k, l, j2, ic, jc, lc, ik, is, nbd;
+    Treal dc2, ai1, ai2, ar1, ar2, ds2, dcp, arg, dsp, ar1h, ar2h;
+    arg = twopi / ip;
+    dcp = cos(arg);
+    dsp = sin(arg);
+    ipph = (ip + 1) / 2;
+    nbd = (ido - 1) / 2;
+    if (ido != 1) {
+      /* Apply the twiddle factors to the input sub-sequences. */
+      for (ik=0; ik<idl1; ik++) ch[ik] = cc[ik];
+      for (j=1; j<ip; j++)
+        for (k=0; k<l1; k++)
+          ch[(k + j*l1)*ido] = cc[(k + j*l1)*ido];
+      if (nbd <= l1) {
+        is = -ido;
+        for (j=1; j<ip; j++) {
+          is += ido;
+          idij = is-1;
+          for (i=2; i<ido; i+=2) {
+            idij += 2;
+            for (k=0; k<l1; k++) {
+              ch[i - 1 + (k + j*l1)*ido] =
+                  wa[idij - 1]*cc[i - 1 + (k + j*l1)*ido] + wa[idij]*cc[i + (k + j*l1)*ido];
+              ch[i + (k + j*l1)*ido] =
+                  wa[idij - 1]*cc[i + (k + j*l1)*ido] - wa[idij]*cc[i - 1 + (k + j*l1)*ido];
+            }
+          }
+        }
+      } else {
+        is = -ido;
+        for (j=1; j<ip; j++) {
+          is += ido;
+          for (k=0; k<l1; k++) {
+            idij = is-1;
+            for (i=2; i<ido; i+=2) {
+              idij += 2;
+              ch[i - 1 + (k + j*l1)*ido] =
+                  wa[idij - 1]*cc[i - 1 + (k + j*l1)*ido] + wa[idij]*cc[i + (k + j*l1)*ido];
+              ch[i + (k + j*l1)*ido] =
+                  wa[idij - 1]*cc[i + (k + j*l1)*ido] - wa[idij]*cc[i - 1 + (k + j*l1)*ido];
+            }
+          }
+        }
+      }
+      /* Form sums and differences of the j-th and (ip-j)-th sub-sequences. */
+      if (nbd >= l1) {
+        for (j=1; j<ipph; j++) {
+          jc = ip - j;
+          for (k=0; k<l1; k++) {
+            for (i=2; i<ido; i+=2) {
+              cc[i - 1 + (k + j*l1)*ido] = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
+              cc[i - 1 + (k + jc*l1)*ido] = ch[i + (k + j*l1)*ido] - ch[i + (k + jc*l1)*ido];
+              cc[i + (k + j*l1)*ido] = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
+              cc[i + (k + jc*l1)*ido] = ch[i - 1 + (k + jc*l1)*ido] - ch[i - 1 + (k + j*l1)*ido];
+            }
+          }
+        }
+      } else {
+        for (j=1; j<ipph; j++) {
+          jc = ip - j;
+          for (i=2; i<ido; i+=2) {
+            for (k=0; k<l1; k++) {
+              cc[i - 1 + (k + j*l1)*ido] =
+                  ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
+              cc[i - 1 + (k + jc*l1)*ido] = ch[i + (k + j*l1)*ido] - ch[i + (k + jc*l1)*ido];
+              cc[i + (k + j*l1)*ido] = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
+              cc[i + (k + jc*l1)*ido] = ch[i - 1 + (k + jc*l1)*ido] - ch[i - 1 + (k + j*l1)*ido];
+            }
+          }
+        }
+      }
+    } else {  /* now ido == 1 */
+      for (ik=0; ik<idl1; ik++) cc[ik] = ch[ik];
+    }
+    for (j=1; j<ipph; j++) {
+      jc = ip - j;
+      for (k=0; k<l1; k++) {
+        cc[(k + j*l1)*ido] = ch[(k + j*l1)*ido] + ch[(k + jc*l1)*ido];
+        cc[(k + jc*l1)*ido] = ch[(k + jc*l1)*ido] - ch[(k + j*l1)*ido];
+      }
+    }
+
+    /* Size-ip DFT of the sub-sequences; (ar1,ai1) and (ar2,ai2) generate
+       the needed roots of unity by repeated complex multiplication. */
+    ar1 = 1;
+    ai1 = 0;
+    for (l=1; l<ipph; l++) {
+      lc = ip - l;
+      ar1h = dcp*ar1 - dsp*ai1;
+      ai1 = dcp*ai1 + dsp*ar1;
+      ar1 = ar1h;
+      for (ik=0; ik<idl1; ik++) {
+        ch[ik + l*idl1] = cc[ik] + ar1*cc[ik + idl1];
+        ch[ik + lc*idl1] = ai1*cc[ik + (ip-1)*idl1];
+      }
+      dc2 = ar1;
+      ds2 = ai1;
+      ar2 = ar1;
+      ai2 = ai1;
+      for (j=2; j<ipph; j++) {
+        jc = ip - j;
+        ar2h = dc2*ar2 - ds2*ai2;
+        ai2 = dc2*ai2 + ds2*ar2;
+        ar2 = ar2h;
+        for (ik=0; ik<idl1; ik++) {
+          ch[ik + l*idl1] += ar2*cc[ik + j*idl1];
+          ch[ik + lc*idl1] += ai2*cc[ik + jc*idl1];
+        }
+      }
+    }
+    for (j=1; j<ipph; j++)
+      for (ik=0; ik<idl1; ik++)
+        ch[ik] += cc[ik + j*idl1];
+
+    /* Pack the result back into cc in the half-complex output layout. */
+    if (ido >= l1) {
+      for (k=0; k<l1; k++) {
+        for (i=0; i<ido; i++) {
+          ref(cc,i + k*ip*ido) = ch[i + k*ido];
+        }
+      }
+    } else {
+      for (i=0; i<ido; i++) {
+        for (k=0; k<l1; k++) {
+          ref(cc,i + k*ip*ido) = ch[i + k*ido];
+        }
+      }
+    }
+    for (j=1; j<ipph; j++) {
+      jc = ip - j;
+      j2 = 2*j;
+      for (k=0; k<l1; k++) {
+        ref(cc,ido-1 + (j2 - 1 + k*ip)*ido) =
+            ch[(k + j*l1)*ido];
+        ref(cc,(j2 + k*ip)*ido) =
+            ch[(k + jc*l1)*ido];
+      }
+    }
+    if (ido == 1) return;
+    if (nbd >= l1) {
+      for (j=1; j<ipph; j++) {
+        jc = ip - j;
+        j2 = 2*j;
+        for (k=0; k<l1; k++) {
+          for (i=2; i<ido; i+=2) {
+            ic = ido - i;
+            ref(cc,i - 1 + (j2 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
+            ref(cc,ic - 1 + (j2 - 1 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] - ch[i - 1 + (k + jc*l1)*ido];
+            ref(cc,i + (j2 + k*ip)*ido) = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
+            ref(cc,ic + (j2 - 1 + k*ip)*ido) = ch[i + (k + jc*l1)*ido] - ch[i + (k + j*l1)*ido];
+          }
+        }
+      }
+    } else {
+      for (j=1; j<ipph; j++) {
+        jc = ip - j;
+        j2 = 2*j;
+        for (i=2; i<ido; i+=2) {
+          ic = ido - i;
+          for (k=0; k<l1; k++) {
+            ref(cc,i - 1 + (j2 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
+            ref(cc,ic - 1 + (j2 - 1 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] - ch[i - 1 + (k + jc*l1)*ido];
+            ref(cc,i + (j2 + k*ip)*ido) = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
+            ref(cc,ic + (j2 - 1 + k*ip)*ido) = ch[i + (k + jc*l1)*ido] - ch[i + (k + j*l1)*ido];
+          }
+        }
+      }
+    }
+  } /* radfg */
+
+
+/* radbg: backward (inverse) real-FFT pass for a general factor ip — the
+   inverse of radfg.  As in radfg, the roots of unity are generated
+   recursively from dcp/dsp = cos/sin(2*pi/ip), and paired loops with
+   swapped iteration order are selected by nbd vs l1 (and ido vs l1) for
+   locality.  The result ends in cc when ido != 1 (final twiddle loops),
+   otherwise in ch. */
+static void radbg(int ido, int ip, int l1, int idl1,
+      Treal cc[], Treal ch[], const Treal wa[])
+  {
+    static const Treal twopi = 6.28318530717959;
+    int idij, ipph, i, j, k, l, j2, ic, jc, lc, ik, is;
+    Treal dc2, ai1, ai2, ar1, ar2, ds2;
+    int nbd;
+    Treal dcp, arg, dsp, ar1h, ar2h;
+    arg = twopi / ip;
+    dcp = cos(arg);
+    dsp = sin(arg);
+    nbd = (ido - 1) / 2;
+    ipph = (ip + 1) / 2;
+    /* Unpack the half-complex input layout of cc into ch. */
+    if (ido >= l1) {
+      for (k=0; k<l1; k++) {
+        for (i=0; i<ido; i++) {
+          ch[i + k*ido] = ref(cc,i + k*ip*ido);
+        }
+      }
+    } else {
+      for (i=0; i<ido; i++) {
+        for (k=0; k<l1; k++) {
+          ch[i + k*ido] = ref(cc,i + k*ip*ido);
+        }
+      }
+    }
+    for (j=1; j<ipph; j++) {
+      jc = ip - j;
+      j2 = 2*j;
+      for (k=0; k<l1; k++) {
+        ch[(k + j*l1)*ido] = ref(cc,ido-1 + (j2 - 1 + k*ip)*ido) + ref(cc,ido-1 + (j2 - 1 + k*ip)*
+            ido);
+        ch[(k + jc*l1)*ido] = ref(cc,(j2 + k*ip)*ido) + ref(cc,(j2 + k*ip)*ido);
+      }
+    }
+
+    if (ido != 1) {
+      if (nbd >= l1) {
+        for (j=1; j<ipph; j++) {
+          jc = ip - j;
+          for (k=0; k<l1; k++) {
+            for (i=2; i<ido; i+=2) {
+              ic = ido - i;
+              ch[i - 1 + (k + j*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) + ref(cc,
+                  ic - 1 + (2*j - 1 + k*ip)*ido);
+              ch[i - 1 + (k + jc*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) -
+                  ref(cc,ic - 1 + (2*j - 1 + k*ip)*ido);
+              ch[i + (k + j*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) - ref(cc,ic
+                  + (2*j - 1 + k*ip)*ido);
+              ch[i + (k + jc*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) + ref(cc,ic
+                  + (2*j - 1 + k*ip)*ido);
+            }
+          }
+        }
+      } else {
+        for (j=1; j<ipph; j++) {
+          jc = ip - j;
+          for (i=2; i<ido; i+=2) {
+            ic = ido - i;
+            for (k=0; k<l1; k++) {
+              ch[i - 1 + (k + j*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) + ref(cc,
+                  ic - 1 + (2*j - 1 + k*ip)*ido);
+              ch[i - 1 + (k + jc*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) -
+                  ref(cc,ic - 1 + (2*j - 1 + k*ip)*ido);
+              ch[i + (k + j*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) - ref(cc,ic
+                  + (2*j - 1 + k*ip)*ido);
+              ch[i + (k + jc*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) + ref(cc,ic
+                  + (2*j - 1 + k*ip)*ido);
+            }
+          }
+        }
+      }
+    }
+
+    /* Size-ip inverse DFT of the sub-sequences via recursive rotation. */
+    ar1 = 1;
+    ai1 = 0;
+    for (l=1; l<ipph; l++) {
+      lc = ip - l;
+      ar1h = dcp*ar1 - dsp*ai1;
+      ai1 = dcp*ai1 + dsp*ar1;
+      ar1 = ar1h;
+      for (ik=0; ik<idl1; ik++) {
+        cc[ik + l*idl1] = ch[ik] + ar1*ch[ik + idl1];
+        cc[ik + lc*idl1] = ai1*ch[ik + (ip-1)*idl1];
+      }
+      dc2 = ar1;
+      ds2 = ai1;
+      ar2 = ar1;
+      ai2 = ai1;
+      for (j=2; j<ipph; j++) {
+        jc = ip - j;
+        ar2h = dc2*ar2 - ds2*ai2;
+        ai2 = dc2*ai2 + ds2*ar2;
+        ar2 = ar2h;
+        for (ik=0; ik<idl1; ik++) {
+          cc[ik + l*idl1] += ar2*ch[ik + j*idl1];
+          cc[ik + lc*idl1] += ai2*ch[ik + jc*idl1];
+        }
+      }
+    }
+    for (j=1; j<ipph; j++) {
+      for (ik=0; ik<idl1; ik++) {
+        ch[ik] += ch[ik + j*idl1];
+      }
+    }
+    for (j=1; j<ipph; j++) {
+      jc = ip - j;
+      for (k=0; k<l1; k++) {
+        ch[(k + j*l1)*ido] = cc[(k + j*l1)*ido] - cc[(k + jc*l1)*ido];
+        ch[(k + jc*l1)*ido] = cc[(k + j*l1)*ido] + cc[(k + jc*l1)*ido];
+      }
+    }
+
+    if (ido == 1) return;
+    if (nbd >= l1) {
+      for (j=1; j<ipph; j++) {
+        jc = ip - j;
+        for (k=0; k<l1; k++) {
+          for (i=2; i<ido; i+=2) {
+            ch[i - 1 + (k + j*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] - cc[i + (k + jc*l1)*ido];
+            ch[i - 1 + (k + jc*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] + cc[i + (k + jc*l1)*ido];
+            ch[i + (k + j*l1)*ido] = cc[i + (k + j*l1)*ido] + cc[i - 1 + (k + jc*l1)*ido];
+            ch[i + (k + jc*l1)*ido] = cc[i + (k + j*l1)*ido] - cc[i - 1 + (k + jc*l1)*ido];
+          }
+        }
+      }
+    } else {
+      for (j=1; j<ipph; j++) {
+        jc = ip - j;
+        for (i=2; i<ido; i+=2) {
+          for (k=0; k<l1; k++) {
+            ch[i - 1 + (k + j*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] - cc[i + (k + jc*l1)*ido];
+            ch[i - 1 + (k + jc*l1)*ido] = cc[i - 1 + (k + j *l1)*ido] + cc[i + (k + jc*l1)*ido];
+            ch[i + (k + j*l1)*ido] = cc[i + (k + j*l1)*ido] + cc[i - 1 + (k + jc*l1)*ido];
+            ch[i + (k + jc*l1)*ido] = cc[i + (k + j*l1)*ido] - cc[i - 1 + (k + jc*l1)*ido];
+          }
+        }
+      }
+    }
+    /* Final twiddle application, writing the result into cc. */
+    for (ik=0; ik<idl1; ik++) cc[ik] = ch[ik];
+    for (j=1; j<ip; j++)
+      for (k=0; k<l1; k++)
+        cc[(k + j*l1)*ido] = ch[(k + j*l1)*ido];
+    if (nbd <= l1) {
+      is = -ido;
+      for (j=1; j<ip; j++) {
+        is += ido;
+        idij = is-1;
+        for (i=2; i<ido; i+=2) {
+          idij += 2;
+          for (k=0; k<l1; k++) {
+            cc[i - 1 + (k + j*l1)*ido] = wa[idij - 1]*ch[i - 1 + (k + j*l1)*ido] - wa[idij]*
+                ch[i + (k + j*l1)*ido];
+            cc[i + (k + j*l1)*ido] = wa[idij - 1]*ch[i + (k + j*l1)*ido] + wa[idij]*ch[i - 1 + (k + j*l1)*ido];
+          }
+        }
+      }
+    } else {
+      is = -ido;
+      for (j=1; j<ip; j++) {
+        is += ido;
+        for (k=0; k<l1; k++) {
+          idij = is - 1;
+          for (i=2; i<ido; i+=2) {
+            idij += 2;
+            cc[i - 1 + (k + j*l1)*ido] = wa[idij-1]*ch[i - 1 + (k + j*l1)*ido] - wa[idij]*
+                ch[i + (k + j*l1)*ido];
+            cc[i + (k + j*l1)*ido] = wa[idij-1]*ch[i + (k + j*l1)*ido] + wa[idij]*ch[i - 1 + (k + j*l1)*ido];
+          }
+        }
+      }
+    }
+  } /* radbg */
+
+ /* ----------------------------------------------------------------------
+cfftf1, cfftf, cfftb, cffti1, cffti. Complex FFTs.
+---------------------------------------------------------------------- */
+
+/* cfftf1: complex FFT driver.  Applies one passf* stage per factor in
+   ifac, ping-ponging the data between c and ch; na tracks which buffer
+   currently holds the data, and a final copy moves it back to c if it
+   ended in ch.  isign = -1 forward, +1 backward.  wa is the twiddle
+   table built by cffti1. */
+static void cfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2], int isign)
+  {
+    int idot, i;
+    int k1, l1, l2;
+    int na, nf, ip, iw, ix2, ix3, ix4, nac, ido, idl1;
+    Treal *cinput, *coutput;
+    nf = ifac[1];
+    na = 0;
+    l1 = 1;
+    iw = 0;
+    for (k1=2; k1<=nf+1; k1++) {
+      ip = ifac[k1];
+      l2 = ip*l1;
+      ido = n / l2;
+      idot = ido + ido;
+      idl1 = idot*l1;
+      if (na) {
+        cinput = ch;
+        coutput = c;
+      } else {
+        cinput = c;
+        coutput = ch;
+      }
+      /* Dispatch to the specialised pass for factors 2..5; passf handles
+         any other (prime) factor. */
+      switch (ip) {
+      case 4:
+        ix2 = iw + idot;
+        ix3 = ix2 + idot;
+        passf4(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], isign);
+        na = !na;
+        break;
+      case 2:
+        passf2(idot, l1, cinput, coutput, &wa[iw], isign);
+        na = !na;
+        break;
+      case 3:
+        ix2 = iw + idot;
+        passf3(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], isign);
+        na = !na;
+        break;
+      case 5:
+        ix2 = iw + idot;
+        ix3 = ix2 + idot;
+        ix4 = ix3 + idot;
+        passf5(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], isign);
+        na = !na;
+        break;
+      default:
+        passf(&nac, idot, ip, l1, idl1, cinput, coutput, &wa[iw], isign);
+        if (nac != 0) na = !na;
+      }
+      l1 = l2;
+      iw += (ip - 1)*idot;
+    }
+    if (na == 0) return;
+    for (i=0; i<2*n; i++) c[i] = ch[i];
+  } /* cfftf1 */
+
+
+/* cfftf: forward complex FFT of n complex points stored interleaved in c
+   (2*n reals).  wsave must have been initialised by cffti(n, wsave);
+   its layout here is: [0, 2n) scratch (written!), [2n, 4n) twiddles,
+   and the factorisation table stored as ints starting at wsave+4n.
+   NOTE(review): the (int*) cast relies on Treal storage being valid to
+   reinterpret as int — an FFTPACK-era aliasing assumption. */
+void cfftf(int n, Treal c[], Treal wsave[])
+  {
+    int iw1, iw2;
+    if (n == 1) return;
+    iw1 = 2*n;
+    iw2 = iw1 + 2*n;
+    cfftf1(n, c, wsave, wsave+iw1, (int*)(wsave+iw2), -1);
+  } /* cfftf */
+
+
+/* cfftb: backward (unnormalised inverse) complex FFT of n complex points
+   in c.  Identical to cfftf except isign = +1; wsave has the same layout
+   (first 2n entries are scratch and are written). */
+void cfftb(int n, Treal c[], Treal wsave[])
+  {
+    int iw1, iw2;
+    if (n == 1) return;
+    iw1 = 2*n;
+    iw2 = iw1 + 2*n;
+    cfftf1(n, c, wsave, wsave+iw1, (int*)(wsave+iw2), +1);
+  } /* cfftb */
+
+
+static void factorize(int n, int ifac[MAXFAC+2], const int ntryh[NSPECIAL])
+  /* Factorize n in factors in ntryh and rest. On exit,
+ifac[0] contains n and ifac[1] contains number of factors,
+the factors start from ifac[2]. */
+  {
+    int ntry=3, i, j=0, ib, nf=0, nl=n, nq, nr;
+startloop:
+    /* Try the preferred factors in ntryh first, then odd numbers 7,9,11,...
+       (ntry += 2 after the specials are exhausted). */
+    if (j < NSPECIAL)
+      ntry = ntryh[j];
+    else
+      ntry+= 2;
+    j++;
+    do {
+      nq = nl / ntry;
+      nr = nl - ntry*nq;
+      if (nr != 0) goto startloop;
+      nf++;
+      ifac[nf + 1] = ntry;
+      nl = nq;
+      /* Keep a single factor of 2 at the front of the list — the FFT
+         drivers rely on this ordering. */
+      if (ntry == 2 && nf != 1) {
+        for (i=2; i<=nf; i++) {
+          ib = nf - i + 2;
+          ifac[ib + 1] = ifac[ib];
+        }
+        ifac[2] = 2;
+      }
+    } while (nl != 1);
+    ifac[0] = n;
+    ifac[1] = nf;
+  }
+
+
+/* cffti1: initialise the complex-FFT workspace — factorise n into ifac and
+   fill wa with the twiddle factors cos/sin(2*pi*j*k/n) used by the passf*
+   routines.  ntryh fixes the factor preference order (3,4,2,5). */
+static void cffti1(int n, Treal wa[], int ifac[MAXFAC+2])
+  {
+    static const Treal twopi = 6.28318530717959;
+    Treal arg, argh, argld, fi;
+    int idot, i, j;
+    int i1, k1, l1, l2;
+    int ld, ii, nf, ip;
+    int ido, ipm;
+
+    static const int ntryh[NSPECIAL] = {
+      3,4,2,5    }; /* Do not change the order of these. */
+
+    factorize(n,ifac,ntryh);
+    nf = ifac[1];
+    argh = twopi/(Treal)n;
+    i = 1;
+    l1 = 1;
+    for (k1=1; k1<=nf; k1++) {
+      ip = ifac[k1+1];
+      ld = 0;
+      l2 = l1*ip;
+      ido = n / l2;
+      idot = ido + ido + 2;
+      ipm = ip - 1;
+      for (j=1; j<=ipm; j++) {
+        i1 = i;
+        wa[i-1] = 1;
+        wa[i] = 0;
+        ld += l1;
+        fi = 0;
+        argld = ld*argh;
+        for (ii=4; ii<=idot; ii+=2) {
+          i+= 2;
+          fi+= 1;
+          arg = fi*argld;
+          wa[i-1] = cos(arg);
+          wa[i] = sin(arg);
+        }
+        /* For large factors, passf needs the last twiddle duplicated at
+           the start of each group. */
+        if (ip > 5) {
+          wa[i1-1] = wa[i-1];
+          wa[i1] = wa[i];
+        }
+      }
+      l1 = l2;
+    }
+  } /* cffti1 */
+
+
+/* cffti: public initialiser for the complex FFT.  Fills wsave with the
+   twiddle table (at offset 2n) and the factorisation table (stored as
+   ints at offset 4n); the first 2n entries are left as scratch space for
+   cfftf/cfftb. */
+void cffti(int n, Treal wsave[])
+  {
+    int iw1, iw2;
+    if (n == 1) return;
+    iw1 = 2*n;
+    iw2 = iw1 + 2*n;
+    cffti1(n, wsave+iw1, (int*)(wsave+iw2));
+  } /* cffti */
+
+ /* ----------------------------------------------------------------------
+rfftf1, rfftb1, rfftf, rfftb, rffti1, rffti. Treal FFTs.
+---------------------------------------------------------------------- */
+
+/* rfftf1: real forward-FFT driver.  Walks the factor list in REVERSE
+   order (kh = nf - k1), applying one radf* stage per factor and
+   ping-ponging between c and ch; na tracks the current buffer and a
+   final copy restores the result to c if needed. */
+static void rfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2])
+  {
+    int i;
+    int k1, l1, l2, na, kh, nf, ip, iw, ix2, ix3, ix4, ido, idl1;
+    Treal *cinput, *coutput;
+    nf = ifac[1];
+    na = 1;
+    l2 = n;
+    iw = n-1;
+    for (k1 = 1; k1 <= nf; ++k1) {
+      kh = nf - k1;
+      ip = ifac[kh + 2];
+      l1 = l2 / ip;
+      ido = n / l2;
+      idl1 = ido*l1;
+      iw -= (ip - 1)*ido;
+      na = !na;
+      if (na) {
+        cinput = ch;
+        coutput = c;
+      } else {
+        cinput = c;
+        coutput = ch;
+      }
+      switch (ip) {
+      case 4:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        radf4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]);
+        break;
+      case 2:
+        radf2(ido, l1, cinput, coutput, &wa[iw]);
+        break;
+      case 3:
+        ix2 = iw + ido;
+        radf3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]);
+        break;
+      case 5:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        ix4 = ix3 + ido;
+        radf5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
+        break;
+      default:
+        /* radfg always leaves its result in its first buffer argument, so
+           the buffer bookkeeping differs from the fixed-size passes. */
+        if (ido == 1)
+          na = !na;
+        if (na == 0) {
+          radfg(ido, ip, l1, idl1, c, ch, &wa[iw]);
+          na = 1;
+        } else {
+          radfg(ido, ip, l1, idl1, ch, c, &wa[iw]);
+          na = 0;
+        }
+      }
+      l2 = l1;
+    }
+    if (na == 1) return;
+    for (i = 0; i < n; i++) c[i] = ch[i];
+  } /* rfftf1 */
+
+
+/* rfftb1: real backward-FFT driver, the inverse of rfftf1; walks the
+   factor list in forward order applying one radb* stage per factor.
+   NOTE(review): unlike rfftf1/cfftf1 this helper is not declared static —
+   confirm whether external linkage is intended. */
+void rfftb1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2])
+  {
+    int i;
+    int k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, ido, idl1;
+    Treal *cinput, *coutput;
+    nf = ifac[1];
+    na = 0;
+    l1 = 1;
+    iw = 0;
+    for (k1=1; k1<=nf; k1++) {
+      ip = ifac[k1 + 1];
+      l2 = ip*l1;
+      ido = n / l2;
+      idl1 = ido*l1;
+      if (na) {
+        cinput = ch;
+        coutput = c;
+      } else {
+        cinput = c;
+        coutput = ch;
+      }
+      switch (ip) {
+      case 4:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        radb4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]);
+        na = !na;
+        break;
+      case 2:
+        radb2(ido, l1, cinput, coutput, &wa[iw]);
+        na = !na;
+        break;
+      case 3:
+        ix2 = iw + ido;
+        radb3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]);
+        na = !na;
+        break;
+      case 5:
+        ix2 = iw + ido;
+        ix3 = ix2 + ido;
+        ix4 = ix3 + ido;
+        radb5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
+        na = !na;
+        break;
+      default:
+        radbg(ido, ip, l1, idl1, cinput, coutput, &wa[iw]);
+        /* radbg only toggles buffers when ido == 1 (see radbg). */
+        if (ido == 1) na = !na;
+      }
+      l1 = l2;
+      iw += (ip - 1)*ido;
+    }
+    if (na == 0) return;
+    for (i=0; i<n; i++) c[i] = ch[i];
+  } /* rfftb1 */
+
+
+/* rfftf: forward real FFT of n reals in r.  wsave must have been set up
+   by rffti(n, wsave); layout: [0, n) scratch (written!), [n, 2n)
+   twiddles, factorisation table as ints at wsave+2n. */
+void rfftf(int n, Treal r[], Treal wsave[])
+  {
+    if (n == 1) return;
+    rfftf1(n, r, wsave, wsave+n, (int*)(wsave+2*n));
+  } /* rfftf */
+
+
+/* rfftb: backward (unnormalised inverse) real FFT of n reals in r.
+   Same wsave layout as rfftf; the first n entries are scratch. */
+void rfftb(int n, Treal r[], Treal wsave[])
+  {
+    if (n == 1) return;
+    rfftb1(n, r, wsave, wsave+n, (int*)(wsave+2*n));
+  } /* rfftb */
+
+
+/* rffti1: initialise the real-FFT workspace — factorise n into ifac and
+   fill wa with the twiddle factors for the radf*/radb* passes.  Note the
+   real transform prefers factor order 4,2,3,5 (vs 3,4,2,5 for the
+   complex one), and the last factor needs no twiddles (nfm1 loop). */
+static void rffti1(int n, Treal wa[], int ifac[MAXFAC+2])
+  {
+    static const Treal twopi = 6.28318530717959;
+    Treal arg, argh, argld, fi;
+    int i, j;
+    int k1, l1, l2;
+    int ld, ii, nf, ip, is;
+    int ido, ipm, nfm1;
+    static const int ntryh[NSPECIAL] = {
+      4,2,3,5    }; /* Do not change the order of these. */
+    factorize(n,ifac,ntryh);
+    nf = ifac[1];
+    argh = twopi / n;
+    is = 0;
+    nfm1 = nf - 1;
+    l1 = 1;
+    if (nfm1 == 0) return;
+    for (k1 = 1; k1 <= nfm1; k1++) {
+      ip = ifac[k1 + 1];
+      ld = 0;
+      l2 = l1*ip;
+      ido = n / l2;
+      ipm = ip - 1;
+      for (j = 1; j <= ipm; ++j) {
+        ld += l1;
+        i = is;
+        argld = (Treal) ld*argh;
+        fi = 0;
+        for (ii = 3; ii <= ido; ii += 2) {
+          i += 2;
+          fi += 1;
+          arg = fi*argld;
+          wa[i - 2] = cos(arg);
+          wa[i - 1] = sin(arg);
+        }
+        is += ido;
+      }
+      l1 = l2;
+    }
+  } /* rffti1 */
+
+
+/* rffti: public initialiser for the real FFT.  Fills wsave with the
+   twiddle table (at offset n) and the factorisation table (ints at
+   offset 2n); the first n entries are scratch for rfftf/rfftb. */
+void rffti(int n, Treal wsave[])
+  {
+    if (n == 1) return;
+    rffti1(n, wsave+n, (int*)(wsave+2*n));
+  } /* rffti */
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/numpy/corefft/fftpack.h b/numpy/corefft/fftpack.h
new file mode 100644
index 000000000..d134784a2
--- /dev/null
+++ b/numpy/corefft/fftpack.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of tela the Tensor Language.
+ * Copyright (c) 1994-1995 Pekka Janhunen
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DOUBLE
+
+#ifdef DOUBLE
+#define Treal double
+#else
+#define Treal float
+#endif
+
+extern void cfftf(int N, Treal data[], const Treal wrk[]);
+extern void cfftb(int N, Treal data[], const Treal wrk[]);
+extern void cffti(int N, Treal wrk[]);
+
+extern void rfftf(int N, Treal data[], const Treal wrk[]);
+extern void rfftb(int N, Treal data[], const Treal wrk[]);
+extern void rffti(int N, Treal wrk[]);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/numpy/corefft/fftpack.py b/numpy/corefft/fftpack.py
new file mode 100644
index 000000000..d95c87667
--- /dev/null
+++ b/numpy/corefft/fftpack.py
@@ -0,0 +1,335 @@
+"""
+Discrete Fourier Transforms - FFT.py
+
+The underlying code for these functions is an f2c translated and modified
+version of the FFTPACK routines.
+
+fft(a, n=None, axis=-1)
+inverse_fft(a, n=None, axis=-1)
+real_fft(a, n=None, axis=-1)
+inverse_real_fft(a, n=None, axis=-1)
+hermite_fft(a, n=None, axis=-1)
+inverse_hermite_fft(a, n=None, axis=-1)
+fftnd(a, s=None, axes=None)
+inverse_fftnd(a, s=None, axes=None)
+real_fftnd(a, s=None, axes=None)
+inverse_real_fftnd(a, s=None, axes=None)
+fft2d(a, s=None, axes=(-2,-1))
+inverse_fft2d(a, s=None, axes=(-2, -1))
+real_fft2d(a, s=None, axes=(-2,-1))
+inverse_real_fft2d(a, s=None, axes=(-2, -1))
+"""
+__all__ = ['fft','inverse_fft', 'ifft', 'real_fft', 'refft', 'inverse_real_fft',
+ 'irefft', 'hfft', 'ihfft', 'refftn', 'irefftn', 'refft2', 'irefft2',
+ 'fft2', 'ifft2',
+ 'hermite_fft','inverse_hermite_fft','fftnd','inverse_fftnd','fft2d',
+ 'inverse_fft2d', 'real_fftnd', 'real_fft2d', 'inverse_real_fftnd',
+ 'inverse_real_fft2d',]
+
+from scipy.base import *
+import fftpack_lite as fftpack
+from helper import *
+
+_fft_cache = {}
+_real_fft_cache = {}
+
+def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
+ work_function=fftpack.cfftf, fft_cache = _fft_cache ):
+ a = asarray(a)
+
+ if n == None: n = a.shape[axis]
+
+ try:
+ wsave = fft_cache[n]
+ except(KeyError):
+ wsave = init_function(n)
+ fft_cache[n] = wsave
+
+ if a.shape[axis] != n:
+ s = list(a.shape)
+ if s[axis] > n:
+ index = [slice(None)]*len(s)
+ index[axis] = slice(0,n)
+ a = a[index]
+ else:
+ index = [slice(None)]*len(s)
+ index[axis] = slice(0,s[axis])
+ s[axis] = n
+ z = zeros(s, a.dtypechar)
+ z[index] = a
+ a = z
+
+ if axis != -1:
+ a = swapaxes(a, axis, -1)
+ r = work_function(a, wsave)
+ if axis != -1:
+ r = swapaxes(r, axis, -1)
+ return r
+
+
+def fft(a, n=None, axis=-1):
+ """fft(a, n=None, axis=-1)
+
+ Will return the n point discrete Fourier transform of a. n defaults to the
+ length of a. If n is larger than a, then a will be zero-padded to make up
+ the difference. If n is smaller than a, the first n items in a will be
+ used.
+
+ The packing of the result is "standard": If A = fft(a, n), then A[0]
+ contains the zero-frequency term, A[1:n/2+1] contains the
+ positive-frequency terms, and A[n/2+1:] contains the negative-frequency
+ terms, in order of decreasingly negative frequency. So for an 8-point
+ transform, the frequencies of the result are [ 0, 1, 2, 3, 4, -3, -2, -1].
+
+ This is most efficient for n a power of two. This also stores a cache of
+ working memory for different sizes of fft's, so you could theoretically
+ run into memory problems if you call this too many times with too many
+ different n's."""
+
+ return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
+
+
+def inverse_fft(a, n=None, axis=-1):
+ """inverse_fft(a, n=None, axis=-1)
+
+ Will return the n point inverse discrete Fourier transform of a. n
+ defaults to the length of a. If n is larger than a, then a will be
+ zero-padded to make up the difference. If n is smaller than a, then a will
+ be truncated to reduce its size.
+
+ The input array is expected to be packed the same way as the output of
+    fft, as discussed in its documentation.
+
+ This is the inverse of fft: inverse_fft(fft(a)) == a within numerical
+ accuracy.
+
+ This is most efficient for n a power of two. This also stores a cache of
+ working memory for different sizes of fft's, so you could theoretically
+ run into memory problems if you call this too many times with too many
+ different n's."""
+
+ a = asarray(a).astype(Complex)
+ if n == None:
+ n = shape(a)[axis]
+ return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
+
+
+def real_fft(a, n=None, axis=-1):
+ """real_fft(a, n=None, axis=-1)
+
+ Will return the n point discrete Fourier transform of the real valued
+ array a. n defaults to the length of a. n is the length of the input, not
+ the output.
+
+ The returned array will be the nonnegative frequency terms of the
+ Hermite-symmetric, complex transform of the real array. So for an 8-point
+ transform, the frequencies in the result are [ 0, 1, 2, 3, 4]. The first
+ term will be real, as will the last if n is even. The negative frequency
+ terms are not needed because they are the complex conjugates of the
+ positive frequency terms. (This is what I mean when I say
+ Hermite-symmetric.)
+
+ This is most efficient for n a power of two."""
+
+ a = asarray(a).astype(Float)
+ return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
+
+
+def inverse_real_fft(a, n=None, axis=-1):
+ """inverse_real_fft(a, n=None, axis=-1)
+
+ Will return the real valued n point inverse discrete Fourier transform of
+ a, where a contains the nonnegative frequency terms of a Hermite-symmetric
+ sequence. n is the length of the result, not the input. If n is not
+ supplied, the default is 2*(len(a)-1). If you want the length of the
+ result to be odd, you have to say so.
+
+ If you specify an n such that a must be zero-padded or truncated, the
+ extra/removed values will be added/removed at high frequencies. One can
+ thus resample a series to m points via Fourier interpolation by: a_resamp
+ = inverse_real_fft(real_fft(a), m).
+
+ This is the inverse of real_fft:
+ inverse_real_fft(real_fft(a), len(a)) == a
+ within numerical accuracy."""
+
+ a = asarray(a).astype(Complex)
+ if n == None:
+ n = (shape(a)[axis] - 1) * 2
+ return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
+ _real_fft_cache) / n
+
+
+def hermite_fft(a, n=None, axis=-1):
+ """hermite_fft(a, n=None, axis=-1)
+ inverse_hermite_fft(a, n=None, axis=-1)
+
+ These are a pair analogous to real_fft/inverse_real_fft, but for the
+ opposite case: here the signal is real in the frequency domain and has
+ Hermite symmetry in the time domain. So here it's hermite_fft for which
+ you must supply the length of the result if it is to be odd.
+
+ inverse_hermite_fft(hermite_fft(a), len(a)) == a
+ within numerical accuracy."""
+
+ a = asarray(a).astype(Complex)
+ if n == None:
+ n = (shape(a)[axis] - 1) * 2
+ return inverse_real_fft(conjugate(a), n, axis) * n
+
+
+def inverse_hermite_fft(a, n=None, axis=-1):
+ """hermite_fft(a, n=None, axis=-1)
+ inverse_hermite_fft(a, n=None, axis=-1)
+
+ These are a pair analogous to real_fft/inverse_real_fft, but for the
+ opposite case: here the signal is real in the frequency domain and has
+ Hermite symmetry in the time domain. So here it's hermite_fft for which
+ you must supply the length of the result if it is to be odd.
+
+ inverse_hermite_fft(hermite_fft(a), len(a)) == a
+ within numerical accuracy."""
+
+ a = asarray(a).astype(Float)
+ if n == None:
+ n = shape(a)[axis]
+ return conjugate(real_fft(a, n, axis))/n
+
+
+def _cook_nd_args(a, s=None, axes=None, invreal=0):
+ if s is None:
+ shapeless = 1
+ if axes == None:
+ s = list(a.shape)
+ else:
+ s = take(a.shape, axes)
+ else:
+ shapeless = 0
+ s = list(s)
+ if axes == None:
+ axes = range(-len(s), 0)
+ if len(s) != len(axes):
+ raise ValueError, "Shape and axes have different lengths."
+ if invreal and shapeless:
+ s[axes[-1]] = (s[axes[-1]] - 1) * 2
+ return s, axes
+
+
+def _raw_fftnd(a, s=None, axes=None, function=fft):
+ a = asarray(a)
+ s, axes = _cook_nd_args(a, s, axes)
+ itl = range(len(axes))
+ itl.reverse()
+ for ii in itl:
+ a = function(a, n=s[ii], axis=axes[ii])
+ return a
+
+
+def fftnd(a, s=None, axes=None):
+ """fftnd(a, s=None, axes=None)
+
+ The n-dimensional fft of a. s is a sequence giving the shape of the input
+    and result along the transformed axes, as n for fft. Results are packed
+ analogously to fft: the term for zero frequency in all axes is in the
+ low-order corner, while the term for the Nyquist frequency in all axes is
+ in the middle.
+
+ If neither s nor axes is specified, the transform is taken along all
+ axes. If s is specified and axes is not, the last len(s) axes are used.
+ If axes are specified and s is not, the input shape along the specified
+ axes is used. If s and axes are both specified and are not the same
+ length, an exception is raised."""
+
+ return _raw_fftnd(a,s,axes,fft)
+
+def inverse_fftnd(a, s=None, axes=None):
+ """inverse_fftnd(a, s=None, axes=None)
+
+ The inverse of fftnd."""
+
+ return _raw_fftnd(a, s, axes, inverse_fft)
+
+
+def fft2d(a, s=None, axes=(-2,-1)):
+ """fft2d(a, s=None, axes=(-2,-1))
+
+ The 2d fft of a. This is really just fftnd with different default
+ behavior."""
+
+ return _raw_fftnd(a,s,axes,fft)
+
+
+def inverse_fft2d(a, s=None, axes=(-2,-1)):
+ """inverse_fft2d(a, s=None, axes=(-2, -1))
+
+ The inverse of fft2d. This is really just inverse_fftnd with different
+ default behavior."""
+
+ return _raw_fftnd(a, s, axes, inverse_fft)
+
+
+def real_fftnd(a, s=None, axes=None):
+ """real_fftnd(a, s=None, axes=None)
+
+ The n-dimensional discrete Fourier transform of a real array a. A real
+ transform as real_fft is performed along the axis specified by the last
+ element of axes, then complex transforms as fft are performed along the
+ other axes."""
+
+ a = asarray(a).astype(Float)
+ s, axes = _cook_nd_args(a, s, axes)
+ a = real_fft(a, s[-1], axes[-1])
+ for ii in range(len(axes)-1):
+ a = fft(a, s[ii], axes[ii])
+ return a
+
+
+def real_fft2d(a, s=None, axes=(-2,-1)):
+ """real_fft2d(a, s=None, axes=(-2,-1))
+
+ The 2d fft of the real valued array a. This is really just real_fftnd with
+ different default behavior."""
+
+ return real_fftnd(a, s, axes)
+
+
+def inverse_real_fftnd(a, s=None, axes=None):
+ """inverse_real_fftnd(a, s=None, axes=None)
+
+ The inverse of real_fftnd. The transform implemented in inverse_fft is
+ applied along all axes but the last, then the transform implemented in
+ inverse_real_fft is performed along the last axis. As with
+ inverse_real_fft, the length of the result along that axis must be
+ specified if it is to be odd."""
+
+ a = asarray(a).astype(Complex)
+ s, axes = _cook_nd_args(a, s, axes, invreal=1)
+ for ii in range(len(axes)-1):
+ a = inverse_fft(a, s[ii], axes[ii])
+ a = inverse_real_fft(a, s[-1], axes[-1])
+ return a
+
+
+def inverse_real_fft2d(a, s=None, axes=(-2,-1)):
+ """inverse_real_fft2d(a, s=None, axes=(-2, -1))
+
+ The inverse of real_fft2d. This is really just inverse_real_fftnd with
+ different default behavior."""
+
+ return inverse_real_fftnd(a, s, axes)
+
+ifft = inverse_fft
+refft = real_fft
+irefft = inverse_real_fft
+hfft = hermite_fft
+ihfft = inverse_hermite_fft
+
+fftn = fftnd
+ifftn = inverse_fftnd
+refftn = real_fftnd
+irefftn = inverse_real_fftnd
+
+fft2 = fft2d
+ifft2 = inverse_fft2d
+refft2 = real_fft2d
+irefft2 = inverse_real_fft2d
diff --git a/numpy/corefft/fftpack_litemodule.c b/numpy/corefft/fftpack_litemodule.c
new file mode 100644
index 000000000..692da5e52
--- /dev/null
+++ b/numpy/corefft/fftpack_litemodule.c
@@ -0,0 +1,266 @@
+#include "fftpack.h"
+#include "Python.h"
+#include "scipy/arrayobject.h"
+
+static PyObject *ErrorObject;
+
+/* ----------------------------------------------------- */
+
+static char fftpack_cfftf__doc__[] ="";
+
+PyObject *
+fftpack_cfftf(PyObject *self, PyObject *args)
+{
+ PyObject *op1, *op2;
+ PyArrayObject *data;
+ double *wsave, *dptr;
+ int npts, nsave, nrepeats, i;
+
+ if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+ data = (PyArrayObject *)PyArray_CopyFromObject(op1, PyArray_CDOUBLE, 1, 0);
+ if (data == NULL) return NULL;
+ if (PyArray_As1D(&op2, (char **)&wsave, &nsave, PyArray_DOUBLE) == -1)
+ goto fail;
+ if (data == NULL) goto fail;
+
+ npts = data->dimensions[data->nd-1];
+ if (nsave != npts*4+15) {
+ PyErr_SetString(ErrorObject, "invalid work array for fft size");
+ goto fail;
+ }
+
+ nrepeats = PyArray_SIZE(data)/npts;
+ dptr = (double *)data->data;
+ for (i=0; i<nrepeats; i++) {
+ cfftf(npts, dptr, wsave);
+ dptr += npts*2;
+ }
+ PyArray_Free(op2, (char *)wsave);
+ return (PyObject *)data;
+fail:
+ PyArray_Free(op2, (char *)wsave);
+ Py_DECREF(data);
+ return NULL;
+}
+
+static char fftpack_cfftb__doc__[] ="";
+
+PyObject *
+fftpack_cfftb(PyObject *self, PyObject *args)
+{
+ PyObject *op1, *op2;
+ PyArrayObject *data;
+ double *wsave, *dptr;
+ int npts, nsave, nrepeats, i;
+
+ if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+ data = (PyArrayObject *)PyArray_CopyFromObject(op1, PyArray_CDOUBLE, 1, 0);
+ if (data == NULL) return NULL;
+ if (PyArray_As1D(&op2, (char **)&wsave, &nsave, PyArray_DOUBLE) == -1)
+ goto fail;
+ if (data == NULL) goto fail;
+
+ npts = data->dimensions[data->nd-1];
+ if (nsave != npts*4+15) {
+ PyErr_SetString(ErrorObject, "invalid work array for fft size");
+ goto fail;
+ }
+
+ nrepeats = PyArray_SIZE(data)/npts;
+ dptr = (double *)data->data;
+ for (i=0; i<nrepeats; i++) {
+ cfftb(npts, dptr, wsave);
+ dptr += npts*2;
+ }
+ PyArray_Free(op2, (char *)wsave);
+ return (PyObject *)data;
+fail:
+ PyArray_Free(op2, (char *)wsave);
+ Py_DECREF(data);
+ return NULL;
+}
+
+static char fftpack_cffti__doc__[] ="";
+
+static PyObject *
+fftpack_cffti(PyObject *self, PyObject *args)
+{
+ PyArrayObject *op;
+ int dim, n;
+
+ if (!PyArg_ParseTuple(args, "i", &n)) return NULL;
+
+ dim = 4*n+15; /*Magic size needed by cffti*/
+  /*Create a 1-dimensional double array of length dim*/
+ op = (PyArrayObject *)PyArray_FromDims(1, &dim, PyArray_DOUBLE);
+ if (op == NULL) return NULL;
+
+ cffti(n, (double *)((PyArrayObject*)op)->data);
+
+ return (PyObject *)op;
+}
+
+static char fftpack_rfftf__doc__[] ="";
+
+PyObject *
+fftpack_rfftf(PyObject *self, PyObject *args)
+{
+ PyObject *op1, *op2;
+ PyArrayObject *data, *ret;
+ double *wsave, *dptr, *rptr;
+ int npts, nsave, nrepeats, i, rstep;
+
+ if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+ data = (PyArrayObject *)PyArray_ContiguousFromObject(op1, PyArray_DOUBLE, 1, 0);
+ if (data == NULL) return NULL;
+ npts = data->dimensions[data->nd-1];
+ data->dimensions[data->nd-1] = npts/2+1;
+ ret = (PyArrayObject *)PyArray_SimpleNew(data->nd, data->dimensions,
+ PyArray_CDOUBLE);
+ data->dimensions[data->nd-1] = npts;
+ rstep = (ret->dimensions[ret->nd-1])*2;
+
+ if (PyArray_As1D(&op2, (char **)&wsave, &nsave, PyArray_DOUBLE) == -1)
+ goto fail;
+ if (data == NULL || ret == NULL) goto fail;
+
+ if (nsave != npts*2+15) {
+ PyErr_SetString(ErrorObject, "invalid work array for fft size");
+ goto fail;
+ }
+
+ nrepeats = PyArray_SIZE(data)/npts;
+ rptr = (double *)ret->data;
+ dptr = (double *)data->data;
+
+ for (i=0; i<nrepeats; i++) {
+ memcpy((char *)(rptr+1), dptr, npts*sizeof(double));
+ rfftf(npts, rptr+1, wsave);
+ rptr[0] = rptr[1];
+ rptr[1] = 0.0;
+ rptr += rstep;
+ dptr += npts;
+ }
+ PyArray_Free(op2, (char *)wsave);
+ Py_DECREF(data);
+ return (PyObject *)ret;
+fail:
+ PyArray_Free(op2, (char *)wsave);
+ Py_XDECREF(data);
+ Py_XDECREF(ret);
+ return NULL;
+}
+
+static char fftpack_rfftb__doc__[] ="";
+
+
+PyObject *
+fftpack_rfftb(PyObject *self, PyObject *args)
+{
+ PyObject *op1, *op2;
+ PyArrayObject *data, *ret;
+ double *wsave, *dptr, *rptr;
+ int npts, nsave, nrepeats, i;
+
+ if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL;
+ data = (PyArrayObject *)PyArray_ContiguousFromObject(op1, PyArray_CDOUBLE, 1, 0);
+ if (data == NULL) return NULL;
+ npts = data->dimensions[data->nd-1];
+ ret = (PyArrayObject *)PyArray_SimpleNew(data->nd, data->dimensions,
+ PyArray_DOUBLE);
+
+ if (PyArray_As1D(&op2, (char **)&wsave, &nsave, PyArray_DOUBLE) == -1)
+ goto fail;
+ if (data == NULL || ret == NULL) goto fail;
+
+ if (nsave != npts*2+15) {
+ PyErr_SetString(ErrorObject, "invalid work array for fft size");
+ goto fail;
+ }
+
+ nrepeats = PyArray_SIZE(ret)/npts;
+ rptr = (double *)ret->data;
+ dptr = (double *)data->data;
+
+ for (i=0; i<nrepeats; i++) {
+ memcpy((char *)(rptr+1), (dptr+2), (npts-1)*sizeof(double));
+ rptr[0] = dptr[0];
+ rfftb(npts, rptr, wsave);
+ rptr += npts;
+ dptr += npts*2;
+ }
+ PyArray_Free(op2, (char *)wsave);
+ Py_DECREF(data);
+ return (PyObject *)ret;
+fail:
+ PyArray_Free(op2, (char *)wsave);
+ Py_XDECREF(data);
+ Py_XDECREF(ret);
+ return NULL;
+}
+
+
+static char fftpack_rffti__doc__[] ="";
+
+static PyObject *
+fftpack_rffti(PyObject *self, PyObject *args)
+{
+ PyArrayObject *op;
+ int dim, n;
+
+ if (!PyArg_ParseTuple(args, "i", &n)) return NULL;
+
+ dim = 2*n+15; /*Magic size needed by rffti*/
+  /*Create a 1-dimensional double array of length dim*/
+ op = (PyArrayObject *)PyArray_FromDims(1, &dim, PyArray_DOUBLE);
+ if (op == NULL) return NULL;
+
+ rffti(n, (double *)((PyArrayObject*)op)->data);
+
+ return (PyObject *)op;
+}
+
+
+/* List of methods defined in the module */
+
+static struct PyMethodDef fftpack_methods[] = {
+ {"cfftf", fftpack_cfftf, 1, fftpack_cfftf__doc__},
+ {"cfftb", fftpack_cfftb, 1, fftpack_cfftb__doc__},
+ {"cffti", fftpack_cffti, 1, fftpack_cffti__doc__},
+ {"rfftf", fftpack_rfftf, 1, fftpack_rfftf__doc__},
+ {"rfftb", fftpack_rfftb, 1, fftpack_rfftb__doc__},
+ {"rffti", fftpack_rffti, 1, fftpack_rffti__doc__},
+ {NULL, NULL} /* sentinel */
+};
+
+
+/* Initialization function for the module (*must* be called initfftpack) */
+
+static char fftpack_module_documentation[] =
+""
+;
+
+DL_EXPORT(void)
+initfftpack_lite(void)
+{
+ PyObject *m, *d;
+
+ /* Create the module and add the functions */
+ m = Py_InitModule4("fftpack_lite", fftpack_methods,
+ fftpack_module_documentation,
+ (PyObject*)NULL,PYTHON_API_VERSION);
+
+ /* Import the array object */
+ import_array();
+
+ /* Add some symbolic constants to the module */
+ d = PyModule_GetDict(m);
+ ErrorObject = PyErr_NewException("fftpack.error", NULL, NULL);
+ PyDict_SetItemString(d, "error", ErrorObject);
+
+ /* XXXX Add constants here */
+
+ /* Check for errors */
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module fftpack");
+}
diff --git a/numpy/corefft/helper.py b/numpy/corefft/helper.py
new file mode 100644
index 000000000..36a9c3dec
--- /dev/null
+++ b/numpy/corefft/helper.py
@@ -0,0 +1,67 @@
+"""
+Discrete Fourier Transforms - helper.py
+"""
+# Created by Pearu Peterson, September 2002
+
+__all__ = ['fftshift','ifftshift','fftfreq']
+
+from scipy.base import asarray, concatenate, arange, take, \
+ array, integer
+import types
+
+def fftshift(x,axes=None):
+ """ fftshift(x, axes=None) -> y
+
+ Shift zero-frequency component to center of spectrum.
+
+ This function swaps half-spaces for all axes listed (defaults to all).
+
+ Notes:
+ If len(x) is even then the Nyquist component is y[0].
+ """
+ tmp = asarray(x)
+ ndim = len(tmp.shape)
+ if axes is None:
+ axes = range(ndim)
+ y = tmp
+ for k in axes:
+ n = tmp.shape[k]
+ p2 = (n+1)/2
+ mylist = concatenate((arange(p2,n),arange(p2)))
+ y = take(y,mylist,k)
+ return y
+
+
+def ifftshift(x,axes=None):
+    """ ifftshift(x, axes=None) -> y
+
+ Inverse of fftshift.
+ """
+ tmp = asarray(x)
+ ndim = len(tmp.shape)
+ if axes is None:
+ axes = range(ndim)
+ y = tmp
+ for k in axes:
+ n = tmp.shape[k]
+ p2 = n-(n+1)/2
+ mylist = concatenate((arange(p2,n),arange(p2)))
+ y = take(y,mylist,k)
+ return y
+
+def fftfreq(n,d=1.0):
+ """ fftfreq(n, d=1.0) -> f
+
+ DFT sample frequencies
+
+ The returned float array contains the frequency bins in
+ cycles/unit (with zero at the start) given a window length n and a
+ sample spacing d:
+
+ f = [0,1,...,n/2-1,-n/2,...,-1]/(d*n) if n is even
+ f = [0,1,...,(n-1)/2,-(n-1)/2,...,-1]/(d*n) if n is odd
+ """
+ assert isinstance(n,types.IntType) or isinstance(n, integer)
+ k = range(0,(n-1)/2+1)+range(-(n/2),0)
+ return array(k,'d')/(n*d)
+
diff --git a/numpy/corefft/info.py b/numpy/corefft/info.py
new file mode 100644
index 000000000..62c5c0630
--- /dev/null
+++ b/numpy/corefft/info.py
@@ -0,0 +1,30 @@
+"""\
+Core FFT routines
+==================
+
+ Standard FFTs
+
+ fft
+ ifft
+ fft2
+ ifft2
+ fftn
+ ifftn
+
+ Real FFTs
+
+ refft
+ irefft
+ refft2
+ irefft2
+ refftn
+ irefftn
+
+ Hermite FFTs
+
+ hfft
+ ihfft
+"""
+
+depends = ['base']
+global_symbols = ['fft','ifft']
diff --git a/numpy/corefft/setup.py b/numpy/corefft/setup.py
new file mode 100644
index 000000000..5ff44f37f
--- /dev/null
+++ b/numpy/corefft/setup.py
@@ -0,0 +1,20 @@
+
+from os.path import join
+
+def configuration(parent_package='',top_path=None):
+ from scipy.distutils.misc_util import Configuration
+ config = Configuration('corefft',parent_package,top_path)
+
+ config.add_data_dir('tests')
+
+ # Configure fftpack_lite
+ config.add_extension('fftpack_lite',
+ sources=['fftpack_litemodule.c', 'fftpack.c']
+ )
+
+
+ return config
+
+if __name__ == '__main__':
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/numpy/corefft/tests/test_helper.py b/numpy/corefft/tests/test_helper.py
new file mode 100644
index 000000000..f962096f6
--- /dev/null
+++ b/numpy/corefft/tests/test_helper.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Copied from fftpack.helper by Pearu Peterson, October 2005
+""" Test functions for fftpack.helper module
+"""
+
+import sys
+from scipy.testing import *
+set_package_path()
+from scipy.corefft import fftshift,ifftshift,fftfreq
+del sys.path[0]
+
+from scipy import pi
+
+def random(size):
+ return rand(*size)
+
+class test_fftshift(ScipyTestCase):
+
+ def check_definition(self):
+ x = [0,1,2,3,4,-4,-3,-2,-1]
+ y = [-4,-3,-2,-1,0,1,2,3,4]
+ assert_array_almost_equal(fftshift(x),y)
+ assert_array_almost_equal(ifftshift(y),x)
+ x = [0,1,2,3,4,-5,-4,-3,-2,-1]
+ y = [-5,-4,-3,-2,-1,0,1,2,3,4]
+ assert_array_almost_equal(fftshift(x),y)
+ assert_array_almost_equal(ifftshift(y),x)
+
+ def check_inverse(self):
+ for n in [1,4,9,100,211]:
+ x = random((n,))
+ assert_array_almost_equal(ifftshift(fftshift(x)),x)
+
+class test_fftfreq(ScipyTestCase):
+
+ def check_definition(self):
+ x = [0,1,2,3,4,-4,-3,-2,-1]
+ assert_array_almost_equal(9*fftfreq(9),x)
+ assert_array_almost_equal(9*pi*fftfreq(9,pi),x)
+ x = [0,1,2,3,4,-5,-4,-3,-2,-1]
+ assert_array_almost_equal(10*fftfreq(10),x)
+ assert_array_almost_equal(10*pi*fftfreq(10,pi),x)
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/corelinalg/__init__.py b/numpy/corelinalg/__init__.py
new file mode 100644
index 000000000..561ab3438
--- /dev/null
+++ b/numpy/corelinalg/__init__.py
@@ -0,0 +1,25 @@
+# To get sub-modules
+from info import __doc__
+
+from linalg import *
+
+# re-define duplicated functions if full scipy installed.
+try:
+ import scipy.linalg
+except ImportError:
+ pass
+else:
+ inv = scipy.linalg.inv
+ svd = scipy.linalg.svd
+ solve = scipy.linalg.solve
+ det = scipy.linalg.det
+ eig = scipy.linalg.eig
+ eigvals = scipy.linalg.eigvals
+ lstsq = scipy.linalg.lstsq
+ pinv = scipy.linalg.pinv
+ cholesky = scipy.linalg.cholesky
+
+
+
+from scipy.testing import ScipyTest
+test = ScipyTest().test
diff --git a/numpy/corelinalg/blas_lite.c b/numpy/corelinalg/blas_lite.c
new file mode 100644
index 000000000..38adc2aeb
--- /dev/null
+++ b/numpy/corelinalg/blas_lite.c
@@ -0,0 +1,10659 @@
+/*
+NOTE: This is generated code. Look in Misc/lapack_lite for information on
+ remaking this file.
+*/
+#include "f2c.h"
+
+#ifdef HAVE_CONFIG
+#include "config.h"
+#else
+extern doublereal dlamch_(char *);
+#define EPSILON dlamch_("Epsilon")
+#define SAFEMINIMUM dlamch_("Safe minimum")
+#define PRECISION dlamch_("Precision")
+#define BASE dlamch_("Base")
+#endif
+
+extern doublereal dlapy2_(doublereal *x, doublereal *y);
+
+
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublecomplex c_b359 = {1.,0.};
+
+/* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx,
+ integer *incx, doublereal *dy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+
+
+/*
+ constant times a vector plus a vector.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*da == 0.) {
+ return 0;
+ }
+ if ((*incx == 1 && *incy == 1)) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[iy] += *da * dx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 4;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[i__] += *da * dx[i__];
+/* L30: */
+ }
+ if (*n < 4) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 4) {
+ dy[i__] += *da * dx[i__];
+ dy[i__ + 1] += *da * dx[i__ + 1];
+ dy[i__ + 2] += *da * dx[i__ + 2];
+ dy[i__ + 3] += *da * dx[i__ + 3];
+/* L50: */
+ }
+ return 0;
+} /* daxpy_ */
+
+doublereal dcabs1_(doublecomplex *z__)
+{
+ /* System generated locals */
+ doublereal ret_val;
+ static doublecomplex equiv_0[1];
+
+ /* Local variables */
+#define t ((doublereal *)equiv_0)
+#define zz (equiv_0)
+
+ zz->r = z__->r, zz->i = z__->i;
+ ret_val = abs(t[0]) + abs(t[1]);
+ return ret_val;
+} /* dcabs1_ */
+
+#undef zz
+#undef t
+
+
+/* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx,
+ doublereal *dy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+
+
+/*
+ copies a vector, x, to a vector, y.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if ((*incx == 1 && *incy == 1)) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[iy] = dx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 7;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[i__] = dx[i__];
+/* L30: */
+ }
+ if (*n < 7) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 7) {
+ dy[i__] = dx[i__];
+ dy[i__ + 1] = dx[i__ + 1];
+ dy[i__ + 2] = dx[i__ + 2];
+ dy[i__ + 3] = dx[i__ + 3];
+ dy[i__ + 4] = dx[i__ + 4];
+ dy[i__ + 5] = dx[i__ + 5];
+ dy[i__ + 6] = dx[i__ + 6];
+/* L50: */
+ }
+ return 0;
+} /* dcopy_ */
+
+doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy,
+ integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+ static doublereal dtemp;
+
+
+/*
+ forms the dot product of two vectors.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ ret_val = 0.;
+ dtemp = 0.;
+ if (*n <= 0) {
+ return ret_val;
+ }
+ if ((*incx == 1 && *incy == 1)) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp += dx[ix] * dy[iy];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val = dtemp;
+ return ret_val;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp += dx[i__] * dy[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ goto L60;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 5) {
+ dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[
+ i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ +
+ 4] * dy[i__ + 4];
+/* L50: */
+ }
+L60:
+ ret_val = dtemp;
+ return ret_val;
+} /* ddot_ */
+
+/* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, doublereal *alpha, doublereal *a, integer *lda,
+ doublereal *b, integer *ldb, doublereal *beta, doublereal *c__,
+ integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static logical nota, notb;
+ static doublereal temp;
+ static integer ncola;
+ extern logical lsame_(char *, char *);
+ static integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DGEMM performs one of the matrix-matrix operations
+
+ C := alpha*op( A )*op( B ) + beta*C,
+
+ where op( X ) is one of
+
+ op( X ) = X or op( X ) = X',
+
+ alpha and beta are scalars, and A, B and C are matrices, with op( A )
+ an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n', op( A ) = A.
+
+ TRANSA = 'T' or 't', op( A ) = A'.
+
+ TRANSA = 'C' or 'c', op( A ) = A'.
+
+ Unchanged on exit.
+
+ TRANSB - CHARACTER*1.
+ On entry, TRANSB specifies the form of op( B ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSB = 'N' or 'n', op( B ) = B.
+
+ TRANSB = 'T' or 't', op( B ) = B'.
+
+ TRANSB = 'C' or 'c', op( B ) = B'.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix
+ op( A ) and of the matrix C. M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix
+ op( B ) and the number of columns of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry, K specifies the number of columns of the matrix
+ op( A ) and the number of rows of the matrix op( B ). K must
+ be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANSA = 'N' or 'n', and is m otherwise.
+ Before entry with TRANSA = 'N' or 'n', the leading m by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by m part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANSA = 'N' or 'n' then
+ LDA must be at least max( 1, m ), otherwise LDA must be at
+ least max( 1, k ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
+ n when TRANSB = 'N' or 'n', and is k otherwise.
+ Before entry with TRANSB = 'N' or 'n', the leading k by n
+ part of the array B must contain the matrix B, otherwise
+ the leading n by k part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANSB = 'N' or 'n' then
+ LDB must be at least max( 1, k ), otherwise LDB must be at
+ least max( 1, n ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then C need not be set on input.
+ Unchanged on exit.
+
+ C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
+ Before entry, the leading m by n part of the array C must
+ contain the matrix C, except when beta is zero, in which
+ case C need not be set on entry.
+ On exit, the array C is overwritten by the m by n matrix
+ ( alpha*op( A )*op( B ) + beta*C ).
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Set NOTA and NOTB as true if A and B respectively are not
+ transposed and set NROWA, NCOLA and NROWB as the number of rows
+ and columns of A and the number of rows of B respectively.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (((! nota && ! lsame_(transa, "C")) && ! lsame_(
+ transa, "T"))) {
+ info = 1;
+ } else if (((! notb && ! lsame_(transb, "C")) && !
+ lsame_(transb, "T"))) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("DGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
+ return 0;
+ }
+
+/* And if alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (b[l + j * b_dim1] != 0.) {
+ temp = *alpha * b[l + j * b_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+/* L100: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ } else {
+ if (nota) {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L130: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L140: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (b[j + l * b_dim1] != 0.) {
+ temp = *alpha * b[j + l * b_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * b[j + l * b_dim1];
+/* L180: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DGEMM . */
+
+} /* dgemm_ */
+
+/* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal *
+	alpha, doublereal *a, integer *lda, doublereal *x, integer *incx,
+	doublereal *beta, doublereal *y, integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    static doublereal temp;
+    static integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    Purpose
+    =======
+
+    DGEMV performs one of the matrix-vector operations
+
+       y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y,
+
+    where alpha and beta are scalars, x and y are vectors and A is an
+    m by n matrix.
+
+    Parameters
+    ==========
+
+    TRANS - CHARACTER*1.
+             On entry, TRANS specifies the operation to be performed as
+             follows:
+
+                TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
+
+                TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
+
+                TRANS = 'C' or 'c' y := alpha*A'*x + beta*y.
+
+             Unchanged on exit.
+
+    M - INTEGER.
+             On entry, M specifies the number of rows of the matrix A.
+             M must be at least zero.
+             Unchanged on exit.
+
+    N - INTEGER.
+             On entry, N specifies the number of columns of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA - DOUBLE PRECISION.
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+             Before entry, the leading m by n part of the array A must
+             contain the matrix of coefficients.
+             Unchanged on exit.
+
+    LDA - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, m ).
+             Unchanged on exit.
+
+    X - DOUBLE PRECISION array of DIMENSION at least
+             ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
+             and at least
+             ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
+             Before entry, the incremented array X must contain the
+             vector x.
+             Unchanged on exit.
+
+    INCX - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    BETA - DOUBLE PRECISION.
+             On entry, BETA specifies the scalar beta. When BETA is
+             supplied as zero then Y need not be set on input.
+             Unchanged on exit.
+
+    Y - DOUBLE PRECISION array of DIMENSION at least
+             ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
+             and at least
+             ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
+             Before entry with BETA non-zero, the incremented array Y
+             must contain the vector y. On exit, Y is overwritten by the
+             updated vector y.
+
+    INCY - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift the base pointers so 1-based Fortran indexing
+       (a[i + j*a_dim1], x[i], y[i]) can be kept verbatim. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (((! lsame_(trans, "N") && ! lsame_(trans, "T")) && ! lsame_(trans, "C"))) {
+	info = 1;
+    } else if (*m < 0) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*lda < max(1,*m)) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    } else if (*incy == 0) {
+	info = 11;
+    }
+    if (info != 0) {
+	xerbla_("DGEMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || (*alpha == 0. && *beta == 1.)) {
+	return 0;
+    }
+
+/*
+    Set LENX and LENY, the lengths of the vectors x and y, and set
+    up the start points in X and Y.
+*/
+
+    if (lsame_(trans, "N")) {
+	lenx = *n;
+	leny = *m;
+    } else {
+	lenx = *m;
+	leny = *n;
+    }
+    /* For negative increments the vector is traversed backwards,
+       so the start index is the far end. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (lenx - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (leny - 1) * *incy;
+    }
+
+/*
+    Start the operations. In this version the elements of A are
+    accessed sequentially with one pass through A.
+
+    First form y := beta*y.
+*/
+
+    if (*beta != 1.) {
+	if (*incy == 1) {
+	    if (*beta == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.) {
+	return 0;
+    }
+    if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+	jx = kx;
+	if (*incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.) {
+		    temp = *alpha * x[jx];
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			y[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+		    }
+		}
+		jx += *incx;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.) {
+		    temp = *alpha * x[jx];
+		    iy = ky;
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			y[iy] += temp * a[i__ + j * a_dim1];
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y := alpha*A'*x + y. */
+
+	jy = ky;
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = 0.;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+		}
+		y[jy] += *alpha * temp;
+		jy += *incy;
+/* L100: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = 0.;
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp += a[i__ + j * a_dim1] * x[ix];
+		    ix += *incx;
+/* L110: */
+		}
+		y[jy] += *alpha * temp;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DGEMV . */
+
+} /* dgemv_ */
+
+/* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha,
+	doublereal *x, integer *incx, doublereal *y, integer *incy,
+	doublereal *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j, ix, jy, kx, info;
+    static doublereal temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    Purpose
+    =======
+
+    DGER performs the rank 1 operation
+
+       A := alpha*x*y' + A,
+
+    where alpha is a scalar, x is an m element vector, y is an n element
+    vector and A is an m by n matrix.
+
+    Parameters
+    ==========
+
+    M - INTEGER.
+             On entry, M specifies the number of rows of the matrix A.
+             M must be at least zero.
+             Unchanged on exit.
+
+    N - INTEGER.
+             On entry, N specifies the number of columns of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA - DOUBLE PRECISION.
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    X - DOUBLE PRECISION array of dimension at least
+             ( 1 + ( m - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the m
+             element vector x.
+             Unchanged on exit.
+
+    INCX - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    Y - DOUBLE PRECISION array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y.
+             Unchanged on exit.
+
+    INCY - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+    A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+             Before entry, the leading m by n part of the array A must
+             contain the matrix of coefficients. On exit, A is
+             overwritten by the updated matrix.
+
+    LDA - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, m ).
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so the 1-based Fortran indexing below
+       stays valid. */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (*m < 0) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("DGER ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0.) {
+	return 0;
+    }
+
+/*
+    Start the operations. In this version the elements of A are
+    accessed sequentially with one pass through A.
+*/
+
+    /* Start point of y; for negative INCY traverse from the far end. */
+    if (*incy > 0) {
+	jy = 1;
+    } else {
+	jy = 1 - (*n - 1) * *incy;
+    }
+    if (*incx == 1) {
+	/* Unit x stride: index x directly. */
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (y[jy] != 0.) {
+		temp = *alpha * y[jy];
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+		}
+	    }
+	    jy += *incy;
+/* L20: */
+	}
+    } else {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*m - 1) * *incx;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (y[jy] != 0.) {
+		temp = *alpha * y[jy];
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] += x[ix] * temp;
+		    ix += *incx;
+/* L30: */
+		}
+	    }
+	    jy += *incy;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/* End of DGER . */
+
+} /* dger_ */
+
+doublereal dnrm2_(integer *n, doublereal *x, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val, d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static integer ix;
+    static doublereal ssq, norm, scale, absxi;
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    DNRM2 returns the euclidean norm of a vector via the function
+    name, so that
+
+       DNRM2 := sqrt( x'*x )
+
+
+    -- This version written on 25-October-1982.
+       Modified on 14-October-1993 to inline the call to DLASSQ.
+       Sven Hammarling, Nag Ltd.
+*/
+
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    /* Degenerate sizes (and non-positive INCX) yield a zero norm. */
+    if (*n < 1 || *incx < 1) {
+	norm = 0.;
+    } else if (*n == 1) {
+	norm = abs(x[1]);
+    } else {
+	scale = 0.;
+	ssq = 1.;
+/*
+    The following loop is equivalent to this call to the LAPACK
+    auxiliary routine:
+    CALL DLASSQ( N, X, INCX, SCALE, SSQ )
+*/
+
+	/* Accumulate ssq = sum( (x(i)/scale)^2 ) with scale = max |x(i)|
+	   seen so far; rescaling on a new maximum presumably guards
+	   against overflow/underflow when squaring the elements. */
+	i__1 = (*n - 1) * *incx + 1;
+	i__2 = *incx;
+	for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+	    if (x[ix] != 0.) {
+		absxi = (d__1 = x[ix], abs(d__1));
+		if (scale < absxi) {
+/* Computing 2nd power */
+		    d__1 = scale / absxi;
+		    ssq = ssq * (d__1 * d__1) + 1.;
+		    scale = absxi;
+		} else {
+/* Computing 2nd power */
+		    d__1 = absxi / scale;
+		    ssq += d__1 * d__1;
+		}
+	    }
+/* L10: */
+	}
+	norm = scale * sqrt(ssq);
+    }
+
+    ret_val = norm;
+    return ret_val;
+
+/* End of DNRM2. */
+
+} /* dnrm2_ */
+
+/* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx,
+	doublereal *dy, integer *incy, doublereal *c__, doublereal *s)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    static integer i__, ix, iy;
+    static doublereal dtemp;
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    applies a plane rotation:
+      dx(i) <-  c*dx(i) + s*dy(i)
+      dy(i) <- -s*dx(i) + c*dy(i)
+    jack dongarra, linpack, 3/11/78.
+    modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if ((*incx == 1 && *incy == 1)) {
+	goto L20;
+    }
+
+/*
+    code for unequal increments or equal increments not equal
+    to 1
+*/
+
+    /* Negative increments start from the far end of the vector. */
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp = *c__ * dx[ix] + *s * dy[iy];
+	dy[iy] = *c__ * dy[iy] - *s * dx[ix];
+	dx[ix] = dtemp;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp = *c__ * dx[i__] + *s * dy[i__];
+	dy[i__] = *c__ * dy[i__] - *s * dx[i__];
+	dx[i__] = dtemp;
+/* L30: */
+    }
+    return 0;
+} /* drot_ */
+
+/* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx,
+	integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    static integer i__, m, mp1, nincx;
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    scales a vector by a constant: dx(i) <- da*dx(i).
+    uses unrolled loops for increment equal to one.
+    jack dongarra, linpack, 3/11/78.
+    modified 3/93 to return if incx .le. 0.
+    modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments */
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0 || *incx <= 0) {
+	return 0;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/* code for increment not equal to 1 */
+
+    nincx = *n * *incx;
+    i__1 = nincx;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	dx[i__] = *da * dx[i__];
+/* L10: */
+    }
+    return 0;
+
+/*
+    code for increment equal to 1
+
+
+    clean-up loop
+*/
+
+/* Handle the n mod 5 leftover elements first so the main loop
+   below can be unrolled by 5. */
+L20:
+    m = *n % 5;
+    if (m == 0) {
+	goto L40;
+    }
+    i__2 = m;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+	dx[i__] = *da * dx[i__];
+/* L30: */
+    }
+    if (*n < 5) {
+	return 0;
+    }
+L40:
+    mp1 = m + 1;
+    i__2 = *n;
+    for (i__ = mp1; i__ <= i__2; i__ += 5) {
+	dx[i__] = *da * dx[i__];
+	dx[i__ + 1] = *da * dx[i__ + 1];
+	dx[i__ + 2] = *da * dx[i__ + 2];
+	dx[i__ + 3] = *da * dx[i__ + 3];
+	dx[i__ + 4] = *da * dx[i__ + 4];
+/* L50: */
+    }
+    return 0;
+} /* dscal_ */
+
+/* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx,
+	doublereal *dy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    static integer i__, m, ix, iy, mp1;
+    static doublereal dtemp;
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    interchanges two vectors (dx <-> dy).
+    uses unrolled loops for increments equal one.
+    jack dongarra, linpack, 3/11/78.
+    modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if ((*incx == 1 && *incy == 1)) {
+	goto L20;
+    }
+
+/*
+    code for unequal increments or equal increments not equal
+    to 1
+*/
+
+    /* Negative increments start from the far end of the vector. */
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp = dx[ix];
+	dx[ix] = dy[iy];
+	dy[iy] = dtemp;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/*
+    code for both increments equal to 1
+
+
+    clean-up loop
+*/
+
+/* Handle the n mod 3 leftover elements first so the main loop
+   below can be unrolled by 3. */
+L20:
+    m = *n % 3;
+    if (m == 0) {
+	goto L40;
+    }
+    i__1 = m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp = dx[i__];
+	dx[i__] = dy[i__];
+	dy[i__] = dtemp;
+/* L30: */
+    }
+    if (*n < 3) {
+	return 0;
+    }
+L40:
+    mp1 = m + 1;
+    i__1 = *n;
+    for (i__ = mp1; i__ <= i__1; i__ += 3) {
+	dtemp = dx[i__];
+	dx[i__] = dy[i__];
+	dy[i__] = dtemp;
+	dtemp = dx[i__ + 1];
+	dx[i__ + 1] = dy[i__ + 1];
+	dy[i__ + 1] = dtemp;
+	dtemp = dx[i__ + 2];
+	dx[i__ + 2] = dy[i__ + 2];
+	dy[i__ + 2] = dtemp;
+/* L50: */
+    }
+    return 0;
+} /* dswap_ */
+
+/* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha,
+	doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal
+	*beta, doublereal *y, integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    static doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    Purpose
+    =======
+
+    DSYMV performs the matrix-vector operation
+
+       y := alpha*A*x + beta*y,
+
+    where alpha and beta are scalars, x and y are n element vectors and
+    A is an n by n symmetric matrix.
+
+    Parameters
+    ==========
+
+    UPLO - CHARACTER*1.
+             On entry, UPLO specifies whether the upper or lower
+             triangular part of the array A is to be referenced as
+             follows:
+
+                UPLO = 'U' or 'u' Only the upper triangular part of A
+                                  is to be referenced.
+
+                UPLO = 'L' or 'l' Only the lower triangular part of A
+                                  is to be referenced.
+
+             Unchanged on exit.
+
+    N - INTEGER.
+             On entry, N specifies the order of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA - DOUBLE PRECISION.
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+             Before entry with UPLO = 'U' or 'u', the leading n by n
+             upper triangular part of the array A must contain the upper
+             triangular part of the symmetric matrix and the strictly
+             lower triangular part of A is not referenced.
+             Before entry with UPLO = 'L' or 'l', the leading n by n
+             lower triangular part of the array A must contain the lower
+             triangular part of the symmetric matrix and the strictly
+             upper triangular part of A is not referenced.
+             Unchanged on exit.
+
+    LDA - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, n ).
+             Unchanged on exit.
+
+    X - DOUBLE PRECISION array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the n
+             element vector x.
+             Unchanged on exit.
+
+    INCX - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    BETA - DOUBLE PRECISION.
+             On entry, BETA specifies the scalar beta. When BETA is
+             supplied as zero then Y need not be set on input.
+             Unchanged on exit.
+
+    Y - DOUBLE PRECISION array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y. On exit, Y is overwritten by the updated
+             vector y.
+
+    INCY - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so the 1-based Fortran indexing below
+       stays valid. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*lda < max(1,*n)) {
+	info = 5;
+    } else if (*incx == 0) {
+	info = 7;
+    } else if (*incy == 0) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("DSYMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || (*alpha == 0. && *beta == 1.)) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y. */
+
+    /* Negative increments start from the far end of the vector. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/*
+    Start the operations. In this version the elements of A are
+    accessed sequentially with one pass through the triangular part
+    of A.
+
+    First form y := beta*y.
+*/
+
+    if (*beta != 1.) {
+	if (*incy == 1) {
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.) {
+	return 0;
+    }
+    if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+	/* Per column j: temp1 spreads x[j] into y[1..j-1] (column of A),
+	   temp2 accumulates the symmetric (row) contribution for y[j]. */
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    y[i__] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L50: */
+		}
+		y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.;
+		ix = kx;
+		iy = ky;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    y[iy] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[ix];
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y when A is stored in lower triangle. */
+
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.;
+		y[j] += temp1 * a[j + j * a_dim1];
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    y[i__] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+		}
+		y[j] += *alpha * temp2;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.;
+		y[jy] += temp1 * a[j + j * a_dim1];
+		ix = jx;
+		iy = jy;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    ix += *incx;
+		    iy += *incy;
+		    y[iy] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+		}
+		y[jy] += *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DSYMV . */
+
+} /* dsymv_ */
+
+/* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha,
+	doublereal *x, integer *incx, doublereal *y, integer *incy,
+	doublereal *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    static doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+    /* NOTE(review): the `static` locals above are an artifact of the
+       f2c Fortran-to-C translation; they make this routine
+       non-reentrant (not safe to call from multiple threads). */
+
+
+/*
+    Purpose
+    =======
+
+    DSYR2 performs the symmetric rank 2 operation
+
+       A := alpha*x*y' + alpha*y*x' + A,
+
+    where alpha is a scalar, x and y are n element vectors and A is an n
+    by n symmetric matrix.
+
+    Parameters
+    ==========
+
+    UPLO - CHARACTER*1.
+             On entry, UPLO specifies whether the upper or lower
+             triangular part of the array A is to be referenced as
+             follows:
+
+                UPLO = 'U' or 'u' Only the upper triangular part of A
+                                  is to be referenced.
+
+                UPLO = 'L' or 'l' Only the lower triangular part of A
+                                  is to be referenced.
+
+             Unchanged on exit.
+
+    N - INTEGER.
+             On entry, N specifies the order of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA - DOUBLE PRECISION.
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    X - DOUBLE PRECISION array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the n
+             element vector x.
+             Unchanged on exit.
+
+    INCX - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    Y - DOUBLE PRECISION array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y.
+             Unchanged on exit.
+
+    INCY - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+    A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+             Before entry with UPLO = 'U' or 'u', the leading n by n
+             upper triangular part of the array A must contain the upper
+             triangular part of the symmetric matrix and the strictly
+             lower triangular part of A is not referenced. On exit, the
+             upper triangular part of the array A is overwritten by the
+             upper triangular part of the updated matrix.
+             Before entry with UPLO = 'L' or 'l', the leading n by n
+             lower triangular part of the array A must contain the lower
+             triangular part of the symmetric matrix and the strictly
+             upper triangular part of A is not referenced. On exit, the
+             lower triangular part of the array A is overwritten by the
+             lower triangular part of the updated matrix.
+
+    LDA - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, n ).
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so the 1-based Fortran indexing below
+       stays valid. */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*n)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("DSYR2 ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.) {
+	return 0;
+    }
+
+/*
+    Set up the start points in X and Y if the increments are not both
+    unity.
+*/
+
+    if (*incx != 1 || *incy != 1) {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*n - 1) * *incx;
+	}
+	if (*incy > 0) {
+	    ky = 1;
+	} else {
+	    ky = 1 - (*n - 1) * *incy;
+	}
+	jx = kx;
+	jy = ky;
+    }
+
+/*
+    Start the operations. In this version the elements of A are
+    accessed sequentially with one pass through the triangular part
+    of A.
+*/
+
+    if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+	/* Column j gets x(i)*alpha*y(j) + y(i)*alpha*x(j) for i = 1..j;
+	   columns with x(j) = y(j) = 0 contribute nothing and are
+	   skipped. */
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0. || y[j] != 0.) {
+		    temp1 = *alpha * y[j];
+		    temp2 = *alpha * x[j];
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+				temp1 + y[i__] * temp2;
+/* L10: */
+		    }
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0. || y[jy] != 0.) {
+		    temp1 = *alpha * y[jy];
+		    temp2 = *alpha * x[jx];
+		    ix = kx;
+		    iy = ky;
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+				temp1 + y[iy] * temp2;
+			ix += *incx;
+			iy += *incy;
+/* L30: */
+		    }
+		}
+		jx += *incx;
+		jy += *incy;
+/* L40: */
+	    }
+	}
+    } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0. || y[j] != 0.) {
+		    temp1 = *alpha * y[j];
+		    temp2 = *alpha * x[j];
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+				temp1 + y[i__] * temp2;
+/* L50: */
+		    }
+		}
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0. || y[jy] != 0.) {
+		    temp1 = *alpha * y[jy];
+		    temp2 = *alpha * x[jx];
+		    ix = jx;
+		    iy = jy;
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+				temp1 + y[iy] * temp2;
+			ix += *incx;
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DSYR2 . */
+
+} /* dsyr2_ */
+
+/* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, doublereal *beta, doublereal *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublereal temp1, temp2;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    NOTE(review): f2c-generated translation of the Fortran reference
+    BLAS.  The `static` storage class on the local variables above is
+    an f2c artifact and makes this routine non-reentrant (not
+    thread-safe).
+*/
+
+/*
+ Purpose
+ =======
+
+ DSYR2K performs one of the symmetric rank 2k operations
+
+ C := alpha*A*B' + alpha*B*A' + beta*C,
+
+ or
+
+ C := alpha*A'*B + alpha*B'*A + beta*C,
+
+ where alpha and beta are scalars, C is an n by n symmetric matrix
+ and A and B are n by k matrices in the first case and k by n
+ matrices in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' +
+ beta*C.
+
+ TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A +
+ beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A +
+ beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrices A and B, and on entry with
+ TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
+ of rows of the matrices A and B. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array B must contain the matrix B, otherwise
+ the leading k by n part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDB must be at least max( 1, n ), otherwise LDB must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+/*
+    Offset the array pointers so that Fortran-style 1-based,
+    column-major indexing (e.g. c__[i + j * c_dim1]) can be used
+    throughout the body.
+*/
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if ((! upper && ! lsame_(uplo, "L"))) {
+ info = 1;
+ } else if (((! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) && ! lsame_(trans, "C"))) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("DSYR2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*B' + alpha*B*A' + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*B + alpha*B'*A + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1 = 0.;
+ temp2 = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1 = 0.;
+ temp2 = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYR2K. */
+
+} /* dsyr2k_ */
+
+/* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublereal *a, integer *lda, doublereal *beta,
+ doublereal *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublereal temp;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    NOTE(review): f2c-generated translation of the Fortran reference
+    BLAS.  The `static` storage class on the local variables above is
+    an f2c artifact and makes this routine non-reentrant (not
+    thread-safe).
+*/
+
+/*
+ Purpose
+ =======
+
+ DSYRK performs one of the symmetric rank k operations
+
+ C := alpha*A*A' + beta*C,
+
+ or
+
+ C := alpha*A'*A + beta*C,
+
+ where alpha and beta are scalars, C is an n by n symmetric matrix
+ and A is an n by k matrix in the first case and a k by n matrix
+ in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*A' + beta*C.
+
+ TRANS = 'T' or 't' C := alpha*A'*A + beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*A'*A + beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrix A, and on entry with
+ TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
+ of rows of the matrix A. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+/*
+    Offset the array pointers so that Fortran-style 1-based,
+    column-major indexing (e.g. c__[i + j * c_dim1]) can be used
+    throughout the body.
+*/
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if ((! upper && ! lsame_(uplo, "L"))) {
+ info = 1;
+ } else if (((! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) && ! lsame_(trans, "C"))) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("DSYRK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYRK . */
+
+} /* dsyrk_ */
+
+/* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
+ lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublereal temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+    NOTE(review): f2c-generated translation of the Fortran reference
+    BLAS.  The `static` storage class on the local variables above is
+    an f2c artifact and makes this routine non-reentrant (not
+    thread-safe).
+*/
+
+/*
+ Purpose
+ =======
+
+ DTRMM performs one of the matrix-matrix operations
+
+ B := alpha*op( A )*B, or B := alpha*B*op( A ),
+
+ where alpha is a scalar, B is an m by n matrix, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A'.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) multiplies B from
+ the left or right as follows:
+
+ SIDE = 'L' or 'l' B := alpha*op( A )*B.
+
+ SIDE = 'R' or 'r' B := alpha*B*op( A ).
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = A'.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the matrix B, and on exit is overwritten by the
+ transformed matrix.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+/*
+    Offset the array pointers so that Fortran-style 1-based,
+    column-major indexing (e.g. b[i + j * b_dim1]) can be used
+    throughout the body.
+*/
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if ((! lside && ! lsame_(side, "R"))) {
+ info = 1;
+ } else if ((! upper && ! lsame_(uplo, "L"))) {
+ info = 2;
+ } else if (((! lsame_(transa, "N") && ! lsame_(
+ transa, "T")) && ! lsame_(transa, "C"))) {
+ info = 3;
+ } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+ "N"))) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("DTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.) {
+ temp = *alpha * b[k + j * b_dim1];
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L30: */
+ }
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ b[k + j * b_dim1] = temp;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.) {
+ temp = *alpha * b[k + j * b_dim1];
+ b[k + j * b_dim1] = temp;
+ if (nounit) {
+ b[k + j * b_dim1] *= a[k + k * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L90: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L100: */
+ }
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L120: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L130: */
+ }
+/* L140: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L150: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L190: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+/* L220: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRMM . */
+
+} /* dtrmm_ */
+
+/* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublereal *a, integer *lda, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+    NOTE(review): f2c-generated translation of the Fortran reference
+    BLAS.  The `static` storage class on the local variables above is
+    an f2c artifact and makes this routine non-reentrant (not
+    thread-safe).
+*/
+
+/*
+ Purpose
+ =======
+
+ DTRMV performs one of the matrix-vector operations
+
+ x := A*x, or x := A'*x,
+
+ where x is an n element vector and A is an n by n unit, or non-unit,
+ upper or lower triangular matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' x := A*x.
+
+ TRANS = 'T' or 't' x := A'*x.
+
+ TRANS = 'C' or 'c' x := A'*x.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x. On exit, X is overwritten with the
+ tranformed vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+/*
+    Offset the array pointers so that Fortran-style 1-based,
+    column-major indexing (e.g. a[i + j * a_dim1], x[j]) can be used
+    throughout the body.
+*/
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+ info = 1;
+ } else if (((! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) && ! lsame_(trans, "C"))) {
+ info = 2;
+ } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+ "N"))) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("DTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRMV . */
+
+} /* dtrmv_ */
+
+/* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag,
+	integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
+	lda, doublereal *b, integer *ldb)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__, j, k, info;
+    static doublereal temp;
+    static logical lside;
+    extern logical lsame_(char *, char *);
+    static integer nrowa;
+    static logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static logical nounit;
+
+
+/*
+    Purpose
+    =======
+
+    DTRSM solves one of the matrix equations
+
+       op( A )*X = alpha*B,   or   X*op( A ) = alpha*B,
+
+    where alpha is a scalar, X and B are m by n matrices, A is a unit, or
+    non-unit, upper or lower triangular matrix and op( A ) is one of
+
+       op( A ) = A   or   op( A ) = A'.
+
+    The matrix X is overwritten on B.
+
+    Parameters
+    ==========
+
+    SIDE - CHARACTER*1.
+             On entry, SIDE specifies whether op( A ) appears on the left
+             or right of X as follows:
+
+                SIDE = 'L' or 'l'   op( A )*X = alpha*B.
+
+                SIDE = 'R' or 'r'   X*op( A ) = alpha*B.
+
+             Unchanged on exit.
+
+    UPLO - CHARACTER*1.
+             On entry, UPLO specifies whether the matrix A is an upper or
+             lower triangular matrix as follows:
+
+                UPLO = 'U' or 'u'   A is an upper triangular matrix.
+
+                UPLO = 'L' or 'l'   A is a lower triangular matrix.
+
+             Unchanged on exit.
+
+    TRANSA - CHARACTER*1.
+             On entry, TRANSA specifies the form of op( A ) to be used in
+             the matrix multiplication as follows:
+
+                TRANSA = 'N' or 'n'   op( A ) = A.
+
+                TRANSA = 'T' or 't'   op( A ) = A'.
+
+                TRANSA = 'C' or 'c'   op( A ) = A'.
+
+             Unchanged on exit.
+
+    DIAG - CHARACTER*1.
+             On entry, DIAG specifies whether or not A is unit triangular
+             as follows:
+
+                DIAG = 'U' or 'u'   A is assumed to be unit triangular.
+
+                DIAG = 'N' or 'n'   A is not assumed to be unit
+                                    triangular.
+
+             Unchanged on exit.
+
+    M - INTEGER.
+             On entry, M specifies the number of rows of B. M must be at
+             least zero.
+             Unchanged on exit.
+
+    N - INTEGER.
+             On entry, N specifies the number of columns of B.  N must be
+             at least zero.
+             Unchanged on exit.
+
+    ALPHA - DOUBLE PRECISION.
+             On entry,  ALPHA specifies the scalar  alpha. When  alpha is
+             zero then  A is not referenced and  B need not be set before
+             entry.
+             Unchanged on exit.
+
+    A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m
+             when  SIDE = 'L' or 'l'  and is  n  when  SIDE = 'R' or 'r'.
+             Before entry  with  UPLO = 'U' or 'u',  the  leading  k by k
+             upper triangular part of the array  A must contain the upper
+             triangular matrix  and the strictly lower triangular part of
+             A is not referenced.
+             Before entry  with  UPLO = 'L' or 'l',  the  leading  k by k
+             lower triangular part of the array  A must contain the lower
+             triangular matrix  and the strictly upper triangular part of
+             A is not referenced.
+             Note that when  DIAG = 'U' or 'u',  the diagonal elements of
+             A  are not referenced either,  but are assumed to be  unity.
+             Unchanged on exit.
+
+    LDA - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program.  When  SIDE = 'L' or 'l'  then
+             LDA  must be at least  max( 1, m ),  when  SIDE = 'R' or 'r'
+             then LDA must be at least max( 1, n ).
+             Unchanged on exit.
+
+    B - DOUBLE PRECISION array of DIMENSION ( LDB, n ).
+             Before entry,  the leading  m by n part of the array  B must
+             contain  the  right-hand  side  matrix  B,  and  on exit  is
+             overwritten by the solution matrix  X.
+
+    LDB - INTEGER.
+             On entry, LDB specifies the first dimension of B as declared
+             in  the  calling  (sub)  program.   LDB  must  be  at  least
+             max( 1, m ).
+             Unchanged on exit.
+
+
+    Level 3 Blas routine.
+
+
+    -- Written on 8-February-1989.
+       Jack Dongarra, Argonne National Laboratory.
+       Iain Duff, AERE Harwell.
+       Jeremy Du Croz, Numerical Algorithms Group Ltd.
+       Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+    Test the input parameters.
+*/
+
+/*
+   NOTE(review): f2c-generated translation of the Fortran DTRSM.  The
+   static local variables follow the f2c convention and make this
+   routine non-reentrant / not thread-safe.
+*/
+
+    /* Parameter adjustments: shift a and b so that 1-based,
+       column-major subscripts a[i + j*a_dim1] / b[i + j*b_dim1]
+       address the Fortran elements A(i,j) / B(i,j). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+
+    /* Function Body */
+    lside = lsame_(side, "L");
+    if (lside) {
+	nrowa = *m;
+    } else {
+	nrowa = *n;
+    }
+    nounit = lsame_(diag, "N");
+    upper = lsame_(uplo, "U");
+
+    info = 0;
+    if ((! lside && ! lsame_(side, "R"))) {
+	info = 1;
+    } else if ((! upper && ! lsame_(uplo, "L"))) {
+	info = 2;
+    } else if (((! lsame_(transa, "N") && ! lsame_(
+	    transa, "T")) && ! lsame_(transa, "C"))) {
+	info = 3;
+    } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+	    "N"))) {
+	info = 4;
+    } else if (*m < 0) {
+	info = 5;
+    } else if (*n < 0) {
+	info = 6;
+    } else if (*lda < max(1,nrowa)) {
+	info = 9;
+    } else if (*ldb < max(1,*m)) {
+	info = 11;
+    }
+    if (info != 0) {
+	xerbla_("DTRSM ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+/* (Only N is tested here; when M == 0 every inner loop below is empty.) */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero: the solution is simply B = 0. */
+
+    if (*alpha == 0.) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+    if (lside) {
+	if (lsame_(transa, "N")) {
+
+/*           Form  B := alpha*inv( A )*B. */
+
+	    if (upper) {
+		/* Upper triangular: back substitution, k from m down to 1. */
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    if (*alpha != 1.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+				    ;
+/* L30: */
+			}
+		    }
+		    for (k = *m; k >= 1; --k) {
+			if (b[k + j * b_dim1] != 0.) {
+			    if (nounit) {
+				b[k + j * b_dim1] /= a[k + k * a_dim1];
+			    }
+			    i__2 = k - 1;
+			    for (i__ = 1; i__ <= i__2; ++i__) {
+				b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+					i__ + k * a_dim1];
+/* L40: */
+			    }
+			}
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		/* Lower triangular: forward substitution, k from 1 up to m. */
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    if (*alpha != 1.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+				    ;
+/* L70: */
+			}
+		    }
+		    i__2 = *m;
+		    for (k = 1; k <= i__2; ++k) {
+			if (b[k + j * b_dim1] != 0.) {
+			    if (nounit) {
+				b[k + j * b_dim1] /= a[k + k * a_dim1];
+			    }
+			    i__3 = *m;
+			    for (i__ = k + 1; i__ <= i__3; ++i__) {
+				b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+					i__ + k * a_dim1];
+/* L80: */
+			    }
+			}
+/* L90: */
+		    }
+/* L100: */
+		}
+	    }
+	} else {
+
+/*           Form  B := alpha*inv( A' )*B. */
+
+	    if (upper) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			temp = *alpha * b[i__ + j * b_dim1];
+			i__3 = i__ - 1;
+			for (k = 1; k <= i__3; ++k) {
+			    temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L110: */
+			}
+			if (nounit) {
+			    temp /= a[i__ + i__ * a_dim1];
+			}
+			b[i__ + j * b_dim1] = temp;
+/* L120: */
+		    }
+/* L130: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    for (i__ = *m; i__ >= 1; --i__) {
+			temp = *alpha * b[i__ + j * b_dim1];
+			i__2 = *m;
+			for (k = i__ + 1; k <= i__2; ++k) {
+			    temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L140: */
+			}
+			if (nounit) {
+			    temp /= a[i__ + i__ * a_dim1];
+			}
+			b[i__ + j * b_dim1] = temp;
+/* L150: */
+		    }
+/* L160: */
+		}
+	    }
+	}
+    } else {
+	if (lsame_(transa, "N")) {
+
+/*           Form  B := alpha*B*inv( A ). */
+
+	    if (upper) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    if (*alpha != 1.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+				    ;
+/* L170: */
+			}
+		    }
+		    i__2 = j - 1;
+		    for (k = 1; k <= i__2; ++k) {
+			if (a[k + j * a_dim1] != 0.) {
+			    i__3 = *m;
+			    for (i__ = 1; i__ <= i__3; ++i__) {
+				b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+					i__ + k * b_dim1];
+/* L180: */
+			    }
+			}
+/* L190: */
+		    }
+		    if (nounit) {
+			/* Divide column j once by the diagonal entry. */
+			temp = 1. / a[j + j * a_dim1];
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L200: */
+			}
+		    }
+/* L210: */
+		}
+	    } else {
+		for (j = *n; j >= 1; --j) {
+		    if (*alpha != 1.) {
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+				    ;
+/* L220: */
+			}
+		    }
+		    i__1 = *n;
+		    for (k = j + 1; k <= i__1; ++k) {
+			if (a[k + j * a_dim1] != 0.) {
+			    i__2 = *m;
+			    for (i__ = 1; i__ <= i__2; ++i__) {
+				b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+					i__ + k * b_dim1];
+/* L230: */
+			    }
+			}
+/* L240: */
+		    }
+		    if (nounit) {
+			temp = 1. / a[j + j * a_dim1];
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L250: */
+			}
+		    }
+/* L260: */
+		}
+	    }
+	} else {
+
+/*           Form  B := alpha*B*inv( A' ). */
+
+	    if (upper) {
+		for (k = *n; k >= 1; --k) {
+		    if (nounit) {
+			temp = 1. / a[k + k * a_dim1];
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L270: */
+			}
+		    }
+		    i__1 = k - 1;
+		    for (j = 1; j <= i__1; ++j) {
+			if (a[j + k * a_dim1] != 0.) {
+			    temp = a[j + k * a_dim1];
+			    i__2 = *m;
+			    for (i__ = 1; i__ <= i__2; ++i__) {
+				b[i__ + j * b_dim1] -= temp * b[i__ + k *
+					b_dim1];
+/* L280: */
+			    }
+			}
+/* L290: */
+		    }
+		    if (*alpha != 1.) {
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+				    ;
+/* L300: */
+			}
+		    }
+/* L310: */
+		}
+	    } else {
+		i__1 = *n;
+		for (k = 1; k <= i__1; ++k) {
+		    if (nounit) {
+			temp = 1. / a[k + k * a_dim1];
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L320: */
+			}
+		    }
+		    i__2 = *n;
+		    for (j = k + 1; j <= i__2; ++j) {
+			if (a[j + k * a_dim1] != 0.) {
+			    temp = a[j + k * a_dim1];
+			    i__3 = *m;
+			    for (i__ = 1; i__ <= i__3; ++i__) {
+				b[i__ + j * b_dim1] -= temp * b[i__ + k *
+					b_dim1];
+/* L330: */
+			    }
+			}
+/* L340: */
+		    }
+		    if (*alpha != 1.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+				    ;
+/* L350: */
+			}
+		    }
+/* L360: */
+		}
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of DTRSM . */
+
+} /* dtrsm_ */
+
+/*
+   DZASUM: returns the sum of dcabs1_() over n elements of the complex
+   vector zx, stepping by incx.  (dcabs1_ is defined elsewhere in this
+   file; presumably |Re| + |Im| per the BLAS convention -- confirm.)
+   Returns 0 when n <= 0 or incx <= 0.  Non-reentrant (static locals).
+*/
+doublereal dzasum_(integer *n, doublecomplex *zx, integer *incx)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal ret_val;
+
+    /* Local variables */
+    static integer i__, ix;
+    static doublereal stemp;
+    extern doublereal dcabs1_(doublecomplex *);
+
+
+/*
+       takes the sum of the absolute values.
+       jack dongarra, 3/11/78.
+       modified 3/93 to return if incx .le. 0.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments: make zx 1-based as in Fortran. */
+    --zx;
+
+    /* Function Body */
+    ret_val = 0.;
+    stemp = 0.;
+    if (*n <= 0 || *incx <= 0) {
+	return ret_val;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/*        code for increment not equal to 1 */
+
+    ix = 1;
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	stemp += dcabs1_(&zx[ix]);
+	ix += *incx;
+/* L10: */
+    }
+    ret_val = stemp;
+    return ret_val;
+
+/*        code for increment equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	stemp += dcabs1_(&zx[i__]);
+/* L30: */
+    }
+    ret_val = stemp;
+    return ret_val;
+} /* dzasum_ */
+
+/*
+   DZNRM2: Euclidean norm of a complex vector, computed with the
+   scale/ssq formulation (norm = scale * sqrt(ssq)) to avoid overflow
+   and underflow in the intermediate squares.  Returns 0 for n < 1 or
+   incx < 1.  Non-reentrant (static locals).
+*/
+doublereal dznrm2_(integer *n, doublecomplex *x, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublereal ret_val, d__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    static integer ix;
+    static doublereal ssq, temp, norm, scale;
+
+
+/*
+    DZNRM2 returns the euclidean norm of a vector via the function
+    name, so that
+
+       DZNRM2 := sqrt( conjg( x' )*x )
+
+
+    -- This version written on 25-October-1982.
+       Modified on 14-October-1993 to inline the call to ZLASSQ.
+       Sven Hammarling, Nag Ltd.
+*/
+
+
+    /* Parameter adjustments: make x 1-based as in Fortran. */
+    --x;
+
+    /* Function Body */
+    if (*n < 1 || *incx < 1) {
+	norm = 0.;
+    } else {
+	scale = 0.;
+	ssq = 1.;
+/*
+          The following loop is equivalent to this call to the LAPACK
+          auxiliary routine:
+          CALL ZLASSQ( N, X, INCX, SCALE, SSQ )
+*/
+
+	i__1 = (*n - 1) * *incx + 1;
+	i__2 = *incx;
+	for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+	    /* Fold in the real part, rescaling when it exceeds 'scale'. */
+	    i__3 = ix;
+	    if (x[i__3].r != 0.) {
+		i__3 = ix;
+		temp = (d__1 = x[i__3].r, abs(d__1));
+		if (scale < temp) {
+/* Computing 2nd power */
+		    d__1 = scale / temp;
+		    ssq = ssq * (d__1 * d__1) + 1.;
+		    scale = temp;
+		} else {
+/* Computing 2nd power */
+		    d__1 = temp / scale;
+		    ssq += d__1 * d__1;
+		}
+	    }
+	    /* Fold in the imaginary part the same way. */
+	    if (d_imag(&x[ix]) != 0.) {
+		temp = (d__1 = d_imag(&x[ix]), abs(d__1));
+		if (scale < temp) {
+/* Computing 2nd power */
+		    d__1 = scale / temp;
+		    ssq = ssq * (d__1 * d__1) + 1.;
+		    scale = temp;
+		} else {
+/* Computing 2nd power */
+		    d__1 = temp / scale;
+		    ssq += d__1 * d__1;
+		}
+	    }
+/* L10: */
+	}
+	norm = scale * sqrt(ssq);
+    }
+
+    ret_val = norm;
+    return ret_val;
+
+/*     End of DZNRM2. */
+
+} /* dznrm2_ */
+
+/*
+   IDAMAX: returns the 1-based index of the FIRST element of dx with the
+   largest absolute value (strict '<=' comparison keeps the earliest on
+   ties).  Returns 0 when n < 1 or incx <= 0.  Non-reentrant (static
+   locals).
+*/
+integer idamax_(integer *n, doublereal *dx, integer *incx)
+{
+    /* System generated locals */
+    integer ret_val, i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    static integer i__, ix;
+    static doublereal dmax__;
+
+
+/*
+       finds the index of element having max. absolute value.
+       jack dongarra, linpack, 3/11/78.
+       modified 3/93 to return if incx .le. 0.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments: make dx 1-based as in Fortran. */
+    --dx;
+
+    /* Function Body */
+    ret_val = 0;
+    if (*n < 1 || *incx <= 0) {
+	return ret_val;
+    }
+    ret_val = 1;
+    if (*n == 1) {
+	return ret_val;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/*        code for increment not equal to 1 */
+
+    ix = 1;
+    dmax__ = abs(dx[1]);
+    ix += *incx;
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	if ((d__1 = dx[ix], abs(d__1)) <= dmax__) {
+	    goto L5;
+	}
+	ret_val = i__;
+	dmax__ = (d__1 = dx[ix], abs(d__1));
+L5:
+	ix += *incx;
+/* L10: */
+    }
+    return ret_val;
+
+/*        code for increment equal to 1 */
+
+L20:
+    dmax__ = abs(dx[1]);
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	if ((d__1 = dx[i__], abs(d__1)) <= dmax__) {
+	    goto L30;
+	}
+	ret_val = i__;
+	dmax__ = (d__1 = dx[i__], abs(d__1));
+L30:
+	;
+    }
+    return ret_val;
+} /* idamax_ */
+
+/*
+   IZAMAX: complex analogue of IDAMAX.  Returns the 1-based index of the
+   first element of zx maximizing dcabs1_() (defined elsewhere in this
+   file; presumably |Re| + |Im| -- confirm).  Returns 0 when n < 1 or
+   incx <= 0.  Non-reentrant (static locals).
+*/
+integer izamax_(integer *n, doublecomplex *zx, integer *incx)
+{
+    /* System generated locals */
+    integer ret_val, i__1;
+
+    /* Local variables */
+    static integer i__, ix;
+    static doublereal smax;
+    extern doublereal dcabs1_(doublecomplex *);
+
+
+/*
+       finds the index of element having max. absolute value.
+       jack dongarra, 1/15/85.
+       modified 3/93 to return if incx .le. 0.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments: make zx 1-based as in Fortran. */
+    --zx;
+
+    /* Function Body */
+    ret_val = 0;
+    if (*n < 1 || *incx <= 0) {
+	return ret_val;
+    }
+    ret_val = 1;
+    if (*n == 1) {
+	return ret_val;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/*        code for increment not equal to 1 */
+
+    ix = 1;
+    smax = dcabs1_(&zx[1]);
+    ix += *incx;
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	if (dcabs1_(&zx[ix]) <= smax) {
+	    goto L5;
+	}
+	ret_val = i__;
+	smax = dcabs1_(&zx[ix]);
+L5:
+	ix += *incx;
+/* L10: */
+    }
+    return ret_val;
+
+/*        code for increment equal to 1 */
+
+L20:
+    smax = dcabs1_(&zx[1]);
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	if (dcabs1_(&zx[i__]) <= smax) {
+	    goto L30;
+	}
+	ret_val = i__;
+	smax = dcabs1_(&zx[i__]);
+L30:
+	;
+    }
+    return ret_val;
+} /* izamax_ */
+
+/*
+   LSAME: case-insensitive comparison of two single characters, ported
+   from Fortran.  The ZCODE probe distinguishes ASCII, EBCDIC, and
+   Prime-style ASCII+128 encodings at run time; on plain ASCII only the
+   first branch (zcode == 90) is taken.  Non-reentrant (static locals).
+*/
+logical lsame_(char *ca, char *cb)
+{
+    /* System generated locals */
+    logical ret_val;
+
+    /* Local variables */
+    static integer inta, intb, zcode;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    LSAME returns .TRUE. if CA is the same letter as CB regardless of
+    case.
+
+    Arguments
+    =========
+
+    CA      (input) CHARACTER*1
+    CB      (input) CHARACTER*1
+            CA and CB specify the single characters to be compared.
+
+   =====================================================================
+
+
+       Test if the characters are equal
+*/
+
+    ret_val = *(unsigned char *)ca == *(unsigned char *)cb;
+    if (ret_val) {
+	return ret_val;
+    }
+
+/*     Now test for equivalence if both characters are alphabetic. */
+
+    zcode = 'Z';
+
+/*
+       Use 'Z' rather than 'A' so that ASCII can be detected on Prime
+       machines, on which ICHAR returns a value with bit 8 set.
+       ICHAR('A') on Prime machines returns 193 which is the same as
+       ICHAR('A') on an EBCDIC machine.
+*/
+
+    inta = *(unsigned char *)ca;
+    intb = *(unsigned char *)cb;
+
+    if (zcode == 90 || zcode == 122) {
+
+/*
+          ASCII is assumed - ZCODE is the ASCII code of either lower or
+          upper case 'Z'.
+*/
+
+	/* Fold lowercase a-z to uppercase by subtracting 32. */
+	if ((inta >= 97 && inta <= 122)) {
+	    inta += -32;
+	}
+	if ((intb >= 97 && intb <= 122)) {
+	    intb += -32;
+	}
+
+    } else if (zcode == 233 || zcode == 169) {
+
+/*
+          EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or
+          upper case 'Z'.
+*/
+
+	/* EBCDIC letters come in three non-contiguous ranges. */
+	if ((inta >= 129 && inta <= 137) || (inta >= 145 && inta <= 153) || (
+		inta >= 162 && inta <= 169)) {
+	    inta += 64;
+	}
+	if ((intb >= 129 && intb <= 137) || (intb >= 145 && intb <= 153) || (
+		intb >= 162 && intb <= 169)) {
+	    intb += 64;
+	}
+
+    } else if (zcode == 218 || zcode == 250) {
+
+/*
+          ASCII is assumed, on Prime machines - ZCODE is the ASCII code
+          plus 128 of either lower or upper case 'Z'.
+*/
+
+	if ((inta >= 225 && inta <= 250)) {
+	    inta += -32;
+	}
+	if ((intb >= 225 && intb <= 250)) {
+	    intb += -32;
+	}
+    }
+    ret_val = inta == intb;
+
+/*
+       RETURN
+
+       End of LSAME
+*/
+
+    return ret_val;
+} /* lsame_ */
+
+/*
+   XERBLA: BLAS/LAPACK error handler.  Prints the offending routine name
+   (6 chars) and parameter position via f2c's Fortran I/O runtime, then
+   halts the process with s_stop -- the trailing 'return 0' is never
+   reached.  Relies on the file-level f2c constant c__1 (not visible in
+   this routine).
+*/
+/* Subroutine */ int xerbla_(char *srname, integer *info)
+{
+    /* Format strings */
+    static char fmt_9999[] = "(\002 ** On entry to \002,a6,\002 parameter nu"
+	    "mber \002,i2,\002 had \002,\002an illegal value\002)";
+
+    /* Builtin functions */
+    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
+    /* Subroutine */ int s_stop(char *, ftnlen);
+
+    /* Fortran I/O blocks */
+    static cilist io___147 = { 0, 6, 0, fmt_9999, 0 };
+
+
+/*
+    -- LAPACK auxiliary routine (preliminary version) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    XERBLA  is an error handler for the LAPACK routines.
+    It is called by an LAPACK routine if an input parameter has an
+    invalid value.  A message is printed and execution stops.
+
+    Installers may consider modifying the STOP statement in order to
+    call system-specific exception-handling facilities.
+
+    Arguments
+    =========
+
+    SRNAME  (input) CHARACTER*6
+            The name of the routine which called XERBLA.
+
+    INFO    (input) INTEGER
+            The position of the invalid parameter in the parameter list
+            of the calling routine.
+*/
+
+
+    /* Write the formatted message to Fortran unit 6 (stdout). */
+    s_wsfe(&io___147);
+    do_fio(&c__1, srname, (ftnlen)6);
+    do_fio(&c__1, (char *)&(*info), (ftnlen)sizeof(integer));
+    e_wsfe();
+
+    /* Translated Fortran STOP: terminates the whole process. */
+    s_stop("", (ftnlen)0);
+
+
+/*     End of XERBLA */
+
+    return 0;
+} /* xerbla_ */
+
+/*
+   ZAXPY: complex vector update zy := zy + za*zx over n elements, with
+   strides incx/incy.  No-op when n <= 0 or when dcabs1_(za) == 0.
+   Negative strides start from the far end, matching the Fortran BLAS
+   convention.  Non-reentrant (static locals).
+*/
+/* Subroutine */ int zaxpy_(integer *n, doublecomplex *za, doublecomplex *zx,
+	integer *incx, doublecomplex *zy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    doublecomplex z__1, z__2;
+
+    /* Local variables */
+    static integer i__, ix, iy;
+    extern doublereal dcabs1_(doublecomplex *);
+
+
+/*
+       constant times a vector plus a vector.
+       jack dongarra, 3/11/78.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+    /* Parameter adjustments: make zx and zy 1-based as in Fortran. */
+    --zy;
+    --zx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if (dcabs1_(za) == 0.) {
+	return 0;
+    }
+    if ((*incx == 1 && *incy == 1)) {
+	goto L20;
+    }
+
+/*
+          code for unequal increments or equal increments
+            not equal to 1
+*/
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = iy;
+	i__3 = iy;
+	i__4 = ix;
+	/* z__2 = za*zx[ix]; z__1 = zy[iy] + z__2 (complex multiply-add). */
+	z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
+		i__4].i + za->i * zx[i__4].r;
+	z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
+	zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/*        code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__;
+	i__4 = i__;
+	z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
+		i__4].i + za->i * zx[i__4].r;
+	z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
+	zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
+/* L30: */
+    }
+    return 0;
+} /* zaxpy_ */
+
+/*
+   ZCOPY: copies n complex elements from zx to zy with strides
+   incx/incy.  No-op when n <= 0; negative strides start from the far
+   end per the Fortran BLAS convention.  Non-reentrant (static locals).
+*/
+/* Subroutine */ int zcopy_(integer *n, doublecomplex *zx, integer *incx,
+	doublecomplex *zy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__, ix, iy;
+
+
+/*
+       copies a vector, x, to a vector, y.
+       jack dongarra, linpack, 4/11/78.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments: make zx and zy 1-based as in Fortran. */
+    --zy;
+    --zx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if ((*incx == 1 && *incy == 1)) {
+	goto L20;
+    }
+
+/*
+          code for unequal increments or equal increments
+            not equal to 1
+*/
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = iy;
+	i__3 = ix;
+	zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/*        code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__;
+	zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
+/* L30: */
+    }
+    return 0;
+} /* zcopy_ */
+
+/*
+   ZDOTC: conjugated complex dot product, ret_val = sum(conjg(zx)*zy)
+   over n elements with strides incx/incy.  The result is written
+   through the hidden ret_val pointer (f2c convention for functions
+   returning COMPLEX*16).  Returns 0 when n <= 0.  Non-reentrant
+   (static locals).
+*/
+/* Double Complex */ VOID zdotc_(doublecomplex * ret_val, integer *n,
+	doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, ix, iy;
+    static doublecomplex ztemp;
+
+
+/*
+       forms the dot product of a vector.
+       jack dongarra, 3/11/78.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+    /* Parameter adjustments: make zx and zy 1-based as in Fortran. */
+    --zy;
+    --zx;
+
+    /* Function Body */
+    ztemp.r = 0., ztemp.i = 0.;
+    ret_val->r = 0.,  ret_val->i = 0.;
+    if (*n <= 0) {
+	return ;
+    }
+    if ((*incx == 1 && *incy == 1)) {
+	goto L20;
+    }
+
+/*
+          code for unequal increments or equal increments
+            not equal to 1
+*/
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	/* z__3 = conjg(zx[ix]); ztemp += z__3 * zy[iy]. */
+	d_cnjg(&z__3, &zx[ix]);
+	i__2 = iy;
+	z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
+		zy[i__2].i + z__3.i * zy[i__2].r;
+	z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+	ztemp.r = z__1.r, ztemp.i = z__1.i;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    ret_val->r = ztemp.r,  ret_val->i = ztemp.i;
+    return ;
+
+/*        code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d_cnjg(&z__3, &zx[i__]);
+	i__2 = i__;
+	z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
+		zy[i__2].i + z__3.i * zy[i__2].r;
+	z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+	ztemp.r = z__1.r, ztemp.i = z__1.i;
+/* L30: */
+    }
+    ret_val->r = ztemp.r,  ret_val->i = ztemp.i;
+    return ;
+} /* zdotc_ */
+
+/*
+   ZDOTU: unconjugated complex dot product, ret_val = sum(zx*zy) over n
+   elements with strides incx/incy.  Result is written through the
+   hidden ret_val pointer (f2c COMPLEX*16-function convention).
+   Returns 0 when n <= 0.  Non-reentrant (static locals).
+*/
+/* Double Complex */ VOID zdotu_(doublecomplex * ret_val, integer *n,
+	doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublecomplex z__1, z__2;
+
+    /* Local variables */
+    static integer i__, ix, iy;
+    static doublecomplex ztemp;
+
+
+/*
+       forms the dot product of two vectors.
+       jack dongarra, 3/11/78.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+    /* Parameter adjustments: make zx and zy 1-based as in Fortran. */
+    --zy;
+    --zx;
+
+    /* Function Body */
+    ztemp.r = 0., ztemp.i = 0.;
+    ret_val->r = 0.,  ret_val->i = 0.;
+    if (*n <= 0) {
+	return ;
+    }
+    if ((*incx == 1 && *incy == 1)) {
+	goto L20;
+    }
+
+/*
+          code for unequal increments or equal increments
+            not equal to 1
+*/
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = ix;
+	i__3 = iy;
+	/* z__2 = zx[ix]*zy[iy]; ztemp += z__2 (complex multiply-add). */
+	z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
+		zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
+	z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+	ztemp.r = z__1.r, ztemp.i = z__1.i;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    ret_val->r = ztemp.r,  ret_val->i = ztemp.i;
+    return ;
+
+/*        code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__;
+	z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
+		zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
+	z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+	ztemp.r = z__1.r, ztemp.i = z__1.i;
+/* L30: */
+    }
+    ret_val->r = ztemp.r,  ret_val->i = ztemp.i;
+    return ;
+} /* zdotu_ */
+
+/*
+   ZDSCAL: scales the complex vector zx in place by the REAL scalar da
+   (imaginary part of the factor is 0), over n elements with stride
+   incx.  No-op when n <= 0 or incx <= 0.  Non-reentrant (static
+   locals).
+*/
+/* Subroutine */ int zdscal_(integer *n, doublereal *da, doublecomplex *zx,
+	integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublecomplex z__1, z__2;
+
+    /* Local variables */
+    static integer i__, ix;
+
+
+/*
+       scales a vector by a constant.
+       jack dongarra, 3/11/78.
+       modified 3/93 to return if incx .le. 0.
+       modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+    /* Parameter adjustments: make zx 1-based as in Fortran. */
+    --zx;
+
+    /* Function Body */
+    if (*n <= 0 || *incx <= 0) {
+	return 0;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/*        code for increment not equal to 1 */
+
+    ix = 1;
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = ix;
+	/* Promote da to the complex value (da, 0) and multiply. */
+	z__2.r = *da, z__2.i = 0.;
+	i__3 = ix;
+	z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
+		zx[i__3].i + z__2.i * zx[i__3].r;
+	zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+	ix += *incx;
+/* L10: */
+    }
+    return 0;
+
+/*        code for increment equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	z__2.r = *da, z__2.i = 0.;
+	i__3 = i__;
+	z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
+		zx[i__3].i + z__2.i * zx[i__3].r;
+	zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+/* L30: */
+    }
+    return 0;
+} /* zdscal_ */
+
+/* Subroutine */ int zgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, doublecomplex *alpha, doublecomplex *a, integer *lda,
+ doublecomplex *b, integer *ldb, doublecomplex *beta, doublecomplex *
+ c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static logical nota, notb;
+ static doublecomplex temp;
+ static logical conja, conjb;
+ static integer ncola;
+ extern logical lsame_(char *, char *);
+ static integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZGEMM performs one of the matrix-matrix operations
+
+ C := alpha*op( A )*op( B ) + beta*C,
+
+ where op( X ) is one of
+
+ op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ),
+
+ alpha and beta are scalars, and A, B and C are matrices, with op( A )
+ an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n', op( A ) = A.
+
+ TRANSA = 'T' or 't', op( A ) = A'.
+
+ TRANSA = 'C' or 'c', op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ TRANSB - CHARACTER*1.
+ On entry, TRANSB specifies the form of op( B ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSB = 'N' or 'n', op( B ) = B.
+
+ TRANSB = 'T' or 't', op( B ) = B'.
+
+ TRANSB = 'C' or 'c', op( B ) = conjg( B' ).
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix
+ op( A ) and of the matrix C. M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix
+ op( B ) and the number of columns of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry, K specifies the number of columns of the matrix
+ op( A ) and the number of rows of the matrix op( B ). K must
+ be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANSA = 'N' or 'n', and is m otherwise.
+ Before entry with TRANSA = 'N' or 'n', the leading m by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by m part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANSA = 'N' or 'n' then
+ LDA must be at least max( 1, m ), otherwise LDA must be at
+ least max( 1, k ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is
+ n when TRANSB = 'N' or 'n', and is k otherwise.
+ Before entry with TRANSB = 'N' or 'n', the leading k by n
+ part of the array B must contain the matrix B, otherwise
+ the leading n by k part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANSB = 'N' or 'n' then
+ LDB must be at least max( 1, k ), otherwise LDB must be at
+ least max( 1, n ).
+ Unchanged on exit.
+
+ BETA - COMPLEX*16 .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then C need not be set on input.
+ Unchanged on exit.
+
+ C - COMPLEX*16 array of DIMENSION ( LDC, n ).
+ Before entry, the leading m by n part of the array C must
+ contain the matrix C, except when beta is zero, in which
+ case C need not be set on entry.
+ On exit, the array C is overwritten by the m by n matrix
+ ( alpha*op( A )*op( B ) + beta*C ).
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Set NOTA and NOTB as true if A and B respectively are not
+ conjugated or transposed, set CONJA and CONJB as true if A and
+ B respectively are to be transposed but not conjugated and set
+ NROWA, NCOLA and NROWB as the number of rows and columns of A
+ and the number of rows of B respectively.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ conja = lsame_(transa, "C");
+ conjb = lsame_(transb, "C");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (((! nota && ! conja) && ! lsame_(transa, "T")))
+ {
+ info = 1;
+ } else if (((! notb && ! conjb) && ! lsame_(transb, "T"))) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("ZGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || (((alpha->r == 0. && alpha->i == 0.) || *k == 0)
+ && ((beta->r == 1. && beta->i == 0.)))) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if ((alpha->r == 0. && alpha->i == 0.)) {
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ i__3 = l + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else if (conja) {
+
+/* Form C := alpha*conjg( A' )*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L140: */
+ }
+/* L150: */
+ }
+ }
+ } else if (nota) {
+ if (conjb) {
+
+/* Form C := alpha*A*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L160: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L170: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L210: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L220: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ i__3 = j + l * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+/* L250: */
+ }
+ }
+ } else if (conja) {
+ if (conjb) {
+
+/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ d_cnjg(&z__4, &b[j + l * b_dim1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i =
+ z__3.r * z__4.i + z__3.i * z__4.r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L260: */
+ }
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L270: */
+ }
+/* L280: */
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = j + l * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L290: */
+ }
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L300: */
+ }
+/* L310: */
+ }
+ }
+ } else {
+ if (conjb) {
+
+/* Form C := alpha*A'*conjg( B' ) + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ d_cnjg(&z__3, &b[j + l * b_dim1]);
+ z__2.r = a[i__4].r * z__3.r - a[i__4].i * z__3.i,
+ z__2.i = a[i__4].r * z__3.i + a[i__4].i *
+ z__3.r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L320: */
+ }
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L330: */
+ }
+/* L340: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = j + l * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L350: */
+ }
+ if ((beta->r == 0. && beta->i == 0.)) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L360: */
+ }
+/* L370: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGEMM . */
+
+} /* zgemm_ */
+
+/* Subroutine */ int zgemv_(char *trans, integer *m, integer *n,
+	doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+	x, integer *incx, doublecomplex *beta, doublecomplex *y, integer *
+	incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    /* NOTE(review): locals are automatic (no "static") so the routine is
+       reentrant; f2c's default static locals would make concurrent calls
+       from multiple threads unsafe. */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    doublecomplex temp;
+    integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical noconj;
+
+
+/*
+    Purpose
+    =======
+
+    ZGEMV performs one of the matrix-vector operations
+
+       y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,   or
+
+       y := alpha*conjg( A' )*x + beta*y,
+
+    where alpha and beta are scalars, x and y are vectors and A is an
+    m by n matrix.
+
+    Parameters
+    ==========
+
+    TRANS  - CHARACTER*1.
+             On entry, TRANS specifies the operation to be performed as
+             follows:
+
+                TRANS = 'N' or 'n'   y := alpha*A*x + beta*y.
+
+                TRANS = 'T' or 't'   y := alpha*A'*x + beta*y.
+
+                TRANS = 'C' or 'c'   y := alpha*conjg( A' )*x + beta*y.
+
+             Unchanged on exit.
+
+    M      - INTEGER.
+             On entry, M specifies the number of rows of the matrix A.
+             M must be at least zero.
+             Unchanged on exit.
+
+    N      - INTEGER.
+             On entry, N specifies the number of columns of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA  - COMPLEX*16      .
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    A      - COMPLEX*16       array of DIMENSION ( LDA, n ).
+             Before entry, the leading m by n part of the array A must
+             contain the matrix of coefficients.
+             Unchanged on exit.
+
+    LDA    - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, m ).
+             Unchanged on exit.
+
+    X      - COMPLEX*16       array of DIMENSION at least
+             ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
+             and at least
+             ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
+             Before entry, the incremented array X must contain the
+             vector x.
+             Unchanged on exit.
+
+    INCX   - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    BETA   - COMPLEX*16      .
+             On entry, BETA specifies the scalar beta. When BETA is
+             supplied as zero then Y need not be set on input.
+             Unchanged on exit.
+
+    Y      - COMPLEX*16       array of DIMENSION at least
+             ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
+             and at least
+             ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
+             Before entry with BETA non-zero, the incremented array Y
+             must contain the vector y. On exit, Y is overwritten by the
+             updated vector y.
+
+    INCY   - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing
+       carried over by f2c works directly on the C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (((! lsame_(trans, "N") && ! lsame_(trans, "T")) && ! lsame_(trans, "C"))) {
+	info = 1;
+    } else if (*m < 0) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*lda < max(1,*m)) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    } else if (*incy == 0) {
+	info = 11;
+    }
+    if (info != 0) {
+	xerbla_("ZGEMV ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || ((alpha->r == 0. && alpha->i == 0.) && ((
+	    beta->r == 1. && beta->i == 0.)))) {
+	return 0;
+    }
+
+    noconj = lsame_(trans, "T");
+
+/*
+       Set  LENX  and  LENY, the lengths of the vectors x and y, and set
+       up the start points in  X  and  Y.
+*/
+
+    if (lsame_(trans, "N")) {
+	lenx = *n;
+	leny = *m;
+    } else {
+	lenx = *m;
+	leny = *n;
+    }
+    /* For negative increments start from the far end of the vector. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (lenx - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (leny - 1) * *incy;
+    }
+
+/*
+       Start the operations. In this version the elements of A are
+       accessed sequentially with one pass through A.
+
+       First form  y := beta*y.
+*/
+
+    if (beta->r != 1. || beta->i != 0.) {
+	if (*incy == 1) {
+	    if ((beta->r == 0. && beta->i == 0.)) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if ((beta->r == 0. && beta->i == 0.)) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if ((alpha->r == 0. && alpha->i == 0.)) {
+	return 0;
+    }
+    if (lsame_(trans, "N")) {
+
+/*        Form  y := alpha*A*x + y. */
+
+	jx = kx;
+	if (*incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = jx;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__;
+			i__4 = i__;
+			i__5 = i__ + j * a_dim1;
+			z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
+				z__2.i;
+			y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+/* L50: */
+		    }
+		}
+		jx += *incx;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = jx;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    iy = ky;
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = iy;
+			i__4 = iy;
+			i__5 = i__ + j * a_dim1;
+			z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
+				z__2.i;
+			y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* L80: */
+	    }
+	}
+    } else {
+
+/*        Form  y := alpha*A'*x + y  or  y := alpha*conjg( A' )*x + y. */
+
+	jy = ky;
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp.r = 0., temp.i = 0.;
+		if (noconj) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__;
+			z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+				.i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+				.i * x[i__4].r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+		    }
+		} else {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			i__3 = i__;
+			z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+				z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+				.r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+		    }
+		}
+		i__2 = jy;
+		i__3 = jy;
+		z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+			alpha->r * temp.i + alpha->i * temp.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jy += *incy;
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp.r = 0., temp.i = 0.;
+		ix = kx;
+		if (noconj) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ix;
+			z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+				.i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+				.i * x[i__4].r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+			ix += *incx;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			i__3 = ix;
+			z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+				z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+				.r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+			ix += *incx;
+/* L130: */
+		    }
+		}
+		i__2 = jy;
+		i__3 = jy;
+		z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+			alpha->r * temp.i + alpha->i * temp.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jy += *incy;
+/* L140: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of ZGEMV . */
+
+} /* zgemv_ */
+
+/* Subroutine */ int zgerc_(integer *m, integer *n, doublecomplex *alpha,
+	doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+	doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    /* NOTE(review): locals are automatic (no "static") so the routine is
+       reentrant; f2c's default static locals would make concurrent calls
+       from multiple threads unsafe. */
+    integer i__, j, ix, jy, kx, info;
+    doublecomplex temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    Purpose
+    =======
+
+    ZGERC  performs the rank 1 operation
+
+       A := alpha*x*conjg( y' ) + A,
+
+    where alpha is a scalar, x is an m element vector, y is an n element
+    vector and A is an m by n matrix.
+
+    Parameters
+    ==========
+
+    M      - INTEGER.
+             On entry, M specifies the number of rows of the matrix A.
+             M must be at least zero.
+             Unchanged on exit.
+
+    N      - INTEGER.
+             On entry, N specifies the number of columns of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA  - COMPLEX*16      .
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    X      - COMPLEX*16       array of dimension at least
+             ( 1 + ( m - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the m
+             element vector x.
+             Unchanged on exit.
+
+    INCX   - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    Y      - COMPLEX*16       array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y.
+             Unchanged on exit.
+
+    INCY   - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+    A      - COMPLEX*16       array of DIMENSION ( LDA, n ).
+             Before entry, the leading m by n part of the array A must
+             contain the matrix of coefficients. On exit, A is
+             overwritten by the updated matrix.
+
+    LDA    - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, m ).
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing
+       carried over by f2c works directly on the C arrays. */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (*m < 0) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("ZGERC ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0.)) {
+	return 0;
+    }
+
+/*
+       Start the operations. In this version the elements of A are
+       accessed sequentially with one pass through A.
+*/
+
+    /* For negative INCY start from the far end of y. */
+    if (*incy > 0) {
+	jy = 1;
+    } else {
+	jy = 1 - (*n - 1) * *incy;
+    }
+    if (*incx == 1) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		d_cnjg(&z__2, &y[jy]);
+		z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			alpha->r * z__2.i + alpha->i * z__2.r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = i__;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		}
+	    }
+	    jy += *incy;
+/* L20: */
+	}
+    } else {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*m - 1) * *incx;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		d_cnjg(&z__2, &y[jy]);
+		z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			alpha->r * z__2.i + alpha->i * z__2.r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = ix;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+		    ix += *incx;
+/* L30: */
+		}
+	    }
+	    jy += *incy;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of ZGERC . */
+
+} /* zgerc_ */
+
+/* Subroutine */ int zgeru_(integer *m, integer *n, doublecomplex *alpha,
+	doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+	doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2;
+
+    /* Local variables */
+    /* NOTE(review): locals are automatic (no "static") so the routine is
+       reentrant; f2c's default static locals would make concurrent calls
+       from multiple threads unsafe. */
+    integer i__, j, ix, jy, kx, info;
+    doublecomplex temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    Purpose
+    =======
+
+    ZGERU  performs the rank 1 operation
+
+       A := alpha*x*y' + A,
+
+    where alpha is a scalar, x is an m element vector, y is an n element
+    vector and A is an m by n matrix.
+
+    Parameters
+    ==========
+
+    M      - INTEGER.
+             On entry, M specifies the number of rows of the matrix A.
+             M must be at least zero.
+             Unchanged on exit.
+
+    N      - INTEGER.
+             On entry, N specifies the number of columns of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA  - COMPLEX*16      .
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    X      - COMPLEX*16       array of dimension at least
+             ( 1 + ( m - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the m
+             element vector x.
+             Unchanged on exit.
+
+    INCX   - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    Y      - COMPLEX*16       array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y.
+             Unchanged on exit.
+
+    INCY   - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+    A      - COMPLEX*16       array of DIMENSION ( LDA, n ).
+             Before entry, the leading m by n part of the array A must
+             contain the matrix of coefficients. On exit, A is
+             overwritten by the updated matrix.
+
+    LDA    - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, m ).
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing
+       carried over by f2c works directly on the C arrays. */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (*m < 0) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("ZGERU ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0.)) {
+	return 0;
+    }
+
+/*
+       Start the operations. In this version the elements of A are
+       accessed sequentially with one pass through A.
+*/
+
+    /* For negative INCY start from the far end of y. */
+    if (*incy > 0) {
+	jy = 1;
+    } else {
+	jy = 1 - (*n - 1) * *incy;
+    }
+    if (*incx == 1) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		i__2 = jy;
+		z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
+			 alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = i__;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		}
+	    }
+	    jy += *incy;
+/* L20: */
+	}
+    } else {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*m - 1) * *incx;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		i__2 = jy;
+		z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
+			 alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = ix;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+		    ix += *incx;
+/* L30: */
+		}
+	    }
+	    jy += *incy;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of ZGERU . */
+
+} /* zgeru_ */
+
+/* Subroutine */ int zhemv_(char *uplo, integer *n, doublecomplex *alpha,
+	doublecomplex *a, integer *lda, doublecomplex *x, integer *incx,
+	doublecomplex *beta, doublecomplex *y, integer *incy)
+{
+/*
+   NOTE(review): machine-generated C (f2c translation of the Fortran
+   reference BLAS ZHEMV).  The unrolled complex arithmetic, the z__*
+   temporaries and the exact statement order are all products of the
+   translator and are relied upon as-is; regenerate from the Fortran
+   source rather than hand-editing this routine.
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    static doublecomplex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    Purpose
+    =======
+
+    ZHEMV  performs the matrix-vector  operation
+
+       y := alpha*A*x + beta*y,
+
+    where alpha and beta are scalars, x and y are n element vectors and
+    A is an n by n hermitian matrix.
+
+    Parameters
+    ==========
+
+    UPLO   - CHARACTER*1.
+             On entry, UPLO specifies whether the upper or lower
+             triangular part of the array A is to be referenced as
+             follows:
+
+                UPLO = 'U' or 'u'   Only the upper triangular part of A
+                                    is to be referenced.
+
+                UPLO = 'L' or 'l'   Only the lower triangular part of A
+                                    is to be referenced.
+
+             Unchanged on exit.
+
+    N      - INTEGER.
+             On entry, N specifies the order of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA  - COMPLEX*16      .
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    A      - COMPLEX*16       array of DIMENSION ( LDA, n ).
+             Before entry with  UPLO = 'U' or 'u', the leading n by n
+             upper triangular part of the array A must contain the upper
+             triangular part of the hermitian matrix and the strictly
+             lower triangular part of A is not referenced.
+             Before entry with UPLO = 'L' or 'l', the leading n by n
+             lower triangular part of the array A must contain the lower
+             triangular part of the hermitian matrix and the strictly
+             upper triangular part of A is not referenced.
+             Note that the imaginary parts of the diagonal elements need
+             not be set and are assumed to be zero.
+             Unchanged on exit.
+
+    LDA    - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, n ).
+             Unchanged on exit.
+
+    X      - COMPLEX*16       array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the n
+             element vector x.
+             Unchanged on exit.
+
+    INCX   - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    BETA   - COMPLEX*16      .
+             On entry, BETA specifies the scalar beta. When BETA is
+             supplied as zero then Y need not be set on input.
+             Unchanged on exit.
+
+    Y      - COMPLEX*16       array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y. On exit, Y is overwritten by the updated
+             vector y.
+
+    INCY   - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so 1-based Fortran indexing works in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*lda < max(1,*n)) {
+	info = 5;
+    } else if (*incx == 0) {
+	info = 7;
+    } else if (*incy == 0) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("ZHEMV ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0 || ((alpha->r == 0. && alpha->i == 0.) && ((beta->r == 1. &&
+	    beta->i == 0.)))) {
+	return 0;
+    }
+
+/*     Set up the start points in  X  and  Y. */
+
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/*
+       Start the operations. In this version the elements of A are
+       accessed sequentially with one pass through the triangular part
+       of A.
+
+       First form  y := beta*y.
+*/
+
+    if (beta->r != 1. || beta->i != 0.) {
+	if (*incy == 1) {
+	    if ((beta->r == 0. && beta->i == 0.)) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if ((beta->r == 0. && beta->i == 0.)) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if ((alpha->r == 0. && alpha->i == 0.)) {
+	return 0;
+    }
+    if (lsame_(uplo, "U")) {
+
+/*        Form  y  when A is stored in upper triangle. */
+
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+		    i__3 = i__;
+		    z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+			     z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+		}
+		i__2 = j;
+		i__3 = j;
+		i__4 = j + j * a_dim1;
+		d__1 = a[i__4].r;
+		z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+		z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+		z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		ix = kx;
+		iy = ky;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+		    i__3 = ix;
+		    z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+			     z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = j + j * a_dim1;
+		d__1 = a[i__4].r;
+		z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+		z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+		z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    } else {
+
+/*        Form  y  when A is stored in lower triangle. */
+
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		i__2 = j;
+		i__3 = j;
+		i__4 = j + j * a_dim1;
+		d__1 = a[i__4].r;
+		z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+		    i__3 = i__;
+		    z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+			     z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+/* L90: */
+		}
+		i__2 = j;
+		i__3 = j;
+		z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = j + j * a_dim1;
+		d__1 = a[i__4].r;
+		z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		ix = jx;
+		iy = jy;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    ix += *incx;
+		    iy += *incy;
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+		    i__3 = ix;
+		    z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+			     z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jx += *incx;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of ZHEMV . */
+
+} /* zhemv_ */
+
+/* Subroutine */ int zher2_(char *uplo, integer *n, doublecomplex *alpha,
+	doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+	doublecomplex *a, integer *lda)
+{
+/*
+   NOTE(review): machine-generated C (f2c translation of the Fortran
+   reference BLAS ZHER2).  The unrolled complex arithmetic, the z__*
+   temporaries and the exact statement order are all products of the
+   translator and are relied upon as-is; regenerate from the Fortran
+   source rather than hand-editing this routine.
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    static doublecomplex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    Purpose
+    =======
+
+    ZHER2  performs the hermitian rank 2 operation
+
+       A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+
+    where alpha is a scalar, x and y are n element vectors and A is an n
+    by n hermitian matrix.
+
+    Parameters
+    ==========
+
+    UPLO   - CHARACTER*1.
+             On entry, UPLO specifies whether the upper or lower
+             triangular part of the array A is to be referenced as
+             follows:
+
+                UPLO = 'U' or 'u'   Only the upper triangular part of A
+                                    is to be referenced.
+
+                UPLO = 'L' or 'l'   Only the lower triangular part of A
+                                    is to be referenced.
+
+             Unchanged on exit.
+
+    N      - INTEGER.
+             On entry, N specifies the order of the matrix A.
+             N must be at least zero.
+             Unchanged on exit.
+
+    ALPHA  - COMPLEX*16      .
+             On entry, ALPHA specifies the scalar alpha.
+             Unchanged on exit.
+
+    X      - COMPLEX*16       array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCX ) ).
+             Before entry, the incremented array X must contain the n
+             element vector x.
+             Unchanged on exit.
+
+    INCX   - INTEGER.
+             On entry, INCX specifies the increment for the elements of
+             X. INCX must not be zero.
+             Unchanged on exit.
+
+    Y      - COMPLEX*16       array of dimension at least
+             ( 1 + ( n - 1 )*abs( INCY ) ).
+             Before entry, the incremented array Y must contain the n
+             element vector y.
+             Unchanged on exit.
+
+    INCY   - INTEGER.
+             On entry, INCY specifies the increment for the elements of
+             Y. INCY must not be zero.
+             Unchanged on exit.
+
+    A      - COMPLEX*16       array of DIMENSION ( LDA, n ).
+             Before entry with  UPLO = 'U' or 'u', the leading n by n
+             upper triangular part of the array A must contain the upper
+             triangular part of the hermitian matrix and the strictly
+             lower triangular part of A is not referenced. On exit, the
+             upper triangular part of the array A is overwritten by the
+             upper triangular part of the updated matrix.
+             Before entry with UPLO = 'L' or 'l', the leading n by n
+             lower triangular part of the array A must contain the lower
+             triangular part of the hermitian matrix and the strictly
+             upper triangular part of A is not referenced. On exit, the
+             lower triangular part of the array A is overwritten by the
+             lower triangular part of the updated matrix.
+             Note that the imaginary parts of the diagonal elements need
+             not be set, they are assumed to be zero, and on exit they
+             are set to zero.
+
+    LDA    - INTEGER.
+             On entry, LDA specifies the first dimension of A as declared
+             in the calling (sub) program. LDA must be at least
+             max( 1, n ).
+             Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+       Jack Dongarra, Argonne National Lab.
+       Jeremy Du Croz, Nag Central Office.
+       Sven Hammarling, Nag Central Office.
+       Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so 1-based Fortran indexing works in C. */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*n)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("ZHER2 ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0 || (alpha->r == 0. && alpha->i == 0.)) {
+	return 0;
+    }
+
+/*
+       Set up the start points in X and Y if the increments are not both
+       unity.
+*/
+
+    if (*incx != 1 || *incy != 1) {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*n - 1) * *incx;
+	}
+	if (*incy > 0) {
+	    ky = 1;
+	} else {
+	    ky = 1 - (*n - 1) * *incy;
+	}
+	jx = kx;
+	jy = ky;
+    }
+
+/*
+       Start the operations. In this version the elements of A are
+       accessed sequentially with one pass through the triangular part
+       of A.
+*/
+
+    if (lsame_(uplo, "U")) {
+
+/*        Form  A  when A is stored in the upper triangle. */
+
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		i__3 = j;
+		if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+			y[i__3].i != 0.)) {
+		    d_cnjg(&z__2, &y[j]);
+		    z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			    alpha->r * z__2.i + alpha->i * z__2.r;
+		    temp1.r = z__1.r, temp1.i = z__1.i;
+		    i__2 = j;
+		    z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    d_cnjg(&z__1, &z__2);
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = i__;
+			z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+				z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+				temp1.r;
+			z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+				z__3.i;
+			i__6 = i__;
+			z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+				z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+				temp2.r;
+			z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		    }
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    i__4 = j;
+		    z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+			    z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+			    temp1.r;
+		    i__5 = j;
+		    z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+			    z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+			    temp2.r;
+		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+		    d__1 = a[i__3].r + z__1.r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		} else {
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    d__1 = a[i__3].r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		i__3 = jy;
+		if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+			y[i__3].i != 0.)) {
+		    d_cnjg(&z__2, &y[jy]);
+		    z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			    alpha->r * z__2.i + alpha->i * z__2.r;
+		    temp1.r = z__1.r, temp1.i = z__1.i;
+		    i__2 = jx;
+		    z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    d_cnjg(&z__1, &z__2);
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+		    ix = kx;
+		    iy = ky;
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = ix;
+			z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+				z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+				temp1.r;
+			z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+				z__3.i;
+			i__6 = iy;
+			z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+				z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+				temp2.r;
+			z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			ix += *incx;
+			iy += *incy;
+/* L30: */
+		    }
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    i__4 = jx;
+		    z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+			    z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+			    temp1.r;
+		    i__5 = jy;
+		    z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+			    z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+			    temp2.r;
+		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+		    d__1 = a[i__3].r + z__1.r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		} else {
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    d__1 = a[i__3].r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		}
+		jx += *incx;
+		jy += *incy;
+/* L40: */
+	    }
+	}
+    } else {
+
+/*        Form  A  when A is stored in the lower triangle. */
+
+	if ((*incx == 1 && *incy == 1)) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		i__3 = j;
+		if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+			y[i__3].i != 0.)) {
+		    d_cnjg(&z__2, &y[j]);
+		    z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			    alpha->r * z__2.i + alpha->i * z__2.r;
+		    temp1.r = z__1.r, temp1.i = z__1.i;
+		    i__2 = j;
+		    z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    d_cnjg(&z__1, &z__2);
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    i__4 = j;
+		    z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+			    z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+			    temp1.r;
+		    i__5 = j;
+		    z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+			    z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+			    temp2.r;
+		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+		    d__1 = a[i__3].r + z__1.r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = i__;
+			z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+				z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+				temp1.r;
+			z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+				z__3.i;
+			i__6 = i__;
+			z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+				z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+				temp2.r;
+			z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+		    }
+		} else {
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    d__1 = a[i__3].r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		}
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		i__3 = jy;
+		if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+			y[i__3].i != 0.)) {
+		    d_cnjg(&z__2, &y[jy]);
+		    z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			    alpha->r * z__2.i + alpha->i * z__2.r;
+		    temp1.r = z__1.r, temp1.i = z__1.i;
+		    i__2 = jx;
+		    z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    d_cnjg(&z__1, &z__2);
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    i__4 = jx;
+		    z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+			    z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+			    temp1.r;
+		    i__5 = jy;
+		    z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+			    z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+			    temp2.r;
+		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+		    d__1 = a[i__3].r + z__1.r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		    ix = jx;
+		    iy = jy;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			ix += *incx;
+			iy += *incy;
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = ix;
+			z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+				z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+				temp1.r;
+			z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+				z__3.i;
+			i__6 = iy;
+			z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+				z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+				temp2.r;
+			z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L70: */
+		    }
+		} else {
+		    i__2 = j + j * a_dim1;
+		    i__3 = j + j * a_dim1;
+		    d__1 = a[i__3].r;
+		    a[i__2].r = d__1, a[i__2].i = 0.;
+		}
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of ZHER2 . */
+
+} /* zher2_ */
+
+/* Subroutine */ int zher2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ b, integer *ldb, doublereal *beta, doublecomplex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZHER2K performs one of the hermitian rank 2k operations
+
+ C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C,
+
+ or
+
+ C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C,
+
+ where alpha and beta are scalars with beta real, C is an n by n
+ hermitian matrix and A and B are n by k matrices in the first case
+ and k by n matrices in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) +
+ conjg( alpha )*B*conjg( A' ) +
+ beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*conjg( A' )*B +
+ conjg( alpha )*conjg( B' )*A +
+ beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrices A and B, and on entry with
+ TRANS = 'C' or 'c', K specifies the number of rows of the
+ matrices A and B. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array B must contain the matrix B, otherwise
+ the leading k by n part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDB must be at least max( 1, n ), otherwise LDB must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION .
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - COMPLEX*16 array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+ Note that the imaginary parts of the diagonal elements need
+ not be set, they are assumed to be zero, and on exit they
+ are set to zero.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+ -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1.
+ Ed Anderson, Cray Research Inc.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if ((! upper && ! lsame_(uplo, "L"))) {
+ info = 1;
+ } else if ((! lsame_(trans, "N") && ! lsame_(trans,
+ "C"))) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("ZHER2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (((alpha->r == 0. && alpha->i == 0.) || *k == 0) && *beta
+ == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if ((alpha->r == 0. && alpha->i == 0.)) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/*
+ Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) +
+ C.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
+ 0. || b[i__4].i != 0.)) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ z__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ z__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
+ 0. || b[i__4].i != 0.)) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ z__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ z__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/*
+ Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A +
+ C.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ d_cnjg(&z__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L190: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.) {
+ i__3 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = *beta * c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+ } else {
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
+ c__[i__4].i;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
+ z__4.i;
+ d_cnjg(&z__6, alpha);
+ z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
+ z__5.i = z__6.r * temp2.i + z__6.i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
+ z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ d_cnjg(&z__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L220: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.) {
+ i__3 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = *beta * c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+ } else {
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
+ c__[i__4].i;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
+ z__4.i;
+ d_cnjg(&z__6, alpha);
+ z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
+ z__5.i = z__6.r * temp2.i + z__6.i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
+ z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHER2K. */
+
+} /* zher2k_ */
+
+/* Subroutine */ int zherk_(char *uplo, char *trans, integer *n, integer *k,
+	doublereal *alpha, doublecomplex *a, integer *lda, doublereal *beta,
+	doublecomplex *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+	    i__6;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, j, l, info;
+    static doublecomplex temp;
+    extern logical lsame_(char *, char *);
+    static integer nrowa;
+    static doublereal rtemp;
+    static logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    /* NOTE(review): f2c emitted the locals above as `static`, so this
+       translation is not reentrant -- confirm it is never called from
+       multiple threads concurrently. */
+
+
+/*
+    Purpose
+    =======
+
+    ZHERK performs one of the hermitian rank k operations
+
+    C := alpha*A*conjg( A' ) + beta*C,
+
+    or
+
+    C := alpha*conjg( A' )*A + beta*C,
+
+    where alpha and beta are real scalars, C is an n by n hermitian
+    matrix and A is an n by k matrix in the first case and a k by n
+    matrix in the second case.
+
+    Parameters
+    ==========
+
+    UPLO - CHARACTER*1.
+    On entry, UPLO specifies whether the upper or lower
+    triangular part of the array C is to be referenced as
+    follows:
+
+    UPLO = 'U' or 'u' Only the upper triangular part of C
+    is to be referenced.
+
+    UPLO = 'L' or 'l' Only the lower triangular part of C
+    is to be referenced.
+
+    Unchanged on exit.
+
+    TRANS - CHARACTER*1.
+    On entry, TRANS specifies the operation to be performed as
+    follows:
+
+    TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C.
+
+    TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C.
+
+    Unchanged on exit.
+
+    N - INTEGER.
+    On entry, N specifies the order of the matrix C. N must be
+    at least zero.
+    Unchanged on exit.
+
+    K - INTEGER.
+    On entry with TRANS = 'N' or 'n', K specifies the number
+    of columns of the matrix A, and on entry with
+    TRANS = 'C' or 'c', K specifies the number of rows of the
+    matrix A. K must be at least zero.
+    Unchanged on exit.
+
+    ALPHA - DOUBLE PRECISION .
+    On entry, ALPHA specifies the scalar alpha.
+    Unchanged on exit.
+
+    A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
+    k when TRANS = 'N' or 'n', and is n otherwise.
+    Before entry with TRANS = 'N' or 'n', the leading n by k
+    part of the array A must contain the matrix A, otherwise
+    the leading k by n part of the array A must contain the
+    matrix A.
+    Unchanged on exit.
+
+    LDA - INTEGER.
+    On entry, LDA specifies the first dimension of A as declared
+    in the calling (sub) program. When TRANS = 'N' or 'n'
+    then LDA must be at least max( 1, n ), otherwise LDA must
+    be at least max( 1, k ).
+    Unchanged on exit.
+
+    BETA - DOUBLE PRECISION.
+    On entry, BETA specifies the scalar beta.
+    Unchanged on exit.
+
+    C - COMPLEX*16 array of DIMENSION ( LDC, n ).
+    Before entry with UPLO = 'U' or 'u', the leading n by n
+    upper triangular part of the array C must contain the upper
+    triangular part of the hermitian matrix and the strictly
+    lower triangular part of C is not referenced. On exit, the
+    upper triangular part of the array C is overwritten by the
+    upper triangular part of the updated matrix.
+    Before entry with UPLO = 'L' or 'l', the leading n by n
+    lower triangular part of the array C must contain the lower
+    triangular part of the hermitian matrix and the strictly
+    upper triangular part of C is not referenced. On exit, the
+    lower triangular part of the array C is overwritten by the
+    lower triangular part of the updated matrix.
+    Note that the imaginary parts of the diagonal elements need
+    not be set, they are assumed to be zero, and on exit they
+    are set to zero.
+
+    LDC - INTEGER.
+    On entry, LDC specifies the first dimension of C as declared
+    in the calling (sub) program. LDC must be at least
+    max( 1, n ).
+    Unchanged on exit.
+
+
+    Level 3 Blas routine.
+
+    -- Written on 8-February-1989.
+    Jack Dongarra, Argonne National Laboratory.
+    Iain Duff, AERE Harwell.
+    Jeremy Du Croz, Numerical Algorithms Group Ltd.
+    Sven Hammarling, Numerical Algorithms Group Ltd.
+
+    -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1.
+    Ed Anderson, Cray Research Inc.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments: shift A and C so 1-based Fortran-style
+       indexing a[i + j*a_dim1] works on the 0-based C pointers. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    if (lsame_(trans, "N")) {
+	nrowa = *n;
+    } else {
+	nrowa = *k;
+    }
+    upper = lsame_(uplo, "U");
+
+    info = 0;
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	info = 1;
+    } else if ((! lsame_(trans, "N") && ! lsame_(trans,
+	    "C"))) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*k < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldc < max(1,*n)) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("ZHERK ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero. */
+
+    if (*alpha == 0.) {
+	if (upper) {
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+		    }
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+/* L40: */
+		}
+	    }
+	} else {
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+    if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( A' ) + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+		    }
+		} else if (*beta != 1.) {
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+		    }
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		}
+		/* Rank-1 updates: for each l with A(j,l) nonzero,
+		   C(1:j,j) += temp*A(1:j,l) where temp = alpha*conjg(A(j,l));
+		   the diagonal entry keeps only its real part. */
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * a_dim1;
+		    if (a[i__3].r != 0. || a[i__3].i != 0.) {
+			d_cnjg(&z__2, &a[j + l * a_dim1]);
+			z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+			i__3 = j - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    z__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+				    .i + z__2.i;
+			    c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+			}
+			i__3 = j + j * c_dim1;
+			i__4 = j + j * c_dim1;
+			i__5 = i__ + l * a_dim1;
+			z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			d__1 = c__[i__4].r + z__1.r;
+			c__[i__3].r = d__1, c__[i__3].i = 0.;
+		    }
+/* L120: */
+		}
+/* L130: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+		    }
+		} else if (*beta != 1.) {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+		    }
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * a_dim1;
+		    if (a[i__3].r != 0. || a[i__3].i != 0.) {
+			d_cnjg(&z__2, &a[j + l * a_dim1]);
+			z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+			i__3 = j + j * c_dim1;
+			i__4 = j + j * c_dim1;
+			i__5 = j + l * a_dim1;
+			z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			d__1 = c__[i__4].r + z__1.r;
+			c__[i__3].r = d__1, c__[i__3].i = 0.;
+			i__3 = *n;
+			for (i__ = j + 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    z__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+				    .i + z__2.i;
+			    c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+			}
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    } else {
+
+/* Form C := alpha*conjg( A' )*A + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0., temp.i = 0.;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+			i__4 = l + j * a_dim1;
+			z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+				z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+				.r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+		    }
+		    if (*beta == 0.) {
+			i__3 = i__ + j * c_dim1;
+			z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
+			i__4 = i__ + j * c_dim1;
+			z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
+				i__4].i;
+			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+		    }
+/* L200: */
+		}
+		/* Diagonal entry: conjg(A(:,j))'*A(:,j) is real, so only the
+		   real part is accumulated and the imaginary part is forced
+		   to zero on the store below. */
+		rtemp = 0.;
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    d_cnjg(&z__3, &a[l + j * a_dim1]);
+		    i__3 = l + j * a_dim1;
+		    z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
+			     z__3.r * a[i__3].i + z__3.i * a[i__3].r;
+		    z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
+		    rtemp = z__1.r;
+/* L210: */
+		}
+		if (*beta == 0.) {
+		    i__2 = j + j * c_dim1;
+		    d__1 = *alpha * rtemp;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = *alpha * rtemp + *beta * c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		}
+/* L220: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		rtemp = 0.;
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    d_cnjg(&z__3, &a[l + j * a_dim1]);
+		    i__3 = l + j * a_dim1;
+		    z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
+			     z__3.r * a[i__3].i + z__3.i * a[i__3].r;
+		    z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
+		    rtemp = z__1.r;
+/* L230: */
+		}
+		if (*beta == 0.) {
+		    i__2 = j + j * c_dim1;
+		    d__1 = *alpha * rtemp;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    d__1 = *alpha * rtemp + *beta * c__[i__3].r;
+		    c__[i__2].r = d__1, c__[i__2].i = 0.;
+		}
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    temp.r = 0., temp.i = 0.;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+			i__4 = l + j * a_dim1;
+			z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+				z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+				.r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+/* L240: */
+		    }
+		    if (*beta == 0.) {
+			i__3 = i__ + j * c_dim1;
+			z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
+			i__4 = i__ + j * c_dim1;
+			z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
+				i__4].i;
+			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+			c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+		    }
+/* L250: */
+		}
+/* L260: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of ZHERK . */
+
+} /* zherk_ */
+
+/* Subroutine */ int zscal_(integer *n, doublecomplex *za, doublecomplex *zx,
+	integer *incx)
+{
+    /* Local variables */
+    static integer k, pos;
+    doublereal re, im;
+
+
+/*
+    Scale a vector by a complex constant: zx(i) := za * zx(i).
+    Based on jack dongarra, 3/11/78.
+    Returns immediately when n .le. 0 or incx .le. 0 (3/93 modification);
+    array(1) declarations changed to array(*) (12/3/93).
+*/
+
+
+    /* Shift to 1-based indexing, matching the Fortran original. */
+    --zx;
+
+    /* Nothing to do for empty vectors or non-positive strides. */
+    if (*n <= 0 || *incx <= 0) {
+	return 0;
+    }
+
+    if (*incx == 1) {
+
+/* code for increment equal to 1 */
+
+	for (k = 1; k <= *n; ++k) {
+	    re = za->r * zx[k].r - za->i * zx[k].i;
+	    im = za->r * zx[k].i + za->i * zx[k].r;
+	    zx[k].r = re, zx[k].i = im;
+	}
+	return 0;
+    }
+
+/* code for increment not equal to 1 */
+
+    pos = 1;
+    for (k = 1; k <= *n; ++k) {
+	re = za->r * zx[pos].r - za->i * zx[pos].i;
+	im = za->r * zx[pos].i + za->i * zx[pos].r;
+	zx[pos].r = re, zx[pos].i = im;
+	pos += *incx;
+    }
+    return 0;
+} /* zscal_ */
+
+/* Subroutine */ int zswap_(integer *n, doublecomplex *zx, integer *incx,
+	doublecomplex *zy, integer *incy)
+{
+    /* Local variables */
+    static integer k, ix, iy;
+    static doublecomplex hold;
+
+
+/*
+    Interchange two vectors zx and zy.
+    Based on jack dongarra, 3/11/78.
+    array(1) declarations changed to array(*) (12/3/93).
+*/
+
+
+    /* Shift to 1-based indexing, matching the Fortran original. */
+    --zy;
+    --zx;
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (*incx == 1 && *incy == 1) {
+
+/* code for both increments equal to 1 */
+
+	for (k = 1; k <= *n; ++k) {
+	    hold = zx[k];
+	    zx[k] = zy[k];
+	    zy[k] = hold;
+	}
+	return 0;
+    }
+
+/*
+    code for unequal increments or equal increments not equal
+    to 1
+*/
+
+    /* Negative strides walk backwards, so start at the far end. */
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    for (k = 1; k <= *n; ++k) {
+	hold = zx[ix];
+	zx[ix] = zy[iy];
+	zy[iy] = hold;
+	ix += *incx;
+	iy += *incy;
+    }
+    return 0;
+} /* zswap_ */
+
+/* Subroutine */ int ztrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
+ integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublecomplex temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRMM performs one of the matrix-matrix operations
+
+ B := alpha*op( A )*B, or B := alpha*B*op( A )
+
+ where alpha is a scalar, B is an m by n matrix, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) multiplies B from
+ the left or right as follows:
+
+ SIDE = 'L' or 'l' B := alpha*op( A )*B.
+
+ SIDE = 'R' or 'r' B := alpha*B*op( A ).
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the matrix B, and on exit is overwritten by the
+ transformed matrix.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if ((! lside && ! lsame_(side, "R"))) {
+ info = 1;
+ } else if ((! upper && ! lsame_(uplo, "L"))) {
+ info = 2;
+ } else if (((! lsame_(transa, "N") && ! lsame_(
+ transa, "T")) && ! lsame_(transa, "C"))) {
+ info = 3;
+ } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+ "N"))) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if ((alpha->r == 0. && alpha->i == 0.)) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ i__3 = k + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6]
+ .i, z__2.i = temp.r * a[i__6].i +
+ temp.i * a[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, z__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = k + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if (b[i__2].r != 0. || b[i__2].i != 0.) {
+ i__2 = k + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2]
+ .i, z__1.i = alpha->r * b[i__2].i +
+ alpha->i * b[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = k + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k + k * a_dim1;
+ z__1.r = b[i__3].r * a[i__4].r - b[i__3].i *
+ a[i__4].i, z__1.i = b[i__3].r * a[
+ i__4].i + b[i__3].i * a[i__4].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5]
+ .i, z__2.i = temp.r * a[i__5].i +
+ temp.i * a[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ temp.r = b[i__2].r, temp.i = b[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = i__ + i__ * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2]
+ .i, z__1.i = temp.r * a[i__2].i +
+ temp.i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, z__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
+ .i, z__2.i = z__3.r * b[i__3].i +
+ z__3.i * b[i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ if (noconj) {
+ if (nounit) {
+ i__3 = i__ + i__ * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, z__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, z__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
+ .i, z__2.i = z__3.r * b[i__4].i +
+ z__3.i * b[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L140: */
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[i__1]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[i__3]
+ .r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L170: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ i__2 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2]
+ .i, z__1.i = alpha->r * a[i__2].i +
+ alpha->i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[i__4]
+ .r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L210: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3]
+ .i, z__1.i = alpha->r * a[i__3].i +
+ alpha->i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[
+ i__3].i, z__1.i = alpha->r * a[i__3]
+ .i + alpha->i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i *
+ z__2.i, z__1.i = alpha->r * z__2.i +
+ alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__2 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ if (temp.r != 1. || temp.i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[
+ i__2].i, z__1.i = alpha->r * a[i__2]
+ .i + alpha->i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i *
+ z__2.i, z__1.i = alpha->r * z__2.i +
+ alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__1 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ if (temp.r != 1. || temp.i != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L310: */
+ }
+ }
+/* L320: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRMM . */
+
+} /* ztrmm_ */
+
+/* Subroutine */ int ztrmv_(char *uplo, char *trans, char *diag, integer *n,
+	doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, j, ix, jx, kx, info;
+    static doublecomplex temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static logical noconj, nounit;
+
+/* NOTE(review): f2c-generated translation of the Fortran reference BLAS.
+   The `static` local variables above mean this routine keeps state in
+   globals and is therefore non-reentrant (not safe to call concurrently
+   from multiple threads). */
+
+
+/*
+    Purpose
+    =======
+
+    ZTRMV performs one of the matrix-vector operations
+
+    x := A*x, or x := A'*x, or x := conjg( A' )*x,
+
+    where x is an n element vector and A is an n by n unit, or non-unit,
+    upper or lower triangular matrix.
+
+    Parameters
+    ==========
+
+    UPLO - CHARACTER*1.
+    On entry, UPLO specifies whether the matrix is an upper or
+    lower triangular matrix as follows:
+
+    UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+    UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+    Unchanged on exit.
+
+    TRANS - CHARACTER*1.
+    On entry, TRANS specifies the operation to be performed as
+    follows:
+
+    TRANS = 'N' or 'n' x := A*x.
+
+    TRANS = 'T' or 't' x := A'*x.
+
+    TRANS = 'C' or 'c' x := conjg( A' )*x.
+
+    Unchanged on exit.
+
+    DIAG - CHARACTER*1.
+    On entry, DIAG specifies whether or not A is unit
+    triangular as follows:
+
+    DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+    DIAG = 'N' or 'n' A is not assumed to be unit
+    triangular.
+
+    Unchanged on exit.
+
+    N - INTEGER.
+    On entry, N specifies the order of the matrix A.
+    N must be at least zero.
+    Unchanged on exit.
+
+    A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+    Before entry with UPLO = 'U' or 'u', the leading n by n
+    upper triangular part of the array A must contain the upper
+    triangular matrix and the strictly lower triangular part of
+    A is not referenced.
+    Before entry with UPLO = 'L' or 'l', the leading n by n
+    lower triangular part of the array A must contain the lower
+    triangular matrix and the strictly upper triangular part of
+    A is not referenced.
+    Note that when DIAG = 'U' or 'u', the diagonal elements of
+    A are not referenced either, but are assumed to be unity.
+    Unchanged on exit.
+
+    LDA - INTEGER.
+    On entry, LDA specifies the first dimension of A as declared
+    in the calling (sub) program. LDA must be at least
+    max( 1, n ).
+    Unchanged on exit.
+
+    X - COMPLEX*16 array of dimension at least
+    ( 1 + ( n - 1 )*abs( INCX ) ).
+    Before entry, the incremented array X must contain the n
+    element vector x. On exit, X is overwritten with the
+    transformed vector x.
+
+    INCX - INTEGER.
+    On entry, INCX specifies the increment for the elements of
+    X. INCX must not be zero.
+    Unchanged on exit.
+
+
+    Level 2 Blas routine.
+
+    -- Written on 22-October-1986.
+    Jack Dongarra, Argonne National Lab.
+    Jeremy Du Croz, Nag Central Office.
+    Sven Hammarling, Nag Central Office.
+    Richard Hanson, Sandia National Labs.
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --x;
+
+    /* Function Body */
+    info = 0;
+    if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+	info = 1;
+    } else if (((! lsame_(trans, "N") && ! lsame_(trans,
+	    "T")) && ! lsame_(trans, "C"))) {
+	info = 2;
+    } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+	    "N"))) {
+	info = 3;
+    } else if (*n < 0) {
+	info = 4;
+    } else if (*lda < max(1,*n)) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    }
+    if (info != 0) {
+	xerbla_("ZTRMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* noconj is true for the plain transpose request ('T'); a 'C' request
+   takes the conjugate branches below via d_cnjg. nounit is true when the
+   diagonal of A is stored and must be multiplied in. */
+
+    noconj = lsame_(trans, "T");
+    nounit = lsame_(diag, "N");
+
+/*
+    Set up the start point in X if the increment is not unity. This
+    will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+    if (*incx <= 0) {
+	kx = 1 - (*n - 1) * *incx;
+    } else if (*incx != 1) {
+	kx = 1;
+    }
+
+/*
+    Start the operations. In this version the elements of A are
+    accessed sequentially with one pass through A.
+*/
+
+    if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+	if (lsame_(uplo, "U")) {
+	    if (*incx == 1) {
+/* Unit stride: x is addressed directly by element index. */
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    if (x[i__2].r != 0. || x[i__2].i != 0.) {
+			i__2 = j;
+			temp.r = x[i__2].r, temp.i = x[i__2].i;
+			i__2 = j - 1;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    i__3 = i__;
+			    i__4 = i__;
+			    i__5 = i__ + j * a_dim1;
+			    z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				    z__2.i = temp.r * a[i__5].i + temp.i * a[
+				    i__5].r;
+			    z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+				    z__2.i;
+			    x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L10: */
+			}
+			if (nounit) {
+			    i__2 = j;
+			    i__3 = j;
+			    i__4 = j + j * a_dim1;
+			    z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+				    i__4].i, z__1.i = x[i__3].r * a[i__4].i +
+				    x[i__3].i * a[i__4].r;
+			    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+			}
+		    }
+/* L20: */
+		}
+	    } else {
+/* Non-unit stride: traverse x through the jx/ix running offsets. */
+		jx = kx;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = jx;
+		    if (x[i__2].r != 0. || x[i__2].i != 0.) {
+			i__2 = jx;
+			temp.r = x[i__2].r, temp.i = x[i__2].i;
+			ix = kx;
+			i__2 = j - 1;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    i__3 = ix;
+			    i__4 = ix;
+			    i__5 = i__ + j * a_dim1;
+			    z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				    z__2.i = temp.r * a[i__5].i + temp.i * a[
+				    i__5].r;
+			    z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+				    z__2.i;
+			    x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+			    ix += *incx;
+/* L30: */
+			}
+			if (nounit) {
+			    i__2 = jx;
+			    i__3 = jx;
+			    i__4 = j + j * a_dim1;
+			    z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+				    i__4].i, z__1.i = x[i__3].r * a[i__4].i +
+				    x[i__3].i * a[i__4].r;
+			    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+			}
+		    }
+		    jx += *incx;
+/* L40: */
+		}
+	    }
+	} else {
+	    if (*incx == 1) {
+		for (j = *n; j >= 1; --j) {
+		    i__1 = j;
+		    if (x[i__1].r != 0. || x[i__1].i != 0.) {
+			i__1 = j;
+			temp.r = x[i__1].r, temp.i = x[i__1].i;
+			i__1 = j + 1;
+			for (i__ = *n; i__ >= i__1; --i__) {
+			    i__2 = i__;
+			    i__3 = i__;
+			    i__4 = i__ + j * a_dim1;
+			    z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+				    z__2.i = temp.r * a[i__4].i + temp.i * a[
+				    i__4].r;
+			    z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+				    z__2.i;
+			    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L50: */
+			}
+			if (nounit) {
+			    i__1 = j;
+			    i__2 = j;
+			    i__3 = j + j * a_dim1;
+			    z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+				    i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+				    x[i__2].i * a[i__3].r;
+			    x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+			}
+		    }
+/* L60: */
+		}
+	    } else {
+		kx += (*n - 1) * *incx;
+		jx = kx;
+		for (j = *n; j >= 1; --j) {
+		    i__1 = jx;
+		    if (x[i__1].r != 0. || x[i__1].i != 0.) {
+			i__1 = jx;
+			temp.r = x[i__1].r, temp.i = x[i__1].i;
+			ix = kx;
+			i__1 = j + 1;
+			for (i__ = *n; i__ >= i__1; --i__) {
+			    i__2 = ix;
+			    i__3 = ix;
+			    i__4 = i__ + j * a_dim1;
+			    z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+				    z__2.i = temp.r * a[i__4].i + temp.i * a[
+				    i__4].r;
+			    z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+				    z__2.i;
+			    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+			    ix -= *incx;
+/* L70: */
+			}
+			if (nounit) {
+			    i__1 = jx;
+			    i__2 = jx;
+			    i__3 = j + j * a_dim1;
+			    z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+				    i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+				    x[i__2].i * a[i__3].r;
+			    x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+			}
+		    }
+		    jx -= *incx;
+/* L80: */
+		}
+	    }
+	}
+    } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+	if (lsame_(uplo, "U")) {
+	    if (*incx == 1) {
+		for (j = *n; j >= 1; --j) {
+		    i__1 = j;
+		    temp.r = x[i__1].r, temp.i = x[i__1].i;
+		    if (noconj) {
+			if (nounit) {
+			    i__1 = j + j * a_dim1;
+			    z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+				    z__1.i = temp.r * a[i__1].i + temp.i * a[
+				    i__1].r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			for (i__ = j - 1; i__ >= 1; --i__) {
+			    i__1 = i__ + j * a_dim1;
+			    i__2 = i__;
+			    z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+				    i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+				    a[i__1].i * x[i__2].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+			}
+		    } else {
+			if (nounit) {
+			    d_cnjg(&z__2, &a[j + j * a_dim1]);
+			    z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+				    z__1.i = temp.r * z__2.i + temp.i *
+				    z__2.r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			for (i__ = j - 1; i__ >= 1; --i__) {
+			    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			    i__1 = i__;
+			    z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+				    z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+				    i__1].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+			}
+		    }
+		    i__1 = j;
+		    x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L110: */
+		}
+	    } else {
+		jx = kx + (*n - 1) * *incx;
+		for (j = *n; j >= 1; --j) {
+		    i__1 = jx;
+		    temp.r = x[i__1].r, temp.i = x[i__1].i;
+		    ix = jx;
+		    if (noconj) {
+			if (nounit) {
+			    i__1 = j + j * a_dim1;
+			    z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+				    z__1.i = temp.r * a[i__1].i + temp.i * a[
+				    i__1].r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			for (i__ = j - 1; i__ >= 1; --i__) {
+			    ix -= *incx;
+			    i__1 = i__ + j * a_dim1;
+			    i__2 = ix;
+			    z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+				    i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+				    a[i__1].i * x[i__2].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+			}
+		    } else {
+			if (nounit) {
+			    d_cnjg(&z__2, &a[j + j * a_dim1]);
+			    z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+				    z__1.i = temp.r * z__2.i + temp.i *
+				    z__2.r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			for (i__ = j - 1; i__ >= 1; --i__) {
+			    ix -= *incx;
+			    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			    i__1 = ix;
+			    z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+				    z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+				    i__1].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+			}
+		    }
+		    i__1 = jx;
+		    x[i__1].r = temp.r, x[i__1].i = temp.i;
+		    jx -= *incx;
+/* L140: */
+		}
+	    }
+	} else {
+	    if (*incx == 1) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    temp.r = x[i__2].r, temp.i = x[i__2].i;
+		    if (noconj) {
+			if (nounit) {
+			    i__2 = j + j * a_dim1;
+			    z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+				    z__1.i = temp.r * a[i__2].i + temp.i * a[
+				    i__2].r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			i__2 = *n;
+			for (i__ = j + 1; i__ <= i__2; ++i__) {
+			    i__3 = i__ + j * a_dim1;
+			    i__4 = i__;
+			    z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+				    i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+				    a[i__3].i * x[i__4].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+			}
+		    } else {
+			if (nounit) {
+			    d_cnjg(&z__2, &a[j + j * a_dim1]);
+			    z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+				    z__1.i = temp.r * z__2.i + temp.i *
+				    z__2.r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			i__2 = *n;
+			for (i__ = j + 1; i__ <= i__2; ++i__) {
+			    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			    i__3 = i__;
+			    z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+				    z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+				    i__3].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+			}
+		    }
+		    i__2 = j;
+		    x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L170: */
+		}
+	    } else {
+		jx = kx;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = jx;
+		    temp.r = x[i__2].r, temp.i = x[i__2].i;
+		    ix = jx;
+		    if (noconj) {
+			if (nounit) {
+			    i__2 = j + j * a_dim1;
+			    z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+				    z__1.i = temp.r * a[i__2].i + temp.i * a[
+				    i__2].r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			i__2 = *n;
+			for (i__ = j + 1; i__ <= i__2; ++i__) {
+			    ix += *incx;
+			    i__3 = i__ + j * a_dim1;
+			    i__4 = ix;
+			    z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+				    i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+				    a[i__3].i * x[i__4].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L180: */
+			}
+		    } else {
+			if (nounit) {
+			    d_cnjg(&z__2, &a[j + j * a_dim1]);
+			    z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+				    z__1.i = temp.r * z__2.i + temp.i *
+				    z__2.r;
+			    temp.r = z__1.r, temp.i = z__1.i;
+			}
+			i__2 = *n;
+			for (i__ = j + 1; i__ <= i__2; ++i__) {
+			    ix += *incx;
+			    d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			    i__3 = ix;
+			    z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+				    z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+				    i__3].r;
+			    z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+				    z__2.i;
+			    temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+			}
+		    }
+		    i__2 = jx;
+		    x[i__2].r = temp.r, x[i__2].i = temp.i;
+		    jx += *incx;
+/* L200: */
+		}
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of ZTRMV . */
+
+} /* ztrmv_ */
+
+/* Subroutine */ int ztrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
+ integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublecomplex temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRSM solves one of the matrix equations
+
+ op( A )*X = alpha*B, or X*op( A ) = alpha*B,
+
+ where alpha is a scalar, X and B are m by n matrices, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
+
+ The matrix X is overwritten on B.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) appears on the left
+ or right of X as follows:
+
+ SIDE = 'L' or 'l' op( A )*X = alpha*B.
+
+ SIDE = 'R' or 'r' X*op( A ) = alpha*B.
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the right-hand side matrix B, and on exit is
+ overwritten by the solution matrix X.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if ((! lside && ! lsame_(side, "R"))) {
+ info = 1;
+ } else if ((! upper && ! lsame_(uplo, "L"))) {
+ info = 2;
+ } else if (((! lsame_(transa, "N") && ! lsame_(
+ transa, "T")) && ! lsame_(transa, "C"))) {
+ info = 3;
+ } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+ "N"))) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if ((alpha->r == 0. && alpha->i == 0.)) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if (b[i__2].r != 0. || b[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ z__2.r = b[i__5].r * a[i__6].r - b[i__5].i *
+ a[i__6].i, z__2.i = b[i__5].r * a[
+ i__6].i + b[i__5].i * a[i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ if (nounit) {
+ i__3 = k + j * b_dim1;
+ z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * b_dim1;
+ i__7 = i__ + k * a_dim1;
+ z__2.r = b[i__6].r * a[i__7].r - b[i__6].i *
+ a[i__7].i, z__2.i = b[i__6].r * a[
+ i__7].i + b[i__6].i * a[i__7].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/*
+ Form B := alpha*inv( A' )*B
+ or B := alpha*inv( conjg( A' ) )*B.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (noconj) {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, z__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L110: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
+ .i, z__2.i = z__3.r * b[i__4].i +
+ z__3.i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ z__1.i = alpha->r * b[i__2].i + alpha->i * b[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (noconj) {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, z__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
+ .i, z__2.i = z__3.r * b[i__3].i +
+ z__3.i * b[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L190: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * a_dim1;
+ i__7 = i__ + k * b_dim1;
+ z__2.r = a[i__6].r * b[i__7].r - a[i__6].i *
+ b[i__7].i, z__2.i = a[i__6].r * b[
+ i__7].i + a[i__6].i * b[i__7].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ if (nounit) {
+ z_div(&z__1, &c_b359, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L240: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * a_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = a[i__5].r * b[i__6].r - a[i__5].i *
+ b[i__6].i, z__2.i = a[i__5].r * b[
+ i__6].i + a[i__5].i * b[i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ if (nounit) {
+ z_div(&z__1, &c_b359, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ }
+ } else {
+
+/*
+ Form B := alpha*B*inv( A' )
+ or B := alpha*B*inv( conjg( A' ) ).
+*/
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ if (noconj) {
+ z_div(&z__1, &c_b359, &a[k + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z_div(&z__1, &c_b359, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L290: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ } else {
+ d_cnjg(&z__1, &a[j + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L320: */
+ }
+ }
+/* L330: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ if (noconj) {
+ z_div(&z__1, &c_b359, &a[k + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z_div(&z__1, &c_b359, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L340: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ } else {
+ d_cnjg(&z__1, &a[j + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L370: */
+ }
+ }
+/* L380: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRSM . */
+
+} /* ztrsm_ */
+
+/* Subroutine */ int ztrsv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRSV solves one of the systems of equations
+
+ A*x = b, or A'*x = b, or conjg( A' )*x = b,
+
+ where b and x are n element vectors and A is an n by n unit, or
+ non-unit, upper or lower triangular matrix.
+
+ No test for singularity or near-singularity is included in this
+ routine. Such tests must be performed before calling this routine.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the equations to be solved as
+ follows:
+
+ TRANS = 'N' or 'n' A*x = b.
+
+ TRANS = 'T' or 't' A'*x = b.
+
+ TRANS = 'C' or 'c' conjg( A' )*x = b.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element right-hand side vector b. On exit, X is overwritten
+ with the solution vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
+ info = 1;
+ } else if (((! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) && ! lsame_(trans, "C"))) {
+ info = 2;
+ } else if ((! lsame_(diag, "U") && ! lsame_(diag,
+ "N"))) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("ZTRSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ if (nounit) {
+ i__1 = j;
+ z_div(&z__1, &x[j], &a[j + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__;
+ i__2 = i__;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ if (nounit) {
+ i__1 = jx;
+ z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = ix;
+ i__2 = ix;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = j;
+ z_div(&z__1, &x[j], &a[j + j * a_dim1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = jx;
+ z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ix = kx;
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ ix = kx;
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = ix;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__2 = ix;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRSV . */
+
+} /* ztrsv_ */
+
diff --git a/numpy/corelinalg/dlamch.c b/numpy/corelinalg/dlamch.c
new file mode 100644
index 000000000..dda3f36e2
--- /dev/null
+++ b/numpy/corelinalg/dlamch.c
@@ -0,0 +1,951 @@
+#include <stdio.h>
+#include "f2c.h"
+
+/* If config.h is available, we only need dlamc3 */
+#ifndef HAVE_CONFIG
+doublereal dlamch_(char *cmach)
+{
+/* -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAMCH determines double precision machine parameters.
+
+ Arguments
+ =========
+
+ CMACH (input) CHARACTER*1
+ Specifies the value to be returned by DLAMCH:
+ = 'E' or 'e', DLAMCH := eps
+ = 'S' or 's', DLAMCH := sfmin
+ = 'B' or 'b', DLAMCH := base
+ = 'P' or 'p', DLAMCH := eps*base
+ = 'N' or 'n', DLAMCH := t
+ = 'R' or 'r', DLAMCH := rnd
+ = 'M' or 'm', DLAMCH := emin
+ = 'U' or 'u', DLAMCH := rmin
+ = 'L' or 'l', DLAMCH := emax
+ = 'O' or 'o', DLAMCH := rmax
+
+ where
+
+ eps = relative machine precision
+ sfmin = safe minimum, such that 1/sfmin does not overflow
+ base = base of the machine
+ prec = eps*base
+ t = number of (base) digits in the mantissa
+ rnd = 1.0 when rounding occurs in addition, 0.0 otherwise
+ emin = minimum exponent before (gradual) underflow
+ rmin = underflow threshold - base**(emin-1)
+ emax = largest exponent before overflow
+ rmax = overflow threshold - (base**emax)*(1-eps)
+
+ =====================================================================
+*/
+/* >>Start of File<<
+ Initialized data */
+ static logical first = TRUE_; /* NOTE(review): static probe cache makes this non-reentrant */
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val;
+ /* Builtin functions */
+ double pow_di(doublereal *, integer *);
+ /* Local variables */
+ static doublereal base;
+ static integer beta;
+ static doublereal emin, prec, emax;
+ static integer imin, imax;
+ static logical lrnd;
+ static doublereal rmin, rmax, t, rmach;
+ extern logical lsame_(char *, char *);
+ static doublereal small, sfmin;
+ extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *);
+ static integer it;
+ static doublereal rnd, eps;
+
+
+
+ if (first) { /* probe the machine once; all later calls reuse the statics */
+ first = FALSE_;
+ dlamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax);
+ base = (doublereal) beta;
+ t = (doublereal) it;
+ if (lrnd) {
+ rnd = 1.;
+ i__1 = 1 - it;
+ eps = pow_di(&base, &i__1) / 2; /* rounding arithmetic: eps = base**(1-t)/2 */
+ } else {
+ rnd = 0.;
+ i__1 = 1 - it;
+ eps = pow_di(&base, &i__1); /* chopping arithmetic: eps = base**(1-t) */
+ }
+ prec = eps * base;
+ emin = (doublereal) imin;
+ emax = (doublereal) imax;
+ sfmin = rmin;
+ small = 1. / rmax;
+ if (small >= sfmin) {
+
+/* Use SMALL plus a bit, to avoid the possibility of rou
+nding
+ causing overflow when computing 1/sfmin. */
+
+ sfmin = small * (eps + 1.);
+ }
+ }
+
+ if (lsame_(cmach, "E")) {
+ rmach = eps;
+ } else if (lsame_(cmach, "S")) {
+ rmach = sfmin;
+ } else if (lsame_(cmach, "B")) {
+ rmach = base;
+ } else if (lsame_(cmach, "P")) {
+ rmach = prec;
+ } else if (lsame_(cmach, "N")) {
+ rmach = t;
+ } else if (lsame_(cmach, "R")) {
+ rmach = rnd;
+ } else if (lsame_(cmach, "M")) {
+ rmach = emin;
+ } else if (lsame_(cmach, "U")) {
+ rmach = rmin;
+ } else if (lsame_(cmach, "L")) {
+ rmach = emax;
+ } else if (lsame_(cmach, "O")) {
+ rmach = rmax;
+ } /* NOTE(review): unrecognized CMACH returns an indeterminate rmach, as in the Fortran original */
+
+ ret_val = rmach;
+ return ret_val;
+
+/* End of DLAMCH */
+
+} /* dlamch_ */
+
+
+/* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical
+ *ieee1)
+{
+/* -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAMC1 determines the machine parameters given by BETA, T, RND, and
+ IEEE1.
+
+ Arguments
+ =========
+
+ BETA (output) INTEGER
+ The base of the machine.
+
+ T (output) INTEGER
+ The number of ( BETA ) digits in the mantissa.
+
+ RND (output) LOGICAL
+ Specifies whether proper rounding ( RND = .TRUE. ) or
+ chopping ( RND = .FALSE. ) occurs in addition. This may not
+
+ be a reliable guide to the way in which the machine performs
+
+ its arithmetic.
+
+ IEEE1 (output) LOGICAL
+ Specifies whether rounding appears to be done in the IEEE
+ 'round to nearest' style.
+
+ Further Details
+ ===============
+
+ The routine is based on the routine ENVRON by Malcolm and
+ incorporates suggestions by Gentleman and Marovich. See
+
+ Malcolm M. A. (1972) Algorithms to reveal properties of
+ floating-point arithmetic. Comms. of the ACM, 15, 949-951.
+
+ Gentleman W. M. and Marovich S. B. (1974) More on algorithms
+ that reveal properties of floating point arithmetic units.
+ Comms. of the ACM, 17, 276-277.
+
+ =====================================================================
+*/
+ /* Initialized data */
+ static logical first = TRUE_;
+ /* System generated locals */
+ doublereal d__1, d__2;
+ /* Local variables */
+ static logical lrnd;
+ static doublereal a, b, c, f;
+ static integer lbeta;
+ static doublereal savec;
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ static logical lieee1;
+ static doublereal t1, t2;
+ static integer lt;
+ static doublereal one, qtr;
+
+
+
+ if (first) {
+ first = FALSE_;
+ one = 1.;
+
+/* LBETA, LIEEE1, LT and LRND are the local values of BE
+TA,
+ IEEE1, T and RND.
+
+ Throughout this routine we use the function DLAMC3 to ens
+ure
+ that relevant values are stored and not held in registers,
+ or
+ are not affected by optimizers.
+
+ Compute a = 2.0**m with the smallest positive integer m s
+uch
+ that
+
+ fl( a + 1.0 ) = a. */
+
+ a = 1.;
+ c = 1.;
+
+/* + WHILE( C.EQ.ONE )LOOP */
+L10:
+ if (c == one) {
+ a *= 2;
+ c = dlamc3_(&a, &one);
+ d__1 = -a;
+ c = dlamc3_(&c, &d__1);
+ goto L10;
+ }
+/* + END WHILE
+
+ Now compute b = 2.0**m with the smallest positive integer
+m
+ such that
+
+ fl( a + b ) .gt. a. */
+
+ b = 1.;
+ c = dlamc3_(&a, &b);
+
+/* + WHILE( C.EQ.A )LOOP */
+L20:
+ if (c == a) {
+ b *= 2;
+ c = dlamc3_(&a, &b);
+ goto L20;
+ }
+/* + END WHILE
+
+ Now compute the base. a and c are neighbouring floating po
+int
+ numbers in the interval ( beta**t, beta**( t + 1 ) ) and
+ so
+ their difference is beta. Adding 0.25 to c is to ensure that
+ it
+ is truncated to beta and not ( beta - 1 ). */
+
+ qtr = one / 4;
+ savec = c;
+ d__1 = -a;
+ c = dlamc3_(&c, &d__1);
+ lbeta = (integer) (c + qtr);
+
+/* Now determine whether rounding or chopping occurs, by addin
+g a
+ bit less than beta/2 and a bit more than beta/2 to
+ a. */
+
+ b = (doublereal) lbeta;
+ d__1 = b / 2;
+ d__2 = -b / 100;
+ f = dlamc3_(&d__1, &d__2);
+ c = dlamc3_(&f, &a);
+ if (c == a) {
+ lrnd = TRUE_;
+ } else {
+ lrnd = FALSE_;
+ }
+ d__1 = b / 2;
+ d__2 = b / 100;
+ f = dlamc3_(&d__1, &d__2);
+ c = dlamc3_(&f, &a);
+ if (lrnd && c == a) {
+ lrnd = FALSE_;
+ }
+
+/* Try and decide whether rounding is done in the IEEE 'round
+ to
+ nearest' style. B/2 is half a unit in the last place of the
+two
+ numbers A and SAVEC. Furthermore, A is even, i.e. has last
+bit
+ zero, and SAVEC is odd. Thus adding B/2 to A should not cha
+nge
+ A, but adding B/2 to SAVEC should change SAVEC. */
+
+ d__1 = b / 2;
+ t1 = dlamc3_(&d__1, &a);
+ d__1 = b / 2;
+ t2 = dlamc3_(&d__1, &savec);
+ lieee1 = t1 == a && t2 > savec && lrnd;
+
+/* Now find the mantissa, t. It should be the integer part
+ of
+ log to the base beta of a, however it is safer to determine
+ t
+ by powering. So we find t as the smallest positive integer
+for
+ which
+
+ fl( beta**t + 1.0 ) = 1.0. */
+
+ lt = 0;
+ a = 1.;
+ c = 1.;
+
+/* + WHILE( C.EQ.ONE )LOOP */
+L30:
+ if (c == one) {
+ ++lt;
+ a *= lbeta;
+ c = dlamc3_(&a, &one);
+ d__1 = -a;
+ c = dlamc3_(&c, &d__1);
+ goto L30;
+ }
+/* + END WHILE */
+
+ }
+
+ *beta = lbeta;
+ *t = lt;
+ *rnd = lrnd;
+ *ieee1 = lieee1;
+ return 0;
+
+/* End of DLAMC1 */
+
+} /* dlamc1_ */
+
+
+/* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd,
+ doublereal *eps, integer *emin, doublereal *rmin, integer *emax,
+ doublereal *rmax)
+{
+/* -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAMC2 determines the machine parameters specified in its argument
+ list.
+
+ Arguments
+ =========
+
+ BETA (output) INTEGER
+ The base of the machine.
+
+ T (output) INTEGER
+ The number of ( BETA ) digits in the mantissa.
+
+ RND (output) LOGICAL
+ Specifies whether proper rounding ( RND = .TRUE. ) or
+ chopping ( RND = .FALSE. ) occurs in addition. This may not
+
+ be a reliable guide to the way in which the machine performs
+
+ its arithmetic.
+
+ EPS (output) DOUBLE PRECISION
+ The smallest positive number such that
+
+ fl( 1.0 - EPS ) .LT. 1.0,
+
+ where fl denotes the computed value.
+
+ EMIN (output) INTEGER
+ The minimum exponent before (gradual) underflow occurs.
+
+ RMIN (output) DOUBLE PRECISION
+ The smallest normalized number for the machine, given by
+ BASE**( EMIN - 1 ), where BASE is the floating point value
+
+ of BETA.
+
+ EMAX (output) INTEGER
+ The maximum exponent before overflow occurs.
+
+ RMAX (output) DOUBLE PRECISION
+ The largest positive number for the machine, given by
+ BASE**EMAX * ( 1 - EPS ), where BASE is the floating point
+
+ value of BETA.
+
+ Further Details
+ ===============
+
+ The computation of EPS is based on a routine PARANOIA by
+ W. Kahan of the University of California at Berkeley.
+
+ =====================================================================
+*/
+
+ /* Initialized data */
+ static logical first = TRUE_;
+ static logical iwarn = FALSE_;
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3, d__4, d__5;
+ /* Builtin functions */
+ double pow_di(doublereal *, integer *);
+ /* Local variables */
+ static logical ieee;
+ static doublereal half;
+ static logical lrnd;
+ static doublereal leps, zero, a, b, c;
+ static integer i, lbeta;
+ static doublereal rbase;
+ static integer lemin, lemax, gnmin;
+ static doublereal small;
+ static integer gpmin;
+ static doublereal third, lrmin, lrmax, sixth;
+ extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *,
+ logical *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ static logical lieee1;
+ extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *),
+ dlamc5_(integer *, integer *, integer *, logical *, integer *,
+ doublereal *);
+ static integer lt, ngnmin, ngpmin;
+ static doublereal one, two;
+
+
+
+ if (first) {
+ first = FALSE_;
+ zero = 0.;
+ one = 1.;
+ two = 2.;
+
+/* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values
+ of
+ BETA, T, RND, EPS, EMIN and RMIN.
+
+ Throughout this routine we use the function DLAMC3 to ens
+ure
+ that relevant values are stored and not held in registers,
+ or
+ are not affected by optimizers.
+
+ DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1.
+*/
+
+ dlamc1_(&lbeta, &lt, &lrnd, &lieee1);
+
+/* Start to find EPS. */
+
+ b = (doublereal) lbeta;
+ i__1 = -lt;
+ a = pow_di(&b, &i__1);
+ leps = a;
+
+/* Try some tricks to see whether or not this is the correct E
+PS. */
+
+ b = two / 3;
+ half = one / 2;
+ d__1 = -half;
+ sixth = dlamc3_(&b, &d__1);
+ third = dlamc3_(&sixth, &sixth);
+ d__1 = -half;
+ b = dlamc3_(&third, &d__1);
+ b = dlamc3_(&b, &sixth);
+ b = abs(b);
+ if (b < leps) {
+ b = leps;
+ }
+
+ leps = 1.;
+
+/* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */
+L10:
+ if (leps > b && b > zero) {
+ leps = b;
+ d__1 = half * leps;
+/* Computing 5th power */
+ d__3 = two, d__4 = d__3, d__3 *= d__3;
+/* Computing 2nd power */
+ d__5 = leps;
+ d__2 = d__4 * (d__3 * d__3) * (d__5 * d__5);
+ c = dlamc3_(&d__1, &d__2);
+ d__1 = -c;
+ c = dlamc3_(&half, &d__1);
+ b = dlamc3_(&half, &c);
+ d__1 = -b;
+ c = dlamc3_(&half, &d__1);
+ b = dlamc3_(&half, &c);
+ goto L10;
+ }
+/* + END WHILE */
+
+ if (a < leps) {
+ leps = a;
+ }
+
+/* Computation of EPS complete.
+
+ Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3
+)).
+ Keep dividing A by BETA until (gradual) underflow occurs. T
+his
+ is detected when we cannot recover the previous A. */
+
+ rbase = one / lbeta;
+ small = one;
+ for (i = 1; i <= 3; ++i) {
+ d__1 = small * rbase;
+ small = dlamc3_(&d__1, &zero);
+/* L20: */
+ }
+ a = dlamc3_(&one, &small);
+ dlamc4_(&ngpmin, &one, &lbeta);
+ d__1 = -one;
+ dlamc4_(&ngnmin, &d__1, &lbeta);
+ dlamc4_(&gpmin, &a, &lbeta);
+ d__1 = -a;
+ dlamc4_(&gnmin, &d__1, &lbeta);
+ ieee = FALSE_;
+
+ if (ngpmin == ngnmin && gpmin == gnmin) {
+ if (ngpmin == gpmin) {
+ lemin = ngpmin;
+/* ( Non twos-complement machines, no gradual under
+flow;
+ e.g., VAX ) */
+ } else if (gpmin - ngpmin == 3) {
+ lemin = ngpmin - 1 + lt;
+ ieee = TRUE_;
+/* ( Non twos-complement machines, with gradual und
+erflow;
+ e.g., IEEE standard followers ) */
+ } else {
+ lemin = min(ngpmin,gpmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else if (ngpmin == gpmin && ngnmin == gnmin) {
+ if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) {
+ lemin = max(ngpmin,ngnmin);
+/* ( Twos-complement machines, no gradual underflow
+;
+ e.g., CYBER 205 ) */
+ } else {
+ lemin = min(ngpmin,ngnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin)
+ {
+ if (gpmin - min(ngpmin,ngnmin) == 3) {
+ lemin = max(ngpmin,ngnmin) - 1 + lt;
+/* ( Twos-complement machines with gradual underflo
+w;
+ no known machine ) */
+ } else {
+ lemin = min(ngpmin,ngnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else {
+/* Computing MIN */
+ i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin);
+ lemin = min(i__1,gnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+/* **
+ Comment out this if block if EMIN is ok */
+ if (iwarn) {
+ first = TRUE_;
+ printf("\n\n WARNING. The value EMIN may be incorrect:- ");
+ printf("EMIN = %8i\n",lemin);
+ printf("If, after inspection, the value EMIN looks acceptable");
+ printf("please comment out \n the IF block as marked within the");
+ printf("code of routine DLAMC2, \n otherwise supply EMIN");
+ printf("explicitly.\n");
+ }
+/* **
+
+ Assume IEEE arithmetic if we found denormalised numbers abo
+ve,
+ or if arithmetic seems to round in the IEEE style, determi
+ned
+ in routine DLAMC1. A true IEEE machine should have both thi
+ngs
+ true; however, faulty machines may have one or the other. */
+
+ ieee = ieee || lieee1;
+
+/* Compute RMIN by successive division by BETA. We could comp
+ute
+ RMIN as BASE**( EMIN - 1 ), but some machines underflow dur
+ing
+ this computation. */
+
+ lrmin = 1.;
+ i__1 = 1 - lemin;
+ for (i = 1; i <= 1-lemin; ++i) {
+ d__1 = lrmin * rbase;
+ lrmin = dlamc3_(&d__1, &zero);
+/* L30: */
+ }
+
+/* Finally, call DLAMC5 to compute EMAX and RMAX. */
+
+ dlamc5_(&lbeta, &lt, &lemin, &ieee, &lemax, &lrmax);
+ }
+
+ *beta = lbeta;
+ *t = lt;
+ *rnd = lrnd;
+ *eps = leps;
+ *emin = lemin;
+ *rmin = lrmin;
+ *emax = lemax;
+ *rmax = lrmax;
+
+ return 0;
+
+
+/* End of DLAMC2 */
+
+} /* dlamc2_ */
+#endif
+
+
+doublereal dlamc3_(doublereal *a, doublereal *b)
+{
+/* -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAMC3 is intended to force A and B to be stored prior to doing
+
+ the addition of A and B , for use in situations where optimizers
+
+ might hold one of these in a register.
+
+ Arguments
+ =========
+
+ A, B (input) DOUBLE PRECISION
+ The values A and B.
+
+ =====================================================================
+*/
+/* >>Start of File<<
+ System generated locals */
+ doublereal ret_val;
+
+
+
+ ret_val = *a + *b; /* pointer args force a memory round-trip, defeating extended-precision registers */
+
+ return ret_val;
+
+/* End of DLAMC3 */
+
+} /* dlamc3_ */
+
+
+#ifndef HAVE_CONFIG
+/* Subroutine */ int dlamc4_(integer *emin, doublereal *start, integer *base)
+{
+/* -- LAPACK auxiliary routine (version 2.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAMC4 is a service routine for DLAMC2.
+
+ Arguments
+ =========
+
+ EMIN (output) EMIN
+ The minimum exponent before (gradual) underflow, computed by
+
+ setting A = START and dividing by BASE until the previous A
+ can not be recovered.
+
+ START (input) DOUBLE PRECISION
+ The starting point for determining EMIN.
+
+ BASE (input) INTEGER
+ The base of the machine.
+
+ =====================================================================
+*/
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+ /* Local variables */
+ static doublereal zero, a;
+ static integer i;
+ static doublereal rbase, b1, b2, c1, c2, d1, d2;
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ static doublereal one;
+
+
+
+ a = *start;
+ one = 1.;
+ rbase = one / *base;
+ zero = 0.;
+ *emin = 1;
+ d__1 = a * rbase;
+ b1 = dlamc3_(&d__1, &zero); /* dlamc3_ forces the store so the optimizer can't keep extra precision */
+ c1 = a;
+ c2 = a;
+ d1 = a;
+ d2 = a;
+/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND.
+ $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */
+L10:
+ if (c1 == a && c2 == a && d1 == a && d2 == a) { /* loop until a/base can no longer be recovered */
+ --(*emin);
+ a = b1;
+ d__1 = a / *base;
+ b1 = dlamc3_(&d__1, &zero);
+ d__1 = b1 * *base;
+ c1 = dlamc3_(&d__1, &zero);
+ d1 = zero;
+ i__1 = *base;
+ for (i = 1; i <= *base; ++i) { /* recover a by repeated addition: d1 = base*b1 */
+ d1 += b1;
+/* L20: */
+ }
+ d__1 = a * rbase;
+ b2 = dlamc3_(&d__1, &zero);
+ d__1 = b2 / rbase;
+ c2 = dlamc3_(&d__1, &zero);
+ d2 = zero;
+ i__1 = *base;
+ for (i = 1; i <= *base; ++i) {
+ d2 += b2;
+/* L30: */
+ }
+ goto L10;
+ }
+/* + END WHILE */
+
+ return 0;
+
+/* End of DLAMC4 */
+
+} /* dlamc4_ */
+
+
+/* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin,
+	logical *ieee, integer *emax, doublereal *rmax)
+{
+/*  -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLAMC5 attempts to compute RMAX, the largest machine floating-point
+    number, without overflow.  It assumes that EMAX + abs(EMIN) sum
+    approximately to a power of 2.  It will fail on machines where this
+    assumption does not hold, for example, the Cyber 205 (EMIN = -28625,
+
+    EMAX = 28718).  It will also fail if the value supplied for EMIN is
+    too large (i.e. too close to zero), probably with overflow.
+
+    Arguments
+    =========
+
+    BETA    (input) INTEGER
+            The base of floating-point arithmetic.
+
+    P       (input) INTEGER
+            The number of base BETA digits in the mantissa of a
+            floating-point value.
+
+    EMIN    (input) INTEGER
+            The minimum exponent before (gradual) underflow.
+
+    IEEE    (input) LOGICAL
+            A logical flag specifying whether or not the arithmetic
+            system is thought to comply with the IEEE standard.
+
+    EMAX    (output) INTEGER
+            The largest exponent before overflow
+
+    RMAX    (output) DOUBLE PRECISION
+            The largest machine floating-point number.
+
+   =====================================================================
+
+
+
+       First compute LEXP and UEXP, two powers of 2 that bound
+       abs(EMIN). We then assume that EMAX + abs(EMIN) will sum
+       approximately to the bound that is closest to abs(EMIN).
+       (EMAX is the exponent of the required number RMAX). */
+    /* Table of constant values */
+    static doublereal c_b5 = 0.; /* addressable 0.0: f2c passes every argument by reference */
+
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+    /* Local variables (static: f2c-generated, so this routine is not reentrant) */
+    static integer lexp;
+    static doublereal oldy;
+    static integer uexp, i;
+    static doublereal y, z;
+    static integer nbits;
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    static doublereal recbas;
+    static integer exbits, expsum, try__;
+
+
+
+    lexp = 1;
+    exbits = 1;
+L10:
+    try__ = lexp << 1; /* next power of two */
+    if (try__ <= -(*emin)) {
+	lexp = try__;
+	++exbits;
+	goto L10;
+    }
+    if (lexp == -(*emin)) {
+	uexp = lexp;
+    } else {
+	uexp = try__;
+	++exbits;
+    }
+
+/*     Now -LEXP is less than or equal to EMIN, and -UEXP is greater
+       than or equal to EMIN. EXBITS is the number of bits needed to
+       store the exponent. */
+
+    if (uexp + *emin > -lexp - *emin) {
+	expsum = lexp << 1;
+    } else {
+	expsum = uexp << 1;
+    }
+
+/*     EXPSUM is the exponent range, approximately equal to
+       EMAX - EMIN + 1 . */
+
+    *emax = expsum + *emin - 1;
+    nbits = exbits + 1 + *p;
+
+/*     NBITS is the total number of bits needed to store a
+       floating-point number. */
+
+    if (nbits % 2 == 1 && *beta == 2) {
+
+/*        Either there are an odd number of bits used to store a
+          floating-point number, which is unlikely, or some bits are
+
+          not used in the representation of numbers, which is possible,
+
+          (e.g. Cray machines) or the mantissa has an implicit bit,
+          (e.g. IEEE machines, Dec Vax machines), which is perhaps the
+
+          most likely. We have to assume the last alternative.
+          If this is true, then we need to reduce EMAX by one because
+
+          there must be some way of representing zero in an implicit-bit
+
+          system. On machines like Cray, we are reducing EMAX by one
+
+          unnecessarily. */
+
+	--(*emax);
+    }
+
+    if (*ieee) {
+
+/*        Assume we are on an IEEE machine which reserves one exponent
+
+          for infinity and NaN. */
+
+	--(*emax);
+    }
+
+/*     Now create RMAX, the largest machine number, which should
+       be equal to (1.0 - BETA**(-P)) * BETA**EMAX .
+
+       First compute 1.0 - BETA**(-P), being careful that the
+       result is less than 1.0 . */
+
+    recbas = 1. / *beta;
+    z = *beta - 1.;
+    y = 0.;
+    i__1 = *p; /* f2c artifact: set but never read; the loop reads *p directly */
+    for (i = 1; i <= *p; ++i) {
+	z *= recbas;
+	if (y < 1.) {
+	    oldy = y;
+	}
+	y = dlamc3_(&y, &z); /* add through memory, defeating extended-precision registers */
+/* L20: */
+    }
+    if (y >= 1.) {
+	y = oldy;
+    }
+
+/*     Now multiply by BETA**EMAX to get RMAX. */
+
+    i__1 = *emax; /* f2c artifact: set but never read */
+    for (i = 1; i <= *emax; ++i) {
+	d__1 = y * *beta;
+	y = dlamc3_(&d__1, &c_b5);
+/* L30: */
+    }
+
+    *rmax = y;
+    return 0;
+
+/* End of DLAMC5 */
+
+} /* dlamc5_ */
+#endif
diff --git a/numpy/corelinalg/dlapack_lite.c b/numpy/corelinalg/dlapack_lite.c
new file mode 100644
index 000000000..e6634491f
--- /dev/null
+++ b/numpy/corelinalg/dlapack_lite.c
@@ -0,0 +1,36005 @@
+/*
+NOTE: This is generated code. Look in Misc/lapack_lite for information on
+ remaking this file.
+*/
+#include "f2c.h"
+
+#ifdef HAVE_CONFIG
+#include "config.h"
+#else
+extern doublereal dlamch_(char *);
+#define EPSILON dlamch_("Epsilon")
+#define SAFEMINIMUM dlamch_("Safe minimum")
+#define PRECISION dlamch_("Precision")
+#define BASE dlamch_("Base")
+#endif
+
+extern doublereal dlapy2_(doublereal *x, doublereal *y);
+
+
+
+/* Table of constant values (f2c passes all arguments by reference, so each literal constant below needs a static, addressable object) */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static doublereal c_b15 = 1.;
+static integer c__1 = 1;
+static doublereal c_b29 = 0.;
+static doublereal c_b94 = -.125;
+static doublereal c_b151 = -1.;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__8 = 8;
+static integer c__4 = 4;
+static integer c__65 = 65;
+static integer c__6 = 6;
+static integer c__15 = 15;
+static logical c_false = FALSE_;
+static integer c__10 = 10;
+static integer c__11 = 11;
+static doublereal c_b2804 = 2.;
+static logical c_true = TRUE_;
+static real c_b3825 = 0.f;
+static real c_b3826 = 1.f;
+
+/* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal *
+	d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt,
+	integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double d_sign(doublereal *, doublereal *), log(doublereal);
+
+    /* Local variables (static: f2c-generated, so this routine is not reentrant) */
+    static integer i__, j, k;
+    static doublereal p, r__;
+    static integer z__, ic, ii, kk;
+    static doublereal cs;
+    static integer is, iu;
+    static doublereal sn;
+    static integer nm1;
+    static doublereal eps;
+    static integer ivt, difl, difr, ierr, perm, mlvl, sqre;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *
+	    , doublereal *, integer *), dswap_(integer *, doublereal *,
+	    integer *, doublereal *, integer *);
+    static integer poles, iuplo, nsize, start;
+    extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    integer *, integer *, doublereal *, integer *);
+
+    extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	    integer *), dlascl_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *), dlasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlaset_(char *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static integer givcol;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    static integer icompq;
+    static doublereal orgnrm;
+    static integer givnum, givptr, qstart, smlsiz, wstart, smlszp;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       December 1, 1999
+
+
+    Purpose
+    =======
+
+    DBDSDC computes the singular value decomposition (SVD) of a real
+    N-by-N (upper or lower) bidiagonal matrix B:  B = U * S * VT,
+    using a divide and conquer method, where S is a diagonal matrix
+    with non-negative diagonal elements (the singular values of B), and
+    U and VT are orthogonal matrices of left and right singular vectors,
+    respectively. DBDSDC can be used to compute all singular values,
+    and optionally, singular vectors or singular vectors in compact form.
+
+    This code makes very mild assumptions about floating point
+    arithmetic. It will work on machines with a guard digit in
+    add/subtract, or on those binary machines without guard digits
+    which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+    It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.  See DLASD3 for details.
+
+    The code currently calls DLASDQ if singular values only are desired.
+    However, it can be slightly modified to compute singular values
+    using the divide and conquer method.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            = 'U':  B is upper bidiagonal.
+            = 'L':  B is lower bidiagonal.
+
+    COMPQ   (input) CHARACTER*1
+            Specifies whether singular vectors are to be computed
+            as follows:
+            = 'N':  Compute singular values only;
+            = 'P':  Compute singular values and compute singular
+                    vectors in compact form;
+            = 'I':  Compute singular values and singular vectors.
+
+    N       (input) INTEGER
+            The order of the matrix B.  N >= 0.
+
+    D       (input/output) DOUBLE PRECISION array, dimension (N)
+            On entry, the n diagonal elements of the bidiagonal matrix B.
+            On exit, if INFO=0, the singular values of B.
+
+    E       (input/output) DOUBLE PRECISION array, dimension (N)
+            On entry, the elements of E contain the offdiagonal
+            elements of the bidiagonal matrix whose SVD is desired.
+            On exit, E has been destroyed.
+
+    U       (output) DOUBLE PRECISION array, dimension (LDU,N)
+            If  COMPQ = 'I', then:
+               On exit, if INFO = 0, U contains the left singular vectors
+               of the bidiagonal matrix.
+            For other values of COMPQ, U is not referenced.
+
+    LDU     (input) INTEGER
+            The leading dimension of the array U.  LDU >= 1.
+            If singular vectors are desired, then LDU >= max( 1, N ).
+
+    VT      (output) DOUBLE PRECISION array, dimension (LDVT,N)
+            If  COMPQ = 'I', then:
+               On exit, if INFO = 0, VT' contains the right singular
+               vectors of the bidiagonal matrix.
+            For other values of COMPQ, VT is not referenced.
+
+    LDVT    (input) INTEGER
+            The leading dimension of the array VT.  LDVT >= 1.
+            If singular vectors are desired, then LDVT >= max( 1, N ).
+
+    Q       (output) DOUBLE PRECISION array, dimension (LDQ)
+            If  COMPQ = 'P', then:
+               On exit, if INFO = 0, Q and IQ contain the left
+               and right singular vectors in a compact form,
+               requiring O(N log N) space instead of 2*N**2.
+               In particular, Q contains all the DOUBLE PRECISION data in
+               LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1))))
+               words of memory, where SMLSIZ is returned by ILAENV and
+               is equal to the maximum size of the subproblems at the
+               bottom of the computation tree (usually about 25).
+            For other values of COMPQ, Q is not referenced.
+
+    IQ      (output) INTEGER array, dimension (LDIQ)
+            If  COMPQ = 'P', then:
+               On exit, if INFO = 0, Q and IQ contain the left
+               and right singular vectors in a compact form,
+               requiring O(N log N) space instead of 2*N**2.
+               In particular, IQ contains all INTEGER data in
+               LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1))))
+               words of memory, where SMLSIZ is returned by ILAENV and
+               is equal to the maximum size of the subproblems at the
+               bottom of the computation tree (usually about 25).
+            For other values of COMPQ, IQ is not referenced.
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LWORK)
+            If COMPQ = 'N' then LWORK >= (4 * N).
+            If COMPQ = 'P' then LWORK >= (6 * N).
+            If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N).
+
+    IWORK   (workspace) INTEGER array, dimension (8*N)
+
+    INFO    (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  The algorithm failed to compute a singular value.
+                  The update process of divide and conquer failed.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Huan Ren, Computer Science Division, University of
+       California at Berkeley, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments (shift pointers so 1-based Fortran indexing works) */
+    --d__;
+    --e;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    --q;
+    --iq;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    iuplo = 0;
+    if (lsame_(uplo, "U")) {
+	iuplo = 1;
+    }
+    if (lsame_(uplo, "L")) {
+	iuplo = 2;
+    }
+    if (lsame_(compq, "N")) {
+	icompq = 0;
+    } else if (lsame_(compq, "P")) {
+	icompq = 1;
+    } else if (lsame_(compq, "I")) {
+	icompq = 2;
+    } else {
+	icompq = -1;
+    }
+    if (iuplo == 0) {
+	*info = -1;
+    } else if (icompq < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldu < 1 || (icompq == 2 && *ldu < *n)) {
+	*info = -7;
+    } else if (*ldvt < 1 || (icompq == 2 && *ldvt < *n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DBDSDC", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0, (
+	    ftnlen)6, (ftnlen)1);
+    if (*n == 1) {
+	if (icompq == 1) {
+	    q[1] = d_sign(&c_b15, &d__[1]);
+	    q[smlsiz * *n + 1] = 1.;
+	} else if (icompq == 2) {
+	    u[u_dim1 + 1] = d_sign(&c_b15, &d__[1]);
+	    vt[vt_dim1 + 1] = 1.;
+	}
+	d__[1] = abs(d__[1]);
+	return 0;
+    }
+    nm1 = *n - 1;
+
+/*
+       If matrix lower bidiagonal, rotate to be upper bidiagonal
+       by applying Givens rotations on the left
+*/
+
+    wstart = 1;
+    qstart = 3; /* when icompq == 1, q columns 1-2 keep copies of d and e (dcopy_ below) */
+    if (icompq == 1) {
+	dcopy_(n, &d__[1], &c__1, &q[1], &c__1);
+	i__1 = *n - 1;
+	dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1);
+    }
+    if (iuplo == 2) {
+	qstart = 5; /* leave room in q for the rotation cosines/sines stored below */
+	wstart = ((*n) << (1)) - 1; /* = 2*n - 1 */
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (icompq == 1) {
+		q[i__ + ((*n) << (1))] = cs;
+		q[i__ + *n * 3] = sn;
+	    } else if (icompq == 2) {
+		work[i__] = cs;
+		work[nm1 + i__] = -sn;
+	    }
+/* L10: */
+	}
+    }
+
+/*     If ICOMPQ = 0, use DLASDQ to compute the singular values. */
+
+    if (icompq == 0) {
+	dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+		vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+		wstart], info);
+	goto L40;
+    }
+
+/*
+       If N is smaller than the minimum divide size SMLSIZ, then solve
+       the problem with another solver.
+*/
+
+    if (*n <= smlsiz) {
+	if (icompq == 2) {
+	    dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
+	    dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
+	    dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+		    , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+		    wstart], info);
+	} else if (icompq == 1) {
+	    iu = 1;
+	    ivt = iu + *n;
+	    dlaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n);
+	    dlaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n);
+	    dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + (
+		    qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[
+		    iu + (qstart - 1) * *n], n, &work[wstart], info);
+	}
+	goto L40;
+    }
+
+    if (icompq == 2) {
+	dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
+	dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
+    }
+
+/*     Scale. */
+
+    orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (orgnrm == 0.) {
+	return 0;
+    }
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr);
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, &
+	    ierr);
+
+    eps = EPSILON;
+
+    mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) /
+	    log(2.)) + 1; /* depth of the divide-and-conquer tree */
+    smlszp = smlsiz + 1;
+
+    if (icompq == 1) {
+	iu = 1;
+	ivt = smlsiz + 1;
+	difl = ivt + smlszp;
+	difr = difl + mlvl;
+	z__ = difr + ((mlvl) << (1));
+	ic = z__ + mlvl;
+	is = ic + 1;
+	poles = is + 1;
+	givnum = poles + ((mlvl) << (1));
+
+	k = 1;
+	givptr = 2;
+	perm = 3;
+	givcol = perm + mlvl;
+    }
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) < eps) {
+	    d__[i__] = d_sign(&eps, &d__[i__]);
+	}
+/* L20: */
+    }
+
+    start = 1;
+    sqre = 0;
+
+    i__1 = nm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+
+/*
+             Subproblem found. First determine its size and then
+             apply divide and conquer on it.
+*/
+
+	    if (i__ < nm1) {
+
+/*              A subproblem with E(I) small for I < NM1. */
+
+		nsize = i__ - start + 1;
+	    } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/*              A subproblem with E(NM1) not too small but I = NM1. */
+
+		nsize = *n - start + 1;
+	    } else {
+
+/*
+                A subproblem with E(NM1) small. This implies an
+                1-by-1 subproblem at D(N). Solve this 1-by-1 problem
+                first.
+*/
+
+		nsize = i__ - start + 1;
+		if (icompq == 2) {
+		    u[*n + *n * u_dim1] = d_sign(&c_b15, &d__[*n]);
+		    vt[*n + *n * vt_dim1] = 1.;
+		} else if (icompq == 1) {
+		    q[*n + (qstart - 1) * *n] = d_sign(&c_b15, &d__[*n]);
+		    q[*n + (smlsiz + qstart - 1) * *n] = 1.;
+		}
+		d__[*n] = (d__1 = d__[*n], abs(d__1));
+	    }
+	    if (icompq == 2) {
+		dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start +
+			start * u_dim1], ldu, &vt[start + start * vt_dim1],
+			ldvt, &smlsiz, &iwork[1], &work[wstart], info);
+	    } else {
+		dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[
+			start], &q[start + (iu + qstart - 2) * *n], n, &q[
+			start + (ivt + qstart - 2) * *n], &iq[start + k * *n],
+			&q[start + (difl + qstart - 2) * *n], &q[start + (
+			difr + qstart - 2) * *n], &q[start + (z__ + qstart -
+			2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[
+			start + givptr * *n], &iq[start + givcol * *n], n, &
+			iq[start + perm * *n], &q[start + (givnum + qstart -
+			2) * *n], &q[start + (ic + qstart - 2) * *n], &q[
+			start + (is + qstart - 2) * *n], &work[wstart], &
+			iwork[1], info);
+		if (*info != 0) {
+		    return 0;
+		}
+	    }
+	    start = i__ + 1;
+	}
+/* L30: */
+    }
+
+/*     Unscale */
+
+    dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr);
+L40:
+
+/*     Use Selection Sort to minimize swaps of singular vectors */
+
+    i__1 = *n;
+    for (ii = 2; ii <= i__1; ++ii) {
+	i__ = ii - 1;
+	kk = i__;
+	p = d__[i__];
+	i__2 = *n;
+	for (j = ii; j <= i__2; ++j) {
+	    if (d__[j] > p) {
+		kk = j;
+		p = d__[j];
+	    }
+/* L50: */
+	}
+	if (kk != i__) {
+	    d__[kk] = d__[i__];
+	    d__[i__] = p;
+	    if (icompq == 1) {
+		iq[i__] = kk;
+	    } else if (icompq == 2) {
+		dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], &
+			c__1);
+		dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt);
+	    }
+	} else if (icompq == 1) {
+	    iq[i__] = i__;
+	}
+/* L60: */
+    }
+
+/*     If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */
+
+    if (icompq == 1) {
+	if (iuplo == 1) {
+	    iq[*n] = 1;
+	} else {
+	    iq[*n] = 0;
+	}
+    }
+
+/*
+       If B is lower bidiagonal, update U by those Givens rotations
+       which rotated B to be upper bidiagonal
+*/
+
+    if ((iuplo == 2 && icompq == 2)) {
+	dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu);
+    }
+
+    return 0;
+
+/* End of DBDSDC */
+
+} /* dbdsdc_ */
+
+/* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt,
+ integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer *
+ ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign(
+ doublereal *, doublereal *);
+
+ /* Local variables */
+ static doublereal f, g, h__;
+ static integer i__, j, m;
+ static doublereal r__, cs;
+ static integer ll;
+ static doublereal sn, mu;
+ static integer nm1, nm12, nm13, lll;
+ static doublereal eps, sll, tol, abse;
+ static integer idir;
+ static doublereal abss;
+ static integer oldm;
+ static doublereal cosl;
+ static integer isub, iter;
+ static doublereal unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dlas2_(
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ static doublereal oldcs;
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *);
+ static integer oldll;
+ static doublereal shift, sigmn, oldsn;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer maxit;
+ static doublereal sminl, sigmx;
+ static logical lower;
+ extern /* Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *), dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), xerbla_(char *,
+ integer *);
+ static doublereal sminoa, thresh;
+ static logical rotate;
+ static doublereal sminlo, tolmul;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DBDSQR computes the singular value decomposition (SVD) of a real
+ N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P'
+ denotes the transpose of P), where S is a diagonal matrix with
+ non-negative diagonal elements (the singular values of B), and Q
+ and P are orthogonal matrices.
+
+ The routine computes S, and optionally computes U * Q, P' * VT,
+ or Q' * C, for given real input matrices U, VT, and C.
+
+ See "Computing Small Singular Values of Bidiagonal Matrices With
+ Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
+ LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11,
+ no. 5, pp. 873-912, Sept 1990) and
+ "Accurate singular values and differential qd algorithms," by
+ B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics
+ Department, University of California at Berkeley, July 1992
+ for a detailed description of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': B is upper bidiagonal;
+ = 'L': B is lower bidiagonal.
+
+ N (input) INTEGER
+ The order of the matrix B. N >= 0.
+
+ NCVT (input) INTEGER
+ The number of columns of the matrix VT. NCVT >= 0.
+
+ NRU (input) INTEGER
+ The number of rows of the matrix U. NRU >= 0.
+
+ NCC (input) INTEGER
+ The number of columns of the matrix C. NCC >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the n diagonal elements of the bidiagonal matrix B.
+ On exit, if INFO=0, the singular values of B in decreasing
+ order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the elements of E contain the
+ offdiagonal elements of the bidiagonal matrix whose SVD
+ is desired. On normal exit (INFO = 0), E is destroyed.
+ If the algorithm does not converge (INFO > 0), D and E
+ will contain the diagonal and superdiagonal elements of a
+ bidiagonal matrix orthogonally equivalent to the one given
+ as input. E(N) is used for workspace.
+
+ VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
+ On entry, an N-by-NCVT matrix VT.
+ On exit, VT is overwritten by P' * VT.
+ VT is not referenced if NCVT = 0.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT.
+ LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0.
+
+ U (input/output) DOUBLE PRECISION array, dimension (LDU, N)
+ On entry, an NRU-by-N matrix U.
+ On exit, U is overwritten by U * Q.
+ U is not referenced if NRU = 0.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= max(1,NRU).
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
+ On entry, an N-by-NCC matrix C.
+ On exit, C is overwritten by Q' * C.
+ C is not referenced if NCC = 0.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C.
+ LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: If INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm did not converge; D and E contain the
+ elements of a bidiagonal matrix which is orthogonally
+ similar to the input matrix B; if INFO = i, i
+ elements of E have not converged to zero.
+
+ Internal Parameters
+ ===================
+
+ TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8)))
+ TOLMUL controls the convergence criterion of the QR loop.
+ If it is positive, TOLMUL*EPS is the desired relative
+ precision in the computed singular values.
+ If it is negative, abs(TOLMUL*EPS*sigma_max) is the
+ desired absolute accuracy in the computed singular
+ values (corresponds to relative accuracy
+ abs(TOLMUL*EPS) in the largest singular value.
+ abs(TOLMUL) should be between 1 and 1/EPS, and preferably
+ between 10 (for fast convergence) and .1/EPS
+ (for there to be some accuracy in the results).
+ Default is to lose at either one eighth or 2 of the
+ available decimal digits in each computed singular value
+ (whichever is smaller).
+
+ MAXITR INTEGER, default = 6
+ MAXITR controls the maximum number of passes of the
+ algorithm through its inner loop. The algorithms stops
+ (and so fails to converge) if the number of passes
+ through the inner loop exceeds MAXITR*N**2.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1 * 1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1 * 1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if ((! lsame_(uplo, "U") && ! lower)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
+ {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ dlasq1_(n, &d__[1], &e[1], &work[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = EPSILON;
+ unfl = SAFEMINIMUM;
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left
+*/
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ work[i__] = cs;
+ work[nm1 + i__] = sn;
+/* L10: */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
+ ldc);
+ }
+ }
+
+/*
+ Compute singular values to relative accuracy TOL
+ (By setting TOL to be negative, algorithm will compute
+ singular values to absolute accuracy ABS(TOL)*norm(input matrix))
+
+ Computing MAX
+ Computing MIN
+*/
+ d__3 = 100., d__4 = pow_dd(&eps, &c_b94);
+ d__1 = 10., d__2 = min(d__3,d__4);
+ tolmul = max(d__1,d__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L30: */
+ }
+ sminl = 0.;
+ if (tol >= 0.) {
+
+/* Relative accuracy desired */
+
+ sminoa = abs(d__[1]);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1]
+ , abs(d__1))));
+ sminoa = min(sminoa,mu);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ sminoa /= sqrt((doublereal) (*n));
+/* Computing MAX */
+ d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ } else {
+
+/*
+ Absolute accuracy desired
+
+ Computing MAX
+*/
+ d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ }
+
+/*
+ Prepare for main iteration loop for the singular values
+ (MAXIT is the maximum number of passes through the inner
+ loop permitted before nonconvergence signalled.)
+*/
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if ((tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh)) {
+ d__[m] = 0.;
+ }
+ smax = (d__1 = d__[m], abs(d__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (d__1 = d__[ll], abs(d__1));
+ abse = (d__1 = e[ll], abs(d__1));
+ if ((tol < 0. && abss <= thresh)) {
+ d__[ll] = 0.;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = min(smin,abss);
+/* Computing MAX */
+ d__1 = max(smax,abss);
+ smax = max(d__1,abse);
+/* L70: */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/*
+ If working on new submatrix, choose shift direction
+ (from larger end diagonal element towards smaller)
+*/
+
+ if (ll > oldm || m < oldll) {
+ if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/*
+ Run convergence test in forward direction
+ First apply standard test to bottom of matrix
+*/
+
+ if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs(
+ d__1)) || (tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh)
+ ) {
+ e[m - 1] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/*
+ If relative accuracy desired,
+ apply convergence criterion forward
+*/
+
+ mu = (d__1 = d__[ll], abs(d__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ sminlo = sminl;
+ mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[
+ lll], abs(d__1))));
+ sminl = min(sminl,mu);
+/* L100: */
+ }
+ }
+
+ } else {
+
+/*
+ Run convergence test in backward direction
+ First apply standard test to top of matrix
+*/
+
+ if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1)
+ ) || (tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh)) {
+ e[ll] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/*
+ If relative accuracy desired,
+ apply convergence criterion backward
+*/
+
+ mu = (d__1 = d__[m], abs(d__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ sminlo = sminl;
+ mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll]
+ , abs(d__1))));
+ sminl = min(sminl,mu);
+/* L110: */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/*
+ Compute shift. First, test if shifting would ruin relative
+ accuracy, and if so set the shift to zero.
+
+ Computing MAX
+*/
+ d__1 = eps, d__2 = tol * .01;
+ if ((tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2))) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (d__1 = d__[ll], abs(d__1));
+ dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (d__1 = d__[m], abs(d__1));
+ dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.) {
+/* Computing 2nd power */
+ d__1 = shift / sll;
+ if (d__1 * d__1 < eps) {
+ shift = 0.;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.) {
+ if (idir == 1) {
+
+/*
+ Chase bulge from top to bottom
+ Save cosines and sines for later singular vector updates
+*/
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ + 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll + 1] = cs;
+ work[i__ - ll + 1 + nm1] = sn;
+ work[i__ - ll + 1 + nm12] = oldcs;
+ work[i__ - ll + 1 + nm13] = oldsn;
+/* L120: */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/*
+ Chase bulge from bottom to top
+ Save cosines and sines for later singular vector updates
+*/
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ - 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll] = cs;
+ work[i__ - ll + nm1] = -sn;
+ work[i__ - ll + nm12] = oldcs;
+ work[i__ - ll + nm13] = -oldsn;
+/* L130: */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/*
+ Chase bulge from top to bottom
+ Save cosines and sines for later singular vector updates
+*/
+
+ f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[
+ ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ work[i__ - ll + 1] = cosr;
+ work[i__ - ll + 1 + nm1] = sinr;
+ work[i__ - ll + 1 + nm12] = cosl;
+ work[i__ - ll + 1 + nm13] = sinl;
+/* L140: */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/*
+ Chase bulge from bottom to top
+ Save cosines and sines for later singular vector updates
+*/
+
+ f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[m]
+ ) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ work[i__ - ll] = cosr;
+ work[i__ - ll + nm1] = -sinr;
+ work[i__ - ll + nm12] = cosl;
+ work[i__ - ll + nm13] = -sinl;
+/* L150: */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ dscal_(ncvt, &c_b151, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: */
+ }
+
+/*
+ Sort the singular values into decreasing order (insertion sort on
+ singular values, but only one transposition per singular vector)
+*/
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L210: */
+ }
+L220:
+ return 0;
+
+/* End of DBDSQR */
+
+} /* dbdsqr_ */
+
+/* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo,
+	integer *ihi, doublereal *scale, integer *m, doublereal *v, integer *
+	ldv, integer *info)
+{
+    /* System generated locals */
+    integer v_dim1, v_offset, i__1;
+
+    /* Local variables */
+    static integer i__, k;
+    static doublereal s;
+    static integer ii;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static logical leftv;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static logical rightv;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DGEBAK forms the right or left eigenvectors of a real general matrix
+    by backward transformation on the computed eigenvectors of the
+    balanced matrix output by DGEBAL.
+
+    Arguments
+    =========
+
+    JOB     (input) CHARACTER*1
+            Specifies the type of backward transformation required:
+            = 'N', do nothing, return immediately;
+            = 'P', do backward transformation for permutation only;
+            = 'S', do backward transformation for scaling only;
+            = 'B', do backward transformations for both permutation and
+                   scaling.
+            JOB must be the same as the argument JOB supplied to DGEBAL.
+
+    SIDE    (input) CHARACTER*1
+            = 'R':  V contains right eigenvectors;
+            = 'L':  V contains left eigenvectors.
+
+    N       (input) INTEGER
+            The number of rows of the matrix V.  N >= 0.
+
+    ILO     (input) INTEGER
+    IHI     (input) INTEGER
+            The integers ILO and IHI determined by DGEBAL.
+            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+    SCALE   (input) DOUBLE PRECISION array, dimension (N)
+            Details of the permutation and scaling factors, as returned
+            by DGEBAL.
+
+    M       (input) INTEGER
+            The number of columns of the matrix V.  M >= 0.
+
+    V       (input/output) DOUBLE PRECISION array, dimension (LDV,M)
+            On entry, the matrix of right or left eigenvectors to be
+            transformed, as returned by DHSEIN or DTREVC.
+            On exit, V is overwritten by the transformed eigenvectors.
+
+    LDV     (input) INTEGER
+            The leading dimension of the array V. LDV >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    =====================================================================
+
+
+       Decode and Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    --scale;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1 * 1;
+    v -= v_offset;
+
+    /* Function Body */
+    rightv = lsame_(side, "R");
+    leftv = lsame_(side, "L");
+
+    *info = 0;
+    if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
+	    && ! lsame_(job, "B"))) {
+	*info = -1;
+    } else if ((! rightv && ! leftv)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -4;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -7;
+    } else if (*ldv < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEBAK", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*m == 0) {
+	return 0;
+    }
+    if (lsame_(job, "N")) {
+	return 0;
+    }
+
+/*     An empty balanced block (ILO == IHI) means there is no scaling to undo. */
+
+    if (*ilo == *ihi) {
+	goto L30;
+    }
+
+/*     Backward balance */
+
+    if (lsame_(job, "S") || lsame_(job, "B")) {
+
+/*        Right eigenvectors: multiply row I of V by SCALE(I). */
+
+	if (rightv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		s = scale[i__];
+		dscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+	    }
+	}
+
+/*        Left eigenvectors: multiply row I of V by 1/SCALE(I). */
+
+	if (leftv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		s = 1. / scale[i__];
+		dscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+	    }
+	}
+
+    }
+
+/*
+       Backward permutation
+
+       For  I = ILO-1 step -1 until 1,
+                IHI+1 step 1 until N do --
+*/
+
+L30:
+    if (lsame_(job, "P") || lsame_(job, "B")) {
+	if (rightv) {
+	    i__1 = *n;
+	    for (ii = 1; ii <= i__1; ++ii) {
+		i__ = ii;
+/*              Rows ILO..IHI were not permuted by DGEBAL; skip them. */
+		if ((i__ >= *ilo && i__ <= *ihi)) {
+		    goto L40;
+		}
+		if (i__ < *ilo) {
+		    i__ = *ilo - ii;
+		}
+/*              SCALE(I) stores the interchange partner P(I) as a double. */
+		k = (integer) scale[i__];
+		if (k == i__) {
+		    goto L40;
+		}
+		dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+		;
+	    }
+	}
+
+	if (leftv) {
+	    i__1 = *n;
+	    for (ii = 1; ii <= i__1; ++ii) {
+		i__ = ii;
+		if ((i__ >= *ilo && i__ <= *ihi)) {
+		    goto L50;
+		}
+		if (i__ < *ilo) {
+		    i__ = *ilo - ii;
+		}
+/*              SCALE(I) stores the interchange partner P(I) as a double. */
+		k = (integer) scale[i__];
+		if (k == i__) {
+		    goto L50;
+		}
+		dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+		;
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of DGEBAK */
+
+} /* dgebak_ */
+
+/* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer *
+	lda, integer *ilo, integer *ihi, doublereal *scale, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    static doublereal c__, f, g;
+    static integer i__, j, k, l, m;
+    static doublereal r__, s, ca, ra;
+    static integer ica, ira, iexc;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static doublereal sfmin1, sfmin2, sfmax1, sfmax2;
+
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static logical noconv;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DGEBAL balances a general real matrix A.  This involves, first,
+    permuting A by a similarity transformation to isolate eigenvalues
+    in the first 1 to ILO-1 and last IHI+1 to N elements on the
+    diagonal; and second, applying a diagonal similarity transformation
+    to rows and columns ILO to IHI to make the rows and columns as
+    close in norm as possible.  Both steps are optional.
+
+    Balancing may reduce the 1-norm of the matrix, and improve the
+    accuracy of the computed eigenvalues and/or eigenvectors.
+
+    Arguments
+    =========
+
+    JOB     (input) CHARACTER*1
+            Specifies the operations to be performed on A:
+            = 'N':  none:  simply set ILO = 1, IHI = N, SCALE(I) = 1.0
+                    for i = 1,...,N;
+            = 'P':  permute only;
+            = 'S':  scale only;
+            = 'B':  both permute and scale.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the input matrix A.
+            On exit,  A is overwritten by the balanced matrix.
+            If JOB = 'N', A is not referenced.
+            See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    ILO     (output) INTEGER
+    IHI     (output) INTEGER
+            ILO and IHI are set to integers such that on exit
+            A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N.
+            If JOB = 'N' or 'S', ILO = 1 and IHI = N.
+
+    SCALE   (output) DOUBLE PRECISION array, dimension (N)
+            Details of the permutations and scaling factors applied to
+            A.  If P(j) is the index of the row and column interchanged
+            with row and column j and D(j) is the scaling factor
+            applied to row and column j, then
+            SCALE(j) = P(j)    for j = 1,...,ILO-1
+                     = D(j)    for j = ILO,...,IHI
+                     = P(j)    for j = IHI+1,...,N.
+            The order in which the interchanges are made is N to IHI+1,
+            then 1 to ILO-1.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    The permutations consist of row and column interchanges which put
+    the matrix in the form
+
+               ( T1   X   Y  )
+       P A P = (  0   B   Z  )
+               (  0   0   T2 )
+
+    where T1 and T2 are upper triangular matrices whose eigenvalues lie
+    along the diagonal.  The column indices ILO and IHI mark the starting
+    and ending columns of the submatrix B. Balancing consists of applying
+    a diagonal similarity transformation inv(D) * B * D to make the
+    1-norms of each row of B and its corresponding column nearly equal.
+    The output matrix is
+
+       ( T1     X*D          Y    )
+       (  0  inv(D)*B*D  inv(D)*Z ).
+       (  0      0           T2   )
+
+    Information about the permutations P and the diagonal matrix D is
+    returned in the vector SCALE.
+
+    This subroutine is based on the EISPACK routine BALANC.
+
+    Modified by Tzu-Yi Chen, Computer Science Division, University of
+      California at Berkeley, USA
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --scale;
+
+    /* Function Body */
+    *info = 0;
+    if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
+	    && ! lsame_(job, "B"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEBAL", &i__1);
+	return 0;
+    }
+
+/*     k..l delimit the active (unisolated) submatrix; shrunk by the permutation phase. */
+
+    k = 1;
+    l = *n;
+
+    if (*n == 0) {
+	goto L210;
+    }
+
+    if (lsame_(job, "N")) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scale[i__] = 1.;
+/* L10: */
+	}
+	goto L210;
+    }
+
+    if (lsame_(job, "S")) {
+	goto L120;
+    }
+
+/*     Permutation to isolate eigenvalues if possible */
+
+    goto L50;
+
+/*     Row and column exchange (shared by both searches; iexc says who called). */
+
+L20:
+    scale[m] = (doublereal) j;
+    if (j == m) {
+	goto L30;
+    }
+
+    dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+    i__1 = *n - k + 1;
+    dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+/*     Resume the search that requested the exchange: 1 = row search, 2 = column search. */
+    switch (iexc) {
+	case 1:  goto L40;
+	case 2:  goto L80;
+    }
+
+/*     Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+    if (l == 1) {
+	goto L210;
+    }
+    --l;
+
+L50:
+    for (j = l; j >= 1; --j) {
+
+	i__1 = l;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (i__ == j) {
+		goto L60;
+	    }
+/*          A nonzero off-diagonal entry in row j means it is not isolated. */
+	    if (a[j + i__ * a_dim1] != 0.) {
+		goto L70;
+	    }
+L60:
+	    ;
+	}
+
+	m = l;
+	iexc = 1;
+	goto L20;
+L70:
+	;
+    }
+
+    goto L90;
+
+/*     Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+    ++k;
+
+L90:
+    i__1 = l;
+    for (j = k; j <= i__1; ++j) {
+
+	i__2 = l;
+	for (i__ = k; i__ <= i__2; ++i__) {
+	    if (i__ == j) {
+		goto L100;
+	    }
+	    if (a[i__ + j * a_dim1] != 0.) {
+		goto L110;
+	    }
+L100:
+	    ;
+	}
+
+	m = k;
+	iexc = 2;
+	goto L20;
+L110:
+	;
+    }
+
+L120:
+    i__1 = l;
+    for (i__ = k; i__ <= i__1; ++i__) {
+	scale[i__] = 1.;
+/* L130: */
+    }
+
+    if (lsame_(job, "P")) {
+	goto L210;
+    }
+
+/*
+       Balance the submatrix in rows K to L.
+
+       Iterative loop for norm reduction
+*/
+
+/*     Limits on the accumulated scale factors; the loop below scales by powers of 8. */
+    sfmin1 = SAFEMINIMUM / PRECISION;
+    sfmax1 = 1. / sfmin1;
+    sfmin2 = sfmin1 * 8.;
+    sfmax2 = 1. / sfmin2;
+L140:
+    noconv = FALSE_;
+
+    i__1 = l;
+    for (i__ = k; i__ <= i__1; ++i__) {
+/*        c__ / r__ accumulate the off-diagonal column / row 1-norms for row/column i. */
+	c__ = 0.;
+	r__ = 0.;
+
+	i__2 = l;
+	for (j = k; j <= i__2; ++j) {
+	    if (j == i__) {
+		goto L150;
+	    }
+	    c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1));
+	    r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+L150:
+	    ;
+	}
+/*        ca / ra: largest magnitudes in column i and row i, used in overflow guards. */
+	ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+	ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1));
+	i__2 = *n - k + 1;
+	ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda);
+	ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1));
+
+/*        Guard against zero C or R due to underflow. */
+
+	if (c__ == 0. || r__ == 0.) {
+	    goto L200;
+	}
+	g = r__ / 8.;
+	f = 1.;
+	s = c__ + r__;
+L160:
+/*        Grow f by the radix (8) while the row norm still dominates the column norm. */
+/* Computing MAX */
+	d__1 = max(f,c__);
+/* Computing MIN */
+	d__2 = min(r__,g);
+	if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) {
+	    goto L170;
+	}
+	f *= 8.;
+	c__ *= 8.;
+	ca *= 8.;
+	r__ /= 8.;
+	g /= 8.;
+	ra /= 8.;
+	goto L160;
+
+L170:
+    g = c__ / 8.;
+L180:
+/*        Shrink f by the radix while the column norm dominates. */
+/* Computing MIN */
+	d__1 = min(f,c__), d__1 = min(d__1,g);
+	if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) {
+	    goto L190;
+	}
+	f /= 8.;
+	c__ /= 8.;
+	g /= 8.;
+	ca /= 8.;
+	r__ *= 8.;
+	ra *= 8.;
+	goto L180;
+
+/*     Now balance. */
+
+L190:
+/*        Only apply the scaling when it reduces c+r by at least 5%. */
+	if (c__ + r__ >= s * .95) {
+	    goto L200;
+	}
+	if ((f < 1. && scale[i__] < 1.)) {
+	    if (f * scale[i__] <= sfmin1) {
+		goto L200;
+	    }
+	}
+	if ((f > 1. && scale[i__] > 1.)) {
+	    if (scale[i__] >= sfmax1 / f) {
+		goto L200;
+	    }
+	}
+	g = 1. / f;
+	scale[i__] *= f;
+	noconv = TRUE_;
+
+/*        Apply the similarity scaling: row i by 1/f, column i by f. */
+	i__2 = *n - k + 1;
+	dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+	dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+	;
+    }
+
+    if (noconv) {
+	goto L140;
+    }
+
+L210:
+    *ilo = k;
+    *ihi = l;
+
+    return 0;
+
+/*     End of DGEBAL */
+
+} /* dgebal_ */
+
+/* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
+	taup, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    static integer i__;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfg_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DGEBD2 reduces a real general m by n matrix A to upper or lower
+    bidiagonal form B by an orthogonal transformation: Q' * A * P = B.
+
+    If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows in the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns in the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the m by n general matrix to be reduced.
+            On exit,
+            if m >= n, the diagonal and the first superdiagonal are
+              overwritten with the upper bidiagonal matrix B; the
+              elements below the diagonal, with the array TAUQ, represent
+              the orthogonal matrix Q as a product of elementary
+              reflectors, and the elements above the first superdiagonal,
+              with the array TAUP, represent the orthogonal matrix P as
+              a product of elementary reflectors;
+            if m < n, the diagonal and the first subdiagonal are
+              overwritten with the lower bidiagonal matrix B; the
+              elements below the first subdiagonal, with the array TAUQ,
+              represent the orthogonal matrix Q as a product of
+              elementary reflectors, and the elements above the diagonal,
+              with the array TAUP, represent the orthogonal matrix P as
+              a product of elementary reflectors.
+            See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    D       (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The diagonal elements of the bidiagonal matrix B:
+            D(i) = A(i,i).
+
+    E       (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
+            The off-diagonal elements of the bidiagonal matrix B:
+            if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+            if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+    TAUQ    (output) DOUBLE PRECISION array dimension (min(M,N))
+            The scalar factors of the elementary reflectors which
+            represent the orthogonal matrix Q. See Further Details.
+
+    TAUP    (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors which
+            represent the orthogonal matrix P. See Further Details.
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (max(M,N))
+
+    INFO    (output) INTEGER
+            = 0: successful exit.
+            < 0: if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    The matrices Q and P are represented as products of elementary
+    reflectors:
+
+    If m >= n,
+
+       Q = H(1) H(2) . . . H(n)  and  P = G(1) G(2) . . . G(n-1)
+
+    Each H(i) and G(i) has the form:
+
+       H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u'
+
+    where tauq and taup are real scalars, and v and u are real vectors;
+    v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
+    u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
+    tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+    If m < n,
+
+       Q = H(1) H(2) . . . H(m-1)  and  P = G(1) G(2) . . . G(m)
+
+    Each H(i) and G(i) has the form:
+
+       H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u'
+
+    where tauq and taup are real scalars, and v and u are real vectors;
+    v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+    u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+    tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+    The contents of A on exit are illustrated by the following examples:
+
+    m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n):
+
+      (  d   e   u1  u1  u1 )           (  d   u1  u1  u1  u1  u1 )
+      (  v1  d   e   u2  u2 )           (  e   d   u2  u2  u2  u2 )
+      (  v1  v2  d   e   u3 )           (  v1  e   d   u3  u3  u3 )
+      (  v1  v2  v3  d   e  )           (  v1  v2  e   d   u4  u4 )
+      (  v1  v2  v3  v4  d  )           (  v1  v2  v3  e   d   u5 )
+      (  v1  v2  v3  v4  v5 )
+
+    where d and e denote diagonal and off-diagonal elements of B, vi
+    denotes an element of the vector defining H(i), and ui an element of
+    the vector defining G(i).
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info < 0) {
+	i__1 = -(*info);
+	xerbla_("DGEBD2", &i__1);
+	return 0;
+    }
+
+    if (*m >= *n) {
+
+/*        Reduce to upper bidiagonal form */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+	    i__2 = *m - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
+		    a_dim1], &c__1, &tauq[i__]);
+/*           Save the diagonal in D(i) and overwrite A(i,i) with the implicit
+             v(i) = 1 so the stored reflector can be applied; restored below. */
+	    d__[i__] = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.;
+
+/*           Apply H(i) to A(i:m,i+1:n) from the left */
+
+	    i__2 = *m - i__ + 1;
+	    i__3 = *n - i__;
+	    dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tauq[
+		    i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+	    a[i__ + i__ * a_dim1] = d__[i__];
+
+	    if (i__ < *n) {
+
+/*
+                Generate elementary reflector G(i) to annihilate
+                A(i,i+2:n)
+*/
+
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+			i__3,*n) * a_dim1], lda, &taup[i__]);
+/*              Same trick for the row reflector: stash the superdiagonal in E(i). */
+		e[i__] = a[i__ + (i__ + 1) * a_dim1];
+		a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/*              Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+			lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &work[1]);
+		a[i__ + (i__ + 1) * a_dim1] = e[i__];
+	    } else {
+		taup[i__] = 0.;
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Reduce to lower bidiagonal form */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+	    i__2 = *n - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
+		    a_dim1], lda, &taup[i__]);
+/*           Stash the diagonal in D(i); A(i,i) holds the implicit u(i) = 1
+             while the reflector is applied, then is restored. */
+	    d__[i__] = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.;
+
+/*           Apply G(i) to A(i+1:m,i:n) from the right */
+
+	    i__2 = *m - i__;
+	    i__3 = *n - i__ + 1;
+/* Computing MIN */
+	    i__4 = i__ + 1;
+	    dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[
+		    i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]);
+	    a[i__ + i__ * a_dim1] = d__[i__];
+
+	    if (i__ < *m) {
+
+/*
+                Generate elementary reflector H(i) to annihilate
+                A(i+2:m,i)
+*/
+
+		i__2 = *m - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
+			i__ * a_dim1], &c__1, &tauq[i__]);
+/*              Stash the subdiagonal in E(i) while H(i) is applied. */
+		e[i__] = a[i__ + 1 + i__ * a_dim1];
+		a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/*              Apply H(i) to A(i+1:m,i+1:n) from the left */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+			c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &work[1]);
+		a[i__ + 1 + i__ * a_dim1] = e[i__];
+	    } else {
+		tauq[i__] = 0.;
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/*     End of DGEBD2 */
+
+} /* dgebd2_ */
+
+/* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
+ taup, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, nb, nx;
+ static doublereal ws;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer nbmin, iinfo, minmn;
+ extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlabrd_(integer *, integer *, integer *
+ , doublereal *, integer *, doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ , xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwrkx, ldwrky, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGEBRD reduces a general real M-by-N matrix A to upper or lower
+ bidiagonal form B by an orthogonal transformation: Q**T * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the orthogonal matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the orthogonal matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) DOUBLE PRECISION array dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,M,N).
+ For optimum performance LWORK >= (M+N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
+ u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = max(i__1,i__2);
+ lwkopt = (*m + *n) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if ((*lwork < max(i__1,*n) && ! lquery)) {
+ *info = -10;
+ }
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ ws = (doublereal) max(*m,*n);
+ ldwrkx = *m;
+ ldwrky = *n;
+
+ if ((nb > 1 && nb < minmn)) {
+
+/*
+ Set the crossover point NX.
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+ if (nx < minmn) {
+ ws = (doublereal) ((*m + *n) * nb);
+ if ((doublereal) (*lwork) < ws) {
+
+/*
+ Not enough work space for the optimal NB, consider using
+ a smaller block size.
+*/
+
+ nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ if (*lwork >= (*m + *n) * nbmin) {
+ nb = *lwork / (*m + *n);
+ } else {
+ nb = 1;
+ nx = minmn;
+ }
+ }
+ }
+ } else {
+ nx = minmn;
+ }
+
+ i__1 = minmn - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/*
+ Reduce rows and columns i:i+nb-1 to bidiagonal form and return
+ the matrices X and Y which are needed to update the unreduced
+ part of the matrix
+*/
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ + 1;
+ dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+ * nb + 1], &ldwrky);
+
+/*
+ Update the trailing submatrix A(i+nb:m,i+nb:n), using an update
+ of the form A := A - V*Y' - X*U'
+*/
+
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b151, &a[
+ i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], &
+ ldwrky, &c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b151, &
+ work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+ c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+ if (*m >= *n) {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + (j + 1) * a_dim1] = e[j];
+/* L10: */
+ }
+ } else {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + 1 + j * a_dim1] = e[j];
+/* L20: */
+ }
+ }
+/* L30: */
+ }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+ tauq[i__], &taup[i__], &work[1], &iinfo);
+ work[1] = ws;
+ return 0;
+
+/* End of DGEBRD */
+
+} /* dgebrd_ */
+
+/* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal *
+	a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl,
+	integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work,
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+	    i__2, i__3, i__4;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__, k;
+    static doublereal r__, cs, sn;
+    static integer ihi;
+    static doublereal scl;
+    static integer ilo;
+    static doublereal dum[1], eps;
+    static integer ibal;
+    static char side[1];
+    static integer maxb;
+    static doublereal anrm;
+    static integer ierr, itau;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    static integer iwrk, nout;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
+	    char *, char *, integer *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *),
+	    dgebal_(char *, integer *, doublereal *, integer *, integer *,
+	    integer *, doublereal *, integer *);
+    static logical scalea;
+
+    static doublereal cscale;
+    extern doublereal dlange_(char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *);
+    extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *), dlascl_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *), xerbla_(char *, integer *);
+    static logical select[1];
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static doublereal bignum;
+    extern /* Subroutine */ int dorghr_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *), dhseqr_(char *, char *, integer *, integer *, integer
+	    *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, integer *, integer *, doublereal *, integer *);
+    static integer minwrk, maxwrk;
+    static logical wantvl;
+    static doublereal smlnum;
+    static integer hswork;
+    static logical lquery, wantvr;
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       December 8, 1999
+
+
+    Purpose
+    =======
+
+    DGEEV computes for an N-by-N real nonsymmetric matrix A, the
+    eigenvalues and, optionally, the left and/or right eigenvectors.
+
+    The right eigenvector v(j) of A satisfies
+                     A * v(j) = lambda(j) * v(j)
+    where lambda(j) is its eigenvalue.
+    The left eigenvector u(j) of A satisfies
+                  u(j)**H * A = lambda(j) * u(j)**H
+    where u(j)**H denotes the conjugate transpose of u(j).
+
+    The computed eigenvectors are normalized to have Euclidean norm
+    equal to 1 and largest component real.
+
+    Arguments
+    =========
+
+    JOBVL   (input) CHARACTER*1
+            = 'N': left eigenvectors of A are not computed;
+            = 'V': left eigenvectors of A are computed.
+
+    JOBVR   (input) CHARACTER*1
+            = 'N': right eigenvectors of A are not computed;
+            = 'V': right eigenvectors of A are computed.
+
+    N       (input) INTEGER
+            The order of the matrix A. N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the N-by-N matrix A.
+            On exit, A has been overwritten.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    WR      (output) DOUBLE PRECISION array, dimension (N)
+    WI      (output) DOUBLE PRECISION array, dimension (N)
+            WR and WI contain the real and imaginary parts,
+            respectively, of the computed eigenvalues.  Complex
+            conjugate pairs of eigenvalues appear consecutively
+            with the eigenvalue having the positive imaginary part
+            first.
+
+    VL      (output) DOUBLE PRECISION array, dimension (LDVL,N)
+            If JOBVL = 'V', the left eigenvectors u(j) are stored one
+            after another in the columns of VL, in the same order
+            as their eigenvalues.
+            If JOBVL = 'N', VL is not referenced.
+            If the j-th eigenvalue is real, then u(j) = VL(:,j),
+            the j-th column of VL.
+            If the j-th and (j+1)-st eigenvalues form a complex
+            conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and
+            u(j+1) = VL(:,j) - i*VL(:,j+1).
+
+    LDVL    (input) INTEGER
+            The leading dimension of the array VL.  LDVL >= 1; if
+            JOBVL = 'V', LDVL >= N.
+
+    VR      (output) DOUBLE PRECISION array, dimension (LDVR,N)
+            If JOBVR = 'V', the right eigenvectors v(j) are stored one
+            after another in the columns of VR, in the same order
+            as their eigenvalues.
+            If JOBVR = 'N', VR is not referenced.
+            If the j-th eigenvalue is real, then v(j) = VR(:,j),
+            the j-th column of VR.
+            If the j-th and (j+1)-st eigenvalues form a complex
+            conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and
+            v(j+1) = VR(:,j) - i*VR(:,j+1).
+
+    LDVR    (input) INTEGER
+            The leading dimension of the array VR.  LDVR >= 1; if
+            JOBVR = 'V', LDVR >= N.
+
+    WORK    (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.  LWORK >= max(1,3*N), and
+            if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N.  For good
+            performance, LWORK must generally be larger.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  if INFO = i, the QR algorithm failed to compute all the
+                  eigenvalues, and no eigenvectors have been computed;
+                  elements i+1:N of WR and WI contain eigenvalues which
+                  have converged.
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* f2c translation: shift each array pointer back by one row/element so
+       the Fortran-style 1-based indexing used below (e.g. a[i + j*a_dim1],
+       wr[1]) addresses the caller's 0-based C arrays correctly. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --wr;
+    --wi;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1 * 1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1 * 1;
+    vr -= vr_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    wantvl = lsame_(jobvl, "V");
+    wantvr = lsame_(jobvr, "V");
+    if ((! wantvl && ! lsame_(jobvl, "N"))) {
+	*info = -1;
+    } else if ((! wantvr && ! lsame_(jobvr, "N"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldvl < 1 || (wantvl && *ldvl < *n)) {
+	*info = -9;
+    } else if (*ldvr < 1 || (wantvr && *ldvr < *n)) {
+	*info = -11;
+    }
+
+/*
+       Compute workspace
+        (Note: Comments in the code beginning "Workspace:" describe the
+         minimal amount of workspace needed at that point in the code,
+         as well as the preferred amount for good performance.
+         NB refers to the optimal block size for the immediately
+         following subroutine, as returned by ILAENV.
+         HSWORK refers to the workspace preferred by DHSEQR, as
+         calculated below. HSWORK is computed assuming ILO=1 and IHI=N,
+         the worst case.)
+*/
+
+    minwrk = 1;
+    if ((*info == 0 && (*lwork >= 1 || lquery))) {
+	maxwrk = ((*n) << (1)) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1,
+		n, &c__0, (ftnlen)6, (ftnlen)1);
+	if ((! wantvl && ! wantvr)) {
+/* Computing MAX */
+	    i__1 = 1, i__2 = *n * 3;
+	    minwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = ilaenv_(&c__8, "DHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen)
+		    6, (ftnlen)2);
+	    maxb = max(i__1,2);
+/*
+   Computing MIN
+   Computing MAX
+*/
+	    i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "EN", n, &c__1, n, &
+		    c_n1, (ftnlen)6, (ftnlen)2);
+	    i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+	    k = min(i__1,i__2);
+/* Computing MAX */
+	    i__1 = k * (k + 2), i__2 = (*n) << (1);
+	    hswork = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
+		    hswork;
+	    maxwrk = max(i__1,i__2);
+	} else {
+/* Computing MAX */
+	    i__1 = 1, i__2 = (*n) << (2);
+	    minwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1,
+		    "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = ilaenv_(&c__8, "DHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen)
+		    6, (ftnlen)2);
+	    maxb = max(i__1,2);
+/*
+   Computing MIN
+   Computing MAX
+*/
+	    i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "SV", n, &c__1, n, &
+		    c_n1, (ftnlen)6, (ftnlen)2);
+	    i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+	    k = min(i__1,i__2);
+/* Computing MAX */
+	    i__1 = k * (k + 2), i__2 = (*n) << (1);
+	    hswork = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
+		    hswork;
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = (*n) << (2);
+	    maxwrk = max(i__1,i__2);
+	}
+	work[1] = (doublereal) maxwrk;
+    }
+    if ((*lwork < minwrk && ! lquery)) {
+	*info = -13;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = PRECISION;
+    smlnum = SAFEMINIMUM;
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if ((anrm > 0. && anrm < smlnum)) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/*
+       Balance the matrix
+       (Workspace: need N)
+*/
+
+    ibal = 1;
+    dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/*
+       Reduce to upper Hessenberg form
+       (Workspace: need 3*N, prefer 2*N+N*NB)
+*/
+
+    itau = ibal + *n;
+    iwrk = itau + *n;
+    i__1 = *lwork - iwrk + 1;
+    dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+	     &ierr);
+
+    if (wantvl) {
+
+/*
+          Want left eigenvectors
+          Copy Householder vectors to VL
+*/
+
+	/* side records which eigenvectors dtrevc_ must compute below:
+	   'L' = left only, 'R' = right only, 'B' = both. */
+	*(unsigned char *)side = 'L';
+	dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+		;
+
+/*
+          Generate orthogonal matrix in VL
+          (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
+*/
+
+	i__1 = *lwork - iwrk + 1;
+	dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*
+          Perform QR iteration, accumulating Schur vectors in VL
+          (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+		vl[vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+	if (wantvr) {
+
+/*
+             Want left and right eigenvectors
+             Copy Schur vectors to VR
+*/
+
+	    *(unsigned char *)side = 'B';
+	    dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+	}
+
+    } else if (wantvr) {
+
+/*
+          Want right eigenvectors
+          Copy Householder vectors to VR
+*/
+
+	*(unsigned char *)side = 'R';
+	dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+		;
+
+/*
+          Generate orthogonal matrix in VR
+          (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
+*/
+
+	i__1 = *lwork - iwrk + 1;
+	dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*
+          Perform QR iteration, accumulating Schur vectors in VR
+          (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+		vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+    } else {
+
+/*
+          Compute eigenvalues only
+          (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+		vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+    }
+
+/* If INFO > 0 from DHSEQR, then quit */
+
+    if (*info > 0) {
+	goto L50;
+    }
+
+    if (wantvl || wantvr) {
+
+/*
+          Compute left and/or right eigenvectors
+          (Workspace: need 4*N)
+*/
+
+	dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+    }
+
+    if (wantvl) {
+
+/*
+          Undo balancing of left eigenvectors
+          (Workspace: need N)
+*/
+
+	dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl,
+		 &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+	/* wi[i] == 0 marks a real eigenvalue (single column); wi[i] > 0 marks
+	   the first column of a complex conjugate pair, whose real and
+	   imaginary parts occupy columns i and i+1 — both columns are scaled
+	   jointly and rotated so the largest-magnitude component is real. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (wi[i__] == 0.) {
+		scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+	    } else if (wi[i__] > 0.) {
+		d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+		d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+		scl = 1. / dlapy2_(&d__1, &d__2);
+		dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+		i__2 = *n;
+		for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+		    d__1 = vl[k + i__ * vl_dim1];
+/* Computing 2nd power */
+		    d__2 = vl[k + (i__ + 1) * vl_dim1];
+		    work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+		}
+		k = idamax_(n, &work[iwrk], &c__1);
+		dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
+			&cs, &sn, &r__);
+		drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
+			vl_dim1 + 1], &c__1, &cs, &sn);
+		vl[k + (i__ + 1) * vl_dim1] = 0.;
+	    }
+/* L20: */
+	}
+    }
+
+    if (wantvr) {
+
+/*
+          Undo balancing of right eigenvectors
+          (Workspace: need N)
+*/
+
+	dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr,
+		 &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+	/* Same scheme as for VL above, applied to the right eigenvectors. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (wi[i__] == 0.) {
+		scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+	    } else if (wi[i__] > 0.) {
+		d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+		d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+		scl = 1. / dlapy2_(&d__1, &d__2);
+		dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+		i__2 = *n;
+		for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+		    d__1 = vr[k + i__ * vr_dim1];
+/* Computing 2nd power */
+		    d__2 = vr[k + (i__ + 1) * vr_dim1];
+		    work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+		}
+		k = idamax_(n, &work[iwrk], &c__1);
+		dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
+			&cs, &sn, &r__);
+		drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
+			vr_dim1 + 1], &c__1, &cs, &sn);
+		vr[k + (i__ + 1) * vr_dim1] = 0.;
+	    }
+/* L40: */
+	}
+    }
+
+/* Undo scaling if necessary */
+
+L50:
+    if (scalea) {
+	/* If DHSEQR failed (*info > 0), only eigenvalues info+1..n converged;
+	   unscale the converged tail first, then the ilo-1 leading entries
+	   isolated by the earlier balancing step. */
+	i__1 = *n - *info;
+/* Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
+		1], &i__2, &ierr);
+	i__1 = *n - *info;
+/* Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
+		1], &i__2, &ierr);
+	if (*info > 0) {
+	    i__1 = ilo - 1;
+	    dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
+		    n, &ierr);
+	    i__1 = ilo - 1;
+	    dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
+		    n, &ierr);
+	}
+    }
+
+    work[1] = (doublereal) maxwrk;
+    return 0;
+
+/* End of DGEEV */
+
+} /* dgeev_ */
+
+/* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi,
+	doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__;
+    static doublereal aii;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfg_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DGEHD2 reduces a real general matrix A to upper Hessenberg form H by
+    an orthogonal similarity transformation:  Q' * A * Q = H .
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    ILO     (input) INTEGER
+    IHI     (input) INTEGER
+            It is assumed that A is already upper triangular in rows
+            and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+            set by a previous call to DGEBAL; otherwise they should be
+            set to 1 and N respectively. See Further Details.
+            1 <= ILO <= IHI <= max(1,N).
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the n by n general matrix to be reduced.
+            On exit, the upper triangle and the first subdiagonal of A
+            are overwritten with the upper Hessenberg matrix H, and the
+            elements below the first subdiagonal, with the array TAU,
+            represent the orthogonal matrix Q as a product of elementary
+            reflectors. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    TAU     (output) DOUBLE PRECISION array, dimension (N-1)
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (N)
+
+    INFO    (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of (ihi-ilo) elementary
+    reflectors
+
+       Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+    exit in A(i+2:ihi,i), and tau in TAU(i).
+
+    The contents of A are illustrated by the following example, with
+    n = 7, ilo = 2 and ihi = 6:
+
+    on entry,                        on exit,
+
+    ( a  a  a  a  a  a  a )    (  a  a  h  h  h  h  a )
+    (     a  a  a  a  a  a )    (      a  h  h  h  h  a )
+    (     a  a  a  a  a  a )    (      h  h  h  h  h  h )
+    (     a  a  a  a  a  a )    (      v2 h  h  h  h  h )
+    (     a  a  a  a  a  a )    (      v2 v3 h  h  h  h )
+    (     a  a  a  a  a  a )    (      v2 v3 v4 h  h  h )
+    (                    a )    (                     a )
+
+    where a denotes an element of the original matrix A, h denotes a
+    modified element of the upper Hessenberg matrix H, and vi denotes an
+    element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    /* f2c translation: shift pointers so that the Fortran-style 1-based
+       indexing below (a[i + j*a_dim1], tau[i], work[1]) is valid. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEHD2", &i__1);
+	return 0;
+    }
+
+    i__1 = *ihi - 1;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+	i__2 = *ihi - i__;
+/* Computing MIN */
+	i__3 = i__ + 2;
+	dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+		a_dim1], &c__1, &tau[i__]);
+	/* Save A(i+1,i) and temporarily overwrite it with 1 so the stored
+	   reflector vector (which has an implicit leading 1) can be applied
+	   in place by the two dlarf_ calls below; it is restored afterwards. */
+	aii = a[i__ + 1 + i__ * a_dim1];
+	a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+	i__2 = *ihi - i__;
+	dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
+
+	i__2 = *ihi - i__;
+	i__3 = *n - i__;
+	dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+	a[i__ + 1 + i__ * a_dim1] = aii;
+/* L10: */
+    }
+
+    return 0;
+
+/* End of DGEHD2 */
+
+} /* dgehd2_ */
+
+/* Subroutine */ int dgehrd_(integer *n, integer *ilo, integer *ihi,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__;
+ static doublereal t[4160] /* was [65][64] */;
+ static integer ib;
+ static doublereal ei;
+ static integer nb, nh, nx, iws;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int dgehd2_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlarfb_(char *, char *, char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dlahrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGEHRD reduces a real general matrix A to upper Hessenberg form H by
+ an orthogonal similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to DGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the N-by-N general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the orthogonal matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to
+ zero.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+ on entry, on exit,
+
+ ( a a a a a a a ) ( a a h h h h a )
+ ( a a a a a a ) ( a h h h h a )
+ ( a a a a a a ) ( h h h h h h )
+ ( a a a a a a ) ( v2 h h h h h )
+ ( a a a a a a ) ( v2 v3 h h h h )
+ ( a a a a a a ) ( v2 v3 v4 h h h )
+ ( a ) ( a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MIN */
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -2;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if ((*lwork < max(1,*n) && ! lquery)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEHRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ tau[i__] = 0.;
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+ tau[i__] = 0.;
+/* L20: */
+ }
+
+/* Quick return if possible */
+
+ nh = *ihi - *ilo + 1;
+ if (nh <= 1) {
+ work[1] = 1.;
+ return 0;
+ }
+
+/*
+ Determine the block size.
+
+ Computing MIN
+*/
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ nbmin = 2;
+ iws = 1;
+ if ((nb > 1 && nb < nh)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ iws = *n * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ if (*lwork >= *n * nbmin) {
+ nb = *lwork / *n;
+ } else {
+ nb = 1;
+ }
+ }
+ }
+ }
+ ldwork = *n;
+
+ if (nb < nbmin || nb >= nh) {
+
+/* Use unblocked code below */
+
+ i__ = *ilo;
+
+ } else {
+
+/* Use blocked code */
+
+ i__1 = *ihi - 1 - nx;
+ i__2 = nb;
+ for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *ihi - i__;
+ ib = min(i__3,i__4);
+
+/*
+ Reduce columns i:i+ib-1 to Hessenberg form, returning the
+ matrices V and T of the block reflector H = I - V*T*V'
+ which performs the reduction, and also the matrix Y = A*V*T
+*/
+
+ dlahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+ c__65, &work[1], &ldwork);
+
+/*
+ Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
+ right, computing A := A - Y * V'. V(i+ib,ib-1) must be set
+ to 1.
+*/
+
+ ei = a[i__ + ib + (i__ + ib - 1) * a_dim1];
+ a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.;
+ i__3 = *ihi - i__ - ib + 1;
+ dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b151, &
+ work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &
+ c_b15, &a[(i__ + ib) * a_dim1 + 1], lda);
+ a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei;
+
+/*
+ Apply the block reflector H to A(i+1:ihi,i+ib:n) from the
+ left
+*/
+
+ i__3 = *ihi - i__;
+ i__4 = *n - i__ - ib + 1;
+ dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[
+ i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork);
+/* L30: */
+ }
+ }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+ dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ work[1] = (doublereal) iws;
+
+ return 0;
+
+/* End of DGEHRD */
+
+} /* dgehrd_ */
+
+/* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    /* NOTE(review): f2c emits these locals as "static", so this routine is
+       not reentrant/thread-safe. */
+    static integer i__, k;
+    static doublereal aii;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfg_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DGELQ2 computes an LQ factorization of a real m by n matrix A:
+    A = L * Q.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the m by n matrix A.
+            On exit, the elements on and below the diagonal of the array
+            contain the m by min(m,n) lower trapezoidal matrix L (L is
+            lower triangular if m <= n); the elements above the diagonal,
+            with the array TAU, represent the orthogonal matrix Q as a
+            product of elementary reflectors (see Further Details).
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    TAU     (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (M)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of elementary reflectors
+
+       Q = H(k) . . . H(2) H(1), where k = min(m,n).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
+    and tau in TAU(i).
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments: shift the array pointers so that the code
+       below can use 1-based, column-major (Fortran-style) indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELQ2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+	i__2 = *n - i__ + 1;
+/* Computing MIN */
+	i__3 = i__ + 1;
+/*      min(i__3,*n) keeps the reflector-vector pointer in bounds when
+        i == n (the vector is then empty but must not dereference past A) */
+	dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1]
+		, lda, &tau[i__]);
+	if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+	    /* Temporarily overwrite A(i,i) with the implicit leading 1 of
+	       the reflector vector v, then restore it afterwards. */
+	    aii = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.;
+	    i__2 = *m - i__;
+	    i__3 = *n - i__ + 1;
+	    dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+		    i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	    a[i__ + i__ * a_dim1] = aii;
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of DGELQ2 */
+
+} /* dgelq2_ */
+
+/* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    /* NOTE(review): f2c emits these locals as "static", so this routine is
+       not reentrant/thread-safe. */
+    static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+	    char *, char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static integer ldwork, lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DGELQF computes an LQ factorization of a real M-by-N matrix A:
+    A = L * Q.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the M-by-N matrix A.
+            On exit, the elements on and below the diagonal of the array
+            contain the m-by-min(m,n) lower trapezoidal matrix L (L is
+            lower triangular if m <= n); the elements above the diagonal,
+            with the array TAU, represent the orthogonal matrix Q as a
+            product of elementary reflectors (see Further Details).
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    TAU     (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.  LWORK >= max(1,M).
+            For optimum performance LWORK >= M*NB, where NB is the
+            optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of elementary reflectors
+
+       Q = H(k) . . . H(2) H(1), where k = min(m,n).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
+    and tau in TAU(i).
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments: shift the array pointers so that the code
+       below can use 1-based, column-major (Fortran-style) indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    /* Ask ILAENV for the optimal block size NB for DGELQF. */
+    nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+	    1);
+    lwkopt = *m * nb;
+    work[1] = (doublereal) lwkopt;
+    /* LWORK == -1 requests a workspace-size query only (no factorization). */
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if ((*lwork < max(1,*m) && ! lquery)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if ((nb > 1 && nb < k)) {
+
+/*
+       Determine when to cross over from blocked to unblocked code.
+
+       Computing MAX
+*/
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, (
+		ftnlen)6, (ftnlen)1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/*
+                Not enough workspace to use optimal NB:  reduce NB and
+                determine the minimum value of NB.
+*/
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, &
+			c_n1, (ftnlen)6, (ftnlen)1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    /* Blocked path: factor NB-wide panels with the unblocked DGELQ2, then
+       apply the accumulated block reflector to the trailing rows. */
+    if (((nb >= nbmin && nb < k) && nx < k)) {
+
+/* Use blocked code initially */
+
+	i__1 = k - nx;
+	i__2 = nb;
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/*
+             Compute the LQ factorization of the current block
+             A(i:i+ib-1,i:n)
+*/
+
+	    i__3 = *n - i__ + 1;
+	    dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *m) {
+
+/*
+                Form the triangular factor of the block reflector
+                H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+		i__3 = *n - i__ + 1;
+		dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+		i__3 = *m - i__ - ib + 1;
+		i__4 = *n - i__ + 1;
+		dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+			&i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+			1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+		, &iinfo);
+    }
+
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DGELQF */
+
+} /* dgelqf_ */
+
+/* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork,
+	integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): f2c emits these locals as "static", so this routine is
+       not reentrant/thread-safe. */
+    static integer ie, il, mm;
+    static doublereal eps, anrm, bnrm;
+    static integer itau, nlvl, iascl, ibscl;
+    static doublereal sfmin;
+    static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebrd_(
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	    integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dlalsd_(char *, integer *, integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, integer *), dlascl_(char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    integer *, doublereal *, integer *, integer *), dgeqrf_(
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, integer *), dlacpy_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static doublereal bignum;
+    extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+    static integer wlalsd;
+    extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    static integer ldwork;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    static integer minwrk, maxwrk;
+    static doublereal smlnum;
+    static logical lquery;
+    static integer smlsiz;
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DGELSD computes the minimum-norm solution to a real linear least
+    squares problem:
+        minimize 2-norm(| b - A*x |)
+    using the singular value decomposition (SVD) of A. A is an M-by-N
+    matrix which may be rank-deficient.
+
+    Several right hand side vectors b and solution vectors x can be
+    handled in a single call; they are stored as the columns of the
+    M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+    matrix X.
+
+    The problem is solved in three steps:
+    (1) Reduce the coefficient matrix A to bidiagonal form with
+        Householder transformations, reducing the original problem
+        into a "bidiagonal least squares problem" (BLS)
+    (2) Solve the BLS using a divide and conquer approach.
+    (3) Apply back all the Householder transformations to solve
+        the original least squares problem.
+
+    The effective rank of A is determined by treating as zero those
+    singular values which are less than RCOND times the largest singular
+    value.
+
+    The divide and conquer algorithm makes very mild assumptions about
+    floating point arithmetic. It will work on machines with a guard
+    digit in add/subtract, or on those binary machines without guard
+    digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+    Cray-2. It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of A. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of A. N >= 0.
+
+    NRHS    (input) INTEGER
+            The number of right hand sides, i.e., the number of columns
+            of the matrices B and X. NRHS >= 0.
+
+    A       (input) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the M-by-N matrix A.
+            On exit, A has been destroyed.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    B       (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+            On entry, the M-by-NRHS right hand side matrix B.
+            On exit, B is overwritten by the N-by-NRHS solution
+            matrix X.  If m >= n and RANK = n, the residual
+            sum-of-squares for the solution in the i-th column is given
+            by the sum of squares of elements n+1:m in that column.
+
+    LDB     (input) INTEGER
+            The leading dimension of the array B. LDB >= max(1,max(M,N)).
+
+    S       (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The singular values of A in decreasing order.
+            The condition number of A in the 2-norm = S(1)/S(min(m,n)).
+
+    RCOND   (input) DOUBLE PRECISION
+            RCOND is used to determine the effective rank of A.
+            Singular values S(i) <= RCOND*S(1) are treated as zero.
+            If RCOND < 0, machine precision is used instead.
+
+    RANK    (output) INTEGER
+            The effective rank of A, i.e., the number of singular values
+            which are greater than RCOND*S(1).
+
+    WORK    (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK. LWORK must be at least 1.
+            The exact minimum amount of workspace needed depends on M,
+            N and NRHS. As long as LWORK is at least
+                12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2,
+            if M is greater than or equal to N or
+                12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2,
+            if M is less than N, the code will execute correctly.
+            SMLSIZ is returned by ILAENV and is equal to the maximum
+            size of the subproblems at the bottom of the computation
+            tree (usually about 25), and
+               NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+            For good performance, LWORK should generally be larger.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    IWORK   (workspace) INTEGER array, dimension (LIWORK)
+            LIWORK >= 3 * MINMN * NLVL + 11 * MINMN,
+            where MINMN = MIN( M,N ).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  the algorithm for computing the SVD failed to converge;
+                  if INFO = i, i off-diagonal elements of an intermediate
+                  bidiagonal form did not converge to zero.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Ren-Cang Li, Computer Science Division, University of
+         California at Berkeley, USA
+       Osni Marques, LBNL/NERSC, USA
+
+    =====================================================================
+
+
+       Test the input arguments.
+*/
+
+    /* Parameter adjustments: shift the array pointers so that the code
+       below can use 1-based, column-major (Fortran-style) indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    --s;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    minmn = min(*m,*n);
+    maxmn = max(*m,*n);
+    /* MNTHR: crossover point below which the "many more rows/columns"
+       fast paths (1a / 2a) are not worthwhile, as chosen by ILAENV. */
+    mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, (
+	    ftnlen)1);
+    /* LWORK == -1 requests a workspace-size query only (no solve). */
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*ldb < max(1,maxmn)) {
+	*info = -7;
+    }
+
+    smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0, (
+	    ftnlen)6, (ftnlen)1);
+
+/*
+       Compute workspace.
+       (Note: Comments in the code beginning "Workspace:" describe the
+       minimal amount of workspace needed at that point in the code,
+       as well as the preferred amount for good performance.
+       NB refers to the optimal block size for the immediately
+       following subroutine, as returned by ILAENV.)
+*/
+
+    minwrk = 1;
+    minmn = max(1,minmn);
+/* Computing MAX */
+    i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) /
+	    log(2.)) + 1;
+    nlvl = max(i__1,0);
+
+    if (*info == 0) {
+	maxwrk = 0;
+	mm = *m;
+	if ((*m >= *n && *m >= mnthr)) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+	    mm = *n;
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m,
+		    n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT",
+		    m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+	    maxwrk = max(i__1,i__2);
+	}
+	if (*m >= *n) {
+
+/*
+             Path 1 - overdetermined or exactly determined.
+
+             Computing MAX
+*/
+	    i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD"
+		    , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR",
+		    "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR",
+		     "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+	    maxwrk = max(i__1,i__2);
+/* Computing 2nd power */
+	    i__1 = smlsiz + 1;
+	    wlalsd = *n * 9 + ((*n) << (1)) * smlsiz + ((*n) << (3)) * nlvl +
+		    *n * *nrhs + i__1 * i__1;
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2),
+		    i__2 = *n * 3 + wlalsd;
+	    minwrk = max(i__1,i__2);
+	}
+	if (*n > *m) {
+/* Computing 2nd power */
+	    i__1 = smlsiz + 1;
+	    wlalsd = *m * 9 + ((*m) << (1)) * smlsiz + ((*m) << (3)) * nlvl +
+		    *m * *nrhs + i__1 * i__1;
+	    if (*n >= mnthr) {
+
+/*
+                Path 2a - underdetermined, with many more columns
+                than rows.
+*/
+
+		maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1,
+			&c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1))
+			* ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (
+			ftnlen)6, (ftnlen)1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs *
+			ilaenv_(&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, (
+			ftnlen)6, (ftnlen)3);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) *
+			ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1, (
+			ftnlen)6, (ftnlen)3);
+		maxwrk = max(i__1,i__2);
+		if (*nrhs > 1) {
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+		    maxwrk = max(i__1,i__2);
+		} else {
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1));
+		    maxwrk = max(i__1,i__2);
+		}
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ",
+			"LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + wlalsd;
+		maxwrk = max(i__1,i__2);
+	    } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+		maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m,
+			n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR"
+			, "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR",
+			"PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
+		maxwrk = max(i__1,i__2);
+	    }
+/* Computing MAX */
+	    i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2),
+		    i__2 = *m * 3 + wlalsd;
+	    minwrk = max(i__1,i__2);
+	}
+	minwrk = min(minwrk,maxwrk);
+	work[1] = (doublereal) maxwrk;
+	if ((*lwork < minwrk && ! lquery)) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELSD", &i__1);
+	return 0;
+    } else if (lquery) {
+	/* Workspace query: L10 stores the optimal LWORK in WORK(1). */
+	goto L10;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0) {
+	*rank = 0;
+	return 0;
+    }
+
+/* Get machine parameters. */
+
+    /* NOTE(review): PRECISION and SAFEMINIMUM are macros defined earlier in
+       this file — presumably wrappers around dlamch_("P") / dlamch_("S");
+       confirm against the file header. */
+    eps = PRECISION;
+    sfmin = SAFEMINIMUM;
+    smlnum = sfmin / eps;
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+    anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
+    iascl = 0;
+    if ((anrm > 0. && anrm < smlnum)) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 1;
+    } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 2;
+    } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+	i__1 = max(*m,*n);
+	dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
+	dlaset_("F", &minmn, &c__1, &c_b29, &c_b29, &s[1], &c__1);
+	*rank = 0;
+	goto L10;
+    }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+    bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+    ibscl = 0;
+    if ((bnrm > 0. && bnrm < smlnum)) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+		info);
+	ibscl = 1;
+    } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+		info);
+	ibscl = 2;
+    }
+
+/* If M < N make sure certain entries of B are zero. */
+
+    if (*m < *n) {
+	i__1 = *n - *m;
+	dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb);
+    }
+
+/* Overdetermined case. */
+
+    if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+	mm = *m;
+	if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+	    mm = *n;
+	    itau = 1;
+	    nwork = itau + *n;
+
+/*
+             Compute A=Q*R.
+             (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+		    info);
+
+/*
+             Multiply B by transpose(Q).
+             (Workspace: need N+NRHS, prefer N+NRHS*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+		    b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+	    if (*n > 1) {
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2],
+			lda);
+	    }
+	}
+
+	ie = 1;
+	itauq = ie + *n;
+	itaup = itauq + *n;
+	nwork = itaup + *n;
+
+/*
+          Bidiagonalize R in A.
+          (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB)
+*/
+
+	i__1 = *lwork - nwork + 1;
+	dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+		work[itaup], &work[nwork], &i__1, info);
+
+/*
+          Multiply B by transpose of left bidiagonalizing vectors of R.
+          (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB)
+*/
+
+	i__1 = *lwork - nwork + 1;
+	dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+		&b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+	dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
+		rcond, rank, &work[nwork], &iwork[1], info);
+	if (*info != 0) {
+	    goto L10;
+	}
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+	i__1 = *lwork - nwork + 1;
+	dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+		b[b_offset], ldb, &work[nwork], &i__1, info);
+
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
+		max(i__1,*nrhs), i__2 = *n - *m * 3;
+	if ((*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2)
+		)) {
+
+/*
+             Path 2a - underdetermined, with many more columns than rows
+             and sufficient workspace for an efficient algorithm.
+*/
+
+	    ldwork = *m;
+/*
+             Computing MAX
+             Computing MAX
+*/
+	    i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 =
+		    max(i__3,*nrhs), i__4 = *n - *m * 3;
+	    i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * *
+		    lda + *m + *m * *nrhs;
+	    if (*lwork >= max(i__1,i__2)) {
+		ldwork = *lda;
+	    }
+	    itau = 1;
+	    nwork = *m + 1;
+
+/*
+             Compute A=L*Q.
+             (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+		    info);
+	    il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+	    dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+	    i__1 = *m - 1;
+	    i__2 = *m - 1;
+	    dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwork], &
+		    ldwork);
+	    ie = il + ldwork * *m;
+	    itauq = ie + *m;
+	    itaup = itauq + *m;
+	    nwork = itaup + *m;
+
+/*
+             Bidiagonalize L in WORK(IL).
+             (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+		    &work[itaup], &work[nwork], &i__1, info);
+
+/*
+             Multiply B by transpose of left bidiagonalizing vectors of L.
+             (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+		    itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+	    dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+		    ldb, rcond, rank, &work[nwork], &iwork[1], info);
+	    if (*info != 0) {
+		goto L10;
+	    }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+		    itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+	    i__1 = *n - *m;
+	    dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1],
+		    ldb);
+	    nwork = itau + *m;
+
+/*
+             Multiply transpose(Q) by B.
+             (Workspace: need M+NRHS, prefer M+NRHS*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+		    b_offset], ldb, &work[nwork], &i__1, info);
+
+	} else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+	    ie = 1;
+	    itauq = ie + *m;
+	    itaup = itauq + *m;
+	    nwork = itaup + *m;
+
+/*
+             Bidiagonalize A.
+             (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+		    work[itaup], &work[nwork], &i__1, info);
+
+/*
+             Multiply B by transpose of left bidiagonalizing vectors.
+             (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB)
+*/
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+		    , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+	    dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+		    ldb, rcond, rank, &work[nwork], &iwork[1], info);
+	    if (*info != 0) {
+		goto L10;
+	    }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+		    , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+	}
+    }
+
+/* Undo scaling. */
+
+    if (iascl == 1) {
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+		info);
+	dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+		minmn, info);
+    } else if (iascl == 2) {
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+		info);
+	dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+		minmn, info);
+    }
+    if (ibscl == 1) {
+	dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		info);
+    } else if (ibscl == 2) {
+	dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		info);
+    }
+
+/* L10: common exit for workspace query, zero matrix, solver failure,
+   and normal completion; always reports the optimal LWORK in WORK(1). */
+L10:
+    work[1] = (doublereal) maxwrk;
+    return 0;
+
+/* End of DGELSD */
+
+} /* dgelsd_ */
+
+/* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    /* NOTE(review): f2c emits these as "static", so this routine is
+       non-reentrant and not thread-safe. */
+    static integer i__, k;
+    static doublereal aii;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfg_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DGEQR2 computes a QR factorization of a real m by n matrix A:
+    A = Q * R.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the m by n matrix A.
+            On exit, the elements on and above the diagonal of the array
+            contain the min(m,n) by n upper trapezoidal matrix R (R is
+            upper triangular if m >= n); the elements below the diagonal,
+            with the array TAU, represent the orthogonal matrix Q as a
+            product of elementary reflectors (see Further Details).
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    TAU     (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (N)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of elementary reflectors
+
+       Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+    and tau in TAU(i).
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* Re-base the pointers so the Fortran-style 1-based, column-major
+       indexing a[row + col * a_dim1] used below is valid. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEQR2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+	i__2 = *m - i__ + 1;
+/* min(i+1,m) keeps the subdiagonal reference in bounds when i == m */
+/* Computing MIN */
+	i__3 = i__ + 1;
+	dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+		, &c__1, &tau[i__]);
+	if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+/*
+             Temporarily overwrite A(i,i) with 1 so the reflector vector
+             v lies contiguously in A(i:m,i) for DLARF; the saved
+             diagonal value is restored afterwards.
+*/
+	    aii = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.;
+	    i__2 = *m - i__ + 1;
+	    i__3 = *n - i__;
+	    dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
+		    i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+	    a[i__ + i__ * a_dim1] = aii;
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of DGEQR2 */
+
+} /* dgeqr2_ */
+
+/* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    /* NOTE(review): f2c emits these as "static", so this routine is
+       non-reentrant and not thread-safe. */
+    static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+	    char *, char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static integer ldwork, lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DGEQRF computes a QR factorization of a real M-by-N matrix A:
+    A = Q * R.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the M-by-N matrix A.
+            On exit, the elements on and above the diagonal of the array
+            contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+            upper triangular if m >= n); the elements below the diagonal,
+            with the array TAU, represent the orthogonal matrix Q as a
+            product of min(m,n) elementary reflectors (see Further
+            Details).
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    TAU     (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.  LWORK >= max(1,N).
+            For optimum performance LWORK >= N*NB, where NB is
+            the optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of elementary reflectors
+
+       Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+    and tau in TAU(i).
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* Re-base the pointers so the Fortran-style 1-based, column-major
+       indexing a[row + col * a_dim1] used below is valid. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* NB is the optimal block size suggested by ILAENV for DGEQRF. */
+    nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+	    1);
+    lwkopt = *n * nb;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if ((*lwork < max(1,*n) && ! lquery)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEQRF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if ((nb > 1 && nb < k)) {
+
+/*
+          Determine when to cross over from blocked to unblocked code.
+
+   Computing MAX
+*/
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, (
+		ftnlen)6, (ftnlen)1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/*
+                Not enough workspace to use optimal NB:  reduce NB and
+                determine the minimum value of NB.
+*/
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, &
+			c_n1, (ftnlen)6, (ftnlen)1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (((nb >= nbmin && nb < k) && nx < k)) {
+
+/* Use blocked code initially */
+
+	i__1 = k - nx;
+	i__2 = nb;
+/* f2c translation of "DO i = 1, k-nx, nb"; nb > 0 here, so the loop ascends. */
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/*
+             Compute the QR factorization of the current block
+             A(i:m,i:i+ib-1)
+*/
+
+	    i__3 = *m - i__ + 1;
+	    dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *n) {
+
+/*
+                Form the triangular factor of the block reflector
+                H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+		i__3 = *m - i__ + 1;
+		dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+		i__3 = *m - i__ + 1;
+		i__4 = *n - i__ - ib + 1;
+		dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+			i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib
+			+ 1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Blocked code was not used; the unblocked code below factors from column 1. */
+
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+		, &iinfo);
+    }
+
+/* Report the workspace size actually used in WORK(1). */
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DGEQRF */
+
+} /* dgeqrf_ */
+
+/* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *s, doublereal *u, integer *ldu,
+ doublereal *vt, integer *ldvt, doublereal *work, integer *lwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, ie, il, ir, iu, blk;
+ static doublereal dum[1], eps;
+ static integer ivt, iscl;
+ static doublereal anrm;
+ static integer idum[1], ierr, itau;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ static integer chunk, minmn, wrkbl, itaup, itauq, mnthr;
+ static logical wntqa;
+ static integer nwork;
+ static logical wntqn, wntqo, wntqs;
+ extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dgebrd_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ static integer bdspac;
+ extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *),
+ dgeqrf_(integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *), dlacpy_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), dorgbr_(char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static doublereal bignum;
+ extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ static integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt;
+ static doublereal smlnum;
+ static logical wntqas, lquery;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DGESDD computes the singular value decomposition (SVD) of a real
+ M-by-N matrix A, optionally computing the left and right singular
+ vectors. If singular vectors are desired, it uses a
+ divide-and-conquer algorithm.
+
+ The SVD is written
+
+ A = U * SIGMA * transpose(V)
+
+ where SIGMA is an M-by-N matrix which is zero except for its
+ min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and
+ V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA
+ are the singular values of A; they are real and non-negative, and
+ are returned in descending order. The first min(m,n) columns of
+ U and V are the left and right singular vectors of A.
+
+ Note that the routine returns VT = V**T, not V.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ Specifies options for computing all or part of the matrix U:
+ = 'A': all M columns of U and all N rows of V**T are
+ returned in the arrays U and VT;
+ = 'S': the first min(M,N) columns of U and the first
+ min(M,N) rows of V**T are returned in the arrays U
+ and VT;
+ = 'O': If M >= N, the first N columns of U are overwritten
+ on the array A and all rows of V**T are returned in
+ the array VT;
+ otherwise, all columns of U are returned in the
+ array U and the first M rows of V**T are overwritten
+ in the array VT;
+ = 'N': no columns of U or rows of V**T are computed.
+
+ M (input) INTEGER
+ The number of rows of the input matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the input matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit,
+ if JOBZ = 'O', A is overwritten with the first N columns
+ of U (the left singular vectors, stored
+ columnwise) if M >= N;
+ A is overwritten with the first M rows
+ of V**T (the right singular vectors, stored
+ rowwise) otherwise.
+ if JOBZ .ne. 'O', the contents of A are destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ S (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The singular values of A, sorted so that S(i) >= S(i+1).
+
+ U (output) DOUBLE PRECISION array, dimension (LDU,UCOL)
+ UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N;
+ UCOL = min(M,N) if JOBZ = 'S'.
+ If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M
+ orthogonal matrix U;
+ if JOBZ = 'S', U contains the first min(M,N) columns of U
+ (the left singular vectors, stored columnwise);
+ if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1; if
+ JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M.
+
+ VT (output) DOUBLE PRECISION array, dimension (LDVT,N)
+ If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the
+ N-by-N orthogonal matrix V**T;
+ if JOBZ = 'S', VT contains the first min(M,N) rows of
+ V**T (the right singular vectors, stored rowwise);
+ if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1; if
+ JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N;
+ if JOBZ = 'S', LDVT >= min(M,N).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK;
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ If JOBZ = 'N',
+ LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)).
+ If JOBZ = 'O',
+ LWORK >= 3*min(M,N)*min(M,N) +
+ max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)).
+ If JOBZ = 'S' or 'A'
+ LWORK >= 3*min(M,N)*min(M,N) +
+ max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)).
+ For good performance, LWORK should generally be larger.
+ If LWORK < 0 but other input arguments are legal, WORK(1)
+ returns the optimal LWORK.
+
+ IWORK (workspace) INTEGER array, dimension (8*min(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: DBDSDC did not converge, updating process failed.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1 * 1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1 * 1;
+ vt -= vt_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr = (integer) (minmn * 11. / 6.);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = wntqa || wntqs;
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+ lquery = *lwork == -1;
+
+ if (! (wntqa || wntqs || wntqo || wntqn)) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldu < 1 || (wntqas && *ldu < *m) || ((wntqo && *m < *n) && *
+ ldu < *m)) {
+ *info = -8;
+ } else if (*ldvt < 1 || (wntqa && *ldvt < *n) || (wntqs && *ldvt < minmn)
+ || ((wntqo && *m >= *n) && *ldvt < *n)) {
+ *info = -10;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ if (((*info == 0 && *m > 0) && *n > 0)) {
+ if (*m >= *n) {
+
+/* Compute space needed for DBDSDC */
+
+ if (wntqn) {
+ bdspac = *n * 7;
+ } else {
+ bdspac = *n * 3 * *n + ((*n) << (2));
+ }
+ if (*m >= mnthr) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *n;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + ((*n) << (1)) * *n;
+ minwrk = bdspac + ((*n) << (1)) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR",
+ " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ }
+ } else {
+
+/* Path 5 (M at least N, but not much larger) */
+
+ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *m, i__2 = *n * *n + bdspac;
+ minwrk = *n * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ }
+ }
+ } else {
+
+/* Compute space needed for DBDSDC */
+
+ if (wntqn) {
+ bdspac = *m * 7;
+ } else {
+ bdspac = *m * 3 * *m + ((*m) << (2));
+ }
+ if (*n >= mnthr) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *m;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + ((*m) << (1)) * *m;
+ minwrk = bdspac + ((*m) << (1)) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ",
+ " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ }
+ } else {
+
+/* Path 5t (N greater than M, but not much larger) */
+
+ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *n, i__2 = *m * *m + bdspac;
+ minwrk = *m * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ }
+ }
+ }
+ work[1] = (doublereal) maxwrk;
+ }
+
+ if ((*lwork < minwrk && ! lquery)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ if (*lwork >= 1) {
+ work[1] = 1.;
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = PRECISION;
+ smlnum = sqrt(SAFEMINIMUM) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if ((anrm > 0. && anrm < smlnum)) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/*
+ A has at least as many rows as columns. If A has sufficiently
+ more rows than columns, first reduce using the QR
+ decomposition (if sufficient workspace available)
+*/
+
+ if (*m >= mnthr) {
+
+ if (wntqn) {
+
+/*
+ Path 1 (M much larger than N, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (Workspace: need 4*N, prefer 3*N+2*N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *n;
+
+/*
+ Perform bidiagonal SVD, computing singular values only
+ (Workspace: need N+BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2 (M much larger than N, JOBZ = 'O')
+ N left singular vectors to be overwritten on A and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is LDWRKR by N */
+
+ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
+ ldwrkr = *lda;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in VT, copying result to WORK(IR)
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* WORK(IU) is N by N */
+
+ iu = nwork;
+ nwork = iu + *n * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite WORK(IU) by left singular vectors of R
+ and VT by right singular vectors of R
+ (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IU), storing result in WORK(IR) and copying to A
+ (Workspace: need 2*N*N, prefer N*N+M*N)
+*/
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1],
+ lda, &work[iu], n, &c_b29, &work[ir], &ldwrkr);
+ dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3 (M much larger than N, JOBZ='S')
+ N left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+          of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of R and VT
+ by right singular vectors of R
+ (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IR), storing result in U
+ (Workspace: need N*N)
+*/
+
+ dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ dgemm_("N", "N", m, n, n, &c_b15, &a[a_offset], lda, &work[ir]
+ , &ldwrkr, &c_b29, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/*
+ Path 4 (M much larger than N, JOBZ='A')
+ M left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R, copying result to U
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Generate Q in U
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+ i__2 = *lwork - nwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out other entries */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &a[a_dim1 + 2],
+ lda);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite WORK(IU) by left singular vectors of R and VT
+ by right singular vectors of R
+ (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in U by left singular vectors of R in
+ WORK(IU), storing result in A
+ (Workspace: need N*N)
+*/
+
+ dgemm_("N", "N", m, n, n, &c_b15, &u[u_offset], ldu, &work[iu]
+ , &ldwrku, &c_b29, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else {
+
+/*
+ M .LT. MNTHR
+
+ Path 5 (M at least N, but not much larger)
+ Reduce to bidiagonal form without QR decomposition
+*/
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Perform bidiagonal SVD, only computing singular values
+ (Workspace: need N+BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ nwork = iu + ldwrku * *n;
+ dlaset_("F", m, n, &c_b29, &c_b29, &work[iu], &ldwrku);
+ } else {
+
+/* WORK( IU ) is N by N */
+
+ ldwrku = *n;
+ nwork = iu + ldwrku * *n;
+
+/* WORK(IR) is LDWRKR by N */
+
+ ir = nwork;
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
+ 1], info);
+
+/*
+ Overwrite VT by right singular vectors of A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/*
+ Overwrite WORK(IU) by left singular vectors of A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy left singular vectors of A from WORK(IU) to A */
+
+ dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by left singular vectors of
+ bidiagonal matrix in WORK(IU), storing result in
+ WORK(IR) and copying to A
+ (Workspace: need 2*N*N, prefer N*N+M*N)
+*/
+
+ i__2 = *m;
+ i__1 = ldwrkr;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ +
+ a_dim1], lda, &work[iu], &ldwrku, &c_b29, &
+ work[ir], &ldwrkr);
+ dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ dlaset_("F", m, n, &c_b29, &c_b29, &u[u_offset], ldu);
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 3*N, prefer 2*N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ dlaset_("F", m, m, &c_b29, &c_b29, &u[u_offset], ldu);
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ i__1 = *m - *n;
+ i__2 = *m - *n;
+ dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &u[*n + 1 + (*n +
+ 1) * u_dim1], ldu);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/*
+ A has more columns than rows. If A has sufficiently more
+ columns than rows, first reduce using the LQ decomposition (if
+ sufficient workspace available)
+*/
+
+ if (*n >= mnthr) {
+
+ if (wntqn) {
+
+/*
+ Path 1t (N much larger than M, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out above L */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &a[((a_dim1) << (1)
+ ) + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (Workspace: need 4*M, prefer 3*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *m;
+
+/*
+ Perform bidiagonal SVD, computing singular values only
+ (Workspace: need M+BDSPAC)
+*/
+
+ dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2t (N much larger than M, JOBZ='O')
+ M right singular vectors to be overwritten on A and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* IVT is M by M */
+
+ il = ivt + *m * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
+
+/* WORK(IL) is M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ }
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwrkl],
+ &ldwrkl);
+
+/*
+ Generate Q in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U, and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M+M*M+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], m, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of L and WORK(IVT)
+ by right singular vectors of L
+ (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by Q
+ in A, storing result in WORK(IL) and copying to A
+ (Workspace: need 2*M*M, prefer M*M+M*N)
+*/
+
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b29, &work[il], &
+ ldwrkl);
+ dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L30: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3t (N much larger than M, JOBZ='S')
+ M right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &work[il + ldwrkl],
+ &ldwrkl);
+
+/*
+ Generate Q in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IU), copying result to U
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of L and VT
+ by right singular vectors of L
+ (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IL) by
+ Q in A, storing result in VT
+ (Workspace: need M*M)
+*/
+
+ dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ dgemm_("N", "N", m, n, m, &c_b15, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b29, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/*
+ Path 4t (N much larger than M, JOBZ='A')
+ N right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q, copying result to VT
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Generate Q in VT
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__2, &ierr);
+
+/* Produce L in A, zeroing out other entries */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &a[((a_dim1) << (1)
+ ) + 1], lda);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M+M*M+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+ , info);
+
+/*
+ Overwrite U by left singular vectors of L and WORK(IVT)
+ by right singular vectors of L
+ (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by
+ Q in VT, storing result in A
+ (Workspace: need M*M)
+*/
+
+ dgemm_("N", "N", m, n, m, &c_b15, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b29, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else {
+
+/*
+ N .LT. MNTHR
+
+ Path 5t (N greater than M, but not much larger)
+ Reduce to bidiagonal form without LQ decomposition
+*/
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Perform bidiagonal SVD, only computing singular values
+ (Workspace: need M+BDSPAC)
+*/
+
+ dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* WORK( IVT ) is M by N */
+
+ dlaset_("F", m, n, &c_b29, &c_b29, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by M */
+
+ nwork = ivt + ldwkvt * *m;
+ il = nwork;
+
+/* WORK(IL) is M by CHUNK */
+
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M*M+BDSPAC)
+*/
+
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+ , info);
+
+/*
+ Overwrite U by left singular vectors of A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/*
+ Overwrite WORK(IVT) by left singular vectors of A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+
+/* Copy right singular vectors of A from WORK(IVT) to A */
+
+ dlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate P**T in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by right singular vectors of
+ bidiagonal matrix in WORK(IVT), storing result in
+ WORK(IL) and copying to A
+ (Workspace: need 2*M*M, prefer M*M+M*N)
+*/
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], &
+ ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b29, &
+ work[il], m);
+ dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
+ 1], lda);
+/* L40: */
+ }
+ }
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ dlaset_("F", m, n, &c_b29, &c_b29, &vt[vt_offset], ldvt);
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 3*M, prefer 2*M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ dlaset_("F", n, n, &c_b29, &c_b29, &vt[vt_offset], ldvt);
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of VT to identity matrix */
+
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &vt[*m + 1 + (*m +
+ 1) * vt_dim1], ldvt);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 2*M+N, prefer 2*M+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (doublereal) maxwrk;
+
+ return 0;
+
+/* End of DGESDD */
+
+} /* dgesdd_ */
+
+/* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer
+	*lda, integer *ipiv, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *,
+	    integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *,
+	    integer *, integer *, doublereal *, integer *, integer *);
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       March 31, 1993
+
+
+    Purpose
+    =======
+
+    DGESV computes the solution to a real system of linear equations
+       A * X = B,
+    where A is an N-by-N matrix and X and B are N-by-NRHS matrices.
+
+    The LU decomposition with partial pivoting and row interchanges is
+    used to factor A as
+       A = P * L * U,
+    where P is a permutation matrix, L is unit lower triangular, and U is
+    upper triangular.  The factored form of A is then used to solve the
+    system of equations A * X = B.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The number of linear equations, i.e., the order of the
+            matrix A.  N >= 0.
+
+    NRHS    (input) INTEGER
+            The number of right hand sides, i.e., the number of columns
+            of the matrix B.  NRHS >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the N-by-N coefficient matrix A.
+            On exit, the factors L and U from the factorization
+            A = P*L*U; the unit diagonal elements of L are not stored.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    IPIV    (output) INTEGER array, dimension (N)
+            The pivot indices that define the permutation matrix P;
+            row i of the matrix was interchanged with row IPIV(i).
+
+    B       (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+            On entry, the N-by-NRHS matrix of right hand side matrix B.
+            On exit, if INFO = 0, the N-by-NRHS solution matrix X.
+
+    LDB     (input) INTEGER
+            The leading dimension of the array B.  LDB >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, U(i,i) is exactly zero.  The factorization
+                  has been completed, but the factor U is exactly
+                  singular, so the solution could not be computed.
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+/* f2c idiom: shift each array pointer back by its (1,1) offset so the */
+/* Fortran-style 1-based indexing used below (a[a_offset], ipiv[1], */
+/* b[b_offset]) addresses the caller's 0-based C arrays correctly. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+/* LAPACK convention: report the 1-based position of the bad argument. */
+	i__1 = -(*info);
+	xerbla_("DGESV ", &i__1);
+	return 0;
+    }
+
+/* Compute the LU factorization of A. */
+
+    dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+		b_offset], ldb, info);
+    }
+/* If dgetrf_ set INFO > 0, U is exactly singular: the factors are */
+/* returned in A but no triangular solve is attempted. */
+    return 0;
+
+/* End of DGESV */
+
+} /* dgesv_ */
+
+/* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer *
+	lda, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Local variables */
+/* NOTE: f2c emits these locals with static storage duration, so this */
+/* routine keeps state in static variables and is not reentrant. */
+    static integer j, jp;
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *), dscal_(integer *, doublereal *, doublereal *, integer
+	    *), dswap_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1992
+
+
+    Purpose
+    =======
+
+    DGETF2 computes an LU factorization of a general m-by-n matrix A
+    using partial pivoting with row interchanges.
+
+    The factorization has the form
+       A = P * L * U
+    where P is a permutation matrix, L is lower triangular with unit
+    diagonal elements (lower trapezoidal if m > n), and U is upper
+    triangular (upper trapezoidal if m < n).
+
+    This is the right-looking Level 2 BLAS version of the algorithm.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the m by n matrix to be factored.
+            On exit, the factors L and U from the factorization
+            A = P*L*U; the unit diagonal elements of L are not stored.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    IPIV    (output) INTEGER array, dimension (min(M,N))
+            The pivot indices; for 1 <= i <= min(M,N), row i of the
+            matrix was interchanged with row IPIV(i).
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -k, the k-th argument had an illegal value
+            > 0: if INFO = k, U(k,k) is exactly zero. The factorization
+                 has been completed, but the factor U is exactly
+                 singular, and division by zero will occur if it is used
+                 to solve a system of equations.
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+/* f2c idiom: shift pointers so the Fortran-style 1-based indexing */
+/* a[i + j * a_dim1] below addresses the caller's 0-based C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGETF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+    i__1 = min(*m,*n);
+    for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+/* idamax_ returns the 1-based offset of the entry of largest absolute */
+/* value within column j (rows j..m), so jp is the pivot row index. */
+	i__2 = *m - j + 1;
+	jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1);
+	ipiv[j] = jp;
+	if (a[jp + j * a_dim1] != 0.) {
+
+/* Apply the interchange to columns 1:N. */
+
+	    if (jp != j) {
+		dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+	    }
+
+/* Compute elements J+1:M of J-th column. */
+
+/* Scale the subdiagonal of column j by 1/pivot to form the L factor. */
+	    if (j < *m) {
+		i__2 = *m - j;
+		d__1 = 1. / a[j + j * a_dim1];
+		dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+	    }
+
+	} else if (*info == 0) {
+
+/* Record only the FIRST zero pivot; the factorization continues. */
+	    *info = j;
+	}
+
+	if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+/* Rank-1 Schur-complement update A(j+1:m, j+1:n) += c_b151 * l * u'. */
+/* NOTE(review): c_b151 is a file-scope constant defined elsewhere in */
+/* this file; the update requires it to be -1.0 -- confirm against its */
+/* definition. */
+	    i__2 = *m - j;
+	    i__3 = *n - j;
+	    dger_(&i__2, &i__3, &c_b151, &a[j + 1 + j * a_dim1], &c__1, &a[j
+		    + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1],
+		    lda);
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of DGETF2 */
+
+} /* dgetf2_ */
+
+/* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ static integer i__, j, jb, nb;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer iinfo;
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dgetf2_(
+ integer *, integer *, doublereal *, integer *, integer *, integer
+ *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DGETRF computes an LU factorization of a general M-by-N matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+/* Shift the base pointers so the 1-based Fortran-style indexing
+ a[i + j * a_dim1] and ipiv[i] used below is valid (f2c idiom). */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ if (nb <= 1 || nb >= min(*m,*n)) {
+
+/* Use unblocked code. */
+
+ dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+/* JB is the width of the current panel; the final panel may be
+ narrower than the block size NB. */
+
+/*
+ Factor diagonal and subdiagonal blocks and test for exact
+ singularity.
+*/
+
+ i__3 = *m - j + 1;
+ dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if ((*info == 0 && iinfo > 0)) {
+ *info = iinfo + j - 1;
+ }
+/* The panel factorization returns pivots relative to row J;
+ convert them to global row indices. */
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &c_b151, &a[j + jb + j * a_dim1], lda, &a[j + (j
+ + jb) * a_dim1], lda, &c_b15, &a[j + jb + (j + jb)
+ * a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DGETRF */
+
+} /* dgetrf_ */
+
+/* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *
+ ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(
+ char *, integer *), dlaswp_(integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DGETRS solves a system of linear equations
+ A * X = B or A' * X = B
+ with a general N-by-N matrix A using the LU factorization computed
+ by DGETRF.
+
+ Arguments
+ =========
+
+ TRANS (input) CHARACTER*1
+ Specifies the form of the system of equations:
+ = 'N': A * X = B (No transpose)
+ = 'T': A'* X = B (Transpose)
+ = 'C': A'* X = B (Conjugate transpose = Transpose)
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The factors L and U from the factorization A = P*L*U
+ as computed by DGETRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (input) INTEGER array, dimension (N)
+ The pivot indices from DGETRF; for 1<=i<=N, row i of the
+ matrix was interchanged with row IPIV(i).
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+/* Shift the base pointers so the 1-based Fortran-style indexing
+ used below (a[i + j * a_dim1], b[i + j * b_dim1]) is valid. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (((! notran && ! lsame_(trans, "T")) && ! lsame_(
+ trans, "C"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (notran) {
+
+/*
+ Solve A * X = B.
+
+ Apply row interchanges to the right hand sides.
+*/
+
+ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b15, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b15, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A' * X = B.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b15, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b15, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+/* NOTE(review): the increment c_n1 (-1) asks DLASWP to apply the
+ pivots in reverse order, undoing P for the transposed system --
+ see the DLASWP interface documentation. */
+ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of DGETRS */
+
+} /* dgetrs_ */
+
+/* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo,
+ integer *ihi, doublereal *h__, integer *ldh, doublereal *wr,
+ doublereal *wi, doublereal *z__, integer *ldz, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ doublereal d__1, d__2;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__, j, k, l;
+ /* S is a fixed 15x15 workspace for computing shifts from a trailing
+ submatrix; V/VV hold the reflector vectors (order at most NS+1). */
+ static doublereal s[225] /* was [15][15] */, v[16];
+ static integer i1, i2, ii, nh, nr, ns, nv;
+ static doublereal vv[16];
+ static integer itn;
+ static doublereal tau;
+ static integer its;
+ static doublereal ulp, tst1;
+ static integer maxb;
+ static doublereal absw;
+ static integer ierr;
+ static doublereal unfl, temp, ovfl;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static integer itemp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical initz, wantt, wantz;
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
+ doublereal *);
+ extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlaset_(char *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int xerbla_(char *, integer *), dlarfx_(
+ char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *);
+ static doublereal smlnum;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DHSEQR computes the eigenvalues of a real upper Hessenberg matrix H
+ and, optionally, the matrices T and Z from the Schur decomposition
+ H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur
+ form), and Z is the orthogonal matrix of Schur vectors.
+
+ Optionally Z may be postmultiplied into an input orthogonal matrix Q,
+ so that this routine can give the Schur factorization of a matrix A
+ which has been reduced to the Hessenberg form H by the orthogonal
+ matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ = 'E': compute eigenvalues only;
+ = 'S': compute eigenvalues and the Schur form T.
+
+ COMPZ (input) CHARACTER*1
+ = 'N': no Schur vectors are computed;
+ = 'I': Z is initialized to the unit matrix and the matrix Z
+ of Schur vectors of H is returned;
+ = 'V': Z must contain an orthogonal matrix Q on entry, and
+ the product Q*Z is returned.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to DGEBAL, and then passed to SGEHRD
+ when the matrix output by DGEBAL is reduced to Hessenberg
+ form. Otherwise ILO and IHI should be set to 1 and N
+ respectively.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ H (input/output) DOUBLE PRECISION array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if JOB = 'S', H contains the upper quasi-triangular
+ matrix T from the Schur decomposition (the Schur form);
+ 2-by-2 diagonal blocks (corresponding to complex conjugate
+ pairs of eigenvalues) are returned in standard form, with
+ H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. If JOB = 'E',
+ the contents of H are unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ WR (output) DOUBLE PRECISION array, dimension (N)
+ WI (output) DOUBLE PRECISION array, dimension (N)
+ The real and imaginary parts, respectively, of the computed
+ eigenvalues. If two eigenvalues are computed as a complex
+ conjugate pair, they are stored in consecutive elements of
+ WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and
+ WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in the
+ same order as on the diagonal of the Schur form returned in
+ H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2
+ diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and
+ WI(i+1) = -WI(i).
+
+ Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
+ If COMPZ = 'N': Z is not referenced.
+ If COMPZ = 'I': on entry, Z need not be set, and on exit, Z
+ contains the orthogonal matrix Z of the Schur vectors of H.
+ If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,
+ which is assumed to be equal to the unit matrix except for
+ the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.
+ Normally Q is the orthogonal matrix generated by DORGHR after
+ the call to DGEHRD which formed the Hessenberg matrix H.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z.
+ LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, DHSEQR failed to compute all of the
+ eigenvalues in a total of 30*(IHI-ILO+1) iterations;
+ elements 1:ilo-1 and i+1:n of WR and WI contain those
+ eigenvalues which have been successfully computed.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+/* Shift the base pointers so the 1-based Fortran-style indexing
+ used throughout this routine is valid (f2c idiom). */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1 * 1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1 * 1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantt = lsame_(job, "S");
+ initz = lsame_(compz, "I");
+ wantz = initz || lsame_(compz, "V");
+
+ *info = 0;
+ work[1] = (doublereal) max(1,*n);
+ lquery = *lwork == -1;
+ if ((! lsame_(job, "E") && ! wantt)) {
+ *info = -1;
+ } else if ((! lsame_(compz, "N") && ! wantz)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if (*ldz < 1 || (wantz && *ldz < max(1,*n))) {
+ *info = -11;
+ } else if ((*lwork < max(1,*n) && ! lquery)) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DHSEQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Initialize Z, if necessary */
+
+ if (initz) {
+ dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz);
+ }
+
+/* Store the eigenvalues isolated by DGEBAL. */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+/* L10: */
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+/* L20: */
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.;
+ return 0;
+ }
+
+/*
+ Set rows and columns ILO to IHI to zero below the first
+ subdiagonal.
+*/
+
+ i__1 = *ihi - 2;
+ for (j = *ilo; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+ h__[i__ + j * h_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ nh = *ihi - *ilo + 1;
+
+/*
+ Determine the order of the multi-shift QR algorithm to be used.
+
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = job;
+ i__3[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ ns = ilaenv_(&c__4, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = job;
+ i__3[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ maxb = ilaenv_(&c__8, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if (ns <= 2 || ns > nh || maxb >= nh) {
+
+/* Use the standard double-shift algorithm */
+
+ dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[
+ 1], ilo, ihi, &z__[z_offset], ldz, info);
+ return 0;
+ }
+ maxb = max(3,maxb);
+/* Computing MIN */
+ i__1 = min(ns,maxb);
+ ns = min(i__1,15);
+/* NS is capped at 15 because the shift workspace S is a fixed
+ 15x15 local array (see declaration above). */
+
+/*
+ Now 2 < NS <= MAXB < NH.
+
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+/* NOTE(review): SAFEMINIMUM and PRECISION appear to be dlamch-style
+ machine-constant macros defined elsewhere in this file -- confirm. */
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (nh / ulp);
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of multiple-shift QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of at most MAXB. Each iteration of the loop
+ works with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L50:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L170;
+ }
+
+/*
+ Perform multiple-shift QR iterations on rows and columns ILO to I
+ until a submatrix of order at most MAXB splits off at the bottom
+ because a subdiagonal element has become negligible.
+*/
+
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[k + k * h_dim1], abs(d__2));
+ if (tst1 == 0.) {
+ i__4 = i__ - l + 1;
+ tst1 = dlanhs_("1", &i__4, &h__[l + l * h_dim1], ldh, &work[1]
+ );
+ }
+/* Computing MAX */
+ d__2 = ulp * tst1;
+ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2,
+ smlnum)) {
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible. */
+
+ h__[l + (l - 1) * h_dim1] = 0.;
+ }
+
+/* Exit from loop if a submatrix of order <= MAXB has split off. */
+
+ if (l >= i__ - maxb + 1) {
+ goto L160;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! wantt) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if (its == 20 || its == 30) {
+
+/* Exceptional shifts. */
+
+ i__2 = i__;
+ for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
+ wr[ii] = ((d__1 = h__[ii + (ii - 1) * h_dim1], abs(d__1)) + (
+ d__2 = h__[ii + ii * h_dim1], abs(d__2))) * 1.5;
+ wi[ii] = 0.;
+/* L80: */
+ }
+ } else {
+
+/* Use eigenvalues of trailing submatrix of order NS as shifts. */
+
+ dlacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) *
+ h_dim1], ldh, s, &c__15);
+ dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &wr[i__ -
+ ns + 1], &wi[i__ - ns + 1], &c__1, &ns, &z__[z_offset],
+ ldz, &ierr);
+ if (ierr > 0) {
+
+/*
+ If DLAHQR failed to compute all NS eigenvalues, use the
+ unconverged diagonal elements as the remaining shifts.
+*/
+
+ i__2 = ierr;
+ for (ii = 1; ii <= i__2; ++ii) {
+ wr[i__ - ns + ii] = s[ii + ii * 15 - 16];
+ wi[i__ - ns + ii] = 0.;
+/* L90: */
+ }
+ }
+ }
+
+/*
+ Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))
+ where G is the Hessenberg submatrix H(L:I,L:I) and w is
+ the vector of shifts (stored in WR and WI). The result is
+ stored in the local array V.
+*/
+
+ v[0] = 1.;
+ i__2 = ns + 1;
+ for (ii = 2; ii <= i__2; ++ii) {
+ v[ii - 1] = 0.;
+/* L100: */
+ }
+ nv = 1;
+ i__2 = i__;
+ for (j = i__ - ns + 1; j <= i__2; ++j) {
+/* Complex shifts come in conjugate pairs; each pair is handled
+ once when WI(j) > 0, so WI(j) < 0 entries are skipped. */
+ if (wi[j] >= 0.) {
+ if (wi[j] == 0.) {
+
+/* real shift */
+
+ i__4 = nv + 1;
+ dcopy_(&i__4, v, &c__1, vv, &c__1);
+ i__4 = nv + 1;
+ d__1 = -wr[j];
+ dgemv_("No transpose", &i__4, &nv, &c_b15, &h__[l + l *
+ h_dim1], ldh, vv, &c__1, &d__1, v, &c__1);
+ ++nv;
+ } else if (wi[j] > 0.) {
+
+/* complex conjugate pair of shifts */
+
+ i__4 = nv + 1;
+ dcopy_(&i__4, v, &c__1, vv, &c__1);
+ i__4 = nv + 1;
+ d__1 = wr[j] * -2.;
+ dgemv_("No transpose", &i__4, &nv, &c_b15, &h__[l + l *
+ h_dim1], ldh, v, &c__1, &d__1, vv, &c__1);
+ i__4 = nv + 1;
+ itemp = idamax_(&i__4, vv, &c__1);
+/* Computing MAX */
+ d__2 = (d__1 = vv[itemp - 1], abs(d__1));
+ temp = 1. / max(d__2,smlnum);
+ i__4 = nv + 1;
+ dscal_(&i__4, &temp, vv, &c__1);
+ absw = dlapy2_(&wr[j], &wi[j]);
+ temp = temp * absw * absw;
+ i__4 = nv + 2;
+ i__5 = nv + 1;
+ dgemv_("No transpose", &i__4, &i__5, &c_b15, &h__[l + l *
+ h_dim1], ldh, vv, &c__1, &temp, v, &c__1);
+ nv += 2;
+ }
+
+/*
+ Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,
+ reset it to the unit vector.
+*/
+
+ itemp = idamax_(&nv, v, &c__1);
+ temp = (d__1 = v[itemp - 1], abs(d__1));
+ if (temp == 0.) {
+ v[0] = 1.;
+ i__4 = nv;
+ for (ii = 2; ii <= i__4; ++ii) {
+ v[ii - 1] = 0.;
+/* L110: */
+ }
+ } else {
+ temp = max(temp,smlnum);
+ d__1 = 1. / temp;
+ dscal_(&nv, &d__1, v, &c__1);
+ }
+ }
+/* L120: */
+ }
+
+/* Multiple-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = l; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__4 = ns + 1, i__5 = i__ - k + 1;
+ nr = min(i__4,i__5);
+ if (k > l) {
+ dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ dlarfg_(&nr, v, &v[1], &c__1, &tau);
+ if (k > l) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ i__4 = i__;
+ for (ii = k + 1; ii <= i__4; ++ii) {
+ h__[ii + (k - 1) * h_dim1] = 0.;
+/* L130: */
+ }
+ }
+ v[0] = 1.;
+
+/*
+ Apply G from the left to transform the rows of the matrix in
+ columns K to I2.
+*/
+
+ i__4 = i2 - k + 1;
+ dlarfx_("Left", &nr, &i__4, v, &tau, &h__[k + k * h_dim1], ldh, &
+ work[1]);
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+NR,I).
+
+ Computing MIN
+*/
+ i__5 = k + nr;
+ i__4 = min(i__5,i__) - i1 + 1;
+ dlarfx_("Right", &i__4, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh,
+ &work[1]);
+
+ if (wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ dlarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1],
+ ldz, &work[1]);
+ }
+/* L140: */
+ }
+
+/* L150: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L160:
+
+/*
+ A submatrix of order <= MAXB in rows and columns L to I has split
+ off. Use the double-shift QR algorithm to handle it.
+*/
+
+ dlahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &wr[1], &wi[1],
+ ilo, ihi, &z__[z_offset], ldz, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with a new value of I.
+*/
+
+/* Charge the iterations spent on this submatrix against the global
+ iteration budget ITN before deflating to rows/columns < L. */
+ itn -= its;
+ i__ = l - 1;
+ goto L50;
+
+L170:
+ work[1] = (doublereal) max(1,*n);
+ return 0;
+
+/* End of DHSEQR */
+
+} /* dhseqr_ */
+
+/* Subroutine */ int dlabad_(doublereal *small, doublereal *large)
+{
+ /* Builtin functions */
+ /* d_lg10 is the f2c runtime's log10 wrapper for doublereal. */
+ double d_lg10(doublereal *), sqrt(doublereal);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLABAD takes as input the values computed by DLAMCH for underflow and
+ overflow, and returns the square root of each of these values if the
+ log of LARGE is sufficiently large. This subroutine is intended to
+ identify machines with a large exponent range, such as the Crays, and
+ redefine the underflow and overflow limits to be the square roots of
+ the values computed by DLAMCH. This subroutine is needed because
+ DLAMCH does not compensate for poor arithmetic in the upper half of
+ the exponent range, as is found on a Cray.
+
+ Arguments
+ =========
+
+ SMALL (input/output) DOUBLE PRECISION
+ On entry, the underflow threshold as computed by DLAMCH.
+ On exit, if LOG10(LARGE) is sufficiently large, the square
+ root of SMALL, otherwise unchanged.
+
+ LARGE (input/output) DOUBLE PRECISION
+ On entry, the overflow threshold as computed by DLAMCH.
+ On exit, if LOG10(LARGE) is sufficiently large, the square
+ root of LARGE, otherwise unchanged.
+
+ =====================================================================
+
+
+ If it looks like we're on a Cray, take the square root of
+ SMALL and LARGE to avoid overflow and underflow problems.
+*/
+
+/* Threshold 2e3: log10(overflow) > 2000 only on machines with an
+ unusually large exponent range; IEEE doubles never trigger this. */
+ if (d_lg10(large) > 2e3) {
+ *small = sqrt(*small);
+ *large = sqrt(*large);
+ }
+
+ return 0;
+
+/* End of DLABAD */
+
+} /* dlabad_ */
+
+/* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal *
+ a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq,
+ doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer
+ *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemv_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLABRD reduces the first NB rows and columns of a real general
+ m by n matrix A to upper or lower bidiagonal form by an orthogonal
+ transformation Q' * A * P, and returns the matrices X and Y which
+ are needed to apply the transformation to the unreduced part of A.
+
+ If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower
+ bidiagonal form.
+
+ This is an auxiliary routine called by DGEBRD
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A.
+
+ N (input) INTEGER
+ The number of columns in the matrix A.
+
+ NB (input) INTEGER
+ The number of leading rows and columns of A to be reduced.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit, the first NB rows and columns of the matrix are
+ overwritten; the rest of the array is unchanged.
+ If m >= n, elements on and below the diagonal in the first NB
+ columns, with the array TAUQ, represent the orthogonal
+ matrix Q as a product of elementary reflectors; and
+ elements above the diagonal in the first NB rows, with the
+ array TAUP, represent the orthogonal matrix P as a product
+ of elementary reflectors.
+ If m < n, elements below the diagonal in the first NB
+ columns, with the array TAUQ, represent the orthogonal
+ matrix Q as a product of elementary reflectors, and
+ elements on and above the diagonal in the first NB rows,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) DOUBLE PRECISION array, dimension (NB)
+ The diagonal elements of the first NB rows and columns of
+ the reduced matrix. D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (NB)
+ The off-diagonal elements of the first NB rows and columns of
+ the reduced matrix.
+
+ TAUQ (output) DOUBLE PRECISION array dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) DOUBLE PRECISION array, dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ X (output) DOUBLE PRECISION array, dimension (LDX,NB)
+ The m-by-nb matrix X required to update the unreduced part
+ of A.
+
+ LDX (input) INTEGER
+ The leading dimension of the array X. LDX >= M.
+
+ Y (output) DOUBLE PRECISION array, dimension (LDY,NB)
+ The n-by-nb matrix Y required to update the unreduced part
+ of A.
+
+ LDY (output) INTEGER
+ The leading dimension of the array Y. LDY >= N.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors.
+
+ If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in
+ A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in
+ A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The elements of the vectors v and u together form the m-by-nb matrix
+ V and the nb-by-n matrix U' which are needed, with X and Y, to apply
+ the transformation to the unreduced part of the matrix, using a block
+ update of the form: A := A - V*Y' - X*U'.
+
+ The contents of A on exit are illustrated by the following examples
+ with nb = 2:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 )
+ ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 )
+ ( v1 v2 a a a ) ( v1 1 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix which is unchanged,
+ vi denotes an element of the vector defining H(i), and ui an element
+ of the vector defining G(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1 * 1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1 * 1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1],
+ lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + i__ * a_dim1]
+ , &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + x_dim1],
+ ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + (i__ + 1) *
+ a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29,
+ &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + a_dim1],
+ lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b15, &x[i__ + x_dim1],
+ ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15,
+ &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b151, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ +
+ (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3,*n) * a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (
+ i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b29, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__, &c_b15, &y[i__ + 1 + y_dim1],
+ ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b151, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b29, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + y_dim1],
+ ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1]
+ , lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[i__ * a_dim1 + 1],
+ lda, &x[i__ + x_dim1], ldx, &c_b15, &a[i__ + i__ * a_dim1]
+ , lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + i__
+ * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b15, &y[i__ + y_dim1],
+ ldy, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ * a_dim1
+ + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b151, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (i__ +
+ 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1,
+ &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1]
+ , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[
+ i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("Transpose", &i__2, &i__, &c_b15, &x[i__ + 1 + x_dim1],
+ ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[
+ i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("Transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15,
+ &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DLABRD */
+
+} /* dlabrd_ */
+
+/* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLACPY copies all or part of a two-dimensional matrix A to another
+ matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper triangle
+ or trapezoid is accessed; if UPLO = 'L', only the lower
+ triangle or trapezoid is accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) DOUBLE PRECISION array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+ return 0;
+
+/* End of DLACPY */
+
+} /* dlacpy_ */
+
+/* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *d__, doublereal *p, doublereal *q)
+{
+ static doublereal e, f;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLADIV performs complex division in real arithmetic
+
+ a + i*b
+ p + i*q = ---------
+ c + i*d
+
+ The algorithm is due to Robert L. Smith and can be found
+ in D. Knuth, The art of Computer Programming, Vol.2, p.195
+
+ Arguments
+ =========
+
+ A (input) DOUBLE PRECISION
+ B (input) DOUBLE PRECISION
+ C (input) DOUBLE PRECISION
+ D (input) DOUBLE PRECISION
+ The scalars a, b, c, and d in the above expression.
+
+ P (output) DOUBLE PRECISION
+ Q (output) DOUBLE PRECISION
+ The scalars p and q in the above expression.
+
+ =====================================================================
+*/
+
+
+ if (abs(*d__) < abs(*c__)) {
+ e = *d__ / *c__;
+ f = *c__ + *d__ * e;
+ *p = (*a + *b * e) / f;
+ *q = (*b - *a * e) / f;
+ } else {
+ e = *c__ / *d__;
+ f = *d__ + *c__ * e;
+ *p = (*b + *a * e) / f;
+ *q = (-(*a) + *b * e) / f;
+ }
+
+ return 0;
+
+/* End of DLADIV */
+
+} /* dladiv_ */
+
/* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__,
	doublereal *rt1, doublereal *rt2)
{
    /* System generated locals */
    doublereal d__1;

    /* Builtin functions */
    double sqrt(doublereal);

    /* Local variables */
    static doublereal ab, df, tb, sm, rt, adf, acmn, acmx;


/*
    -- LAPACK auxiliary routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       October 31, 1992


    Purpose
    =======

    DLAE2  computes the eigenvalues of a 2-by-2 symmetric matrix
       [  A   B  ]
       [  B   C  ].
    On return, RT1 is the eigenvalue of larger absolute value, and RT2
    is the eigenvalue of smaller absolute value.

    Arguments
    =========

    A       (input) DOUBLE PRECISION
            The (1,1) element of the 2-by-2 matrix.

    B       (input) DOUBLE PRECISION
            The (1,2) and (2,1) elements of the 2-by-2 matrix.

    C       (input) DOUBLE PRECISION
            The (2,2) element of the 2-by-2 matrix.

    RT1     (output) DOUBLE PRECISION
            The eigenvalue of larger absolute value.

    RT2     (output) DOUBLE PRECISION
            The eigenvalue of smaller absolute value.

    Further Details
    ===============

    RT1 is accurate to a few ulps barring over/underflow.

    RT2 may be inaccurate if there is massive cancellation in the
    determinant A*C-B*B; higher precision or correctly rounded or
    correctly truncated arithmetic would be needed to compute RT2
    accurately in all cases.

    Overflow is possible only if RT1 is within a factor of 5 of overflow.
    Underflow is harmless if the input data is 0 or exceeds
       underflow_threshold / macheps.

    =====================================================================


       Compute the eigenvalues
*/

    /* sm = trace, df = difference of diagonal entries, tb = 2*B. */
    sm = *a + *c__;
    df = *a - *c__;
    adf = abs(df);
    tb = *b + *b;
    ab = abs(tb);
    /* acmx/acmn: the diagonal entry of larger/smaller magnitude. */
    if (abs(*a) > abs(*c__)) {
	acmx = *a;
	acmn = *c__;
    } else {
	acmx = *c__;
	acmn = *a;
    }
    /* rt = sqrt(df^2 + tb^2), scaled by the larger of adf/ab so the
       squaring cannot overflow prematurely. */
    if (adf > ab) {
/* Computing 2nd power */
	d__1 = ab / adf;
	rt = adf * sqrt(d__1 * d__1 + 1.);
    } else if (adf < ab) {
/* Computing 2nd power */
	d__1 = adf / ab;
	rt = ab * sqrt(d__1 * d__1 + 1.);
    } else {

/*        Includes case AB=ADF=0 */

	rt = ab * sqrt(2.);
    }
    if (sm < 0.) {
	*rt1 = (sm - rt) * .5;

/*
          Order of execution important.
          To get fully accurate smaller eigenvalue,
          next line needs to be executed in higher precision.
*/

	*rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
    } else if (sm > 0.) {
	*rt1 = (sm + rt) * .5;

/*
          Order of execution important.
          To get fully accurate smaller eigenvalue,
          next line needs to be executed in higher precision.
*/

	*rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
    } else {

/*        Includes case RT1 = RT2 = 0 */

	*rt1 = rt * .5;
	*rt2 = rt * -.5;
    }
    return 0;

/* End of DLAE2 */

} /* dlae2_ */
+
/* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n,
	doublereal *d__, doublereal *e, doublereal *q, integer *ldq,
	doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork,
	integer *info)
{
    /* System generated locals */
    integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
    doublereal d__1;

    /* Builtin functions */
    double log(doublereal);
    integer pow_ii(integer *, integer *);

    /* Local variables */
    static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2;
    static doublereal temp;
    static integer curr;
    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
	    integer *, doublereal *, doublereal *, integer *, doublereal *,
	    integer *, doublereal *, doublereal *, integer *);
    static integer iperm;
    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
	    doublereal *, integer *);
    static integer indxq, iwrem;
    extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *,
	     integer *, integer *, doublereal *, integer *, doublereal *,
	    integer *, integer *);
    static integer iqptr;
    extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *,
	    integer *, integer *, integer *, doublereal *, doublereal *,
	    integer *, integer *, doublereal *, integer *, doublereal *,
	    integer *, integer *, integer *, integer *, integer *, doublereal
	    *, doublereal *, integer *, integer *);
    static integer tlvls;
    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
	    doublereal *, integer *, doublereal *, integer *);
    static integer igivcl;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
	    integer *, integer *, ftnlen, ftnlen);
    static integer igivnm, submat, curprb, subpbs, igivpt;
    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
	    doublereal *, doublereal *, integer *, doublereal *, integer *);
    static integer curlvl, matsiz, iprmpt, smlsiz;


/*
    -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1999


    Purpose
    =======

    DLAED0 computes all eigenvalues and corresponding eigenvectors of a
    symmetric tridiagonal matrix using the divide and conquer method.

    Arguments
    =========

    ICOMPQ  (input) INTEGER
            = 0:  Compute eigenvalues only.
            = 1:  Compute eigenvectors of original dense symmetric matrix
                  also.  On entry, Q contains the orthogonal matrix used
                  to reduce the original matrix to tridiagonal form.
            = 2:  Compute eigenvalues and eigenvectors of tridiagonal
                  matrix.

    QSIZ   (input) INTEGER
           The dimension of the orthogonal matrix used to reduce
           the full matrix to tridiagonal form.  QSIZ >= N if ICOMPQ = 1.

    N      (input) INTEGER
           The dimension of the symmetric tridiagonal matrix.  N >= 0.

    D      (input/output) DOUBLE PRECISION array, dimension (N)
           On entry, the main diagonal of the tridiagonal matrix.
           On exit, its eigenvalues.

    E      (input) DOUBLE PRECISION array, dimension (N-1)
           The off-diagonal elements of the tridiagonal matrix.
           On exit, E has been destroyed.

    Q      (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
           On entry, Q must contain an N-by-N orthogonal matrix.
           If ICOMPQ = 0    Q is not referenced.
           If ICOMPQ = 1    On entry, Q is a subset of the columns of the
                            orthogonal matrix used to reduce the full
                            matrix to tridiagonal form corresponding to
                            the subset of the full matrix which is being
                            decomposed at this time.
           If ICOMPQ = 2    On entry, Q will be the identity matrix.
                            On exit, Q contains the eigenvectors of the
                            tridiagonal matrix.

    LDQ    (input) INTEGER
           The leading dimension of the array Q.  If eigenvectors are
           desired, then  LDQ >= max(1,N).  In any case,  LDQ >= 1.

    QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N)
           Referenced only when ICOMPQ = 1.  Used to store parts of
           the eigenvector matrix when the updating matrix multiplies
           take place.

    LDQS   (input) INTEGER
           The leading dimension of the array QSTORE.  If ICOMPQ = 1,
           then  LDQS >= max(1,N).  In any case,  LDQS >= 1.

    WORK   (workspace) DOUBLE PRECISION array,
           If ICOMPQ = 0 or 1, the dimension of WORK must be at least
                       1 + 3*N + 2*N*lg N + 2*N**2
                       ( lg( N ) = smallest integer k
                                   such that 2^k >= N )
           If ICOMPQ = 2, the dimension of WORK must be at least
                       4*N + N**2.

    IWORK  (workspace) INTEGER array,
           If ICOMPQ = 0 or 1, the dimension of IWORK must be at least
                          6 + 6*N + 5*N*lg N.
                          ( lg( N ) = smallest integer k
                                      such that 2^k >= N )
           If ICOMPQ = 2, the dimension of IWORK must be at least
                          3 + 5*N.

    INFO   (output) INTEGER
            = 0:  successful exit.
            < 0:  if INFO = -i, the i-th argument had an illegal value.
            > 0:  The algorithm failed to compute an eigenvalue while
                  working on the submatrix lying in rows and columns
                  INFO/(N+1) through mod(INFO,N+1).

    Further Details
    ===============

    Based on contributions by
       Jeff Rutter, Computer Science Division, University of California
       at Berkeley, USA

    =====================================================================


       Test the input parameters.
*/

    /* Parameter adjustments (shift to 1-based Fortran-style indexing). */
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    qstore_dim1 = *ldqs;
    qstore_offset = 1 + qstore_dim1 * 1;
    qstore -= qstore_offset;
    --work;
    --iwork;

    /* Function Body */
    *info = 0;

    if (*icompq < 0 || *icompq > 2) {
	*info = -1;
    } else if ((*icompq == 1 && *qsiz < max(0,*n))) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ldq < max(1,*n)) {
	*info = -7;
    } else if (*ldqs < max(1,*n)) {
	*info = -9;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("DLAED0", &i__1);
	return 0;
    }

/* Quick return if possible */

    if (*n == 0) {
	return 0;
    }

    /* SMLSIZ: below this size subproblems are solved directly by DSTEQR. */
    smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0, (
	    ftnlen)6, (ftnlen)1);

/*
       Determine the size and placement of the submatrices, and save in
       the leading elements of IWORK.
*/

    iwork[1] = *n;
    subpbs = 1;
    tlvls = 0;
L10:
    /* Halve every current subproblem (entry j splits into entries 2j-1
       and 2j) until all pieces have at most SMLSIZ rows; TLVLS counts
       the number of halving levels, SUBPBS the number of pieces. */
    if (iwork[subpbs] > smlsiz) {
	for (j = subpbs; j >= 1; --j) {
	    iwork[j * 2] = (iwork[j] + 1) / 2;
	    iwork[((j) << (1)) - 1] = iwork[j] / 2;
/* L20: */
	}
	++tlvls;
	subpbs <<= 1;
	goto L10;
    }
    /* Convert piece sizes into cumulative end positions. */
    i__1 = subpbs;
    for (j = 2; j <= i__1; ++j) {
	iwork[j] += iwork[j - 1];
/* L30: */
    }

/*
       Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1
       using rank-1 modifications (cuts).
*/

    spm1 = subpbs - 1;
    i__1 = spm1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	submat = iwork[i__] + 1;
	smm1 = submat - 1;
	/* Rank-1 tear: subtract |e| from the two diagonal entries that
	   straddle each cut so the pieces are exactly tridiagonal. */
	d__[smm1] -= (d__1 = e[smm1], abs(d__1));
	d__[submat] -= (d__1 = e[smm1], abs(d__1));
/* L40: */
    }

    /* Base offset of the INDXQ permutation within IWORK (4*N + 3). */
    indxq = ((*n) << (2)) + 3;
    if (*icompq != 2) {

/*
          Set up workspaces for eigenvalues only/accumulate new vectors
          routine
*/

	/* lgn = ceil(log2(N)), computed defensively from the float log. */
	temp = log((doublereal) (*n)) / log(2.);
	lgn = (integer) temp;
	if (pow_ii(&c__2, &lgn) < *n) {
	    ++lgn;
	}
	if (pow_ii(&c__2, &lgn) < *n) {
	    ++lgn;
	}
	/* Integer-workspace pointers handed to DLAED7. */
	iprmpt = indxq + *n + 1;
	iperm = iprmpt + *n * lgn;
	iqptr = iperm + *n * lgn;
	igivpt = iqptr + *n + 2;
	igivcl = igivpt + *n * lgn;

	/* Real-workspace pointers: Givens values, Q storage, remainder. */
	igivnm = 1;
	iq = igivnm + ((*n) << (1)) * lgn;
/* Computing 2nd power */
	i__1 = *n;
	iwrem = iq + i__1 * i__1 + 1;

/* Initialize pointers */

	i__1 = subpbs;
	for (i__ = 0; i__ <= i__1; ++i__) {
	    iwork[iprmpt + i__] = 1;
	    iwork[igivpt + i__] = 1;
/* L50: */
	}
	iwork[iqptr] = 1;
    }

/*
       Solve each submatrix eigenproblem at the bottom of the divide and
       conquer tree.
*/

    curr = 0;
    i__1 = spm1;
    for (i__ = 0; i__ <= i__1; ++i__) {
	if (i__ == 0) {
	    submat = 1;
	    matsiz = iwork[1];
	} else {
	    submat = iwork[i__] + 1;
	    matsiz = iwork[i__ + 1] - iwork[i__];
	}
	if (*icompq == 2) {
	    dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat +
		    submat * q_dim1], ldq, &work[1], info);
	    if (*info != 0) {
		goto L130;
	    }
	} else {
	    /* Eigenvectors of the small problem go into packed storage
	       in WORK(IQ...), tracked by the IQPTR table. */
	    dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 +
		    iwork[iqptr + curr]], &matsiz, &work[1], info);
	    if (*info != 0) {
		goto L130;
	    }
	    if (*icompq == 1) {
		dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b15, &q[submat *
			q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]],
			 &matsiz, &c_b29, &qstore[submat * qstore_dim1 + 1],
			ldqs);
	    }
/* Computing 2nd power */
	    i__2 = matsiz;
	    iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
	    ++curr;
	}
	/* Identity permutation for this submatrix's INDXQ section. */
	k = 1;
	i__2 = iwork[i__ + 1];
	for (j = submat; j <= i__2; ++j) {
	    iwork[indxq + j] = k;
	    ++k;
/* L60: */
	}
/* L70: */
    }

/*
       Successively merge eigensystems of adjacent submatrices
       into eigensystem for the corresponding larger matrix.

       while ( SUBPBS > 1 )
*/

    curlvl = 1;
L80:
    if (subpbs > 1) {
	spm2 = subpbs - 2;
	i__1 = spm2;
	for (i__ = 0; i__ <= i__1; i__ += 2) {
	    if (i__ == 0) {
		submat = 1;
		matsiz = iwork[2];
		msd2 = iwork[1];
		curprb = 0;
	    } else {
		submat = iwork[i__] + 1;
		matsiz = iwork[i__ + 2] - iwork[i__];
		msd2 = matsiz / 2;
		++curprb;
	    }

/*
       Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2)
       into an eigensystem of size MATSIZ.
       DLAED1 is used only for the full eigensystem of a tridiagonal
       matrix.
       DLAED7 handles the cases in which eigenvalues only or eigenvalues
       and eigenvectors of a full symmetric matrix (which was reduced to
       tridiagonal form) are desired.
*/

	    if (*icompq == 2) {
		dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1],
			ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], &
			msd2, &work[1], &iwork[subpbs + 1], info);
	    } else {
		dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[
			submat], &qstore[submat * qstore_dim1 + 1], ldqs, &
			iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, &
			work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm]
			, &iwork[igivpt], &iwork[igivcl], &work[igivnm], &
			work[iwrem], &iwork[subpbs + 1], info);
	    }
	    if (*info != 0) {
		goto L130;
	    }
	    /* Record the merged problem's end position for the next level. */
	    iwork[i__ / 2 + 1] = iwork[i__ + 2];
/* L90: */
	}
	subpbs /= 2;
	++curlvl;
	goto L80;
    }

/*
       end while

       Re-merge the eigenvalues/vectors which were deflated at the final
       merge step.
*/

    if (*icompq == 1) {
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    j = iwork[indxq + i__];
	    work[i__] = d__[j];
	    dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1
		    + 1], &c__1);
/* L100: */
	}
	dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
    } else if (*icompq == 2) {
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    j = iwork[indxq + i__];
	    work[i__] = d__[j];
	    dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1);
/* L110: */
	}
	dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
	dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq);
    } else {
	/* ICOMPQ = 0: only the eigenvalues need re-sorting. */
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    j = iwork[indxq + i__];
	    work[i__] = d__[j];
/* L120: */
	}
	dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
    }
    goto L140;

L130:
    /* Failure path: encode the failing submatrix range into INFO
       (decoded by the caller as documented in the INFO description). */
    *info = submat * (*n + 1) + submat + matsiz - 1;

L140:
    return 0;

/* End of DLAED0 */

} /* dlaed0_ */
+
/* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q,
	integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt,
	doublereal *work, integer *iwork, integer *info)
{
    /* System generated locals */
    integer q_dim1, q_offset, i__1, i__2;

    /* Local variables */
    static integer i__, k, n1, n2, is, iw, iz, iq2, zpp1, indx, indxc;
    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
	    doublereal *, integer *);
    static integer indxp;
    extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *,
	    doublereal *, doublereal *, integer *, integer *, doublereal *,
	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
	     integer *, integer *, integer *, integer *), dlaed3_(integer *,
	    integer *, integer *, doublereal *, doublereal *, integer *,
	    doublereal *, doublereal *, doublereal *, integer *, integer *,
	    doublereal *, doublereal *, integer *);
    static integer idlmda;
    extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
	    integer *, integer *, integer *), xerbla_(char *, integer *);
    static integer coltyp;


/*
    -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1999


    Purpose
    =======

    DLAED1 computes the updated eigensystem of a diagonal
    matrix after modification by a rank-one symmetric matrix.  This
    routine is used only for the eigenproblem which requires all
    eigenvalues and eigenvectors of a tridiagonal matrix.  DLAED7 handles
    the case in which eigenvalues only or eigenvalues and eigenvectors
    of a full symmetric matrix (which was reduced to tridiagonal form)
    are desired.

      T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)

       where Z = Q'u, u is a vector of length N with ones in the
       CUTPNT and CUTPNT + 1 th elements and zeros elsewhere.

       The eigenvectors of the original matrix are stored in Q, and the
       eigenvalues are in D.  The algorithm consists of three stages:

          The first stage consists of deflating the size of the problem
          when there are multiple eigenvalues or if there is a zero in
          the Z vector.  For each such occurrence the dimension of the
          secular equation problem is reduced by one.  This stage is
          performed by the routine DLAED2.

          The second stage consists of calculating the updated
          eigenvalues. This is done by finding the roots of the secular
          equation via the routine DLAED4 (as called by DLAED3).
          This routine also calculates the eigenvectors of the current
          problem.

          The final stage consists of computing the updated eigenvectors
          directly using the updated eigenvalues.  The eigenvectors for
          the current problem are multiplied with the eigenvectors from
          the overall problem.

    Arguments
    =========

    N      (input) INTEGER
           The dimension of the symmetric tridiagonal matrix.  N >= 0.

    D      (input/output) DOUBLE PRECISION array, dimension (N)
           On entry, the eigenvalues of the rank-1-perturbed matrix.
           On exit, the eigenvalues of the repaired matrix.

    Q      (input/output) DOUBLE PRECISION array, dimension (LDQ,N)
           On entry, the eigenvectors of the rank-1-perturbed matrix.
           On exit, the eigenvectors of the repaired tridiagonal matrix.

    LDQ    (input) INTEGER
           The leading dimension of the array Q.  LDQ >= max(1,N).

    INDXQ  (input/output) INTEGER array, dimension (N)
           On entry, the permutation which separately sorts the two
           subproblems in D into ascending order.
           On exit, the permutation which will reintegrate the
           subproblems back into sorted order,
           i.e. D( INDXQ( I = 1, N ) ) will be in ascending order.

    RHO    (input) DOUBLE PRECISION
           The subdiagonal entry used to create the rank-1 modification.

    CUTPNT (input) INTEGER
           The location of the last eigenvalue in the leading sub-matrix.
           min(1,N) <= CUTPNT <= N/2.

    WORK   (workspace) DOUBLE PRECISION array, dimension (4*N + N**2)

    IWORK  (workspace) INTEGER array, dimension (4*N)

    INFO   (output) INTEGER
            = 0:  successful exit.
            < 0:  if INFO = -i, the i-th argument had an illegal value.
            > 0:  if INFO = 1, an eigenvalue did not converge

    Further Details
    ===============

    Based on contributions by
       Jeff Rutter, Computer Science Division, University of California
       at Berkeley, USA
    Modified by Francoise Tisseur, University of Tennessee.

    =====================================================================


       Test the input parameters.
*/

    /* Parameter adjustments (shift to 1-based Fortran-style indexing). */
    --d__;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    --indxq;
    --work;
    --iwork;

    /* Function Body */
    *info = 0;

    if (*n < 0) {
	*info = -1;
    } else if (*ldq < max(1,*n)) {
	*info = -4;
    } else /* if(complicated condition) */ {
	/* Enforce min(1,N/2) <= CUTPNT <= N/2 (see CUTPNT docs above). */
/* Computing MIN */
	i__1 = 1, i__2 = *n / 2;
	if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) {
	    *info = -7;
	}
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("DLAED1", &i__1);
	return 0;
    }

/* Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*
       The following values are integer pointers which indicate
       the portion of the workspace
       used by a particular array in DLAED2 and DLAED3.
*/

    /* Real workspace layout: z vector, dlamda, w, then q2 (N**2 part). */
    iz = 1;
    idlmda = iz + *n;
    iw = idlmda + *n;
    iq2 = iw + *n;

    /* Integer workspace layout: indx, indxc, coltyp, indxp (N each). */
    indx = 1;
    indxc = indx + *n;
    coltyp = indxc + *n;
    indxp = coltyp + *n;


/*
       Form the z-vector which consists of the last row of Q_1 and the
       first row of Q_2.
*/

    dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1);
    zpp1 = *cutpnt + 1;
    i__1 = *n - *cutpnt;
    dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1);

/* Deflate eigenvalues. */

    dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[
	    iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[
	    indxc], &iwork[indxp], &iwork[coltyp], info);

    if (*info != 0) {
	goto L20;
    }

/* Solve Secular Equation. */

    if (k != 0) {
	/* IS: first free position in WORK after the COLTYP-dependent
	   block of deflated eigenvectors; handed to DLAED3 as scratch. */
	is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp +
		1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2;
	dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda],
		 &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[
		is], info);
	if (*info != 0) {
	    goto L20;
	}

/* Prepare the INDXQ sorting permutation. */

	n1 = k;
	n2 = *n - k;
	dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
    } else {
	/* Everything deflated: eigenvalues already sorted, identity map. */
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    indxq[i__] = i__;
/* L10: */
	}
    }

L20:
    return 0;

/* End of DLAED1 */

} /* dlaed1_ */
+
+/* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal *
+	d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho,
+	doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2,
+	integer *indx, integer *indxc, integer *indxp, integer *coltyp,
+	integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, i__1, i__2;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal c__;
+    static integer i__, j;
+    static doublereal s, t;
+    static integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1;
+    static doublereal eps, tau, tol;
+    static integer psm[4], imax, jmax;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    static integer ctot[4];
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dcopy_(integer *, doublereal *, integer *, doublereal
+	    *, integer *);
+
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+	    integer *, integer *, integer *), dlacpy_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLAED2 merges the two sets of eigenvalues together into a single
+    sorted set.  Then it tries to deflate the size of the problem.
+    There are two ways in which deflation can occur:  when two or more
+    eigenvalues are close together or if there is a tiny entry in the
+    Z vector.  For each such occurrence the order of the related secular
+    equation problem is reduced by one.
+
+    Arguments
+    =========
+
+    K      (output) INTEGER
+           The number of non-deflated eigenvalues, and the order of the
+           related secular equation. 0 <= K <=N.
+
+    N      (input) INTEGER
+           The dimension of the symmetric tridiagonal matrix.  N >= 0.
+
+    N1     (input) INTEGER
+           The location of the last eigenvalue in the leading sub-matrix.
+           min(1,N) <= N1 <= N/2.
+
+    D      (input/output) DOUBLE PRECISION array, dimension (N)
+           On entry, D contains the eigenvalues of the two submatrices to
+           be combined.
+           On exit, D contains the trailing (N-K) updated eigenvalues
+           (those which were deflated) sorted into increasing order.
+
+    Q      (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
+           On entry, Q contains the eigenvectors of two submatrices in
+           the two square blocks with corners at (1,1), (N1,N1)
+           and (N1+1, N1+1), (N,N).
+           On exit, Q contains the trailing (N-K) updated eigenvectors
+           (those which were deflated) in its last N-K columns.
+
+    LDQ    (input) INTEGER
+           The leading dimension of the array Q.  LDQ >= max(1,N).
+
+    INDXQ  (input/output) INTEGER array, dimension (N)
+           The permutation which separately sorts the two sub-problems
+           in D into ascending order.  Note that elements in the second
+           half of this permutation must first have N1 added to their
+           values. Destroyed on exit.
+
+    RHO    (input/output) DOUBLE PRECISION
+           On entry, the off-diagonal element associated with the rank-1
+           cut which originally split the two submatrices which are now
+           being recombined.
+           On exit, RHO has been modified to the value required by
+           DLAED3.
+
+    Z      (input) DOUBLE PRECISION array, dimension (N)
+           On entry, Z contains the updating vector (the last
+           row of the first sub-eigenvector matrix and the first row of
+           the second sub-eigenvector matrix).
+           On exit, the contents of Z have been destroyed by the updating
+           process.
+
+    DLAMDA (output) DOUBLE PRECISION array, dimension (N)
+           A copy of the first K eigenvalues which will be used by
+           DLAED3 to form the secular equation.
+
+    W      (output) DOUBLE PRECISION array, dimension (N)
+           The first k values of the final deflation-altered z-vector
+           which will be passed to DLAED3.
+
+    Q2     (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2)
+           A copy of the first K eigenvectors which will be used by
+           DLAED3 in a matrix multiply (DGEMM) to solve for the new
+           eigenvectors.
+
+    INDX   (workspace) INTEGER array, dimension (N)
+           The permutation used to sort the contents of DLAMDA into
+           ascending order.
+
+    INDXC  (output) INTEGER array, dimension (N)
+           The permutation used to arrange the columns of the deflated
+           Q matrix into three groups:  the first group contains non-zero
+           elements only at and above N1, the second contains
+           non-zero elements only below N1, and the third is dense.
+
+    INDXP  (workspace) INTEGER array, dimension (N)
+           The permutation used to place deflated values of D at the end
+           of the array.  INDXP(1:K) points to the nondeflated D-values
+           and INDXP(K+1:N) points to the deflated eigenvalues.
+
+    COLTYP (workspace/output) INTEGER array, dimension (N)
+           During execution, a label which will indicate which of the
+           following types a column in the Q2 matrix is:
+           1 : non-zero in the upper half only;
+           2 : dense;
+           3 : non-zero in the lower half only;
+           4 : deflated.
+           On exit, COLTYP(i) is the number of columns of type i,
+           for i=1 to 4 only.
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+    Modified by Francoise Tisseur, University of Tennessee.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+/* f2c idiom: decrement each pointer so the 1-based Fortran indexing
+   of the original source can be kept throughout the translated body. */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1 * 1;
+    q -= q_offset;
+    --indxq;
+    --z__;
+    --dlamda;
+    --w;
+    --q2;
+    --indx;
+    --indxc;
+    --indxp;
+    --coltyp;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -2;
+    } else if (*ldq < max(1,*n)) {
+	*info = -6;
+    } else /* if(complicated condition) */ {
+/* Computing MIN */
+	i__1 = 1, i__2 = *n / 2;
+/* Enforce min(1,N) <= N1 <= N/2 (see the Arguments section above). */
+	if (min(i__1,i__2) > *n1 || *n / 2 < *n1) {
+	    *info = -3;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAED2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    n2 = *n - *n1;
+    n1p1 = *n1 + 1;
+
+/* If RHO < 0, negate the trailing N2 entries of z so the rank-1 update
+   can be treated as non-negative (c_b151 is presumably the constant -1.,
+   defined earlier in this file -- TODO confirm). */
+    if (*rho < 0.) {
+	dscal_(&n2, &c_b151, &z__[n1p1], &c__1);
+    }
+
+/*
+       Normalize z so that norm(z) = 1.  Since z is the concatenation of
+       two normalized vectors, norm2(z) = sqrt(2).
+*/
+
+    t = 1. / sqrt(2.);
+    dscal_(n, &t, &z__[1], &c__1);
+
+/* RHO = ABS( norm(z)**2 * RHO ) */
+
+    *rho = (d__1 = *rho * 2., abs(d__1));
+
+/* Sort the eigenvalues into increasing order */
+
+    i__1 = *n;
+    for (i__ = n1p1; i__ <= i__1; ++i__) {
+	indxq[i__] += *n1;
+/* L10: */
+    }
+
+/* re-integrate the deflated parts from the last pass */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = d__[indxq[i__]];
+/* L20: */
+    }
+/* Merge the two individually-sorted halves of DLAMDA; INDXC receives the
+   merge permutation, INDX the composition INDXQ o INDXC. */
+    dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	indx[i__] = indxq[indxc[i__]];
+/* L30: */
+    }
+
+/* Calculate the allowable deflation tolerance */
+
+    imax = idamax_(n, &z__[1], &c__1);
+    jmax = idamax_(n, &d__[1], &c__1);
+    eps = EPSILON;
+/* Computing MAX */
+    d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2))
+	    ;
+    tol = eps * 8. * max(d__3,d__4);
+
+/*
+       If the rank-1 modifier is small enough, no more needs to be done
+       except to reorganize Q so that its columns correspond with the
+       elements in D.
+*/
+
+    if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+	*k = 0;
+	iq2 = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = indx[j];
+	    dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+	    dlamda[j] = d__[i__];
+	    iq2 += *n;
+/* L40: */
+	}
+	dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq);
+	dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1);
+	goto L190;
+    }
+
+/*
+       If there are multiple eigenvalues then the problem deflates.  Here
+       the number of equal eigenvalues are found.  As each equal
+       eigenvalue is found, an elementary reflector is computed to rotate
+       the corresponding eigensubspace so that the corresponding
+       components of Z are zero in this new basis.
+*/
+
+    i__1 = *n1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	coltyp[i__] = 1;
+/* L50: */
+    }
+    i__1 = *n;
+    for (i__ = n1p1; i__ <= i__1; ++i__) {
+	coltyp[i__] = 3;
+/* L60: */
+    }
+
+
+/* Deflation scan: walk the merged order; K counts survivors from the
+   front of INDXP while K2 fills deflated entries from the back. */
+    *k = 0;
+    k2 = *n + 1;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	nj = indx[j];
+	if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	    --k2;
+	    coltyp[nj] = 4;
+	    indxp[k2] = nj;
+	    if (j == *n) {
+		goto L100;
+	    }
+	} else {
+	    pj = nj;
+	    goto L80;
+	}
+/* L70: */
+    }
+/* Main scan loop: pj is the previous surviving candidate, nj the current
+   one; pairs of close eigenvalues are rotated and deflated here. */
+L80:
+    ++j;
+    nj = indx[j];
+    if (j > *n) {
+	goto L100;
+    }
+    if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	--k2;
+	coltyp[nj] = 4;
+	indxp[k2] = nj;
+    } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+	s = z__[pj];
+	c__ = z__[nj];
+
+/*
+          Find sqrt(a**2+b**2) without overflow or
+          destructive underflow.
+*/
+
+	tau = dlapy2_(&c__, &s);
+	t = d__[nj] - d__[pj];
+	c__ /= tau;
+	s = -s / tau;
+	if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+	    z__[nj] = tau;
+	    z__[pj] = 0.;
+	    if (coltyp[nj] != coltyp[pj]) {
+		coltyp[nj] = 2;
+	    }
+	    coltyp[pj] = 4;
+	    drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, &
+		    c__, &s);
+/* Computing 2nd power */
+	    d__1 = c__;
+/* Computing 2nd power */
+	    d__2 = s;
+	    t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
+/* Computing 2nd power */
+	    d__1 = s;
+/* Computing 2nd power */
+	    d__2 = c__;
+	    d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
+	    d__[pj] = t;
+	    --k2;
+	    i__ = 1;
+/* Insertion step: sink d[pj] into the already-sorted deflated tail of
+   INDXP so the deflated eigenvalues stay in increasing order. */
+L90:
+	    if (k2 + i__ <= *n) {
+		if (d__[pj] < d__[indxp[k2 + i__]]) {
+		    indxp[k2 + i__ - 1] = indxp[k2 + i__];
+		    indxp[k2 + i__] = pj;
+		    ++i__;
+		    goto L90;
+		} else {
+		    indxp[k2 + i__ - 1] = pj;
+		}
+	    } else {
+		indxp[k2 + i__ - 1] = pj;
+	    }
+	    pj = nj;
+	} else {
+	    ++(*k);
+	    dlamda[*k] = d__[pj];
+	    w[*k] = z__[pj];
+	    indxp[*k] = pj;
+	    pj = nj;
+	}
+    }
+    goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+    ++(*k);
+    dlamda[*k] = d__[pj];
+    w[*k] = z__[pj];
+    indxp[*k] = pj;
+
+/*
+       Count up the total number of the various types of columns, then
+       form a permutation which positions the four column types into
+       four uniform groups (although one or more of these groups may be
+       empty).
+*/
+
+    for (j = 1; j <= 4; ++j) {
+	ctot[j - 1] = 0;
+/* L110: */
+    }
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	ct = coltyp[j];
+	++ctot[ct - 1];
+/* L120: */
+    }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+    psm[0] = 1;
+    psm[1] = ctot[0] + 1;
+    psm[2] = psm[1] + ctot[1];
+    psm[3] = psm[2] + ctot[2];
+/* K = number of non-deflated eigenvalues: everything except type-4
+   (deflated) columns. */
+    *k = *n - ctot[3];
+
+/*
+       Fill out the INDXC array so that the permutation which it induces
+       will place all type-1 columns first, all type-2 columns next,
+       then all type-3's, and finally all type-4's.
+*/
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	js = indxp[j];
+	ct = coltyp[js];
+	indx[psm[ct - 1]] = js;
+	indxc[psm[ct - 1]] = j;
+	++psm[ct - 1];
+/* L130: */
+    }
+
+/*
+       Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+       and Q2 respectively.  The eigenvalues/vectors which were not
+       deflated go into the first K slots of DLAMDA and Q2 respectively,
+       while those which were deflated go into the last N - K slots.
+*/
+
+    i__ = 1;
+    iq1 = 1;
+    iq2 = (ctot[0] + ctot[1]) * *n1 + 1;
+/* Type-1 columns: only the top N1 rows are non-zero, so copy just those. */
+    i__1 = ctot[0];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+	z__[i__] = d__[js];
+	++i__;
+	iq1 += *n1;
+/* L140: */
+    }
+
+/* Type-2 (dense) columns: copy both the top N1 and bottom N2 pieces. */
+    i__1 = ctot[1];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+	dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+	z__[i__] = d__[js];
+	++i__;
+	iq1 += *n1;
+	iq2 += n2;
+/* L150: */
+    }
+
+/* Type-3 columns: only the bottom N2 rows are non-zero. */
+    i__1 = ctot[2];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+	z__[i__] = d__[js];
+	++i__;
+	iq2 += n2;
+/* L160: */
+    }
+
+/* Type-4 (deflated) columns: full length; note z is reused here as
+   scratch for the permuted eigenvalues. */
+    iq1 = iq2;
+    i__1 = ctot[3];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+	iq2 += *n;
+	z__[i__] = d__[js];
+	++i__;
+/* L170: */
+    }
+
+/*
+       The deflated eigenvalues and their corresponding vectors go back
+       into the last N - K slots of D and Q respectively.
+*/
+
+    dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq);
+    i__1 = *n - *k;
+    dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Copy CTOT into COLTYP for referencing in DLAED3. */
+
+    for (j = 1; j <= 4; ++j) {
+	coltyp[j] = ctot[j - 1];
+/* L180: */
+    }
+
+L190:
+    return 0;
+
+/* End of DLAED2 */
+
+} /* dlaed2_ */
+
+/* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal *
+	d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda,
+	 doublereal *q2, integer *indx, integer *ctot, doublereal *w,
+	doublereal *s, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    static integer i__, j, n2, n12, ii, n23, iq2;
+    static doublereal temp;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *),
+	    dcopy_(integer *, doublereal *, integer *, doublereal *, integer
+	    *), dlaed4_(integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *);
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLAED3 finds the roots of the secular equation, as defined by the
+    values in D, W, and RHO, between 1 and K.  It makes the
+    appropriate calls to DLAED4 and then updates the eigenvectors by
+    multiplying the matrix of eigenvectors of the pair of eigensystems
+    being combined by the matrix of eigenvectors of the K-by-K system
+    which is solved here.
+
+    This code makes very mild assumptions about floating point
+    arithmetic. It will work on machines with a guard digit in
+    add/subtract, or on those binary machines without guard digits
+    which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+    It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.
+
+    Arguments
+    =========
+
+    K       (input) INTEGER
+            The number of terms in the rational function to be solved by
+            DLAED4.  K >= 0.
+
+    N       (input) INTEGER
+            The number of rows and columns in the Q matrix.
+            N >= K (deflation may result in N>K).
+
+    N1      (input) INTEGER
+            The location of the last eigenvalue in the leading submatrix.
+            min(1,N) <= N1 <= N/2.
+
+    D       (output) DOUBLE PRECISION array, dimension (N)
+            D(I) contains the updated eigenvalues for
+            1 <= I <= K.
+
+    Q       (output) DOUBLE PRECISION array, dimension (LDQ,N)
+            Initially the first K columns are used as workspace.
+            On output the columns 1 to K contain
+            the updated eigenvectors.
+
+    LDQ     (input) INTEGER
+            The leading dimension of the array Q.  LDQ >= max(1,N).
+
+    RHO     (input) DOUBLE PRECISION
+            The value of the parameter in the rank one update equation.
+            RHO >= 0 required.
+
+    DLAMDA  (input/output) DOUBLE PRECISION array, dimension (K)
+            The first K elements of this array contain the old roots
+            of the deflated updating problem.  These are the poles
+            of the secular equation. May be changed on output by
+            having lowest order bit set to zero on Cray X-MP, Cray Y-MP,
+            Cray-2, or Cray C-90, as described above.
+
+    Q2      (input) DOUBLE PRECISION array, dimension (LDQ2, N)
+            The first K columns of this matrix contain the non-deflated
+            eigenvectors for the split problem.
+
+    INDX    (input) INTEGER array, dimension (N)
+            The permutation used to arrange the columns of the deflated
+            Q matrix into three groups (see DLAED2).
+            The rows of the eigenvectors found by DLAED4 must be likewise
+            permuted before the matrix multiply can take place.
+
+    CTOT    (input) INTEGER array, dimension (4)
+            A count of the total number of the various types of columns
+            in Q, as described in INDX.  The fourth column type is any
+            column which has been deflated.
+
+    W       (input/output) DOUBLE PRECISION array, dimension (K)
+            The first K elements of this array contain the components
+            of the deflation-adjusted updating vector. Destroyed on
+            output.
+
+    S       (workspace) DOUBLE PRECISION array, dimension (N1 + 1)*K
+            Will contain the eigenvectors of the repaired matrix which
+            will be multiplied by the previously accumulated eigenvectors
+            to update the system.
+
+    LDS     (input) INTEGER
+            The leading dimension of S.  LDS >= max(1,K).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  if INFO = 1, an eigenvalue did not converge
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+    Modified by Francoise Tisseur, University of Tennessee.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+/* f2c idiom: shift each pointer so the body can keep the original
+   1-based Fortran indexing. */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1 * 1;
+    q -= q_offset;
+    --dlamda;
+    --q2;
+    --indx;
+    --ctot;
+    --w;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*k < 0) {
+	*info = -1;
+    } else if (*n < *k) {
+	*info = -2;
+    } else if (*ldq < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAED3", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*k == 0) {
+	return 0;
+    }
+
+/*
+       Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
+       be computed with high relative accuracy (barring over/underflow).
+       This is a problem on machines without a guard digit in
+       add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+       The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
+       which on any of these machines zeros out the bottommost
+       bit of DLAMDA(I) if it is 1; this makes the subsequent
+       subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
+       occurs. On binary machines with a guard digit (almost all
+       machines) it does not change DLAMDA(I) at all. On hexadecimal
+       and decimal machines with a guard digit, it slightly
+       changes the bottommost bits of DLAMDA(I). It does not account
+       for hexadecimal or decimal machines without guard digits
+       (we know of none). We use a subroutine call to compute
+       2*DLAMBDA(I) to prevent optimizing compilers from eliminating
+       this code.
+*/
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+    }
+
+/* Solve the secular equation for each root; column j of Q is used as
+   workspace for DLAED4's DELTA output. */
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+		info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+	if (*info != 0) {
+	    goto L120;
+	}
+/* L20: */
+    }
+
+    if (*k == 1) {
+	goto L110;
+    }
+/* For K = 2 the eigenvector entries from DLAED4 only need to be
+   permuted by INDX before the final multiply. */
+    if (*k == 2) {
+	i__1 = *k;
+	for (j = 1; j <= i__1; ++j) {
+	    w[1] = q[j * q_dim1 + 1];
+	    w[2] = q[j * q_dim1 + 2];
+	    ii = indx[1];
+	    q[j * q_dim1 + 1] = w[ii];
+	    ii = indx[2];
+	    q[j * q_dim1 + 2] = w[ii];
+/* L30: */
+	}
+	goto L110;
+    }
+
+/* Compute updated W. */
+
+    dcopy_(k, &w[1], &c__1, &s[1], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+    i__1 = *ldq + 1;
+    dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L40: */
+	}
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+	}
+/* L60: */
+    }
+/* Restore W's signs from the saved copy in S; sqrt(-w) relies on the
+   accumulated product above being <= 0 for each component. */
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__1 = sqrt(-w[i__]);
+	w[i__] = d_sign(&d__1, &s[i__]);
+/* L70: */
+    }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *k;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    s[i__] = w[i__] / q[i__ + j * q_dim1];
+/* L80: */
+	}
+	temp = dnrm2_(k, &s[1], &c__1);
+	i__2 = *k;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    ii = indx[i__];
+	    q[i__ + j * q_dim1] = s[ii] / temp;
+/* L90: */
+	}
+/* L100: */
+    }
+
+/* Compute the updated eigenvectors. */
+
+L110:
+
+/* Q := Q2 * S in two blocks that exploit the column grouping from
+   DLAED2: the bottom n2 rows use types 2+3 (n23 columns), the top n1
+   rows use types 1+2 (n12 columns).  c_b15/c_b29 are presumably the
+   constants 1. and 0. defined earlier in this file -- TODO confirm. */
+    n2 = *n - *n1;
+    n12 = ctot[1] + ctot[2];
+    n23 = ctot[2] + ctot[3];
+
+    dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23);
+    iq2 = *n1 * n12 + 1;
+    if (n23 != 0) {
+	dgemm_("N", "N", &n2, k, &n23, &c_b15, &q2[iq2], &n2, &s[1], &n23, &
+		c_b29, &q[*n1 + 1 + q_dim1], ldq);
+    } else {
+	dlaset_("A", &n2, k, &c_b29, &c_b29, &q[*n1 + 1 + q_dim1], ldq);
+    }
+
+    dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12);
+    if (n12 != 0) {
+	dgemm_("N", "N", n1, k, &n12, &c_b15, &q2[1], n1, &s[1], &n12, &c_b29,
+		 &q[q_offset], ldq);
+    } else {
+	dlaset_("A", n1, k, &c_b29, &c_b29, &q[q_dim1 + 1], ldq);
+    }
+
+
+L120:
+    return 0;
+
+/* End of DLAED3 */
+
+} /* dlaed3_ */
+
+/* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__,
+ doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal a, b, c__;
+ static integer j;
+ static doublereal w;
+ static integer ii;
+ static doublereal dw, zz[3];
+ static integer ip1;
+ static doublereal del, eta, phi, eps, tau, psi;
+ static integer iim1, iip1;
+ static doublereal dphi, dpsi;
+ static integer iter;
+ static doublereal temp, prew, temp1, dltlb, dltub, midpt;
+ static integer niter;
+ static logical swtch;
+ extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), dlaed6_(integer *,
+ logical *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ static logical swtch3;
+
+ static logical orgati;
+ static doublereal erretm, rhoinv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ December 23, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the I-th updated eigenvalue of a symmetric
+ rank-one modification to a diagonal matrix whose elements are
+ given in the array d, and that
+
+ D(i) < D(j) for i < j
+
+ and that RHO > 0. This is arranged by the calling routine, and is
+ no loss in generality. The rank-one modified system is thus
+
+ diag( D ) + RHO * Z * Z_transpose.
+
+ where we assume the Euclidean norm of Z is 1.
+
+ The method consists of approximating the rational functions in the
+ secular equation by simpler interpolating rational functions.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The length of all arrays.
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. 1 <= I <= N.
+
+ D (input) DOUBLE PRECISION array, dimension (N)
+ The original eigenvalues. It is assumed that they are in
+ order, D(I) < D(J) for I < J.
+
+ Z (input) DOUBLE PRECISION array, dimension (N)
+ The components of the updating vector.
+
+ DELTA (output) DOUBLE PRECISION array, dimension (N)
+ If N .ne. 1, DELTA contains (D(j) - lambda_I) in its j-th
+ component. If N = 1, then DELTA(1) = 1. The vector DELTA
+ contains the information necessary to construct the
+ eigenvectors.
+
+ RHO (input) DOUBLE PRECISION
+ The scalar in the symmetric updating formula.
+
+ DLAM (output) DOUBLE PRECISION
+ The computed lambda_I, the I-th updated eigenvalue.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, the updating process failed.
+
+ Internal Parameters
+ ===================
+
+ Logical variable ORGATI (origin-at-i?) is used for distinguishing
+ whether D(i) or D(i+1) is treated as the origin.
+
+ ORGATI = .true. origin at i
+ ORGATI = .false. origin at i+1
+
+ Logical variable SWTCH3 (switch-for-3-poles?) is for noting
+ if we are working with THREE poles!
+
+ MAXIT is the maximum number of iterations allowed for each
+ eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Since this routine is called in an inner loop, we do no argument
+ checking.
+
+ Quick return for N=1 and 2.
+*/
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *dlam = d__[1] + *rho * z__[1] * z__[1];
+ delta[1] = 1.;
+ return 0;
+ }
+ if (*n == 2) {
+ dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = EPSILON;
+ rhoinv = 1. / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess */
+
+ midpt = *rho / 2.;
+
+/*
+ If ||Z||_2 is not one, then TEMP should be set to
+ RHO * ||Z||_2^2 / TWO
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L10: */
+ }
+
+ psi = 0.;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[*
+ n];
+
+ if (w <= 0.) {
+ temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho)
+ + z__[*n] * z__[*n] / *rho;
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]
+ ;
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.) {
+ tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+ }
+ }
+
+/*
+ It can be proved that
+ D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO
+*/
+
+ dltlb = midpt;
+ dltub = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.) {
+ tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+ }
+
+/*
+ It can be proved that
+ D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2
+*/
+
+ dltlb = 0.;
+ dltub = midpt;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+ + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (
+ dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (c__ < 0.) {
+ c__ = abs(c__);
+ }
+ if (c__ == 0.) {
+/*
+ ETA = B/A
+ ETA = RHO - TAU
+*/
+ eta = dltub - tau;
+ } else if (a >= 0.) {
+ eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+ * 2.);
+ } else {
+ eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+ );
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L50: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+ + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] *
+ (dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (a >= 0.) {
+ eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L70: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ *dlam = d__[*i__] + tau;
+ goto L250;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess */
+
+ del = d__[ip1] - d__[*i__];
+ midpt = del / 2.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L100: */
+ }
+
+ psi = 0.;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L110: */
+ }
+
+ phi = 0.;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / delta[j];
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] /
+ delta[ip1];
+
+ if (w > 0.) {
+
+/*
+ d(i)< the ith eigenvalue < (d(i)+d(i+1))/2
+
+ We choose d(i) as origin.
+*/
+
+ orgati = TRUE_;
+ a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * del;
+ if (a > 0.) {
+ tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ } else {
+ tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ }
+ dltlb = 0.;
+ dltub = midpt;
+ } else {
+
+/*
+ (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1)
+
+ We choose d(i+1) as origin.
+*/
+
+ orgati = FALSE_;
+ a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * del;
+ if (a < 0.) {
+ tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
+ d__1))));
+ } else {
+ tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
+ (c__ * 2.);
+ }
+ dltlb = -midpt;
+ dltub = 0.;
+ }
+
+ if (orgati) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[ip1] - tau;
+/* L140: */
+ }
+ }
+ if (orgati) {
+ ii = *i__;
+ } else {
+ ii = *i__ + 1;
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/*
+ W is the value of the secular function with
+ its ii-th element removed.
+*/
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.) {
+ swtch3 = TRUE_;
+ }
+ }
+ if (ii == 1 || ii == *n) {
+ swtch3 = FALSE_;
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+ abs(tau) * dw;
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ if (orgati) {
+/* Computing 2nd power */
+ d__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 *
+ d__1);
+ } else {
+/* Computing 2nd power */
+ d__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 *
+ d__1);
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] *
+ dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.) {
+ if (a == 0.) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] *
+ (dpsi + dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[
+ iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[
+ iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+
+ prew = w;
+
+/* L170: */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L180: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L190: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L200: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + (
+ d__1 = tau + eta, abs(d__1)) * dw;
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > abs(prew) / 10.) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > abs(prew) / 10.) {
+ swtch = TRUE_;
+ }
+ }
+
+ tau += eta;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ d__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (
+ d__1 * d__1);
+ } else {
+/* Computing 2nd power */
+ d__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) *
+ (d__1 * d__1);
+ }
+ } else {
+ temp = z__[ii] / delta[ii];
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi;
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1]
+ * dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.) {
+ if (a == 0.) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] *
+ delta[ip1] * (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[
+ *i__] * (dpsi + dphi);
+ }
+ } else {
+ a = delta[*i__] * delta[*i__] * dpsi + delta[ip1]
+ * delta[ip1] * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
+ / (c__ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
+ abs(d__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi;
+ zz[0] = delta[iim1] * delta[iim1] * dpsi;
+ zz[2] = delta[iip1] * delta[iip1] * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1]
+ - d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 +
+ dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1]
+ - d__[iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi -
+ temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta,
+ info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L210: */
+ }
+
+ tau += eta;
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L220: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L230: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
+ + abs(tau) * dw;
+ if ((w * prew > 0. && abs(w) > abs(prew) / 10.)) {
+ swtch = ! swtch;
+ }
+
+/* L240: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+
+ }
+
+L250:
+
+ return 0;
+
+/* End of DLAED4 */
+
+} /* dlaed4_ */
+
+/* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__,
+	doublereal *delta, doublereal *rho, doublereal *dlam)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): f2c emits these locals as "static", so this routine is
+       not reentrant / thread-safe; values do not need to survive calls. */
+    static doublereal b, c__, w, del, tau, temp;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    This subroutine computes the I-th eigenvalue of a symmetric rank-one
+    modification of a 2-by-2 diagonal matrix
+
+               diag( D )  +  RHO *  Z * transpose(Z) .
+
+    The diagonal elements in the array D are assumed to satisfy
+
+               D(i) < D(j)  for  i < j .
+
+    We also assume RHO > 0 and that the Euclidean norm of the vector
+    Z is one.
+
+    Arguments
+    =========
+
+    I      (input) INTEGER
+           The index of the eigenvalue to be computed.  I = 1 or I = 2.
+
+    D      (input) DOUBLE PRECISION array, dimension (2)
+           The original eigenvalues.  We assume D(1) < D(2).
+
+    Z      (input) DOUBLE PRECISION array, dimension (2)
+           The components of the updating vector.
+
+    DELTA  (output) DOUBLE PRECISION array, dimension (2)
+           The vector DELTA contains the information necessary
+           to construct the eigenvectors.
+
+    RHO    (input) DOUBLE PRECISION
+           The scalar in the symmetric updating formula.
+
+    DLAM   (output) DOUBLE PRECISION
+           The computed lambda_I, the I-th updated eigenvalue.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ren-Cang Li, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Shift the three array pointers so 1-based Fortran indexing works. */
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+    /* del > 0 by assumption D(1) < D(2) (see Arguments above). */
+    del = d__[2] - d__[1];
+    if (*i__ == 1) {
+	/* w is the secular function evaluated at the midpoint; its sign
+	   tells which half-interval contains the first eigenvalue. */
+	w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.;
+	if (w > 0.) {
+	    b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[1] * z__[1] * del;
+
+/*           B > ZERO, always */
+
+	    /* Root of the quadratic written in the form that avoids
+	       cancellation when B > 0. */
+	    tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
+	    *dlam = d__[1] + tau;
+	    delta[1] = -z__[1] / tau;
+	    delta[2] = z__[2] / (del - tau);
+	} else {
+	    b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[2] * z__[2] * del;
+	    /* Pick whichever equivalent root formula is free of
+	       cancellation for the sign of B. */
+	    if (b > 0.) {
+		tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
+	    } else {
+		tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
+	    }
+	    *dlam = d__[2] + tau;
+	    delta[1] = -z__[1] / (del + tau);
+	    delta[2] = -z__[2] / tau;
+	}
+	/* Normalize DELTA to unit Euclidean length. */
+	temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+	delta[1] /= temp;
+	delta[2] /= temp;
+    } else {
+
+/*        Now I=2 */
+
+	b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	c__ = *rho * z__[2] * z__[2] * del;
+	/* Again choose the cancellation-free form of the root. */
+	if (b > 0.) {
+	    tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
+	} else {
+	    tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
+	}
+	*dlam = d__[2] + tau;
+	delta[1] = -z__[1] / (del + tau);
+	delta[2] = -z__[2] / tau;
+	/* Normalize DELTA to unit Euclidean length. */
+	temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+	delta[1] /= temp;
+	delta[2] /= temp;
+    }
+    return 0;
+
+/*     End OF DLAED5 */
+
+} /* dlaed5_ */
+
+/* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal *
+	rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal *
+	tau, integer *info)
+{
+    /* Initialized data */
+
+    /* FIRST guards one-time computation of machine constants below;
+       together with the static locals this makes the routine
+       non-reentrant (standard f2c translation of Fortran SAVE). */
+    static logical first = TRUE_;
+
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *);
+
+    /* Local variables */
+    static doublereal a, b, c__, f;
+    static integer i__;
+    static doublereal fc, df, ddf, eta, eps, base;
+    static integer iter;
+    static doublereal temp, temp1, temp2, temp3, temp4;
+    static logical scale;
+    static integer niter;
+    static doublereal small1, small2, sminv1, sminv2;
+
+    static doublereal dscale[3], sclfac, zscale[3], erretm, sclinv;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLAED6 computes the positive or negative root (closest to the origin)
+    of
+                     z(1)        z(2)        z(3)
+    f(x) =   rho + --------- + ---------- + ---------
+                    d(1)-x      d(2)-x      d(3)-x
+
+    It is assumed that
+
+          if ORGATI = .true. the root is between d(2) and d(3);
+          otherwise it is between d(1) and d(2)
+
+    This routine will be called by DLAED4 when necessary. In most cases,
+    the root sought is the smallest in magnitude, though it might not be
+    in some extremely rare situations.
+
+    Arguments
+    =========
+
+    KNITER       (input) INTEGER
+                 Refer to DLAED4 for its significance.
+
+    ORGATI       (input) LOGICAL
+                 If ORGATI is true, the needed root is between d(2) and
+                 d(3); otherwise it is between d(1) and d(2).  See
+                 DLAED4 for further details.
+
+    RHO          (input) DOUBLE PRECISION
+                 Refer to the equation f(x) above.
+
+    D            (input) DOUBLE PRECISION array, dimension (3)
+                 D satisfies d(1) < d(2) < d(3).
+
+    Z            (input) DOUBLE PRECISION array, dimension (3)
+                 Each of the elements in z must be positive.
+
+    FINIT        (input) DOUBLE PRECISION
+                 The value of f at 0. It is more accurate than the one
+                 evaluated inside this routine (if someone wants to do
+                 so).
+
+    TAU          (output) DOUBLE PRECISION
+                 The root of the equation f(x).
+
+    INFO         (output) INTEGER
+                 = 0: successful exit
+                 > 0: if INFO = 1, failure to converge
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ren-Cang Li, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+*/
+
+    /* Parameter adjustments */
+    /* Shift pointers for 1-based Fortran-style indexing. */
+    --z__;
+    --d__;
+
+    /* Function Body */
+
+    *info = 0;
+
+    niter = 1;
+    *tau = 0.;
+    if (*kniter == 2) {
+	/* Second DLAED4 iteration: form an initial guess for TAU from a
+	   quadratic model built from the three poles. */
+	if (*orgati) {
+	    temp = (d__[3] - d__[2]) / 2.;
+	    c__ = *rho + z__[1] / (d__[1] - d__[2] - temp);
+	    a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3];
+	    b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2];
+	} else {
+	    temp = (d__[1] - d__[2]) / 2.;
+	    c__ = *rho + z__[3] / (d__[3] - d__[2] - temp);
+	    a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2];
+	    b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1];
+	}
+/*        Computing MAX */
+	/* Scale the quadratic coefficients by their largest magnitude
+	   before solving, to reduce overflow risk. */
+	d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
+	temp = max(d__1,d__2);
+	a /= temp;
+	b /= temp;
+	c__ /= temp;
+	/* Solve the (scaled) quadratic with the cancellation-free form
+	   selected by the sign of A. */
+	if (c__ == 0.) {
+	    *tau = b / a;
+	} else if (a <= 0.) {
+	    *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+		    c__ * 2.);
+	} else {
+	    *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))
+		    ));
+	}
+	/* Keep the guess only if it actually decreases |f|; otherwise
+	   restart from zero. */
+	temp = *rho + z__[1] / (d__[1] - *tau) + z__[2] / (d__[2] - *tau) +
+		z__[3] / (d__[3] - *tau);
+	if (abs(*finit) <= abs(temp)) {
+	    *tau = 0.;
+	}
+    }
+
+/*
+       On first call to routine, get machine parameters for
+       possible scaling to avoid overflow
+*/
+
+    if (first) {
+	eps = EPSILON;
+	base = BASE;
+	i__1 = (integer) (log(SAFEMINIMUM) / log(base) / 3.);
+	/* small1 ~ SAFMIN**(1/3), small2 ~ SAFMIN**(2/3); sminv* are
+	   their reciprocals, used as scale factors below. */
+	small1 = pow_di(&base, &i__1);
+	sminv1 = 1. / small1;
+	small2 = small1 * small1;
+	sminv2 = sminv1 * sminv1;
+	first = FALSE_;
+    }
+
+/*
+       Determine if scaling of inputs necessary to avoid overflow
+       when computing 1/TEMP**3
+*/
+
+    if (*orgati) {
+/*        Computing MIN */
+	d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - *
+		tau, abs(d__2));
+	temp = min(d__3,d__4);
+    } else {
+/*        Computing MIN */
+	d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - *
+		tau, abs(d__2));
+	temp = min(d__3,d__4);
+    }
+    scale = FALSE_;
+    if (temp <= small1) {
+	scale = TRUE_;
+	if (temp <= small2) {
+
+/*           Scale up by power of radix nearest 1/SAFMIN**(2/3) */
+
+	    sclfac = sminv2;
+	    sclinv = small2;
+	} else {
+
+/*           Scale up by power of radix nearest 1/SAFMIN**(1/3) */
+
+	    sclfac = sminv1;
+	    sclinv = small1;
+	}
+
+/*        Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */
+
+	for (i__ = 1; i__ <= 3; ++i__) {
+	    dscale[i__ - 1] = d__[i__] * sclfac;
+	    zscale[i__ - 1] = z__[i__] * sclfac;
+/* L10: */
+	}
+	*tau *= sclfac;
+    } else {
+
+/*        Copy D and Z to DSCALE and ZSCALE */
+
+	for (i__ = 1; i__ <= 3; ++i__) {
+	    dscale[i__ - 1] = d__[i__];
+	    zscale[i__ - 1] = z__[i__];
+/* L20: */
+	}
+    }
+
+    /* Evaluate f (via fc, so that f = FINIT + TAU*fc), its derivative df
+       and second derivative ddf at the current TAU. */
+    fc = 0.;
+    df = 0.;
+    ddf = 0.;
+    for (i__ = 1; i__ <= 3; ++i__) {
+	temp = 1. / (dscale[i__ - 1] - *tau);
+	temp1 = zscale[i__ - 1] * temp;
+	temp2 = temp1 * temp;
+	temp3 = temp2 * temp;
+	fc += temp1 / dscale[i__ - 1];
+	df += temp2;
+	ddf += temp3;
+/* L30: */
+    }
+    f = *finit + *tau * fc;
+
+    if (abs(f) <= 0.) {
+	goto L60;
+    }
+
+/*
+       Iteration begins
+
+       It is not hard to see that
+
+           1) Iterations will go up monotonically
+              if FINIT < 0;
+
+           2) Iterations will go down monotonically
+              if FINIT > 0.
+*/
+
+    iter = niter + 1;
+
+    /* Protected iteration: at most 20 steps of a quadratic-model update
+       with a plain Newton fallback and interval safeguards. */
+    for (niter = iter; niter <= 20; ++niter) {
+
+	if (*orgati) {
+	    temp1 = dscale[1] - *tau;
+	    temp2 = dscale[2] - *tau;
+	} else {
+	    temp1 = dscale[0] - *tau;
+	    temp2 = dscale[1] - *tau;
+	}
+	a = (temp1 + temp2) * f - temp1 * temp2 * df;
+	b = temp1 * temp2 * f;
+	c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf;
+/*        Computing MAX */
+	d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
+	temp = max(d__1,d__2);
+	a /= temp;
+	b /= temp;
+	c__ /= temp;
+	if (c__ == 0.) {
+	    eta = b / a;
+	} else if (a <= 0.) {
+	    eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+		    * 2.);
+	} else {
+	    eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+		    );
+	}
+	/* If the quadratic step has the wrong sign (f*eta >= 0), fall
+	   back to one Newton step, which is guaranteed descent here. */
+	if (f * eta >= 0.) {
+	    eta = -f / df;
+	}
+
+	/* Safeguard: keep TAU + ETA strictly inside the bracketing
+	   interval by bisecting toward the violated endpoint. */
+	temp = eta + *tau;
+	if (*orgati) {
+	    if ((eta > 0. && temp >= dscale[2])) {
+		eta = (dscale[2] - *tau) / 2.;
+	    }
+	    if ((eta < 0. && temp <= dscale[1])) {
+		eta = (dscale[1] - *tau) / 2.;
+	    }
+	} else {
+	    if ((eta > 0. && temp >= dscale[1])) {
+		eta = (dscale[1] - *tau) / 2.;
+	    }
+	    if ((eta < 0. && temp <= dscale[0])) {
+		eta = (dscale[0] - *tau) / 2.;
+	    }
+	}
+	*tau += eta;
+
+	/* Re-evaluate f, df, ddf at the new TAU, accumulating ERRETM as
+	   a running bound on the rounding error in f. */
+	fc = 0.;
+	erretm = 0.;
+	df = 0.;
+	ddf = 0.;
+	for (i__ = 1; i__ <= 3; ++i__) {
+	    temp = 1. / (dscale[i__ - 1] - *tau);
+	    temp1 = zscale[i__ - 1] * temp;
+	    temp2 = temp1 * temp;
+	    temp3 = temp2 * temp;
+	    temp4 = temp1 / dscale[i__ - 1];
+	    fc += temp4;
+	    erretm += abs(temp4);
+	    df += temp2;
+	    ddf += temp3;
+/* L40: */
+	}
+	f = *finit + *tau * fc;
+	erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df;
+	/* Converged when |f| is below the error estimate. */
+	if (abs(f) <= eps * erretm) {
+	    goto L60;
+	}
+/* L50: */
+    }
+    /* Loop fell through: no convergence in 20 iterations. */
+    *info = 1;
+L60:
+
+/*     Undo scaling */
+
+    if (scale) {
+	*tau *= sclinv;
+    }
+    return 0;
+
+/*     End of DLAED6 */
+
+} /* dlaed6_ */
+
+/* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz,
+	integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__,
+	doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer
+	*cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer *
+	perm, integer *givptr, integer *givcol, doublereal *givnum,
+	doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, i__1, i__2;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    /* k receives the number of non-deflated eigenvalues from DLAED8. */
+    static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    static integer indxc, indxp;
+    extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *, integer *,
+	    doublereal *, integer *, integer *, integer *), dlaed9_(integer *,
+	     integer *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	     integer *, integer *), dlaeda_(integer *, integer *, integer *,
+	    integer *, integer *, integer *, integer *, integer *, doublereal
+	    *, doublereal *, integer *, doublereal *, doublereal *, integer *)
+	    ;
+    static integer idlmda;
+    extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+	    integer *, integer *, integer *), xerbla_(char *, integer *);
+    static integer coltyp;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DLAED7 computes the updated eigensystem of a diagonal
+    matrix after modification by a rank-one symmetric matrix. This
+    routine is used only for the eigenproblem which requires all
+    eigenvalues and optionally eigenvectors of a dense symmetric matrix
+    that has been reduced to tridiagonal form.  DLAED1 handles
+    the case in which all eigenvalues and eigenvectors of a symmetric
+    tridiagonal matrix are desired.
+
+      T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
+
+       where Z = Q'u, u is a vector of length N with ones in the
+       CUTPNT and CUTPNT + 1 th elements and zeros elsewhere.
+
+       The eigenvectors of the original matrix are stored in Q, and the
+       eigenvalues are in D.  The algorithm consists of three stages:
+
+          The first stage consists of deflating the size of the problem
+          when there are multiple eigenvalues or if there is a zero in
+          the Z vector.  For each such occurence the dimension of the
+          secular equation problem is reduced by one.  This stage is
+          performed by the routine DLAED8.
+
+          The second stage consists of calculating the updated
+          eigenvalues. This is done by finding the roots of the secular
+          equation via the routine DLAED4 (as called by DLAED9).
+          This routine also calculates the eigenvectors of the current
+          problem.
+
+          The final stage consists of computing the updated eigenvectors
+          directly using the updated eigenvalues.  The eigenvectors for
+          the current problem are multiplied with the eigenvectors from
+          the overall problem.
+
+    Arguments
+    =========
+
+    ICOMPQ  (input) INTEGER
+            = 0:  Compute eigenvalues only.
+            = 1:  Compute eigenvectors of original dense symmetric matrix
+                  also.  On entry, Q contains the orthogonal matrix used
+                  to reduce the original matrix to tridiagonal form.
+
+    N      (input) INTEGER
+           The dimension of the symmetric tridiagonal matrix.  N >= 0.
+
+    QSIZ   (input) INTEGER
+           The dimension of the orthogonal matrix used to reduce
+           the full matrix to tridiagonal form.  QSIZ >= N if ICOMPQ = 1.
+
+    TLVLS  (input) INTEGER
+           The total number of merging levels in the overall divide and
+           conquer tree.
+
+    CURLVL (input) INTEGER
+           The current level in the overall merge routine,
+           0 <= CURLVL <= TLVLS.
+
+    CURPBM (input) INTEGER
+           The current problem in the current level in the overall
+           merge routine (counting from upper left to lower right).
+
+    D      (input/output) DOUBLE PRECISION array, dimension (N)
+           On entry, the eigenvalues of the rank-1-perturbed matrix.
+           On exit, the eigenvalues of the repaired matrix.
+
+    Q      (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
+           On entry, the eigenvectors of the rank-1-perturbed matrix.
+           On exit, the eigenvectors of the repaired tridiagonal matrix.
+
+    LDQ    (input) INTEGER
+           The leading dimension of the array Q.  LDQ >= max(1,N).
+
+    INDXQ  (output) INTEGER array, dimension (N)
+           The permutation which will reintegrate the subproblem just
+           solved back into sorted order, i.e., D( INDXQ( I = 1, N ) )
+           will be in ascending order.
+
+    RHO    (input) DOUBLE PRECISION
+           The subdiagonal element used to create the rank-1
+           modification.
+
+    CUTPNT (input) INTEGER
+           Contains the location of the last eigenvalue in the leading
+           sub-matrix.  min(1,N) <= CUTPNT <= N.
+
+    QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1)
+           Stores eigenvectors of submatrices encountered during
+           divide and conquer, packed together. QPTR points to
+           beginning of the submatrices.
+
+    QPTR   (input/output) INTEGER array, dimension (N+2)
+           List of indices pointing to beginning of submatrices stored
+           in QSTORE. The submatrices are numbered starting at the
+           bottom left of the divide and conquer tree, from left to
+           right and bottom to top.
+
+    PRMPTR (input) INTEGER array, dimension (N lg N)
+           Contains a list of pointers which indicate where in PERM a
+           level's permutation is stored.  PRMPTR(i+1) - PRMPTR(i)
+           indicates the size of the permutation and also the size of
+           the full, non-deflated problem.
+
+    PERM   (input) INTEGER array, dimension (N lg N)
+           Contains the permutations (from deflation and sorting) to be
+           applied to each eigenblock.
+
+    GIVPTR (input) INTEGER array, dimension (N lg N)
+           Contains a list of pointers which indicate where in GIVCOL a
+           level's Givens rotations are stored.  GIVPTR(i+1) - GIVPTR(i)
+           indicates the number of Givens rotations.
+
+    GIVCOL (input) INTEGER array, dimension (2, N lg N)
+           Each pair of numbers indicates a pair of columns to take place
+           in a Givens rotation.
+
+    GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
+           Each number indicates the S value to be used in the
+           corresponding Givens rotation.
+
+    WORK   (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N)
+
+    IWORK  (workspace) INTEGER array, dimension (4*N)
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  if INFO = 1, an eigenvalue did not converge
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift pointers so all arrays use 1-based Fortran indexing; the
+       2-row arrays GIVCOL/GIVNUM are shifted by 3 = 2*1 + 1 for (2,*)
+       column-major layout. */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1 * 1;
+    q -= q_offset;
+    --indxq;
+    --qstore;
+    --qptr;
+    --prmptr;
+    --perm;
+    --givptr;
+    givcol -= 3;
+    givnum -= 3;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if ((*icompq == 1 && *qsiz < *n)) {
+	*info = -4;
+    } else if (*ldq < max(1,*n)) {
+	*info = -9;
+    } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAED7", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*
+       The following values are for bookkeeping purposes only.  They are
+       integer pointers which indicate the portion of the workspace
+       used by a particular array in DLAED8 and DLAED9.
+*/
+
+    if (*icompq == 1) {
+	ldq2 = *qsiz;
+    } else {
+	ldq2 = *n;
+    }
+
+    /* Partition WORK: [Z | DLAMDA | W | Q2 | S]. */
+    iz = 1;
+    idlmda = iz + *n;
+    iw = idlmda + *n;
+    iq2 = iw + *n;
+    is = iq2 + *n * ldq2;
+
+    /* Partition IWORK: [INDX | INDXC | COLTYP | INDXP]. */
+    indx = 1;
+    indxc = indx + *n;
+    coltyp = indxc + *n;
+    indxp = coltyp + *n;
+
+/*
+       Form the z-vector which consists of the last row of Q_1 and the
+       first row of Q_2.
+*/
+
+    /* Locate this subproblem's slot in the tree bookkeeping arrays:
+       ptr walks past all problems on the levels below CURLVL. */
+    ptr = pow_ii(&c__2, tlvls) + 1;
+    i__1 = *curlvl - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = *tlvls - i__;
+	ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+    }
+    curr = ptr + *curpbm;
+    dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+	    givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz
+	    + *n], info);
+
+/*
+       When solving the final problem, we no longer need the stored data,
+       so we will overwrite the data from this level onto the previously
+       used storage space.
+*/
+
+    if (*curlvl == *tlvls) {
+	qptr[curr] = 1;
+	prmptr[curr] = 1;
+	givptr[curr] = 1;
+    }
+
+/*     Sort and Deflate eigenvalues. */
+
+    /* DLAED8 outputs k (order of the secular equation); the << (1)
+       shifts are *2, indexing into the (2,*) GIVCOL/GIVNUM arrays. */
+    dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho,
+	    cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], &
+	    perm[prmptr[curr]], &givptr[curr + 1], &givcol[((givptr[curr]) <<
+	    (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1], &iwork[indxp], &
+	    iwork[indx], info);
+    prmptr[curr + 1] = prmptr[curr] + *n;
+    givptr[curr + 1] += givptr[curr];
+
+/*     Solve Secular Equation. */
+
+    if (k != 0) {
+	dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda],
+		&work[iw], &qstore[qptr[curr]], &k, info);
+	if (*info != 0) {
+	    goto L30;
+	}
+	if (*icompq == 1) {
+	    /* Back-multiply: Q := Q2 * (secular-equation eigenvectors). */
+	    dgemm_("N", "N", qsiz, &k, &k, &c_b15, &work[iq2], &ldq2, &qstore[
+		    qptr[curr]], &k, &c_b29, &q[q_offset], ldq);
+	}
+/*        Computing 2nd power */
+	i__1 = k;
+	qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+
+/*        Prepare the INDXQ sorting permutation. */
+
+	n1 = k;
+	n2 = *n - k;
+	dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+    } else {
+	/* Everything deflated: eigenvalues already sorted, identity
+	   permutation. */
+	qptr[curr + 1] = qptr[curr];
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    indxq[i__] = i__;
+/* L20: */
+	}
+    }
+
+L30:
+    return 0;
+
+/*     End of DLAED7 */
+
+} /* dlaed7_ */
+
+/* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer
+	*qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq,
+	doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda,
+	doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer
+	*givptr, integer *givcol, doublereal *givnum, integer *indxp, integer
+	*indx, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal c__;
+    static integer i__, j;
+    static doublereal s, t;
+    static integer k2, n1, n2, jp, n1p1;
+    static doublereal eps, tau, tol;
+    static integer jlam, imax, jmax;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *), dscal_(
+	    integer *, doublereal *, doublereal *, integer *), dcopy_(integer
+	    *, doublereal *, integer *, doublereal *, integer *);
+
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+	    integer *, integer *, integer *), dlacpy_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DLAED8 merges the two sets of eigenvalues together into a single
+    sorted set.  Then it tries to deflate the size of the problem.
+    There are two ways in which deflation can occur:  when two or more
+    eigenvalues are close together or if there is a tiny element in the
+    Z vector.  For each such occurrence the order of the related secular
+    equation problem is reduced by one.
+
+    Arguments
+    =========
+
+    ICOMPQ  (input) INTEGER
+            = 0:  Compute eigenvalues only.
+            = 1:  Compute eigenvectors of original dense symmetric matrix
+                  also.  On entry, Q contains the orthogonal matrix used
+                  to reduce the original matrix to tridiagonal form.
+
+    K      (output) INTEGER
+           The number of non-deflated eigenvalues, and the order of the
+           related secular equation.
+
+    N      (input) INTEGER
+           The dimension of the symmetric tridiagonal matrix.  N >= 0.
+
+    QSIZ   (input) INTEGER
+           The dimension of the orthogonal matrix used to reduce
+           the full matrix to tridiagonal form.  QSIZ >= N if ICOMPQ = 1.
+
+    D      (input/output) DOUBLE PRECISION array, dimension (N)
+           On entry, the eigenvalues of the two submatrices to be
+           combined.  On exit, the trailing (N-K) updated eigenvalues
+           (those which were deflated) sorted into increasing order.
+
+    Q      (input/output) DOUBLE PRECISION array, dimension (LDQ,N)
+           If ICOMPQ = 0, Q is not referenced.  Otherwise,
+           on entry, Q contains the eigenvectors of the partially solved
+           system which has been previously updated in matrix
+           multiplies with other partially solved eigensystems.
+           On exit, Q contains the trailing (N-K) updated eigenvectors
+           (those which were deflated) in its last N-K columns.
+
+    LDQ    (input) INTEGER
+           The leading dimension of the array Q.  LDQ >= max(1,N).
+
+    INDXQ  (input) INTEGER array, dimension (N)
+           The permutation which separately sorts the two sub-problems
+           in D into ascending order.  Note that elements in the second
+           half of this permutation must first have CUTPNT added to
+           their values in order to be accurate.
+
+    RHO    (input/output) DOUBLE PRECISION
+           On entry, the off-diagonal element associated with the rank-1
+           cut which originally split the two submatrices which are now
+           being recombined.
+           On exit, RHO has been modified to the value required by
+           DLAED3.
+
+    CUTPNT (input) INTEGER
+           The location of the last eigenvalue in the leading
+           sub-matrix.  min(1,N) <= CUTPNT <= N.
+
+    Z      (input) DOUBLE PRECISION array, dimension (N)
+           On entry, Z contains the updating vector (the last row of
+           the first sub-eigenvector matrix and the first row of the
+           second sub-eigenvector matrix).
+           On exit, the contents of Z are destroyed by the updating
+           process.
+
+    DLAMDA (output) DOUBLE PRECISION array, dimension (N)
+           A copy of the first K eigenvalues which will be used by
+           DLAED3 to form the secular equation.
+
+    Q2     (output) DOUBLE PRECISION array, dimension (LDQ2,N)
+           If ICOMPQ = 0, Q2 is not referenced.  Otherwise,
+           a copy of the first K eigenvectors which will be used by
+           DLAED7 in a matrix multiply (DGEMM) to update the new
+           eigenvectors.
+
+    LDQ2   (input) INTEGER
+           The leading dimension of the array Q2.  LDQ2 >= max(1,N).
+
+    W      (output) DOUBLE PRECISION array, dimension (N)
+           The first k values of the final deflation-altered z-vector and
+           will be passed to DLAED3.
+
+    PERM   (output) INTEGER array, dimension (N)
+           The permutations (from deflation and sorting) to be applied
+           to each eigenblock.
+
+    GIVPTR (output) INTEGER
+           The number of Givens rotations which took place in this
+           subproblem.
+
+    GIVCOL (output) INTEGER array, dimension (2, N)
+           Each pair of numbers indicates a pair of columns to take place
+           in a Givens rotation.
+
+    GIVNUM (output) DOUBLE PRECISION array, dimension (2, N)
+           Each number indicates the S value to be used in the
+           corresponding Givens rotation.
+
+    INDXP  (workspace) INTEGER array, dimension (N)
+           The permutation used to place deflated values of D at the end
+           of the array.  INDXP(1:K) points to the nondeflated D-values
+           and INDXP(K+1:N) points to the deflated eigenvalues.
+
+    INDX   (workspace) INTEGER array, dimension (N)
+           The permutation used to sort the contents of D into ascending
+           order.
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /*
+       Parameter adjustments: f2c shifts every array pointer so that
+       1-based Fortran subscripts work unchanged in C.  GIVCOL/GIVNUM
+       are 2-by-N, hence the offset of 3 (= 2*1 + 1 for element (1,1)).
+    */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1 * 1;
+    q -= q_offset;
+    --indxq;
+    --z__;
+    --dlamda;
+    q2_dim1 = *ldq2;
+    q2_offset = 1 + q2_dim1 * 1;
+    q2 -= q2_offset;
+    --w;
+    --perm;
+    givcol -= 3;
+    givnum -= 3;
+    --indxp;
+    --indx;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if ((*icompq == 1 && *qsiz < *n)) {
+	*info = -4;
+    } else if (*ldq < max(1,*n)) {
+	*info = -7;
+    } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+	*info = -10;
+    } else if (*ldq2 < max(1,*n)) {
+	*info = -14;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAED8", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    n1 = *cutpnt;
+    n2 = *n - n1;
+    n1p1 = n1 + 1;
+
+    /*
+       If RHO < 0 the second block of the updating vector carries the
+       opposite sign convention; flip it (c_b151 is a file-scope f2c
+       constant, presumably -1.0 -- defined elsewhere in this file,
+       verify there).
+    */
+    if (*rho < 0.) {
+	dscal_(&n2, &c_b151, &z__[n1p1], &c__1);
+    }
+
+/* Normalize z so that norm(z) = 1 */
+
+    t = 1. / sqrt(2.);
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	indx[j] = j;
+/* L10: */
+    }
+    dscal_(n, &t, &z__[1], &c__1);
+    *rho = (d__1 = *rho * 2., abs(d__1));
+
+/* Sort the eigenvalues into increasing order */
+
+    i__1 = *n;
+    for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+	indxq[i__] += *cutpnt;
+/* L20: */
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = d__[indxq[i__]];
+	w[i__] = z__[indxq[i__]];
+/* L30: */
+    }
+    i__ = 1;
+    j = *cutpnt + 1;
+    /* DLAMRG merges the two already-sorted halves into one permutation. */
+    dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__[i__] = dlamda[indx[i__]];
+	z__[i__] = w[indx[i__]];
+/* L40: */
+    }
+
+/* Calculate the allowable deflation tolerence */
+
+    imax = idamax_(n, &z__[1], &c__1);
+    jmax = idamax_(n, &d__[1], &c__1);
+    eps = EPSILON;
+    tol = eps * 8. * (d__1 = d__[jmax], abs(d__1));
+
+/*
+    If the rank-1 modifier is small enough, no more needs to be done
+    except to reorganize Q so that its columns correspond with the
+    elements in D.
+*/
+
+    if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+	*k = 0;
+	if (*icompq == 0) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		perm[j] = indxq[indx[j]];
+/* L50: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		perm[j] = indxq[indx[j]];
+		dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1
+			+ 1], &c__1);
+/* L60: */
+	    }
+	    dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+	}
+	return 0;
+    }
+
+/*
+    If there are multiple eigenvalues then the problem deflates.  Here
+    the number of equal eigenvalues are found.  As each equal
+    eigenvalue is found, an elementary reflector is computed to rotate
+    the corresponding eigensubspace so that the corresponding
+    components of Z are zero in this new basis.
+*/
+
+    /* K counts kept (non-deflated) values from the front of INDXP;
+       K2 fills deflated values from the back. */
+    *k = 0;
+    *givptr = 0;
+    k2 = *n + 1;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	    --k2;
+	    indxp[k2] = j;
+	    if (j == *n) {
+		goto L110;
+	    }
+	} else {
+	    jlam = j;
+	    goto L80;
+	}
+/* L70: */
+    }
+L80:
+    ++j;
+    if (j > *n) {
+	goto L100;
+    }
+    if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	--k2;
+	indxp[k2] = j;
+    } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+	s = z__[jlam];
+	c__ = z__[j];
+
+/*
+          Find sqrt(a**2+b**2) without overflow or
+          destructive underflow.
+*/
+
+	tau = dlapy2_(&c__, &s);
+	t = d__[j] - d__[jlam];
+	c__ /= tau;
+	s = -s / tau;
+	if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+	    z__[j] = tau;
+	    z__[jlam] = 0.;
+
+/* Record the appropriate Givens rotation */
+
+	    /* (1<<givptr) == 2*givptr: column addressing into the
+	       2-by-N GIVCOL/GIVNUM arrays (offset by 3 above). */
+	    ++(*givptr);
+	    givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]];
+	    givcol[((*givptr) << (1)) + 2] = indxq[indx[j]];
+	    givnum[((*givptr) << (1)) + 1] = c__;
+	    givnum[((*givptr) << (1)) + 2] = s;
+	    if (*icompq == 1) {
+		drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[
+			indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+	    }
+	    t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+	    d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+	    d__[jlam] = t;
+	    --k2;
+	    i__ = 1;
+	    /* Insertion step: slide d__[jlam] into its sorted position
+	       within the deflated tail INDXP(K2:N). */
+L90:
+	    if (k2 + i__ <= *n) {
+		if (d__[jlam] < d__[indxp[k2 + i__]]) {
+		    indxp[k2 + i__ - 1] = indxp[k2 + i__];
+		    indxp[k2 + i__] = jlam;
+		    ++i__;
+		    goto L90;
+		} else {
+		    indxp[k2 + i__ - 1] = jlam;
+		}
+	    } else {
+		indxp[k2 + i__ - 1] = jlam;
+	    }
+	    jlam = j;
+	} else {
+	    ++(*k);
+	    w[*k] = z__[jlam];
+	    dlamda[*k] = d__[jlam];
+	    indxp[*k] = jlam;
+	    jlam = j;
+	}
+    }
+    goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+    ++(*k);
+    w[*k] = z__[jlam];
+    dlamda[*k] = d__[jlam];
+    indxp[*k] = jlam;
+
+L110:
+
+/*
+    Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+    and Q2 respectively.  The eigenvalues/vectors which were not
+    deflated go into the first K slots of DLAMDA and Q2 respectively,
+    while those which were deflated go into the last N - K slots.
+*/
+
+    if (*icompq == 0) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jp = indxp[j];
+	    dlamda[j] = d__[jp];
+	    perm[j] = indxq[indx[jp]];
+/* L120: */
+	}
+    } else {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jp = indxp[j];
+	    dlamda[j] = d__[jp];
+	    perm[j] = indxq[indx[jp]];
+	    dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+		    , &c__1);
+/* L130: */
+	}
+    }
+
+/*
+    The deflated eigenvalues and their corresponding vectors go back
+    into the last N - K slots of D and Q respectively.
+*/
+
+    if (*k < *n) {
+	if (*icompq == 0) {
+	    i__1 = *n - *k;
+	    dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+	} else {
+	    i__1 = *n - *k;
+	    dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+	    i__1 = *n - *k;
+	    dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*
+		    k + 1) * q_dim1 + 1], ldq);
+	}
+    }
+
+    return 0;
+
+/* End of DLAED8 */
+
+} /* dlaed8_ */
+
+/* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop,
+	integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal *
+	rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds,
+	integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    static integer i__, j;
+    static doublereal temp;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlaed4_(integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *);
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DLAED9 finds the roots of the secular equation, as defined by the
+    values in D, Z, and RHO, between KSTART and KSTOP.  It makes the
+    appropriate calls to DLAED4 and then stores the new matrix of
+    eigenvectors for use in calculating the next level of Z vectors.
+
+    Arguments
+    =========
+
+    K       (input) INTEGER
+            The number of terms in the rational function to be solved by
+            DLAED4.  K >= 0.
+
+    KSTART  (input) INTEGER
+    KSTOP   (input) INTEGER
+            The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP
+            are to be computed.  1 <= KSTART <= KSTOP <= K.
+
+    N       (input) INTEGER
+            The number of rows and columns in the Q matrix.
+            N >= K (delation may result in N > K).
+
+    D       (output) DOUBLE PRECISION array, dimension (N)
+            D(I) contains the updated eigenvalues
+            for KSTART <= I <= KSTOP.
+
+    Q       (workspace) DOUBLE PRECISION array, dimension (LDQ,N)
+
+    LDQ     (input) INTEGER
+            The leading dimension of the array Q.  LDQ >= max( 1, N ).
+
+    RHO     (input) DOUBLE PRECISION
+            The value of the parameter in the rank one update equation.
+            RHO >= 0 required.
+
+    DLAMDA  (input) DOUBLE PRECISION array, dimension (K)
+            The first K elements of this array contain the old roots
+            of the deflated updating problem.  These are the poles
+            of the secular equation.
+
+    W       (input) DOUBLE PRECISION array, dimension (K)
+            The first K elements of this array contain the components
+            of the deflation-adjusted updating vector.
+
+    S       (output) DOUBLE PRECISION array, dimension (LDS, K)
+            Will contain the eigenvectors of the repaired matrix which
+            will be stored for subsequent Z vector calculation and
+            multiplied by the previously accumulated eigenvectors
+            to update the system.
+
+    LDS     (input) INTEGER
+            The leading dimension of S.  LDS >= max( 1, K ).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  if INFO = 1, an eigenvalue did not converge
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments: shift pointers for 1-based indexing (f2c). */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1 * 1;
+    q -= q_offset;
+    --dlamda;
+    --w;
+    s_dim1 = *lds;
+    s_offset = 1 + s_dim1 * 1;
+    s -= s_offset;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*k < 0) {
+	*info = -1;
+    } else if (*kstart < 1 || *kstart > max(1,*k)) {
+	*info = -2;
+    } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) {
+	*info = -3;
+    } else if (*n < *k) {
+	*info = -4;
+    } else if (*ldq < max(1,*k)) {
+	*info = -7;
+    } else if (*lds < max(1,*k)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAED9", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*k == 0) {
+	return 0;
+    }
+
+/*
+    Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
+    be computed with high relative accuracy (barring over/underflow).
+    This is a problem on machines without a guard digit in
+    add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+    The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
+    which on any of these machines zeros out the bottommost
+    bit of DLAMDA(I) if it is 1; this makes the subsequent
+    subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
+    occurs.  On binary machines with a guard digit (almost all
+    machines) it does not change DLAMDA(I) at all.  On hexadecimal
+    and decimal machines with a guard digit, it slightly
+    changes the bottommost bits of DLAMDA(I).  It does not account
+    for hexadecimal or decimal machines without guard digits
+    (we know of none).  We use a subroutine call to compute
+    2*DLAMBDA(I) to prevent optimizing compilers from eliminating
+    this code.
+*/
+
+    /* NOTE: the dlamc3_ call must NOT be simplified away -- the
+       round-trip through a function call is the whole point. */
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+    }
+
+    /* Solve the secular equation for each requested eigenvalue;
+       DLAED4 also writes the j-th (unnormalized) vector data into
+       column j of Q. */
+    i__1 = *kstop;
+    for (j = *kstart; j <= i__1; ++j) {
+	dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+		info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+	if (*info != 0) {
+	    goto L120;
+	}
+/* L20: */
+    }
+
+    /* For K = 1 or 2 simply copy Q into S; no further adjustment of
+       the updating vector is performed. */
+    if (*k == 1 || *k == 2) {
+	i__1 = *k;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = *k;
+	    for (j = 1; j <= i__2; ++j) {
+		s[j + i__ * s_dim1] = q[j + i__ * q_dim1];
+/* L30: */
+	    }
+/* L40: */
+	}
+	goto L120;
+    }
+
+/* Compute updated W. */
+
+    /* Stash the original W in the first column of S; it is only used
+       below to recover the sign of each recomputed W(I). */
+    dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+    i__1 = *ldq + 1;
+    dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+	}
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L60: */
+	}
+/* L70: */
+    }
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__1 = sqrt(-w[i__]);
+	w[i__] = d_sign(&d__1, &s[i__ + s_dim1]);
+/* L80: */
+    }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *k;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1];
+/* L90: */
+	}
+	/* Normalize each column of Q into the output array S. */
+	temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1);
+	i__2 = *k;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp;
+/* L100: */
+	}
+/* L110: */
+    }
+
+L120:
+    return 0;
+
+/* End of DLAED9 */
+
+} /* dlaed9_ */
+
+/* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl,
+	integer *curpbm, integer *prmptr, integer *perm, integer *givptr,
+	integer *givcol, doublereal *givnum, doublereal *q, integer *qptr,
+	doublereal *z__, doublereal *ztemp, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__, k, mid, ptr;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    static integer curr, bsiz1, bsiz2, psiz1, psiz2, zptr1;
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), dcopy_(integer *,
+	    doublereal *, integer *, doublereal *, integer *), xerbla_(char *,
+	     integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DLAEDA computes the Z vector corresponding to the merge step in the
+    CURLVLth step of the merge process with TLVLS steps for the CURPBMth
+    problem.
+
+    Arguments
+    =========
+
+    N      (input) INTEGER
+           The dimension of the symmetric tridiagonal matrix.  N >= 0.
+
+    TLVLS  (input) INTEGER
+           The total number of merging levels in the overall divide and
+           conquer tree.
+
+    CURLVL (input) INTEGER
+           The current level in the overall merge routine,
+           0 <= curlvl <= tlvls.
+
+    CURPBM (input) INTEGER
+           The current problem in the current level in the overall
+           merge routine (counting from upper left to lower right).
+
+    PRMPTR (input) INTEGER array, dimension (N lg N)
+           Contains a list of pointers which indicate where in PERM a
+           level's permutation is stored.  PRMPTR(i+1) - PRMPTR(i)
+           indicates the size of the permutation and incidentally the
+           size of the full, non-deflated problem.
+
+    PERM   (input) INTEGER array, dimension (N lg N)
+           Contains the permutations (from deflation and sorting) to be
+           applied to each eigenblock.
+
+    GIVPTR (input) INTEGER array, dimension (N lg N)
+           Contains a list of pointers which indicate where in GIVCOL a
+           level's Givens rotations are stored.  GIVPTR(i+1) - GIVPTR(i)
+           indicates the number of Givens rotations.
+
+    GIVCOL (input) INTEGER array, dimension (2, N lg N)
+           Each pair of numbers indicates a pair of columns to take place
+           in a Givens rotation.
+
+    GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
+           Each number indicates the S value to be used in the
+           corresponding Givens rotation.
+
+    Q      (input) DOUBLE PRECISION array, dimension (N**2)
+           Contains the square eigenblocks from previous levels, the
+           starting positions for blocks are given by QPTR.
+
+    QPTR   (input) INTEGER array, dimension (N+2)
+           Contains a list of pointers which indicate where in Q an
+           eigenblock is stored.  SQRT( QPTR(i+1) - QPTR(i) ) indicates
+           the size of the block.
+
+    Z      (output) DOUBLE PRECISION array, dimension (N)
+           On output this vector contains the updating vector (the last
+           row of the first sub-eigenvector matrix and the first row of
+           the second sub-eigenvector matrix).
+
+    ZTEMP  (workspace) DOUBLE PRECISION array, dimension (N)
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments: f2c 1-based indexing shifts; GIVCOL and
+       GIVNUM are 2-by-* arrays, hence the offset of 3. */
+    --ztemp;
+    --z__;
+    --qptr;
+    --q;
+    givnum -= 3;
+    givcol -= 3;
+    --givptr;
+    --perm;
+    --prmptr;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -1;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAEDA", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine location of first number in second half. */
+
+    mid = *n / 2 + 1;
+
+/* Gather last/first rows of appropriate eigenblocks into center of Z */
+
+    ptr = 1;
+
+/*
+    Determine location of lowest level subproblem in the full storage
+    scheme
+*/
+
+    i__1 = *curlvl - 1;
+    curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1;
+
+/*
+    Determine size of these matrices.  We add HALF to the value of
+    the SQRT in case the machine underestimates one of these square
+    roots.
+*/
+
+    bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5);
+    bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) +
+	    .5);
+    /* Zero Z outside the two gathered rows, then copy the last row of
+       block CURR and the first row of block CURR+1 into the middle. */
+    i__1 = mid - bsiz1 - 1;
+    for (k = 1; k <= i__1; ++k) {
+	z__[k] = 0.;
+/* L10: */
+    }
+    dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], &
+	    c__1);
+    dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1);
+    i__1 = *n;
+    for (k = mid + bsiz2; k <= i__1; ++k) {
+	z__[k] = 0.;
+/* L20: */
+    }
+
+/*
+    Loop thru remaining levels 1 -> CURLVL applying the Givens
+    rotations and permutation and then multiplying the center matrices
+    against the current Z.
+*/
+
+    ptr = pow_ii(&c__2, tlvls) + 1;
+    i__1 = *curlvl - 1;
+    for (k = 1; k <= i__1; ++k) {
+	i__2 = *curlvl - k;
+	i__3 = *curlvl - k - 1;
+	curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) -
+		1;
+	psiz1 = prmptr[curr + 1] - prmptr[curr];
+	psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+	zptr1 = mid - psiz1;
+
+/* Apply Givens at CURR and CURR+1 */
+
+	/* ((i) << 1) == 2*i indexes column i of the 2-by-* arrays
+	   GIVCOL/GIVNUM (after the offset-3 adjustment above). */
+	i__2 = givptr[curr + 1] - 1;
+	for (i__ = givptr[curr]; i__ <= i__2; ++i__) {
+	    drot_(&c__1, &z__[zptr1 + givcol[((i__) << (1)) + 1] - 1], &c__1,
+		    &z__[zptr1 + givcol[((i__) << (1)) + 2] - 1], &c__1, &
+		    givnum[((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
+/* L30: */
+	}
+	i__2 = givptr[curr + 2] - 1;
+	for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) {
+	    drot_(&c__1, &z__[mid - 1 + givcol[((i__) << (1)) + 1]], &c__1, &
+		    z__[mid - 1 + givcol[((i__) << (1)) + 2]], &c__1, &givnum[
+		    ((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
+/* L40: */
+	}
+	psiz1 = prmptr[curr + 1] - prmptr[curr];
+	psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+	/* Permute Z into ZTEMP according to this level's PERM entries. */
+	i__2 = psiz1 - 1;
+	for (i__ = 0; i__ <= i__2; ++i__) {
+	    ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1];
+/* L50: */
+	}
+	i__2 = psiz2 - 1;
+	for (i__ = 0; i__ <= i__2; ++i__) {
+	    ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] -
+		    1];
+/* L60: */
+	}
+
+/*
+          Multiply Blocks at CURR and CURR+1
+
+          Determine size of these matrices.  We add HALF to the value of
+          the SQRT in case the machine underestimates one of these
+          square roots.
+*/
+
+	bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) +
+		.5);
+	bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])
+		) + .5);
+	/* c_b15/c_b29 are file-scope alpha/beta constants for DGEMV
+	   (presumably 1.0 and 0.0 -- defined elsewhere, verify). */
+	if (bsiz1 > 0) {
+	    dgemv_("T", &bsiz1, &bsiz1, &c_b15, &q[qptr[curr]], &bsiz1, &
+		    ztemp[1], &c__1, &c_b29, &z__[zptr1], &c__1);
+	}
+	i__2 = psiz1 - bsiz1;
+	dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1);
+	if (bsiz2 > 0) {
+	    dgemv_("T", &bsiz2, &bsiz2, &c_b15, &q[qptr[curr + 1]], &bsiz2, &
+		    ztemp[psiz1 + 1], &c__1, &c_b29, &z__[mid], &c__1);
+	}
+	i__2 = psiz2 - bsiz2;
+	dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], &
+		c__1);
+
+	i__2 = *tlvls - k;
+	ptr += pow_ii(&c__2, &i__2);
+/* L70: */
+    }
+
+    return 0;
+
+/* End of DLAEDA */
+
+} /* dlaeda_ */
+
+/* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__,
+	doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
+    static integer sgn1, sgn2;
+    static doublereal acmn, acmx;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix
+       [  A   B  ]
+       [  B   C  ].
+    On return, RT1 is the eigenvalue of larger absolute value, RT2 is the
+    eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right
+    eigenvector for RT1, giving the decomposition
+
+       [ CS1  SN1 ] [  A   B  ] [ CS1 -SN1 ]  =  [ RT1  0  ]
+       [-SN1  CS1 ] [  B   C  ] [ SN1  CS1 ]     [  0  RT2 ].
+
+    Arguments
+    =========
+
+    A       (input) DOUBLE PRECISION
+            The (1,1) element of the 2-by-2 matrix.
+
+    B       (input) DOUBLE PRECISION
+            The (1,2) element and the conjugate of the (2,1) element of
+            the 2-by-2 matrix.
+
+    C       (input) DOUBLE PRECISION
+            The (2,2) element of the 2-by-2 matrix.
+
+    RT1     (output) DOUBLE PRECISION
+            The eigenvalue of larger absolute value.
+
+    RT2     (output) DOUBLE PRECISION
+            The eigenvalue of smaller absolute value.
+
+    CS1     (output) DOUBLE PRECISION
+    SN1     (output) DOUBLE PRECISION
+            The vector (CS1, SN1) is a unit right eigenvector for RT1.
+
+    Further Details
+    ===============
+
+    RT1 is accurate to a few ulps barring over/underflow.
+
+    RT2 may be inaccurate if there is massive cancellation in the
+    determinant A*C-B*B; higher precision or correctly rounded or
+    correctly truncated arithmetic would be needed to compute RT2
+    accurately in all cases.
+
+    CS1 and SN1 are accurate to a few ulps barring over/underflow.
+
+    Overflow is possible only if RT1 is within a factor of 5 of overflow.
+    Underflow is harmless if the input data is 0 or exceeds
+       underflow_threshold / macheps.
+
+    =====================================================================
+
+
+    Compute the eigenvalues
+*/
+
+    sm = *a + *c__;
+    df = *a - *c__;
+    adf = abs(df);
+    tb = *b + *b;
+    ab = abs(tb);
+    if (abs(*a) > abs(*c__)) {
+	acmx = *a;
+	acmn = *c__;
+    } else {
+	acmx = *c__;
+	acmn = *a;
+    }
+    /* rt = sqrt(df^2 + tb^2) computed by scaling with the larger of
+       ADF/AB to avoid overflow and destructive underflow. */
+    if (adf > ab) {
+/* Computing 2nd power */
+	d__1 = ab / adf;
+	rt = adf * sqrt(d__1 * d__1 + 1.);
+    } else if (adf < ab) {
+/* Computing 2nd power */
+	d__1 = adf / ab;
+	rt = ab * sqrt(d__1 * d__1 + 1.);
+    } else {
+
+/* Includes case AB=ADF=0 */
+
+	rt = ab * sqrt(2.);
+    }
+    if (sm < 0.) {
+	*rt1 = (sm - rt) * .5;
+	sgn1 = -1;
+
+/*
+          Order of execution important.
+          To get fully accurate smaller eigenvalue,
+          next line needs to be executed in higher precision.
+*/
+
+	*rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+    } else if (sm > 0.) {
+	*rt1 = (sm + rt) * .5;
+	sgn1 = 1;
+
+/*
+          Order of execution important.
+          To get fully accurate smaller eigenvalue,
+          next line needs to be executed in higher precision.
+*/
+
+	*rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+    } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+	*rt1 = rt * .5;
+	*rt2 = rt * -.5;
+	sgn1 = 1;
+    }
+
+/* Compute the eigenvector */
+
+    if (df >= 0.) {
+	cs = df + rt;
+	sgn2 = 1;
+    } else {
+	cs = df - rt;
+	sgn2 = -1;
+    }
+    acs = abs(cs);
+    /* Build (CS1,SN1) from whichever of tan/cotan is well-conditioned. */
+    if (acs > ab) {
+	ct = -tb / cs;
+	*sn1 = 1. / sqrt(ct * ct + 1.);
+	*cs1 = ct * *sn1;
+    } else {
+	if (ab == 0.) {
+	    *cs1 = 1.;
+	    *sn1 = 0.;
+	} else {
+	    tn = -cs / tb;
+	    *cs1 = 1. / sqrt(tn * tn + 1.);
+	    *sn1 = tn * *cs1;
+	}
+    }
+    /* The vector computed above belongs to RT1 only when the two sign
+       flags agree; otherwise rotate it by 90 degrees:
+       (cs1, sn1) <- (-sn1, cs1). */
+    if (sgn1 == sgn2) {
+	tn = *cs1;
+	*cs1 = -(*sn1);
+	*sn1 = tn;
+    }
+    return 0;
+
+/* End of DLAEV2 */
+
+} /* dlaev2_ */
+
+/* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
+ *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__,
+ integer *ldz, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j, k, l, m;
+ static doublereal s, v[3];
+ static integer i1, i2;
+ static doublereal t1, t2, t3, v1, v2, v3, h00, h10, h11, h12, h21, h22,
+ h33, h44;
+ static integer nh;
+ static doublereal cs;
+ static integer nr;
+ static doublereal sn;
+ static integer nz;
+ static doublereal ave, h33s, h44s;
+ static integer itn, its;
+ static doublereal ulp, sum, tst1, h43h34, disc, unfl, ovfl;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static doublereal work[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlanv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), dlabad_(
+ doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
+ doublereal *);
+ static doublereal smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAHQR is an auxiliary routine called by DHSEQR to update the
+ eigenvalues and Schur decomposition already computed by DHSEQR, by
+ dealing with the Hessenberg submatrix in rows and columns ILO to IHI.
+
+ Arguments
+ =========
+
+ WANTT (input) LOGICAL
+ = .TRUE. : the full Schur form T is required;
+ = .FALSE.: only eigenvalues are required.
+
+ WANTZ (input) LOGICAL
+ = .TRUE. : the matrix of Schur vectors Z is required;
+ = .FALSE.: Schur vectors are not required.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper quasi-triangular in
+ rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless
+ ILO = 1). DLAHQR works primarily with the Hessenberg
+ submatrix in rows and columns ILO to IHI, but applies
+ transformations to all of H if WANTT is .TRUE..
+ 1 <= ILO <= max(1,IHI); IHI <= N.
+
+ H (input/output) DOUBLE PRECISION array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if WANTT is .TRUE., H is upper quasi-triangular in
+ rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in
+ standard form. If WANTT is .FALSE., the contents of H are
+ unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ WR (output) DOUBLE PRECISION array, dimension (N)
+ WI (output) DOUBLE PRECISION array, dimension (N)
+ The real and imaginary parts, respectively, of the computed
+ eigenvalues ILO to IHI are stored in the corresponding
+ elements of WR and WI. If two eigenvalues are computed as a
+ complex conjugate pair, they are stored in consecutive
+ elements of WR and WI, say the i-th and (i+1)th, with
+ WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the
+ eigenvalues are stored in the same order as on the diagonal
+ of the Schur form returned in H, with WR(i) = H(i,i), and, if
+ H(i:i+1,i:i+1) is a 2-by-2 diagonal block,
+ WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i).
+
+ ILOZ (input) INTEGER
+ IHIZ (input) INTEGER
+ Specify the rows of Z to which transformations must be
+ applied if WANTZ is .TRUE..
+ 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
+
+ Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
+ If WANTZ is .TRUE., on entry Z must contain the current
+ matrix Z of transformations accumulated by DHSEQR, and on
+ exit Z has been updated; transformations are applied only to
+ the submatrix Z(ILOZ:IHIZ,ILO:IHI).
+ If WANTZ is .FALSE., Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: DLAHQR failed to compute all the eigenvalues ILO to IHI
+ in a total of 30*(IHI-ILO+1) iterations; if INFO = i,
+ elements i+1:ihi of WR and WI contain those eigenvalues
+ which have been successfully computed.
+
+ Further Details
+ ===============
+
+ 2-96 Based on modifications by
+ David Day, Sandia National Laboratory, USA
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1 * 1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1 * 1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.;
+ return 0;
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/*
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (nh / ulp);
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of 1 or 2. Each iteration of the loop works
+ with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L10:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L150;
+ }
+
+/*
+ Perform QR iterations on rows and columns ILO to I until a
+ submatrix of order 1 or 2 splits off at the bottom because a
+ subdiagonal element has become negligible.
+*/
+
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[k + k * h_dim1], abs(d__2));
+ if (tst1 == 0.) {
+ i__3 = i__ - l + 1;
+ tst1 = dlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, work);
+ }
+/* Computing MAX */
+ d__2 = ulp * tst1;
+ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2,
+ smlnum)) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ h__[l + (l - 1) * h_dim1] = 0.;
+ }
+
+/* Exit from loop if a submatrix of order 1 or 2 has split off. */
+
+ if (l >= i__ - 1) {
+ goto L140;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if (its == 10 || its == 20) {
+
+/* Exceptional shift. */
+
+ s = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2));
+ h44 = s * .75 + h__[i__ + i__ * h_dim1];
+ h33 = h44;
+ h43h34 = s * -.4375 * s;
+ } else {
+
+/*
+ Prepare to use Francis' double shift
+ (i.e. 2nd degree generalized Rayleigh quotient)
+*/
+
+ h44 = h__[i__ + i__ * h_dim1];
+ h33 = h__[i__ - 1 + (i__ - 1) * h_dim1];
+ h43h34 = h__[i__ + (i__ - 1) * h_dim1] * h__[i__ - 1 + i__ *
+ h_dim1];
+ s = h__[i__ - 1 + (i__ - 2) * h_dim1] * h__[i__ - 1 + (i__ - 2) *
+ h_dim1];
+ disc = (h33 - h44) * .5;
+ disc = disc * disc + h43h34;
+ if (disc > 0.) {
+
+/* Real roots: use Wilkinson's shift twice */
+
+ disc = sqrt(disc);
+ ave = (h33 + h44) * .5;
+ if (abs(h33) - abs(h44) > 0.) {
+ h33 = h33 * h44 - h43h34;
+ h44 = h33 / (d_sign(&disc, &ave) + ave);
+ } else {
+ h44 = d_sign(&disc, &ave) + ave;
+ }
+ h33 = h44;
+ h43h34 = 0.;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__2 = l;
+ for (m = i__ - 2; m >= i__2; --m) {
+/*
+ Determine the effect of starting the double-shift QR
+ iteration at row M, and see if this would make H(M,M-1)
+ negligible.
+*/
+
+ h11 = h__[m + m * h_dim1];
+ h22 = h__[m + 1 + (m + 1) * h_dim1];
+ h21 = h__[m + 1 + m * h_dim1];
+ h12 = h__[m + (m + 1) * h_dim1];
+ h44s = h44 - h11;
+ h33s = h33 - h11;
+ v1 = (h33s * h44s - h43h34) / h21 + h12;
+ v2 = h22 - h11 - h33s - h44s;
+ v3 = h__[m + 2 + (m + 1) * h_dim1];
+ s = abs(v1) + abs(v2) + abs(v3);
+ v1 /= s;
+ v2 /= s;
+ v3 /= s;
+ v[0] = v1;
+ v[1] = v2;
+ v[2] = v3;
+ if (m == l) {
+ goto L50;
+ }
+ h00 = h__[m - 1 + (m - 1) * h_dim1];
+ h10 = h__[m + (m - 1) * h_dim1];
+ tst1 = abs(v1) * (abs(h00) + abs(h11) + abs(h22));
+ if (abs(h10) * (abs(v2) + abs(v3)) <= ulp * tst1) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+
+/* Double-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = m; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__3 = 3, i__4 = i__ - k + 1;
+ nr = min(i__3,i__4);
+ if (k > m) {
+ dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ dlarfg_(&nr, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ h__[k + 1 + (k - 1) * h_dim1] = 0.;
+ if (k < i__ - 1) {
+ h__[k + 2 + (k - 1) * h_dim1] = 0.;
+ }
+ } else if (m > l) {
+ h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1];
+ }
+ v2 = v[1];
+ t2 = t1 * v2;
+ if (nr == 3) {
+ v3 = v[2];
+ t3 = t1 * v3;
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]
+ + v3 * h__[k + 2 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+ h__[k + 2 + j * h_dim1] -= sum * t3;
+/* L60: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+3,I).
+
+ Computing MIN
+*/
+ i__4 = k + 3;
+ i__3 = min(i__4,i__);
+ for (j = i1; j <= i__3; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ + v3 * h__[j + (k + 2) * h_dim1];
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+ h__[j + (k + 2) * h_dim1] -= sum * t3;
+/* L70: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1] + v3 * z__[j + (k + 2) * z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+ z__[j + (k + 2) * z_dim1] -= sum * t3;
+/* L80: */
+ }
+ }
+ } else if (nr == 2) {
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+/* L90: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+3,I).
+*/
+
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ ;
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+/* L100: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+/* L110: */
+ }
+ }
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L140:
+
+ if (l == i__) {
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+ } else if (l == i__ - 1) {
+
+/*
+ H(I-1,I-2) is negligible: a pair of eigenvalues have converged.
+
+ Transform the 2-by-2 submatrix to standard Schur form,
+ and compute and store the eigenvalues.
+*/
+
+ dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ *
+ h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ *
+ h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs,
+ &sn);
+
+ if (*wantt) {
+
+/* Apply the transformation to the rest of H. */
+
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[
+ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn);
+ }
+ i__1 = i__ - i1 - 1;
+ drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ *
+ h_dim1], &c__1, &cs, &sn);
+ }
+ if (*wantz) {
+
+/* Apply the transformation to Z. */
+
+ drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz +
+ i__ * z_dim1], &c__1, &cs, &sn);
+ }
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L10;
+
+L150:
+ return 0;
+
+/* End of DLAHQR */
+
+} /* dlahqr_ */
+
+/* Subroutine */ int dlahrd_(integer *n, integer *k, integer *nb, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *t, integer *ldt,
+	doublereal *y, integer *ldy)
+{
+/*
+    NOTE(review): machine-generated f2c translation of the Fortran LAPACK
+    auxiliary routine DLAHRD.  The "parameter adjustments" below shift each
+    array pointer so the body can use Fortran-style 1-based, column-major
+    subscripts; statement order in this generated body is significant and
+    must not be changed.
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+	    i__3;
+    doublereal d__1;
+
+    /* Local variables */
+    static integer i__;
+    /* ei caches the subdiagonal entry A(k+i,i) that is overwritten with 1.
+       after dlarfg_ generates the reflector; it is restored at the start of
+       the next iteration and once more after the loop. */
+    static doublereal ei;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dgemv_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *), dcopy_(integer *, doublereal *,
+	    integer *, doublereal *, integer *), daxpy_(integer *, doublereal
+	    *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char
+	    *, char *, char *, integer *, doublereal *, integer *, doublereal
+	    *, integer *), dlarfg_(integer *,
+	    doublereal *, doublereal *, integer *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLAHRD reduces the first NB columns of a real general n-by-(n-k+1)
+    matrix A so that elements below the k-th subdiagonal are zero. The
+    reduction is performed by an orthogonal similarity transformation
+    Q' * A * Q. The routine returns the matrices V and T which determine
+    Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.
+
+    This is an auxiliary routine called by DGEHRD.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the matrix A.
+
+    K       (input) INTEGER
+            The offset for the reduction. Elements below the k-th
+            subdiagonal in the first NB columns are reduced to zero.
+
+    NB      (input) INTEGER
+            The number of columns to be reduced.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1)
+            On entry, the n-by-(n-k+1) general matrix A.
+            On exit, the elements on and above the k-th subdiagonal in
+            the first NB columns are overwritten with the corresponding
+            elements of the reduced matrix; the elements below the k-th
+            subdiagonal, with the array TAU, represent the matrix Q as a
+            product of elementary reflectors. The other columns of A are
+            unchanged. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    TAU     (output) DOUBLE PRECISION array, dimension (NB)
+            The scalar factors of the elementary reflectors. See Further
+            Details.
+
+    T       (output) DOUBLE PRECISION array, dimension (LDT,NB)
+            The upper triangular matrix T.
+
+    LDT     (input) INTEGER
+            The leading dimension of the array T.  LDT >= NB.
+
+    Y       (output) DOUBLE PRECISION array, dimension (LDY,NB)
+            The n-by-nb matrix Y.
+
+    LDY     (input) INTEGER
+            The leading dimension of the array Y. LDY >= N.
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of nb elementary reflectors
+
+       Q = H(1) H(2) . . . H(nb).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in
+    A(i+k+1:n,i), and tau in TAU(i).
+
+    The elements of the vectors v together form the (n-k+1)-by-nb matrix
+    V which is needed, with T and Y, to apply the transformation to the
+    unreduced part of the matrix, using an update of the form:
+    A := (I - V*T*V') * (A - Y*V').
+
+    The contents of A on exit are illustrated by the following example
+    with n = 7, k = 3 and nb = 2:
+
+       ( a   h   a   a   a )
+       ( a   h   a   a   a )
+       ( a   h   a   a   a )
+       ( h   h   a   a   a )
+       ( v1  h   a   a   a )
+       ( v1  v2  a   a   a )
+       ( v1  v2  a   a   a )
+
+    where a denotes an element of the original matrix A, h denotes a
+    modified element of the upper Hessenberg matrix H, and vi denotes an
+    element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    --tau;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1 * 1;
+    t -= t_offset;
+    y_dim1 = *ldy;
+    y_offset = 1 + y_dim1 * 1;
+    y -= y_offset;
+
+    /* Function Body */
+    if (*n <= 1) {
+	return 0;
+    }
+
+    i__1 = *nb;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (i__ > 1) {
+
+/*
+             Update A(1:n,i)
+
+             Compute i-th column of A - Y * V'
+*/
+
+	    i__2 = i__ - 1;
+	    dgemv_("No transpose", n, &i__2, &c_b151, &y[y_offset], ldy, &a[*
+		    k + i__ - 1 + a_dim1], lda, &c_b15, &a[i__ * a_dim1 + 1],
+		    &c__1);
+
+/*
+             Apply I - V * T' * V' to this column (call it b) from the
+             left, using the last column of T as workspace
+
+             Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows)
+                      ( V2 )             ( b2 )
+
+             where V1 is unit lower triangular
+
+             w := V1' * b1
+*/
+
+	    i__2 = i__ - 1;
+	    dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+		    1], &c__1);
+	    i__2 = i__ - 1;
+	    dtrmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1],
+		    lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+	    i__2 = *n - *k - i__ + 1;
+	    i__3 = i__ - 1;
+	    dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1],
+		    lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b15, &t[*nb *
+		    t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+	    i__2 = i__ - 1;
+	    dtrmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+		     &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+	    i__2 = *n - *k - i__ + 1;
+	    i__3 = i__ - 1;
+	    dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[*k + i__ +
+		    a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b15, &a[*k
+		    + i__ + i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+	    i__2 = i__ - 1;
+	    dtrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+		    , lda, &t[*nb * t_dim1 + 1], &c__1);
+	    i__2 = i__ - 1;
+	    daxpy_(&i__2, &c_b151, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 +
+		    i__ * a_dim1], &c__1);
+
+	    /* Restore A(k+i-1,i-1), saved in ei by the previous iteration
+	       just before that entry was overwritten with 1. for the
+	       reflector application (see dlarfg_ call below). */
+	    a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+	}
+
+/*
+          Generate the elementary reflector H(i) to annihilate
+          A(k+i+1:n,i)
+*/
+
+	i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+	i__3 = *k + i__ + 1;
+	dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+		a_dim1], &c__1, &tau[i__]);
+	ei = a[*k + i__ + i__ * a_dim1];
+	a[*k + i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(1:n,i) */
+
+	i__2 = *n - *k - i__ + 1;
+	dgemv_("No transpose", n, &i__2, &c_b15, &a[(i__ + 1) * a_dim1 + 1],
+		lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ *
+		y_dim1 + 1], &c__1);
+	i__2 = *n - *k - i__ + 1;
+	i__3 = i__ - 1;
+	dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda,
+		&a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &t[i__ * t_dim1 +
+		1], &c__1);
+	i__2 = i__ - 1;
+	dgemv_("No transpose", n, &i__2, &c_b151, &y[y_offset], ldy, &t[i__ *
+		t_dim1 + 1], &c__1, &c_b15, &y[i__ * y_dim1 + 1], &c__1);
+	dscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+	i__2 = i__ - 1;
+	d__1 = -tau[i__];
+	dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1);
+	i__2 = i__ - 1;
+	dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+		&t[i__ * t_dim1 + 1], &c__1)
+		;
+	t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+    }
+    /* Restore the last saved subdiagonal entry A(k+nb,nb). */
+    a[*k + *nb + *nb * a_dim1] = ei;
+
+    return 0;
+
+/* End of DLAHRD */
+
+} /* dlahrd_ */
+
+/* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw,
+	doublereal *smin, doublereal *ca, doublereal *a, integer *lda,
+	doublereal *d1, doublereal *d2, doublereal *b, integer *ldb,
+	doublereal *wr, doublereal *wi, doublereal *x, integer *ldx,
+	doublereal *scale, doublereal *xnorm, integer *info)
+{
+/*
+    NOTE(review): machine-generated f2c translation of the Fortran LAPACK
+    auxiliary routine DLALN2; generated statement order is significant.
+*/
+    /* Initialized data */
+
+    /* rswap/zswap record, per pivot choice icmax, whether the right-hand
+       side rows (rswap) or the solution rows (zswap) must be exchanged to
+       undo the complete pivoting below. */
+    static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+    static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+    /* ipivot is a 4x4 table in Fortran column-major order: column icmax
+       lists the positions of the four entries of C after the chosen pivot
+       is moved to position (1,1) (indexed via (icmax<<2)-3..-1 below). */
+    static integer ipivot[16]	/* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2,
+	    4,3,2,1 };
+
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
+    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+    static doublereal equiv_0[4], equiv_1[4];
+
+    /* Local variables */
+    static integer j;
+/* ci/civ and cr/crv alias the same 4-double scratch buffers equiv_0 and
+   equiv_1 — apparently f2c's rendering of a Fortran EQUIVALENCE between a
+   2x2 matrix and a length-4 vector, so C can be indexed either way. */
+#define ci (equiv_0)
+#define cr (equiv_1)
+    static doublereal bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22,
+	    cr21, cr22, li21, csi, ui11, lr21, ui12, ui22;
+#define civ (equiv_0)
+    static doublereal csr, ur11, ur12, ur22;
+#define crv (equiv_1)
+    static doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs;
+    static integer icmax;
+    static doublereal bnorm, cnorm, smini;
+
+    extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *);
+    static doublereal bignum, smlnum;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLALN2 solves a system of the form  (ca A - w D ) X = s B
+    or (ca A' - w D) X = s B   with possible scaling ("s") and
+    perturbation of A.  (A' means A-transpose.)
+
+    A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA
+    real diagonal matrix, w is a real or complex value, and X and B are
+    NA x 1 matrices -- real if w is real, complex if w is complex.  NA
+    may be 1 or 2.
+
+    If w is complex, X and B are represented as NA x 2 matrices,
+    the first column of each being the real part and the second
+    being the imaginary part.
+
+    "s" is a scaling factor (.LE. 1), computed by DLALN2, which is
+    so chosen that X can be computed without overflow.  X is further
+    scaled if necessary to assure that norm(ca A - w D)*norm(X) is less
+    than overflow.
+
+    If both singular values of (ca A - w D) are less than SMIN,
+    SMIN*identity will be used instead of (ca A - w D).  If only one
+    singular value is less than SMIN, one element of (ca A - w D) will be
+    perturbed enough to make the smallest singular value roughly SMIN.
+    If both singular values are at least SMIN, (ca A - w D) will not be
+    perturbed.  In any case, the perturbation will be at most some small
+    multiple of max( SMIN, ulp*norm(ca A - w D) ).  The singular values
+    are computed by infinity-norm approximations, and thus will only be
+    correct to a factor of 2 or so.
+
+    Note: all input quantities are assumed to be smaller than overflow
+    by a reasonable factor.  (See BIGNUM.)
+
+    Arguments
+    ==========
+
+    LTRANS  (input) LOGICAL
+            =.TRUE.:  A-transpose will be used.
+            =.FALSE.: A will be used (not transposed.)
+
+    NA      (input) INTEGER
+            The size of the matrix A.  It may (only) be 1 or 2.
+
+    NW      (input) INTEGER
+            1 if "w" is real, 2 if "w" is complex.  It may only be 1
+            or 2.
+
+    SMIN    (input) DOUBLE PRECISION
+            The desired lower bound on the singular values of A.  This
+            should be a safe distance away from underflow or overflow,
+            say, between (underflow/machine precision) and  (machine
+            precision * overflow ).  (See BIGNUM and ULP.)
+
+    CA      (input) DOUBLE PRECISION
+            The coefficient c, which A is multiplied by.
+
+    A       (input) DOUBLE PRECISION array, dimension (LDA,NA)
+            The NA x NA matrix A.
+
+    LDA     (input) INTEGER
+            The leading dimension of A.  It must be at least NA.
+
+    D1      (input) DOUBLE PRECISION
+            The 1,1 element in the diagonal matrix D.
+
+    D2      (input) DOUBLE PRECISION
+            The 2,2 element in the diagonal matrix D.  Not used if NW=1.
+
+    B       (input) DOUBLE PRECISION array, dimension (LDB,NW)
+            The NA x NW matrix B (right-hand side).  If NW=2 ("w" is
+            complex), column 1 contains the real part of B and column 2
+            contains the imaginary part.
+
+    LDB     (input) INTEGER
+            The leading dimension of B.  It must be at least NA.
+
+    WR      (input) DOUBLE PRECISION
+            The real part of the scalar "w".
+
+    WI      (input) DOUBLE PRECISION
+            The imaginary part of the scalar "w".  Not used if NW=1.
+
+    X       (output) DOUBLE PRECISION array, dimension (LDX,NW)
+            The NA x NW matrix X (unknowns), as computed by DLALN2.
+            If NW=2 ("w" is complex), on exit, column 1 will contain
+            the real part of X and column 2 will contain the imaginary
+            part.
+
+    LDX     (input) INTEGER
+            The leading dimension of X.  It must be at least NA.
+
+    SCALE   (output) DOUBLE PRECISION
+            The scale factor that B must be multiplied by to insure
+            that overflow does not occur when computing X.  Thus,
+            (ca A - w D) X  will be SCALE*B, not B (ignoring
+            perturbations of A.)  It will be at most 1.
+
+    XNORM   (output) DOUBLE PRECISION
+            The infinity-norm of X, when X is regarded as an NA x NW
+            real matrix.
+
+    INFO    (output) INTEGER
+            An error flag.  It will be set to zero if no error occurs,
+            a negative number if an argument is in error, or a positive
+            number if  ca A - w D  had to be perturbed.
+            The possible values are:
+            = 0: No error occurred, and (ca A - w D) did not have to be
+                   perturbed.
+            = 1: (ca A - w D) had to be perturbed to make its smallest
+                 (or only) singular value greater than SMIN.
+            NOTE: In the interests of speed, this routine does not
+                  check the inputs for errors.
+
+   =====================================================================
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1 * 1;
+    x -= x_offset;
+
+    /* Function Body */
+
+/* Compute BIGNUM */
+
+    smlnum = 2. * SAFEMINIMUM;
+    bignum = 1. / smlnum;
+    smini = max(*smin,smlnum);
+
+/* Don't check for input errors */
+
+    *info = 0;
+
+/* Standard Initializations */
+
+    *scale = 1.;
+
+    if (*na == 1) {
+
+/* 1 x 1  (i.e., scalar) system   C X = B */
+
+	if (*nw == 1) {
+
+/*
+             Real 1x1 system.
+
+             C = ca A - w D
+*/
+
+	    csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+	    cnorm = abs(csr);
+
+/* If | C | < SMINI, use C = SMINI */
+
+	    if (cnorm < smini) {
+		csr = smini;
+		cnorm = smini;
+		*info = 1;
+	    }
+
+/* Check scaling for  X = B / C */
+
+	    bnorm = (d__1 = b[b_dim1 + 1], abs(d__1));
+	    if ((cnorm < 1. && bnorm > 1.)) {
+		if (bnorm > bignum * cnorm) {
+		    *scale = 1. / bnorm;
+		}
+	    }
+
+/* Compute X */
+
+	    x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr;
+	    *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1));
+	} else {
+
+/*
+             Complex 1x1 system (w is complex)
+
+             C = ca A - w D
+*/
+
+	    csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+	    csi = -(*wi) * *d1;
+	    cnorm = abs(csr) + abs(csi);
+
+/* If | C | < SMINI, use C = SMINI */
+
+	    if (cnorm < smini) {
+		csr = smini;
+		csi = 0.;
+		cnorm = smini;
+		*info = 1;
+	    }
+
+/* Check scaling for  X = B / C */
+
+	    bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1) <<
+		    (1)) + 1], abs(d__2));
+	    if ((cnorm < 1. && bnorm > 1.)) {
+		if (bnorm > bignum * cnorm) {
+		    *scale = 1. / bnorm;
+		}
+	    }
+
+/* Compute X */
+
+	    d__1 = *scale * b[b_dim1 + 1];
+	    d__2 = *scale * b[((b_dim1) << (1)) + 1];
+	    dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[((x_dim1) <<
+		    (1)) + 1]);
+	    *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[((x_dim1)
+		    << (1)) + 1], abs(d__2));
+	}
+
+    } else {
+
+/*
+          2x2 System
+
+          Compute the real part of  C = ca A - w D  (or  ca A' - w D )
+*/
+
+	cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1;
+	cr[3] = *ca * a[((a_dim1) << (1)) + 2] - *wr * *d2;
+	if (*ltrans) {
+	    cr[2] = *ca * a[a_dim1 + 2];
+	    cr[1] = *ca * a[((a_dim1) << (1)) + 1];
+	} else {
+	    cr[1] = *ca * a[a_dim1 + 2];
+	    cr[2] = *ca * a[((a_dim1) << (1)) + 1];
+	}
+
+	if (*nw == 1) {
+
+/*
+             Real 2x2 system  (w is real)
+
+             Find the largest element in C
+*/
+
+	    cmax = 0.;
+	    icmax = 0;
+
+	    for (j = 1; j <= 4; ++j) {
+		if ((d__1 = crv[j - 1], abs(d__1)) > cmax) {
+		    cmax = (d__1 = crv[j - 1], abs(d__1));
+		    icmax = j;
+		}
+/* L10: */
+	    }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+	    if (cmax < smini) {
+/* Computing MAX */
+		d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[
+			b_dim1 + 2], abs(d__2));
+		bnorm = max(d__3,d__4);
+		if ((smini < 1. && bnorm > 1.)) {
+		    if (bnorm > bignum * smini) {
+			*scale = 1. / bnorm;
+		    }
+		}
+		temp = *scale / smini;
+		x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+		x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+		*xnorm = temp * bnorm;
+		*info = 1;
+		return 0;
+	    }
+
+/* Gaussian elimination with complete pivoting. */
+
+	    ur11 = crv[icmax - 1];
+	    cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
+	    ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
+	    cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
+	    ur11r = 1. / ur11;
+	    lr21 = ur11r * cr21;
+	    ur22 = cr22 - ur12 * lr21;
+
+/* If smaller pivot < SMINI, use SMINI */
+
+	    if (abs(ur22) < smini) {
+		ur22 = smini;
+		*info = 1;
+	    }
+	    if (rswap[icmax - 1]) {
+		br1 = b[b_dim1 + 2];
+		br2 = b[b_dim1 + 1];
+	    } else {
+		br1 = b[b_dim1 + 1];
+		br2 = b[b_dim1 + 2];
+	    }
+	    br2 -= lr21 * br1;
+/* Computing MAX */
+	    d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2);
+	    bbnd = max(d__2,d__3);
+	    if ((bbnd > 1. && abs(ur22) < 1.)) {
+		if (bbnd >= bignum * abs(ur22)) {
+		    *scale = 1. / bbnd;
+		}
+	    }
+
+	    xr2 = br2 * *scale / ur22;
+	    xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
+	    if (zswap[icmax - 1]) {
+		x[x_dim1 + 1] = xr2;
+		x[x_dim1 + 2] = xr1;
+	    } else {
+		x[x_dim1 + 1] = xr1;
+		x[x_dim1 + 2] = xr2;
+	    }
+/* Computing MAX */
+	    d__1 = abs(xr1), d__2 = abs(xr2);
+	    *xnorm = max(d__1,d__2);
+
+/* Further scaling if  norm(A) norm(X) > overflow */
+
+	    if ((*xnorm > 1. && cmax > 1.)) {
+		if (*xnorm > bignum / cmax) {
+		    temp = cmax / bignum;
+		    x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+		    x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+		    *xnorm = temp * *xnorm;
+		    *scale = temp * *scale;
+		}
+	    }
+	} else {
+
+/*
+             Complex 2x2 system  (w is complex)
+
+             Find the largest element in C
+*/
+
+	    ci[0] = -(*wi) * *d1;
+	    ci[1] = 0.;
+	    ci[2] = 0.;
+	    ci[3] = -(*wi) * *d2;
+	    cmax = 0.;
+	    icmax = 0;
+
+	    for (j = 1; j <= 4; ++j) {
+		if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs(
+			d__2)) > cmax) {
+		    cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1]
+			    , abs(d__2));
+		    icmax = j;
+		}
+/* L20: */
+	    }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+	    if (cmax < smini) {
+/* Computing MAX */
+		d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1)
+			 << (1)) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 +
+			2], abs(d__3)) + (d__4 = b[((b_dim1) << (1)) + 2],
+			abs(d__4));
+		bnorm = max(d__5,d__6);
+		if ((smini < 1. && bnorm > 1.)) {
+		    if (bnorm > bignum * smini) {
+			*scale = 1. / bnorm;
+		    }
+		}
+		temp = *scale / smini;
+		x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+		x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+		x[((x_dim1) << (1)) + 1] = temp * b[((b_dim1) << (1)) + 1];
+		x[((x_dim1) << (1)) + 2] = temp * b[((b_dim1) << (1)) + 2];
+		*xnorm = temp * bnorm;
+		*info = 1;
+		return 0;
+	    }
+
+/* Gaussian elimination with complete pivoting. */
+
+	    ur11 = crv[icmax - 1];
+	    ui11 = civ[icmax - 1];
+	    cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
+	    ci21 = civ[ipivot[((icmax) << (2)) - 3] - 1];
+	    ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
+	    ui12 = civ[ipivot[((icmax) << (2)) - 2] - 1];
+	    cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
+	    ci22 = civ[ipivot[((icmax) << (2)) - 1] - 1];
+	    if (icmax == 1 || icmax == 4) {
+
+/* Code when off-diagonals of pivoted C are real */
+
+		if (abs(ur11) > abs(ui11)) {
+		    temp = ui11 / ur11;
+/* Computing 2nd power */
+		    d__1 = temp;
+		    ur11r = 1. / (ur11 * (d__1 * d__1 + 1.));
+		    ui11r = -temp * ur11r;
+		} else {
+		    temp = ur11 / ui11;
+/* Computing 2nd power */
+		    d__1 = temp;
+		    ui11r = -1. / (ui11 * (d__1 * d__1 + 1.));
+		    ur11r = -temp * ui11r;
+		}
+		lr21 = cr21 * ur11r;
+		li21 = cr21 * ui11r;
+		ur12s = ur12 * ur11r;
+		ui12s = ur12 * ui11r;
+		ur22 = cr22 - ur12 * lr21;
+		ui22 = ci22 - ur12 * li21;
+	    } else {
+
+/* Code when diagonals of pivoted C are real */
+
+		ur11r = 1. / ur11;
+		ui11r = 0.;
+		lr21 = cr21 * ur11r;
+		li21 = ci21 * ur11r;
+		ur12s = ur12 * ur11r;
+		ui12s = ui12 * ur11r;
+		ur22 = cr22 - ur12 * lr21 + ui12 * li21;
+		ui22 = -ur12 * li21 - ui12 * lr21;
+	    }
+	    u22abs = abs(ur22) + abs(ui22);
+
+/* If smaller pivot < SMINI, use SMINI */
+
+	    if (u22abs < smini) {
+		ur22 = smini;
+		ui22 = 0.;
+		*info = 1;
+	    }
+	    if (rswap[icmax - 1]) {
+		br2 = b[b_dim1 + 1];
+		br1 = b[b_dim1 + 2];
+		bi2 = b[((b_dim1) << (1)) + 1];
+		bi1 = b[((b_dim1) << (1)) + 2];
+	    } else {
+		br1 = b[b_dim1 + 1];
+		br2 = b[b_dim1 + 2];
+		bi1 = b[((b_dim1) << (1)) + 1];
+		bi2 = b[((b_dim1) << (1)) + 2];
+	    }
+	    br2 = br2 - lr21 * br1 + li21 * bi1;
+	    bi2 = bi2 - li21 * br1 - lr21 * bi1;
+/* Computing MAX */
+	    d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r)
+		    )), d__2 = abs(br2) + abs(bi2);
+	    bbnd = max(d__1,d__2);
+	    if ((bbnd > 1. && u22abs < 1.)) {
+		if (bbnd >= bignum * u22abs) {
+		    *scale = 1. / bbnd;
+		    br1 = *scale * br1;
+		    bi1 = *scale * bi1;
+		    br2 = *scale * br2;
+		    bi2 = *scale * bi2;
+		}
+	    }
+
+	    dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
+	    xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
+	    xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
+	    if (zswap[icmax - 1]) {
+		x[x_dim1 + 1] = xr2;
+		x[x_dim1 + 2] = xr1;
+		x[((x_dim1) << (1)) + 1] = xi2;
+		x[((x_dim1) << (1)) + 2] = xi1;
+	    } else {
+		x[x_dim1 + 1] = xr1;
+		x[x_dim1 + 2] = xr2;
+		x[((x_dim1) << (1)) + 1] = xi1;
+		x[((x_dim1) << (1)) + 2] = xi2;
+	    }
+/* Computing MAX */
+	    d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2);
+	    *xnorm = max(d__1,d__2);
+
+/* Further scaling if  norm(A) norm(X) > overflow */
+
+	    if ((*xnorm > 1. && cmax > 1.)) {
+		if (*xnorm > bignum / cmax) {
+		    temp = cmax / bignum;
+		    x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+		    x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+		    x[((x_dim1) << (1)) + 1] = temp * x[((x_dim1) << (1)) + 1]
+			    ;
+		    x[((x_dim1) << (1)) + 2] = temp * x[((x_dim1) << (1)) + 2]
+			    ;
+		    *xnorm = temp * *xnorm;
+		    *scale = temp * *scale;
+		}
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DLALN2 */
+
+} /* dlaln2_ */
+
+#undef crv
+#undef civ
+#undef cr
+#undef ci
+
+
+/* f2c-translated LAPACK DLALS0 (see Fortran doc block below).
+   NOTE(review): generated code -- do not hand-restyle; index arithmetic
+   like ((dim) << (1)) is f2c's rendering of 2*dim, i.e. column 2 of a
+   two-column Fortran array. */
+/* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr,
+	integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal
+	*bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+	integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *
+	poles, doublereal *difl, doublereal *difr, doublereal *z__, integer *
+	k, doublereal *c__, doublereal *s, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset,
+	     difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1,
+	    poles_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Local variables */
+    /* NOTE(review): f2c emits locals with static storage duration, so this
+       routine is NOT reentrant / thread-safe. */
+    static integer i__, j, m, n;
+    static doublereal dj;
+    static integer nlp1;
+    static doublereal temp;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	     integer *);
+    static doublereal diflj, difrj, dsigj;
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), dcopy_(integer *,
+	    doublereal *, integer *, doublereal *, integer *);
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dlacpy_(char *, integer *, integer
+	    *, doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    static doublereal dsigjp;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       December 1, 1999
+
+
+    Purpose
+    =======
+
+    DLALS0 applies back the multiplying factors of either the left or the
+    right singular vector matrix of a diagonal matrix appended by a row
+    to the right hand side matrix B in solving the least squares problem
+    using the divide-and-conquer SVD approach.
+
+    For the left singular vector matrix, three types of orthogonal
+    matrices are involved:
+
+    (1L) Givens rotations: the number of such rotations is GIVPTR; the
+         pairs of columns/rows they were applied to are stored in GIVCOL;
+         and the C- and S-values of these rotations are stored in GIVNUM.
+
+    (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+         row, and for J=2:N, PERM(J)-th row of B is to be moved to the
+         J-th row.
+
+    (3L) The left singular vector matrix of the remaining matrix.
+
+    For the right singular vector matrix, four types of orthogonal
+    matrices are involved:
+
+    (1R) The right singular vector matrix of the remaining matrix.
+
+    (2R) If SQRE = 1, one extra Givens rotation to generate the right
+         null space.
+
+    (3R) The inverse transformation of (2L).
+
+    (4R) The inverse transformation of (1L).
+
+    Arguments
+    =========
+
+    ICOMPQ (input) INTEGER
+           Specifies whether singular vectors are to be computed in
+           factored form:
+           = 0: Left singular vector matrix.
+           = 1: Right singular vector matrix.
+
+    NL     (input) INTEGER
+           The row dimension of the upper block. NL >= 1.
+
+    NR     (input) INTEGER
+           The row dimension of the lower block. NR >= 1.
+
+    SQRE   (input) INTEGER
+           = 0: the lower block is an NR-by-NR square matrix.
+           = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+           The bidiagonal matrix has row dimension N = NL + NR + 1,
+           and column dimension M = N + SQRE.
+
+    NRHS   (input) INTEGER
+           The number of columns of B and BX. NRHS must be at least 1.
+
+    B      (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS )
+           On input, B contains the right hand sides of the least
+           squares problem in rows 1 through M. On output, B contains
+           the solution X in rows 1 through N.
+
+    LDB    (input) INTEGER
+           The leading dimension of B. LDB must be at least
+           max(1,MAX( M, N ) ).
+
+    BX     (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS )
+
+    LDBX   (input) INTEGER
+           The leading dimension of BX.
+
+    PERM   (input) INTEGER array, dimension ( N )
+           The permutations (from deflation and sorting) applied
+           to the two blocks.
+
+    GIVPTR (input) INTEGER
+           The number of Givens rotations which took place in this
+           subproblem.
+
+    GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+           Each pair of numbers indicates a pair of rows/columns
+           involved in a Givens rotation.
+
+    LDGCOL (input) INTEGER
+           The leading dimension of GIVCOL, must be at least N.
+
+    GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+           Each number indicates the C or S value used in the
+           corresponding Givens rotation.
+
+    LDGNUM (input) INTEGER
+           The leading dimension of arrays DIFR, POLES and
+           GIVNUM, must be at least K.
+
+    POLES  (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+           On entry, POLES(1:K, 1) contains the new singular
+           values obtained from solving the secular equation, and
+           POLES(1:K, 2) is an array containing the poles in the secular
+           equation.
+
+    DIFL   (input) DOUBLE PRECISION array, dimension ( K ).
+           On entry, DIFL(I) is the distance between I-th updated
+           (undeflated) singular value and the I-th (undeflated) old
+           singular value.
+
+    DIFR   (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ).
+           On entry, DIFR(I, 1) contains the distances between I-th
+           updated (undeflated) singular value and the I+1-th
+           (undeflated) old singular value. And DIFR(I, 2) is the
+           normalizing factor for the I-th right singular vector.
+
+    Z      (input) DOUBLE PRECISION array, dimension ( K )
+           Contain the components of the deflation-adjusted updating row
+           vector.
+
+    K      (input) INTEGER
+           Contains the dimension of the non-deflated matrix,
+           This is the order of the related secular equation. 1 <= K <=N.
+
+    C      (input) DOUBLE PRECISION
+           C contains garbage if SQRE =0 and the C-value of a Givens
+           rotation related to the right null space if SQRE = 1.
+
+    S      (input) DOUBLE PRECISION
+           S contains garbage if SQRE =0 and the S-value of a Givens
+           rotation related to the right null space if SQRE = 1.
+
+    WORK   (workspace) DOUBLE PRECISION array, dimension ( K )
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Ren-Cang Li, Computer Science Division, University of
+         California at Berkeley, USA
+       Osni Marques, LBNL/NERSC, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift each array pointer back by its first-element offset so the
+       generated code can use Fortran-style 1-based subscripts. */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    bx_dim1 = *ldbx;
+    bx_offset = 1 + bx_dim1 * 1;
+    bx -= bx_offset;
+    --perm;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1 * 1;
+    givcol -= givcol_offset;
+    difr_dim1 = *ldgnum;
+    difr_offset = 1 + difr_dim1 * 1;
+    difr -= difr_offset;
+    poles_dim1 = *ldgnum;
+    poles_offset = 1 + poles_dim1 * 1;
+    poles -= poles_offset;
+    givnum_dim1 = *ldgnum;
+    givnum_offset = 1 + givnum_dim1 * 1;
+    givnum -= givnum_offset;
+    --difl;
+    --z__;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*nl < 1) {
+	*info = -2;
+    } else if (*nr < 1) {
+	*info = -3;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -4;
+    }
+
+    n = *nl + *nr + 1;
+
+    if (*nrhs < 1) {
+	*info = -5;
+    } else if (*ldb < n) {
+	*info = -7;
+    } else if (*ldbx < n) {
+	*info = -9;
+    } else if (*givptr < 0) {
+	*info = -11;
+    } else if (*ldgcol < n) {
+	*info = -13;
+    } else if (*ldgnum < n) {
+	*info = -15;
+    } else if (*k < 1) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLALS0", &i__1);
+	return 0;
+    }
+
+    m = n + *sqre;
+    nlp1 = *nl + 1;
+
+    if (*icompq == 0) {
+
+/*
+          Apply back orthogonal transformations from the left.
+
+          Step (1L): apply back the Givens rotations performed.
+*/
+
+	i__1 = *givptr;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+		    ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+		    i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1])
+		    ;
+/* L10: */
+	}
+
+/*        Step (2L): permute rows of B. */
+
+	dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+	i__1 = n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+		    ldbx);
+/* L20: */
+	}
+
+/*
+          Step (3L): apply the inverse of the left singular vector
+          matrix to BX.
+*/
+
+	if (*k == 1) {
+	    dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+	    if (z__[1] < 0.) {
+		/* c_b151 is a file-level constant defined elsewhere in this
+		   file (presumably -1.0: negate the row) -- TODO confirm. */
+		dscal_(nrhs, &c_b151, &b[b_offset], ldb);
+	    }
+	} else {
+	    i__1 = *k;
+	    for (j = 1; j <= i__1; ++j) {
+		diflj = difl[j];
+		dj = poles[j + poles_dim1];
+		dsigj = -poles[j + ((poles_dim1) << (1))];
+		if (j < *k) {
+		    difrj = -difr[j + difr_dim1];
+		    dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
+		}
+		if (z__[j] == 0. || poles[j + ((poles_dim1) << (1))] == 0.) {
+		    work[j] = 0.;
+		} else {
+		    work[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
+			    diflj / (poles[j + ((poles_dim1) << (1))] + dj);
+		}
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
+			    == 0.) {
+			work[i__] = 0.;
+		    } else {
+			/* dlamc3_ forces the addition through memory to
+			   defeat extra-precision registers (classic LAPACK
+			   idiom for accurate differences). */
+			work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+				i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
+				(1))], &dsigj) - diflj) / (poles[i__ + ((
+				poles_dim1) << (1))] + dj);
+		    }
+/* L30: */
+		}
+		i__2 = *k;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
+			    == 0.) {
+			work[i__] = 0.;
+		    } else {
+			work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+				i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
+				(1))], &dsigjp) + difrj) / (poles[i__ + ((
+				poles_dim1) << (1))] + dj);
+		    }
+/* L40: */
+		}
+		work[1] = -1.;
+		temp = dnrm2_(k, &work[1], &c__1);
+		/* c_b15 / c_b29 are file-level constants defined elsewhere
+		   (presumably 1.0 and 0.0 for the GEMV alpha/beta) -- TODO
+		   confirm against their definitions. */
+		dgemv_("T", k, nrhs, &c_b15, &bx[bx_offset], ldbx, &work[1], &
+			c__1, &c_b29, &b[j + b_dim1], ldb);
+		dlascl_("G", &c__0, &c__0, &temp, &c_b15, &c__1, nrhs, &b[j +
+			b_dim1], ldb, info);
+/* L50: */
+	    }
+	}
+
+/*        Move the deflated rows of BX to B also. */
+
+	if (*k < max(m,n)) {
+	    i__1 = n - *k;
+	    dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+		    + b_dim1], ldb);
+	}
+    } else {
+
+/*
+          Apply back the right orthogonal transformations.
+
+          Step (1R): apply back the new right singular vector matrix
+          to B.
+*/
+
+	if (*k == 1) {
+	    dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+	} else {
+	    i__1 = *k;
+	    for (j = 1; j <= i__1; ++j) {
+		dsigj = poles[j + ((poles_dim1) << (1))];
+		if (z__[j] == 0.) {
+		    work[j] = 0.;
+		} else {
+		    work[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+			    poles_dim1]) / difr[j + ((difr_dim1) << (1))];
+		}
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (z__[j] == 0.) {
+			work[i__] = 0.;
+		    } else {
+			d__1 = -poles[i__ + 1 + ((poles_dim1) << (1))];
+			work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[
+				i__ + difr_dim1]) / (dsigj + poles[i__ +
+				poles_dim1]) / difr[i__ + ((difr_dim1) << (1))
+				];
+		    }
+/* L60: */
+		}
+		i__2 = *k;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    if (z__[j] == 0.) {
+			work[i__] = 0.;
+		    } else {
+			d__1 = -poles[i__ + ((poles_dim1) << (1))];
+			work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[
+				i__]) / (dsigj + poles[i__ + poles_dim1]) /
+				difr[i__ + ((difr_dim1) << (1))];
+		    }
+/* L70: */
+		}
+		dgemv_("T", k, nrhs, &c_b15, &b[b_offset], ldb, &work[1], &
+			c__1, &c_b29, &bx[j + bx_dim1], ldbx);
+/* L80: */
+	    }
+	}
+
+/*
+          Step (2R): if SQRE = 1, apply back the rotation that is
+          related to the right null space of the subproblem.
+*/
+
+	if (*sqre == 1) {
+	    dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+	    drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+		    s);
+	}
+	if (*k < max(m,n)) {
+	    i__1 = n - *k;
+	    dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+		    bx_dim1], ldbx);
+	}
+
+/*        Step (3R): permute rows of B. */
+
+	dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+	if (*sqre == 1) {
+	    dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+	}
+	i__1 = n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+		    ldb);
+/* L90: */
+	}
+
+/*        Step (4R): apply back the Givens rotations performed. */
+
+	/* Reverse order (GIVPTR down to 1) with negated S: this is the
+	   inverse of the rotation sequence applied in step (1L). */
+	for (i__ = *givptr; i__ >= 1; --i__) {
+	    d__1 = -givnum[i__ + givnum_dim1];
+	    drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+		    ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+		    i__ + ((givnum_dim1) << (1))], &d__1);
+/* L100: */
+	}
+    }
+
+    return 0;
+
+/*     End of DLALS0 */
+
+} /* dlals0_ */
+
+/* f2c-translated LAPACK DLALSA (see Fortran doc block below).
+   NOTE(review): generated code -- preserve statement order exactly;
+   ((x) << (1)) is f2c's rendering of 2*x. */
+/* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n,
+	integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer *
+	ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k,
+	doublereal *difl, doublereal *difr, doublereal *z__, doublereal *
+	poles, integer *givptr, integer *givcol, integer *ldgcol, integer *
+	perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *
+	work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
+	    b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
+	    difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
+	     u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
+	    i__2;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    /* NOTE(review): f2c emits locals with static storage duration, so this
+       routine is NOT reentrant / thread-safe. */
+    static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+	    ndb1, nlp1, lvl2, nrp1, nlvl, sqre;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    static integer inode, ndiml, ndimr;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlals0_(integer *, integer *, integer *,
+	     integer *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, integer *, integer *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	     integer *), dlasdt_(integer *, integer *, integer *, integer *,
+	    integer *, integer *, integer *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLALSA is an itermediate step in solving the least squares problem
+    by computing the SVD of the coefficient matrix in compact form (The
+    singular vectors are computed as products of simple orthorgonal
+    matrices.).
+
+    If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector
+    matrix of an upper bidiagonal matrix to the right hand side; and if
+    ICOMPQ = 1, DLALSA applies the right singular vector matrix to the
+    right hand side. The singular vector matrices were generated in
+    compact form by DLALSA.
+
+    Arguments
+    =========
+
+
+    ICOMPQ (input) INTEGER
+           Specifies whether the left or the right singular vector
+           matrix is involved.
+           = 0: Left singular vector matrix
+           = 1: Right singular vector matrix
+
+    SMLSIZ (input) INTEGER
+           The maximum size of the subproblems at the bottom of the
+           computation tree.
+
+    N      (input) INTEGER
+           The row and column dimensions of the upper bidiagonal matrix.
+
+    NRHS   (input) INTEGER
+           The number of columns of B and BX. NRHS must be at least 1.
+
+    B      (input) DOUBLE PRECISION array, dimension ( LDB, NRHS )
+           On input, B contains the right hand sides of the least
+           squares problem in rows 1 through M. On output, B contains
+           the solution X in rows 1 through N.
+
+    LDB    (input) INTEGER
+           The leading dimension of B in the calling subprogram.
+           LDB must be at least max(1,MAX( M, N ) ).
+
+    BX     (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS )
+           On exit, the result of applying the left or right singular
+           vector matrix to B.
+
+    LDBX   (input) INTEGER
+           The leading dimension of BX.
+
+    U      (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ).
+           On entry, U contains the left singular vector matrices of all
+           subproblems at the bottom level.
+
+    LDU    (input) INTEGER, LDU = > N.
+           The leading dimension of arrays U, VT, DIFL, DIFR,
+           POLES, GIVNUM, and Z.
+
+    VT     (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ).
+           On entry, VT' contains the right singular vector matrices of
+           all subproblems at the bottom level.
+
+    K      (input) INTEGER array, dimension ( N ).
+
+    DIFL   (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
+           where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+    DIFR   (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+           On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record
+           distances between singular values on the I-th level and
+           singular values on the (I -1)-th level, and DIFR(*, 2 * I)
+           record the normalizing factors of the right singular vectors
+           matrices of subproblems on I-th level.
+
+    Z      (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
+           On entry, Z(1, I) contains the components of the deflation-
+           adjusted updating row vector for subproblems on the I-th
+           level.
+
+    POLES  (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+           On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+           singular values involved in the secular equations on the I-th
+           level.
+
+    GIVPTR (input) INTEGER array, dimension ( N ).
+           On entry, GIVPTR( I ) records the number of Givens
+           rotations performed on the I-th problem on the computation
+           tree.
+
+    GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+           On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+           locations of Givens rotations performed on the I-th level on
+           the computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL = > N.
+           The leading dimension of arrays GIVCOL and PERM.
+
+    PERM   (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+           On entry, PERM(*, I) records permutations done on the I-th
+           level of the computation tree.
+
+    GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+           On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+           values of Givens rotations performed on the I-th level on the
+           computation tree.
+
+    C      (input) DOUBLE PRECISION array, dimension ( N ).
+           On entry, if the I-th subproblem is not square,
+           C( I ) contains the C-value of a Givens rotation related to
+           the right null space of the I-th subproblem.
+
+    S      (input) DOUBLE PRECISION array, dimension ( N ).
+           On entry, if the I-th subproblem is not square,
+           S( I ) contains the S-value of a Givens rotation related to
+           the right null space of the I-th subproblem.
+
+    WORK   (workspace) DOUBLE PRECISION array.
+           The dimension must be at least N.
+
+    IWORK  (workspace) INTEGER array.
+           The dimension must be at least 3 * N
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Ren-Cang Li, Computer Science Division, University of
+         California at Berkeley, USA
+       Osni Marques, LBNL/NERSC, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift each array pointer back by its first-element offset so the
+       generated code can use Fortran-style 1-based subscripts. */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    bx_dim1 = *ldbx;
+    bx_offset = 1 + bx_dim1 * 1;
+    bx -= bx_offset;
+    givnum_dim1 = *ldu;
+    givnum_offset = 1 + givnum_dim1 * 1;
+    givnum -= givnum_offset;
+    poles_dim1 = *ldu;
+    poles_offset = 1 + poles_dim1 * 1;
+    poles -= poles_offset;
+    z_dim1 = *ldu;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+    difr_dim1 = *ldu;
+    difr_offset = 1 + difr_dim1 * 1;
+    difr -= difr_offset;
+    difl_dim1 = *ldu;
+    difl_offset = 1 + difl_dim1 * 1;
+    difl -= difl_offset;
+    vt_dim1 = *ldu;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    --k;
+    --givptr;
+    perm_dim1 = *ldgcol;
+    perm_offset = 1 + perm_dim1 * 1;
+    perm -= perm_offset;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1 * 1;
+    givcol -= givcol_offset;
+    --c__;
+    --s;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*smlsiz < 3) {
+	*info = -2;
+    } else if (*n < *smlsiz) {
+	*info = -3;
+    } else if (*nrhs < 1) {
+	*info = -4;
+    } else if (*ldb < *n) {
+	*info = -6;
+    } else if (*ldbx < *n) {
+	*info = -8;
+    } else if (*ldu < *n) {
+	*info = -10;
+    } else if (*ldgcol < *n) {
+	*info = -19;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLALSA", &i__1);
+	return 0;
+    }
+
+/*     Book-keeping and  setting up the computation tree. */
+
+    /* iwork is partitioned into three length-n segments: node centers
+       (inode), left-subproblem sizes (ndiml), right-subproblem sizes
+       (ndimr), filled by dlasdt_. */
+    inode = 1;
+    ndiml = inode + *n;
+    ndimr = ndiml + *n;
+
+    dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+	    smlsiz);
+
+/*
+       The following code applies back the left singular vector factors.
+       For applying back the right singular vector factors, go to 50.
+*/
+
+    if (*icompq == 1) {
+	goto L50;
+    }
+
+/*
+       The nodes on the bottom level of the tree were solved
+       by DLASDQ. The corresponding left and right singular vector
+       matrices are in explicit form. First apply back the left
+       singular vector matrices.
+*/
+
+    ndb1 = (nd + 1) / 2;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+          IC : center row of each node
+          NL : number of rows of left subproblem
+          NR : number of rows of right subproblem
+          NLF: starting row of the left subproblem
+          NRF: starting row of the right subproblem
+*/
+
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nr = iwork[ndimr + i1];
+	nlf = ic - nl;
+	nrf = ic + 1;
+	/* c_b15 / c_b29 are file-level constants defined elsewhere
+	   (presumably 1.0 and 0.0 for the GEMM alpha/beta) -- TODO
+	   confirm against their definitions. */
+	dgemm_("T", "N", &nl, nrhs, &nl, &c_b15, &u[nlf + u_dim1], ldu, &b[
+		nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx);
+	dgemm_("T", "N", &nr, nrhs, &nr, &c_b15, &u[nrf + u_dim1], ldu, &b[
+		nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx);
+/* L10: */
+    }
+
+/*
+       Next copy the rows of B that correspond to unchanged rows
+       in the bidiagonal matrix to BX.
+*/
+
+    i__1 = nd;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ic = iwork[inode + i__ - 1];
+	dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L20: */
+    }
+
+/*
+       Finally go through the left singular vector matrices of all
+       the other subproblems bottom-up on the tree.
+*/
+
+    /* j starts at 2^NLVL (c__2 is a file-level constant, presumably 2 --
+       TODO confirm) and is decremented once per subproblem visited. */
+    j = pow_ii(&c__2, &nlvl);
+    sqre = 0;
+
+    for (lvl = nlvl; lvl >= 1; --lvl) {
+	lvl2 = ((lvl) << (1)) - 1;
+
+/*
+          find the first node LF and last node LL on
+          the current level LVL
+*/
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__1 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__1);
+	    ll = ((lf) << (1)) - 1;
+	}
+	i__1 = ll;
+	for (i__ = lf; i__ <= i__1; ++i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    nrf = ic + 1;
+	    --j;
+	    dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+		    b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+		    givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+		    givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+		     poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+		    lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+		    j], &s[j], &work[1], info);
+/* L30: */
+	}
+/* L40: */
+    }
+    goto L90;
+
+/*     ICOMPQ = 1: applying back the right singular vector factors. */
+
+L50:
+
+/*
+       First now go through the right singular vector matrices of all
+       the tree nodes top-down.
+*/
+
+    j = 0;
+    i__1 = nlvl;
+    for (lvl = 1; lvl <= i__1; ++lvl) {
+	lvl2 = ((lvl) << (1)) - 1;
+
+/*
+          Find the first node LF and last node LL on
+          the current level LVL.
+*/
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__2 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__2);
+	    ll = ((lf) << (1)) - 1;
+	}
+	i__2 = lf;
+	for (i__ = ll; i__ >= i__2; --i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    nrf = ic + 1;
+	    /* Only the last node on a level can be non-square (SQRE=1
+	       for all others on the descending pass). */
+	    if (i__ == ll) {
+		sqre = 0;
+	    } else {
+		sqre = 1;
+	    }
+	    ++j;
+	    dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+		    nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+		    givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+		    givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+		     poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+		    lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+		    j], &s[j], &work[1], info);
+/* L60: */
+	}
+/* L70: */
+    }
+
+/*
+       The nodes on the bottom level of the tree were solved
+       by DLASDQ. The corresponding right singular vector
+       matrices are in explicit form. Apply them back.
+*/
+
+    ndb1 = (nd + 1) / 2;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nr = iwork[ndimr + i1];
+	nlp1 = nl + 1;
+	if (i__ == nd) {
+	    nrp1 = nr;
+	} else {
+	    nrp1 = nr + 1;
+	}
+	nlf = ic - nl;
+	nrf = ic + 1;
+	dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b15, &vt[nlf + vt_dim1], ldu,
+		&b[nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx);
+	dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b15, &vt[nrf + vt_dim1], ldu,
+		&b[nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx);
+/* L80: */
+    }
+
+L90:
+
+    return 0;
+
+/*     End of DLALSA */
+
+} /* dlalsa_ */
+
+/* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb,
+ doublereal *rcond, integer *rank, doublereal *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double log(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer c__, i__, j, k;
+ static doublereal r__;
+ static integer s, u, z__;
+ static doublereal cs;
+ static integer bx;
+ static doublereal sn;
+ static integer st, vt, nm1, st1;
+ static doublereal eps;
+ static integer iwk;
+ static doublereal tol;
+ static integer difl, difr, perm, nsub;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer nlvl, sqre, bxst;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *),
+ dcopy_(integer *, doublereal *, integer *, doublereal *, integer
+ *);
+ static integer poles, sizei, nsize, nwork, icmpq1, icmpq2;
+
+ extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *), dlalsa_(integer *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ static integer givcol;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ static doublereal orgnrm;
+ static integer givnum, givptr, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLALSD uses the singular value decomposition of A to solve the least
+ squares problem of finding X to minimize the Euclidean norm of each
+ column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+ are N-by-NRHS. The solution X overwrites B.
+
+ The singular values of A smaller than RCOND times the largest
+ singular value are treated as zero in solving the least squares
+ problem; in this case a minimum norm solution is returned.
+ The actual singular values are returned in D in ascending order.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': D and E define an upper bidiagonal matrix.
+ = 'L': D and E define a lower bidiagonal matrix.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The dimension of the bidiagonal matrix. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of columns of B. NRHS must be at least 1.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit, if INFO = 0, D contains its singular values.
+
+ E (input) DOUBLE PRECISION array, dimension (N-1)
+ Contains the super-diagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On input, B contains the right hand sides of the least
+ squares problem. On output, B contains the solution X.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,N).
+
+ RCOND (input) DOUBLE PRECISION
+ The singular values of A less than or equal to RCOND times
+ the largest singular value are treated as zero in solving
+ the least squares problem. If RCOND is negative,
+ machine precision is used instead.
+ For example, if diag(S)*X=B were the least squares problem,
+ where diag(S) is a diagonal matrix of singular values, the
+ solution would be X(i) = B(i) / S(i) if S(i) is greater than
+ RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+ RCOND*max(S).
+
+ RANK (output) INTEGER
+ The number of singular values of A greater than RCOND times
+ the largest singular value.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension at least
+ (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2),
+ where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1).
+
+ IWORK (workspace) INTEGER array, dimension at least
+ (3*N*NLVL + 11*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an singular value while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through MOD(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < 1 || *ldb < *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALSD", &i__1);
+ return 0;
+ }
+
+ eps = EPSILON;
+
+/* Set up the tolerance. */
+
+ if (*rcond <= 0. || *rcond >= 1.) {
+ *rcond = eps;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.) {
+ dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ dlascl_("G", &c__0, &c__0, &d__[1], &c_b15, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = abs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ work[((i__) << (1)) - 1] = cs;
+ work[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = work[((j) << (1)) - 1];
+ sn = work[j * 2];
+ drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
+ b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.) {
+ dlaset_("A", n, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
+ return 0;
+ }
+
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= *smlsiz) {
+ nwork = *n * *n + 1;
+ dlaset_("A", n, n, &c_b29, &c_b15, &work[1], n);
+ dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
+ work[1], n, &b[b_offset], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[i__ + b_dim1],
+ ldb);
+ } else {
+ dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &b[
+ i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L40: */
+ }
+ dgemm_("T", "N", n, nrhs, n, &c_b15, &work[1], n, &b[b_offset], ldb, &
+ c_b29, &work[nwork], n);
+ dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ dlasrt_("D", n, &d__[1], info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) /
+ log(2.)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + ((nlvl * *n) << (1));
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + ((nlvl) << (1)) * *n;
+ bx = givnum + ((nlvl) << (1)) * *n;
+ nwork = bx + *n * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + ((nlvl * *n) << (1));
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) < eps) {
+ d__[i__] = d_sign(&eps, &d__[i__]);
+ }
+/* L50: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/*
+ A subproblem with E(NM1) small. This implies an
+ 1-by-1 subproblem at D(N), which is not solved
+ explicitly.
+*/
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/*
+ This is a 1-by-1 subproblem and is not solved
+ explicitly.
+*/
+
+ dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by DLASDQ. */
+
+ dlaset_("A", &nsize, &nsize, &c_b29, &c_b15, &work[vt + st1],
+ n);
+ dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
+ st], &work[vt + st1], n, &work[nwork], n, &b[st +
+ b_dim1], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
+ work[difl + st1], &work[difr + st1], &work[z__ + st1],
+ &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum +
+ st1], &work[c__ + st1], &work[s + st1], &work[nwork],
+ &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &work[u + st1], n, &work[vt + st1], &
+ iwork[k + st1], &work[difl + st1], &work[difr + st1],
+ &work[z__ + st1], &work[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
+ work[givnum + st1], &work[c__ + st1], &work[s + st1],
+ &work[nwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L60: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Some of the elements in D can be negative because 1-by-1
+ subproblems were not solved explicitly.
+*/
+
+ if ((d__1 = d__[i__], abs(d__1)) <= tol) {
+ dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &work[bx + i__ - 1], n);
+ } else {
+ ++(*rank);
+ dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &work[
+ bx + i__ - 1], n, info);
+ }
+ d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* L70: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+ dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b15, &work[vt + st1], n,
+ &work[bxst], n, &c_b29, &b[st + b_dim1], ldb);
+ } else {
+ dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
+ k + st1], &work[difl + st1], &work[difr + st1], &work[z__
+ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
+ &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
+ iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L80: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info);
+ dlasrt_("D", n, &d__[1], info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of DLALSD */
+
+} /* dlalsd_ */
+
+/* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer
+	*dtrd1, integer *dtrd2, integer *index)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    /* NOTE(review): f2c declares the locals "static", so this routine is
+       not reentrant; every static is assigned before it is read, so no
+       state is actually carried between calls. */
+    static integer i__, ind1, ind2, n1sv, n2sv;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DLAMRG will create a permutation list which will merge the elements
+    of A (which is composed of two independently sorted sets) into a
+    single set which is sorted in ascending order.
+
+    Arguments
+    =========
+
+    N1     (input) INTEGER
+    N2     (input) INTEGER
+           These arguments contain the respective lengths of the two
+           sorted lists to be merged.
+
+    A      (input) DOUBLE PRECISION array, dimension (N1+N2)
+           The first N1 elements of A contain a list of numbers which
+           are sorted in either ascending or descending order.  Likewise
+           for the final N2 elements.
+
+    DTRD1  (input) INTEGER
+    DTRD2  (input) INTEGER
+           These are the strides to be taken through the array A.
+           Allowable strides are 1 and -1.  They indicate whether a
+           subset of A is sorted in ascending (DTRDx = 1) or descending
+           (DTRDx = -1) order.
+
+    INDEX  (output) INTEGER array, dimension (N1+N2)
+           On exit this array will contain a permutation such that
+           if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be
+           sorted in ascending order.
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Shift the pointers so 1-based Fortran subscripts work in C. */
+    --index;
+    --a;
+
+    /* Function Body */
+    n1sv = *n1;
+    n2sv = *n2;
+    /* Start each cursor at the ascending end of its sub-list, as selected
+       by the sign of the corresponding stride. */
+    if (*dtrd1 > 0) {
+	ind1 = 1;
+    } else {
+	ind1 = *n1;
+    }
+    if (*dtrd2 > 0) {
+	ind2 = *n1 + 1;
+    } else {
+	ind2 = *n1 + *n2;
+    }
+    i__ = 1;
+/* Two-cursor merge: while ( (N1SV > 0) & (N2SV > 0) ) */
+L10:
+    if ((n1sv > 0 && n2sv > 0)) {
+	if (a[ind1] <= a[ind2]) {
+	    index[i__] = ind1;
+	    ++i__;
+	    ind1 += *dtrd1;
+	    --n1sv;
+	} else {
+	    index[i__] = ind2;
+	    ++i__;
+	    ind2 += *dtrd2;
+	    --n2sv;
+	}
+	goto L10;
+    }
+/* end while */
+    /* One list is exhausted; copy the remainder of the other.  The spent
+       counter is reused as the loop variable. */
+    if (n1sv == 0) {
+	i__1 = n2sv;
+	for (n1sv = 1; n1sv <= i__1; ++n1sv) {
+	    index[i__] = ind2;
+	    ++i__;
+	    ind2 += *dtrd2;
+/* L20: */
+	}
+    } else {
+/* N2SV .EQ. 0 */
+	i__1 = n1sv;
+	for (n2sv = 1; n2sv <= i__1; ++n2sv) {
+	    index[i__] = ind1;
+	    ++i__;
+	    ind1 += *dtrd1;
+/* L30: */
+	}
+    }
+
+    return 0;
+
+/* End of DLAMRG */
+
+} /* dlamrg_ */
+
+doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer
+	*lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): statics are an f2c artifact; the routine is therefore
+       not reentrant, although no value survives between calls. */
+    static integer i__, j;
+    static doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    static doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLANGE returns the value of the one norm, or the Frobenius norm, or
+    the infinity norm, or the element of largest absolute value of a
+    real matrix A.
+
+    Description
+    ===========
+
+    DLANGE returns the value
+
+       DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+                (
+                ( norm1(A),         NORM = '1', 'O' or 'o'
+                (
+                ( normI(A),         NORM = 'I' or 'i'
+                (
+                ( normF(A),         NORM = 'F', 'f', 'E' or 'e'
+
+    where norm1 denotes the one norm of a matrix (maximum column sum),
+    normI denotes the infinity norm of a matrix (maximum row sum) and
+    normF denotes the Frobenius norm of a matrix (square root of sum of
+    squares).  Note that max(abs(A(i,j))) is not a matrix norm.
+
+    Arguments
+    =========
+
+    NORM    (input) CHARACTER*1
+            Specifies the value to be returned in DLANGE as described
+            above.
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.  When M = 0,
+            DLANGE is set to zero.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.  When N = 0,
+            DLANGE is set to zero.
+
+    A       (input) DOUBLE PRECISION array, dimension (LDA,N)
+            The m by n matrix A.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(M,1).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LWORK),
+            where LWORK >= M when NORM = 'I'; otherwise, WORK is not
+            referenced.
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Column-major, 1-based addressing: a[i + j*a_dim1] is Fortran A(i,j). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (min(*m,*n) == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		value = max(d__2,d__3);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A): the largest column sum of absolute values. */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.;
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L30: */
+	    }
+	    value = max(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A): accumulate the M row sums in WORK, then take the max. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A) via DLASSQ's overflow-safe scaled sum of squares. */
+
+	scale = 0.;
+	sum = 1.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    /* NOTE(review): if NORM matches none of the recognized values, VALUE
+       is returned without having been assigned in this call (stale static);
+       callers must supply a valid NORM, as the reference LAPACK assumes. */
+    ret_val = value;
+    return ret_val;
+
+/* End of DLANGE */
+
+} /* dlange_ */
+
+doublereal dlanhs_(char *norm, integer *n, doublereal *a, integer *lda,
+	doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): statics are an f2c artifact; not reentrant, but no
+       value survives between calls. */
+    static integer i__, j;
+    static doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    static doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLANHS returns the value of the one norm, or the Frobenius norm, or
+    the infinity norm, or the element of largest absolute value of a
+    Hessenberg matrix A.
+
+    Description
+    ===========
+
+    DLANHS returns the value
+
+       DLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+                (
+                ( norm1(A),         NORM = '1', 'O' or 'o'
+                (
+                ( normI(A),         NORM = 'I' or 'i'
+                (
+                ( normF(A),         NORM = 'F', 'f', 'E' or 'e'
+
+    where norm1 denotes the one norm of a matrix (maximum column sum),
+    normI denotes the infinity norm of a matrix (maximum row sum) and
+    normF denotes the Frobenius norm of a matrix (square root of sum of
+    squares).  Note that max(abs(A(i,j))) is not a matrix norm.
+
+    Arguments
+    =========
+
+    NORM    (input) CHARACTER*1
+            Specifies the value to be returned in DLANHS as described
+            above.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.  When N = 0, DLANHS is
+            set to zero.
+
+    A       (input) DOUBLE PRECISION array, dimension (LDA,N)
+            The n by n upper Hessenberg matrix A; the part of A below the
+            first sub-diagonal is not referenced.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(N,1).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LWORK),
+            where LWORK >= N when NORM = 'I'; otherwise, WORK is not
+            referenced.
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Column-major, 1-based addressing: a[i + j*a_dim1] is Fortran A(i,j). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN
+   Rows run only to min(n, j+1): entries below the first sub-diagonal
+   of a Hessenberg matrix are not referenced. */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		value = max(d__2,d__3);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A): largest column sum over the Hessenberg part. */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.;
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L30: */
+	    }
+	    value = max(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A): accumulate row sums in WORK, then take the max. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A) via DLASSQ's overflow-safe scaled sum of squares. */
+
+	scale = 0.;
+	sum = 1.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    /* NOTE(review): an unrecognized NORM falls through with VALUE unset
+       in this call; callers must supply a valid NORM. */
+    ret_val = value;
+    return ret_val;
+
+/* End of DLANHS */
+
+} /* dlanhs_ */
+
+doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal ret_val, d__1, d__2, d__3, d__4, d__5;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): statics are an f2c artifact; not reentrant, but no
+       value survives between calls. */
+    static integer i__;
+    static doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    static doublereal anorm;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DLANST returns the value of the one norm, or the Frobenius norm, or
+    the infinity norm, or the element of largest absolute value of a
+    real symmetric tridiagonal matrix A.
+
+    Description
+    ===========
+
+    DLANST returns the value
+
+       DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+                (
+                ( norm1(A),         NORM = '1', 'O' or 'o'
+                (
+                ( normI(A),         NORM = 'I' or 'i'
+                (
+                ( normF(A),         NORM = 'F', 'f', 'E' or 'e'
+
+    where norm1 denotes the one norm of a matrix (maximum column sum),
+    normI denotes the infinity norm of a matrix (maximum row sum) and
+    normF denotes the Frobenius norm of a matrix (square root of sum of
+    squares).  Note that max(abs(A(i,j))) is not a matrix norm.
+
+    Arguments
+    =========
+
+    NORM    (input) CHARACTER*1
+            Specifies the value to be returned in DLANST as described
+            above.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.  When N = 0, DLANST is
+            set to zero.
+
+    D       (input) DOUBLE PRECISION array, dimension (N)
+            The diagonal elements of A.
+
+    E       (input) DOUBLE PRECISION array, dimension (N-1)
+            The (n-1) sub-diagonal or super-diagonal elements of A.
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* 1-based Fortran subscripts. */
+    --e;
+    --d__;
+
+    /* Function Body */
+    if (*n <= 0) {
+	anorm = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))): the largest magnitude over D(1..n), E(1..n-1). */
+
+	anorm = (d__1 = d__[*n], abs(d__1));
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1));
+	    anorm = max(d__2,d__3);
+/* Computing MAX */
+	    d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1));
+	    anorm = max(d__2,d__3);
+/* L10: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1' || lsame_(norm, "I")) {
+
+/* Find norm1(A).  A is symmetric, so the one norm and the infinity
+   norm coincide; column i sums |E(i-1)| + |D(i)| + |E(i)|. */
+
+	if (*n == 1) {
+	    anorm = abs(d__[1]);
+	} else {
+/* Computing MAX */
+	    d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs(
+		    d__1)) + (d__2 = d__[*n], abs(d__2));
+	    anorm = max(d__3,d__4);
+	    i__1 = *n - 1;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[
+			i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3));
+		anorm = max(d__4,d__5);
+/* L20: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A).  The off-diagonal E appears twice in the symmetric
+   matrix, hence the sum is doubled before adding the diagonal. */
+
+	scale = 0.;
+	sum = 1.;
+	if (*n > 1) {
+	    i__1 = *n - 1;
+	    dlassq_(&i__1, &e[1], &c__1, &scale, &sum);
+	    sum *= 2;
+	}
+	dlassq_(n, &d__[1], &c__1, &scale, &sum);
+	anorm = scale * sqrt(sum);
+    }
+
+    /* NOTE(review): an unrecognized NORM falls through with ANORM unset
+       in this call; callers must supply a valid NORM. */
+    ret_val = anorm;
+    return ret_val;
+
+/* End of DLANST */
+
+} /* dlanst_ */
+
+doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer
+	*lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): statics are an f2c artifact; not reentrant, but no
+       value survives between calls. */
+    static integer i__, j;
+    static doublereal sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    static doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLANSY returns the value of the one norm, or the Frobenius norm, or
+    the infinity norm, or the element of largest absolute value of a
+    real symmetric matrix A.
+
+    Description
+    ===========
+
+    DLANSY returns the value
+
+       DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+                (
+                ( norm1(A),         NORM = '1', 'O' or 'o'
+                (
+                ( normI(A),         NORM = 'I' or 'i'
+                (
+                ( normF(A),         NORM = 'F', 'f', 'E' or 'e'
+
+    where norm1 denotes the one norm of a matrix (maximum column sum),
+    normI denotes the infinity norm of a matrix (maximum row sum) and
+    normF denotes the Frobenius norm of a matrix (square root of sum of
+    squares).  Note that max(abs(A(i,j))) is not a matrix norm.
+
+    Arguments
+    =========
+
+    NORM    (input) CHARACTER*1
+            Specifies the value to be returned in DLANSY as described
+            above.
+
+    UPLO    (input) CHARACTER*1
+            Specifies whether the upper or lower triangular part of the
+            symmetric matrix A is to be referenced.
+            = 'U':  Upper triangular part of A is referenced
+            = 'L':  Lower triangular part of A is referenced
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.  When N = 0, DLANSY is
+            set to zero.
+
+    A       (input) DOUBLE PRECISION array, dimension (LDA,N)
+            The symmetric matrix A.  If UPLO = 'U', the leading n by n
+            upper triangular part of A contains the upper triangular part
+            of the matrix A, and the strictly lower triangular part of A
+            is not referenced.  If UPLO = 'L', the leading n by n lower
+            triangular part of A contains the lower triangular part of
+            the matrix A, and the strictly upper triangular part of A is
+            not referenced.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(N,1).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LWORK),
+            where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise,
+            WORK is not referenced.
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Column-major, 1-based addressing: a[i + j*a_dim1] is Fortran A(i,j). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))) over the stored triangle only. */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+			    d__1));
+		    value = max(d__2,d__3);
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+			    d__1));
+		    value = max(d__2,d__3);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    /* Column j contributes |A(i,j)| to row sums i and j at once;
+	       WORK(j) is finalized exactly at iteration j, before any
+	       later column reads it via the += below. */
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: */
+		}
+		work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1));
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = work[i__];
+		value = max(d__1,d__2);
+/* L70: */
+	    }
+	} else {
+	    /* Lower triangle: WORK must be cleared first because row sums
+	       are finished (and compared) column by column. */
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1));
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = max(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A): strict triangle counted twice (symmetry), then the
+   diagonal, addressed with stride LDA+1. */
+
+	scale = 0.;
+	sum = 1.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		i__2 = j - 1;
+		dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j;
+		dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+	    }
+	}
+	sum *= 2;
+	i__1 = *lda + 1;
+	dlassq_(n, &a[a_offset], &i__1, &scale, &sum);
+	value = scale * sqrt(sum);
+    }
+
+    /* NOTE(review): an unrecognized NORM falls through with VALUE unset
+       in this call; callers must supply a valid NORM. */
+    ret_val = value;
+    return ret_val;
+
+/* End of DLANSY */
+
+} /* dlansy_ */
+
+/* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__,
+	doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r,
+	 doublereal *rt2i, doublereal *cs, doublereal *sn)
+{
+    /* System generated locals */
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_sign(doublereal *, doublereal *), sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): statics are an f2c artifact; not reentrant, but no
+       value survives between calls. */
+    static doublereal p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau,
+	    temp, scale, bcmax, bcmis, sigma;
+
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric
+    matrix in standard form:
+
+         [ A  B ] = [ CS -SN ] [ AA  BB ] [ CS  SN ]
+         [ C  D ]   [ SN  CS ] [ CC  DD ] [-SN  CS ]
+
+    where either
+    1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or
+    2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex
+    conjugate eigenvalues.
+
+    Arguments
+    =========
+
+    A       (input/output) DOUBLE PRECISION
+    B       (input/output) DOUBLE PRECISION
+    C       (input/output) DOUBLE PRECISION
+    D       (input/output) DOUBLE PRECISION
+            On entry, the elements of the input matrix.
+            On exit, they are overwritten by the elements of the
+            standardised Schur form.
+
+    RT1R    (output) DOUBLE PRECISION
+    RT1I    (output) DOUBLE PRECISION
+    RT2R    (output) DOUBLE PRECISION
+    RT2I    (output) DOUBLE PRECISION
+            The real and imaginary parts of the eigenvalues.  If the
+            eigenvalues are a complex conjugate pair, RT1I > 0.
+
+    CS      (output) DOUBLE PRECISION
+    SN      (output) DOUBLE PRECISION
+            Parameters of the rotation matrix.
+
+    Further Details
+    ===============
+
+    Modified by V. Sima, Research Institute for Informatics, Bucharest,
+    Romania, to reduce the risk of cancellation errors,
+    when computing real eigenvalues, and to ensure, if possible, that
+    abs(RT1R) >= abs(RT2R).
+
+    =====================================================================
+*/
+
+
+    /* PRECISION is a file-scope macro for the relative machine epsilon;
+       c_b15 is a file-scope constant used as the magnitude argument of
+       d_sign (presumably 1.0 -- TODO confirm against its definition). */
+    eps = PRECISION;
+    if (*c__ == 0.) {
+
+/* Already upper triangular: identity rotation. */
+
+	*cs = 1.;
+	*sn = 0.;
+	goto L10;
+
+    } else if (*b == 0.) {
+
+/* Swap rows and columns */
+
+	*cs = 0.;
+	*sn = 1.;
+	temp = *d__;
+	*d__ = *a;
+	*a = temp;
+	*b = -(*c__);
+	*c__ = 0.;
+	goto L10;
+    } else if ((*a - *d__ == 0. && d_sign(&c_b15, b) != d_sign(&c_b15, c__)))
+	     {
+
+/* Equal diagonal with B*C < 0: already in standard complex form. */
+
+	*cs = 1.;
+	*sn = 0.;
+	goto L10;
+    } else {
+
+	temp = *a - *d__;
+	p = temp * .5;
+/* Computing MAX */
+	d__1 = abs(*b), d__2 = abs(*c__);
+	bcmax = max(d__1,d__2);
+/* Computing MIN */
+	d__1 = abs(*b), d__2 = abs(*c__);
+	bcmis = min(d__1,d__2) * d_sign(&c_b15, b) * d_sign(&c_b15, c__);
+/* Computing MAX */
+	d__1 = abs(p);
+	scale = max(d__1,bcmax);
+	/* z = p^2 + b*c, computed scaled to avoid overflow;
+	   its sign decides real vs. complex eigenvalues. */
+	z__ = p / scale * p + bcmax / scale * bcmis;
+
+/*
+       If Z is of the order of the machine accuracy, postpone the
+       decision on the nature of eigenvalues
+*/
+
+	if (z__ >= eps * 4.) {
+
+/* Real eigenvalues. Compute A and D. */
+
+	    d__1 = sqrt(scale) * sqrt(z__);
+	    z__ = p + d_sign(&d__1, &p);
+	    *a = *d__ + z__;
+	    *d__ -= bcmax / z__ * bcmis;
+
+/* Compute B and the rotation matrix */
+
+	    tau = dlapy2_(c__, &z__);
+	    *cs = z__ / tau;
+	    *sn = *c__ / tau;
+	    *b -= *c__;
+	    *c__ = 0.;
+	} else {
+
+/*
+          Complex eigenvalues, or real (almost) equal eigenvalues.
+          Make diagonal elements equal.
+*/
+
+	    sigma = *b + *c__;
+	    tau = dlapy2_(&sigma, &temp);
+	    *cs = sqrt((abs(sigma) / tau + 1.) * .5);
+	    *sn = -(p / (tau * *cs)) * d_sign(&c_b15, &sigma);
+
+/*
+             Compute [ AA  BB ] = [ A  B ] [ CS -SN ]
+                     [ CC  DD ]   [ C  D ] [ SN  CS ]
+*/
+
+	    aa = *a * *cs + *b * *sn;
+	    bb = -(*a) * *sn + *b * *cs;
+	    cc = *c__ * *cs + *d__ * *sn;
+	    dd = -(*c__) * *sn + *d__ * *cs;
+
+/*
+             Compute [ A  B ] = [ CS  SN ] [ AA  BB ]
+                     [ C  D ]   [-SN  CS ] [ CC  DD ]
+*/
+
+	    *a = aa * *cs + cc * *sn;
+	    *b = bb * *cs + dd * *sn;
+	    *c__ = -aa * *sn + cc * *cs;
+	    *d__ = -bb * *sn + dd * *cs;
+
+	    temp = (*a + *d__) * .5;
+	    *a = temp;
+	    *d__ = temp;
+
+	    if (*c__ != 0.) {
+		if (*b != 0.) {
+		    if (d_sign(&c_b15, b) == d_sign(&c_b15, c__)) {
+
+/* Real eigenvalues: reduce to upper triangular form */
+
+			sab = sqrt((abs(*b)));
+			sac = sqrt((abs(*c__)));
+			d__1 = sab * sac;
+			p = d_sign(&d__1, c__);
+			tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1)));
+			*a = temp + p;
+			*d__ = temp - p;
+			*b -= *c__;
+			*c__ = 0.;
+			cs1 = sab * tau;
+			sn1 = sac * tau;
+			/* Compose the second rotation with the first. */
+			temp = *cs * cs1 - *sn * sn1;
+			*sn = *cs * sn1 + *sn * cs1;
+			*cs = temp;
+		    }
+		} else {
+		    *b = -(*c__);
+		    *c__ = 0.;
+		    temp = *cs;
+		    *cs = -(*sn);
+		    *sn = temp;
+		}
+	    }
+	}
+
+    }
+
+L10:
+
+/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
+
+    *rt1r = *a;
+    *rt2r = *d__;
+    if (*c__ == 0.) {
+	*rt1i = 0.;
+	*rt2i = 0.;
+    } else {
+	/* Complex pair: imaginary part is +/- sqrt(|B|)*sqrt(|C|). */
+	*rt1i = sqrt((abs(*b))) * sqrt((abs(*c__)));
+	*rt2i = -(*rt1i);
+    }
+    return 0;
+
+/* End of DLANV2 */
+
+} /* dlanv2_ */
+
+doublereal dlapy2_(doublereal *x, doublereal *y)
+{
+    /* System generated locals */
+    doublereal ret_val, d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal w, z__, xabs, yabs;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary
+    overflow: the larger magnitude is factored out before squaring, so
+    no intermediate can overflow unless the true result does.
+
+    Arguments
+    =========
+
+    X       (input) DOUBLE PRECISION
+    Y       (input) DOUBLE PRECISION
+            X and Y specify the values x and y.
+
+    =====================================================================
+*/
+
+
+/* Order the magnitudes: w is the larger, z__ the smaller (>= 0). */
+    xabs = abs(*x);
+    yabs = abs(*y);
+    if (xabs > yabs) {
+	w = xabs;
+	z__ = yabs;
+    } else {
+	w = yabs;
+	z__ = xabs;
+    }
+    if (z__ == 0.) {
+	ret_val = w;
+    } else {
+/* sqrt(x^2+y^2) = w * sqrt((z/w)^2 + 1), with z/w <= 1. */
+	d__1 = z__ / w;
+	ret_val = w * sqrt(d__1 * d__1 + 1.);
+    }
+    return ret_val;
+
+/* End of DLAPY2 */
+
+} /* dlapy2_ */
+
+doublereal dlapy3_(doublereal *x, doublereal *y, doublereal *z__)
+{
+    /* System generated locals */
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal w, xabs, yabs, zabs;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause
+    unnecessary overflow: the largest magnitude is factored out before
+    squaring, so the squared ratios are all bounded by one.
+
+    Arguments
+    =========
+
+    X       (input) DOUBLE PRECISION
+    Y       (input) DOUBLE PRECISION
+    Z       (input) DOUBLE PRECISION
+            X, Y and Z specify the values x, y and z.
+
+    =====================================================================
+*/
+
+
+    xabs = abs(*x);
+    yabs = abs(*y);
+    zabs = abs(*z__);
+/* w is the largest of the three magnitudes. */
+    w = xabs;
+    if (yabs > w) {
+	w = yabs;
+    }
+    if (zabs > w) {
+	w = zabs;
+    }
+    if (w == 0.) {
+	ret_val = 0.;
+    } else {
+/* Scale each term by w so no intermediate square can overflow. */
+	d__1 = xabs / w;
+	d__2 = yabs / w;
+	d__3 = zabs / w;
+	ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3);
+    }
+    return ret_val;
+
+/* End of DLAPY3 */
+
+} /* dlapy3_ */
+
+/* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v,
+	integer *incv, doublereal *tau, doublereal *c__, integer *ldc,
+	doublereal *work)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset;
+    doublereal d__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DLARF applies a real elementary reflector H to a real m by n matrix
+    C, from either the left or the right. H is represented in the form
+
+          H = I - tau * v * v'
+
+    where tau is a real scalar and v is a real vector.
+
+    If tau = 0, then H is taken to be the unit matrix.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': form  H * C
+            = 'R': form  C * H
+
+    M       (input) INTEGER
+            The number of rows of the matrix C.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C.
+
+    V       (input) DOUBLE PRECISION array, dimension
+                       (1 + (M-1)*abs(INCV)) if SIDE = 'L'
+                    or (1 + (N-1)*abs(INCV)) if SIDE = 'R'
+            The vector v in the representation of H. V is not used if
+            TAU = 0.
+
+    INCV    (input) INTEGER
+            The increment between elements of v. INCV <> 0.
+
+    TAU     (input) DOUBLE PRECISION
+            The value tau in the representation of H.
+
+    C       (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+            On entry, the m by n matrix C.
+            On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+            or C * H if SIDE = 'R'.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension
+                       (N) if SIDE = 'L'
+                    or (M) if SIDE = 'R'
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Shift the pointers so the 1-based, column-major Fortran indexing of
+       the original code can be kept verbatim below. */
+    --v;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    if (lsame_(side, "L")) {
+
+/*        Form  H * C */
+
+	/* tau == 0 means H = I (see Purpose above), so C is left untouched. */
+	if (*tau != 0.) {
+
+/*           w := C' * v */
+
+	    /* NOTE(review): c_b15 / c_b29 are file-scope constants defined
+	       elsewhere in this file (presumably 1.0 and 0.0 used as the
+	       BLAS alpha/beta) -- confirm against the file header. */
+	    dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1],
+		    incv, &c_b29, &work[1], &c__1);
+
+/*           C := C - v * w' */
+
+	    d__1 = -(*tau);
+	    dger_(m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset],
+		    ldc);
+	}
+    } else {
+
+/*        Form  C * H */
+
+	if (*tau != 0.) {
+
+/*           w := C * v */
+
+	    dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1],
+		    incv, &c_b29, &work[1], &c__1);
+
+/*           C := C - w * v' */
+
+	    d__1 = -(*tau);
+	    dger_(m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset],
+		    ldc);
+	}
+    }
+    return 0;
+
+/* End of DLARF */
+
+} /* dlarf_ */
+
+/* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char *
+	storev, integer *m, integer *n, integer *k, doublereal *v, integer *
+	ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc,
+	doublereal *work, integer *ldwork)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+	    work_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dtrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static char transt[1];
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DLARFB applies a real block reflector H or its transpose H' to a
+    real m by n matrix C, from either the left or the right.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply H or H' from the Left
+            = 'R': apply H or H' from the Right
+
+    TRANS   (input) CHARACTER*1
+            = 'N': apply H (No transpose)
+            = 'T': apply H' (Transpose)
+
+    DIRECT  (input) CHARACTER*1
+            Indicates how H is formed from a product of elementary
+            reflectors
+            = 'F': H = H(1) H(2) . . . H(k) (Forward)
+            = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+    STOREV  (input) CHARACTER*1
+            Indicates how the vectors which define the elementary
+            reflectors are stored:
+            = 'C': Columnwise
+            = 'R': Rowwise
+
+    M       (input) INTEGER
+            The number of rows of the matrix C.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C.
+
+    K       (input) INTEGER
+            The order of the matrix T (= the number of elementary
+            reflectors whose product defines the block reflector).
+
+    V       (input) DOUBLE PRECISION array, dimension
+                                  (LDV,K) if STOREV = 'C'
+                                  (LDV,M) if STOREV = 'R' and SIDE = 'L'
+                                  (LDV,N) if STOREV = 'R' and SIDE = 'R'
+            The matrix V. See further details.
+
+    LDV     (input) INTEGER
+            The leading dimension of the array V.
+            If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
+            if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
+            if STOREV = 'R', LDV >= K.
+
+    T       (input) DOUBLE PRECISION array, dimension (LDT,K)
+            The triangular k by k matrix T in the representation of the
+            block reflector.
+
+    LDT     (input) INTEGER
+            The leading dimension of the array T. LDT >= K.
+
+    C       (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+            On entry, the m by n matrix C.
+            On exit, C is overwritten by H*C or H'*C or C*H or C*H'.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDA >= max(1,M).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LDWORK,K)
+
+    LDWORK  (input) INTEGER
+            The leading dimension of the array WORK.
+            If SIDE = 'L', LDWORK >= max(1,N);
+            if SIDE = 'R', LDWORK >= max(1,M).
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    /* Shift all array pointers so the 1-based Fortran-style indexing of
+       the generated code below works unchanged. */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1 * 1;
+    v -= v_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1 * 1;
+    t -= t_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    work_dim1 = *ldwork;
+    work_offset = 1 + work_dim1 * 1;
+    work -= work_offset;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	return 0;
+    }
+
+    /* transt is always the opposite transposition flag of trans; it is the
+       flag used when T multiplies the intermediate product W from the right
+       in the SIDE = 'L' branches below. */
+    if (lsame_(trans, "N")) {
+	*(unsigned char *)transt = 'T';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+    /* The remainder is an 8-way case split on STOREV x DIRECT x SIDE; every
+       branch follows the same pattern: W := (part of C) copied into WORK,
+       W := W * V1 (triangular part), W := W + (rest of C) * V2, W := W * T,
+       then C := C - (V,W product) and the triangular multiply is undone. */
+    if (lsame_(storev, "C")) {
+
+	if (lsame_(direct, "F")) {
+
+/*
+             Let  V =  ( V1 )    (first K rows)
+                       ( V2 )
+             where  V1  is unit lower triangular.
+*/
+
+	    if (lsame_(side, "L")) {
+
+/*
+                Form  H * C  or  H' * C  where  C = ( C1 )
+                                                    ( C2 )
+
+                W := C' * V  =  (C1'*V1 + C2'*V2)  (stored in WORK)
+
+                W := C1'
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+			     &c__1);
+/* L10: */
+		}
+
+/*              W := W * V1 */
+
+		dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15,
+			 &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (*m > *k) {
+
+/*                 W := W + C2'*V2 */
+
+		    i__1 = *m - *k;
+		    dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, &
+			    c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1],
+			    ldv, &c_b15, &work[work_offset], ldwork);
+		}
+
+/*              W := W * T'  or  W * T */
+
+		dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - V * W' */
+
+		if (*m > *k) {
+
+/*                 C2 := C2 - V2 * W' */
+
+		    i__1 = *m - *k;
+		    dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151,
+			    &v[*k + 1 + v_dim1], ldv, &work[work_offset],
+			    ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc);
+		}
+
+/*              W := W * V1' */
+
+		dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, &
+			v[v_offset], ldv, &work[work_offset], ldwork);
+
+/*              C1 := C1 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L20: */
+		    }
+/* L30: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/*
+                Form  C * H  or  C * H'  where  C = ( C1  C2 )
+
+                W := C * V  =  (C1*V1 + C2*V2)  (stored in WORK)
+
+                W := C1
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+			    work_dim1 + 1], &c__1);
+/* L40: */
+		}
+
+/*              W := W * V1 */
+
+		dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15,
+			 &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (*n > *k) {
+
+/*                 W := W + C2 * V2 */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "No transpose", m, k, &i__1, &
+			    c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k +
+			    1 + v_dim1], ldv, &c_b15, &work[work_offset],
+			    ldwork);
+		}
+
+/*              W := W * T  or  W * T' */
+
+		dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - W * V' */
+
+		if (*n > *k) {
+
+/*                 C2 := C2 - W * V2' */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151,
+			    &work[work_offset], ldwork, &v[*k + 1 + v_dim1],
+			    ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc);
+		}
+
+/*              W := W * V1' */
+
+		dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, &
+			v[v_offset], ldv, &work[work_offset], ldwork);
+
+/*              C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+		    }
+/* L60: */
+		}
+	    }
+
+	} else {
+
+/*
+             Let  V =  ( V1 )
+                       ( V2 )    (last K rows)
+             where  V2  is unit upper triangular.
+*/
+
+	    if (lsame_(side, "L")) {
+
+/*
+                Form  H * C  or  H' * C  where  C = ( C1 )
+                                                    ( C2 )
+
+                W := C' * V  =  (C1'*V1 + C2'*V2)  (stored in WORK)
+
+                W := C2'
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+			    work_dim1 + 1], &c__1);
+/* L70: */
+		}
+
+/*              W := W * V2 */
+
+		dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15,
+			 &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
+			ldwork);
+		if (*m > *k) {
+
+/*                 W := W + C1'*V1 */
+
+		    i__1 = *m - *k;
+		    dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, &
+			    c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, &
+			    work[work_offset], ldwork);
+		}
+
+/*              W := W * T'  or  W * T */
+
+		dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - V * W' */
+
+		if (*m > *k) {
+
+/*                 C1 := C1 - V1 * W' */
+
+		    i__1 = *m - *k;
+		    dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151,
+			    &v[v_offset], ldv, &work[work_offset], ldwork, &
+			    c_b15, &c__[c_offset], ldc)
+			    ;
+		}
+
+/*              W := W * V2' */
+
+		dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, &
+			v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
+			ldwork);
+
+/*              C2 := C2 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
+				work_dim1];
+/* L80: */
+		    }
+/* L90: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/*
+                Form  C * H  or  C * H'  where  C = ( C1  C2 )
+
+                W := C * V  =  (C1*V1 + C2*V2)  (stored in WORK)
+
+                W := C2
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+			    j * work_dim1 + 1], &c__1);
+/* L100: */
+		}
+
+/*              W := W * V2 */
+
+		dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15,
+			 &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
+			ldwork);
+		if (*n > *k) {
+
+/*                 W := W + C1 * V1 */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "No transpose", m, k, &i__1, &
+			    c_b15, &c__[c_offset], ldc, &v[v_offset], ldv, &
+			    c_b15, &work[work_offset], ldwork);
+		}
+
+/*              W := W * T  or  W * T' */
+
+		dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - W * V' */
+
+		if (*n > *k) {
+
+/*                 C1 := C1 - W * V1' */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151,
+			    &work[work_offset], ldwork, &v[v_offset], ldv, &
+			    c_b15, &c__[c_offset], ldc)
+			    ;
+		}
+
+/*              W := W * V2' */
+
+		dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, &
+			v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
+			ldwork);
+
+/*              C2 := C2 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
+				work_dim1];
+/* L110: */
+		    }
+/* L120: */
+		}
+	    }
+	}
+
+    } else if (lsame_(storev, "R")) {
+
+	if (lsame_(direct, "F")) {
+
+/*
+             Let  V =  ( V1  V2 )    (V1: first K columns)
+             where  V1  is unit upper triangular.
+*/
+
+	    if (lsame_(side, "L")) {
+
+/*
+                Form  H * C  or  H' * C  where  C = ( C1 )
+                                                    ( C2 )
+
+                W := C' * V'  =  (C1'*V1' + C2'*V2') (stored in WORK)
+
+                W := C1'
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+			     &c__1);
+/* L130: */
+		}
+
+/*              W := W * V1' */
+
+		dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, &
+			v[v_offset], ldv, &work[work_offset], ldwork);
+		if (*m > *k) {
+
+/*                 W := W + C2'*V2' */
+
+		    i__1 = *m - *k;
+		    dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, &
+			    c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 +
+			    1], ldv, &c_b15, &work[work_offset], ldwork);
+		}
+
+/*              W := W * T'  or  W * T */
+
+		dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - V' * W' */
+
+		if (*m > *k) {
+
+/*                 C2 := C2 - V2' * W' */
+
+		    i__1 = *m - *k;
+		    dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[
+			    (*k + 1) * v_dim1 + 1], ldv, &work[work_offset],
+			    ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc);
+		}
+
+/*              W := W * V1 */
+
+		dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15,
+			 &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/*              C1 := C1 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L140: */
+		    }
+/* L150: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/*
+                Form  C * H  or  C * H'  where  C = ( C1  C2 )
+
+                W := C * V'  =  (C1*V1' + C2*V2')  (stored in WORK)
+
+                W := C1
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+			    work_dim1 + 1], &c__1);
+/* L160: */
+		}
+
+/*              W := W * V1' */
+
+		dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, &
+			v[v_offset], ldv, &work[work_offset], ldwork);
+		if (*n > *k) {
+
+/*                 W := W + C2 * V2' */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, &
+			    c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 1) *
+			    v_dim1 + 1], ldv, &c_b15, &work[work_offset],
+			    ldwork);
+		}
+
+/*              W := W * T  or  W * T' */
+
+		dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - W * V */
+
+		if (*n > *k) {
+
+/*                 C2 := C2 - W * V2 */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "No transpose", m, &i__1, k, &
+			    c_b151, &work[work_offset], ldwork, &v[(*k + 1) *
+			    v_dim1 + 1], ldv, &c_b15, &c__[(*k + 1) * c_dim1
+			    + 1], ldc);
+		}
+
+/*              W := W * V1 */
+
+		dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15,
+			 &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/*              C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L170: */
+		    }
+/* L180: */
+		}
+
+	    }
+
+	} else {
+
+/*
+             Let  V =  ( V1  V2 )    (V2: last K columns)
+             where  V2  is unit lower triangular.
+*/
+
+	    if (lsame_(side, "L")) {
+
+/*
+                Form  H * C  or  H' * C  where  C = ( C1 )
+                                                    ( C2 )
+
+                W := C' * V'  =  (C1'*V1' + C2'*V2') (stored in WORK)
+
+                W := C2'
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+			    work_dim1 + 1], &c__1);
+/* L190: */
+		}
+
+/*              W := W * V2' */
+
+		dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, &
+			v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[work_offset]
+			, ldwork);
+		if (*m > *k) {
+
+/*                 W := W + C1'*V1' */
+
+		    i__1 = *m - *k;
+		    dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, &
+			    c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, &
+			    work[work_offset], ldwork);
+		}
+
+/*              W := W * T'  or  W * T */
+
+		dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - V' * W' */
+
+		if (*m > *k) {
+
+/*                 C1 := C1 - V1' * W' */
+
+		    i__1 = *m - *k;
+		    dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[
+			    v_offset], ldv, &work[work_offset], ldwork, &
+			    c_b15, &c__[c_offset], ldc);
+		}
+
+/*              W := W * V2 */
+
+		dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15,
+			 &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+
+/*              C2 := C2 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
+				work_dim1];
+/* L200: */
+		    }
+/* L210: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/*
+                Form  C * H  or  C * H'  where  C = ( C1  C2 )
+
+                W := C * V'  =  (C1*V1' + C2*V2')  (stored in WORK)
+
+                W := C2
+*/
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+			    j * work_dim1 + 1], &c__1);
+/* L220: */
+		}
+
+/*              W := W * V2' */
+
+		dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, &
+			v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[work_offset]
+			, ldwork);
+		if (*n > *k) {
+
+/*                 W := W + C1 * V1' */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, &
+			    c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, &
+			    work[work_offset], ldwork);
+		}
+
+/*              W := W * T  or  W * T' */
+
+		dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[
+			t_offset], ldt, &work[work_offset], ldwork);
+
+/*              C := C - W * V */
+
+		if (*n > *k) {
+
+/*                 C1 := C1 - W * V1 */
+
+		    i__1 = *n - *k;
+		    dgemm_("No transpose", "No transpose", m, &i__1, k, &
+			    c_b151, &work[work_offset], ldwork, &v[v_offset],
+			    ldv, &c_b15, &c__[c_offset], ldc);
+		}
+
+/*              W := W * V2 */
+
+		dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15,
+			 &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+
+/*              C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
+				work_dim1];
+/* L230: */
+		    }
+/* L240: */
+		}
+
+	    }
+
+	}
+    }
+
+    return 0;
+
+/* End of DLARFB */
+
+} /* dlarfb_ */
+
+/* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x,
+	integer *incx, doublereal *tau)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    static integer j, knt;
+    static doublereal beta;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    static doublereal xnorm;
+
+    static doublereal safmin, rsafmn;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DLARFG generates a real elementary reflector H of order n, such
+    that
+
+          H * ( alpha ) = ( beta ),   H' * H = I.
+              (   x   )   (   0  )
+
+    where alpha and beta are scalars, and x is an (n-1)-element real
+    vector. H is represented in the form
+
+          H = I - tau * ( 1 ) * ( 1 v' ) ,
+                        ( v )
+
+    where tau is a real scalar and v is a real (n-1)-element
+    vector.
+
+    If the elements of x are all zero, then tau = 0 and H is taken to be
+    the unit matrix.
+
+    Otherwise  1 <= tau <= 2.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the elementary reflector.
+
+    ALPHA   (input/output) DOUBLE PRECISION
+            On entry, the value alpha.
+            On exit, it is overwritten with the value beta.
+
+    X       (input/output) DOUBLE PRECISION array, dimension
+                           (1+(N-2)*abs(INCX))
+            On entry, the vector x.
+            On exit, it is overwritten with the vector v.
+
+    INCX    (input) INTEGER
+            The increment between elements of X. INCX > 0.
+
+    TAU     (output) DOUBLE PRECISION
+            The value tau.
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* Shift to 1-based indexing so the Fortran-style references below
+       work unchanged. */
+    --x;
+
+    /* Function Body */
+    if (*n <= 1) {
+	*tau = 0.;
+	return 0;
+    }
+
+    i__1 = *n - 1;
+    xnorm = dnrm2_(&i__1, &x[1], incx);
+
+    if (xnorm == 0.) {
+
+/*        H  =  I */
+
+	*tau = 0.;
+    } else {
+
+/*        general case */
+
+	/* beta = -sign(alpha) * sqrt(alpha^2 + ||x||^2), computed via
+	   dlapy2_ to avoid overflow in the sum of squares. */
+	d__1 = dlapy2_(alpha, &xnorm);
+	beta = -d_sign(&d__1, alpha);
+	/* NOTE(review): SAFEMINIMUM and EPSILON are macros defined
+	   elsewhere in this file (presumably wrapping dlamch_ queries)
+	   -- confirm against the file header. */
+	safmin = SAFEMINIMUM / EPSILON;
+	if (abs(beta) < safmin) {
+
+/*           XNORM, BETA may be inaccurate; scale X and recompute them */
+
+	    rsafmn = 1. / safmin;
+	    /* knt counts how many times we scale up; it is used below to
+	       undo the scaling on the returned alpha/beta. */
+	    knt = 0;
+L10:
+	    ++knt;
+	    i__1 = *n - 1;
+	    dscal_(&i__1, &rsafmn, &x[1], incx);
+	    beta *= rsafmn;
+	    *alpha *= rsafmn;
+	    if (abs(beta) < safmin) {
+		goto L10;
+	    }
+
+/*           New BETA is at most 1, at least SAFMIN */
+
+	    i__1 = *n - 1;
+	    xnorm = dnrm2_(&i__1, &x[1], incx);
+	    d__1 = dlapy2_(alpha, &xnorm);
+	    beta = -d_sign(&d__1, alpha);
+	    *tau = (beta - *alpha) / beta;
+	    i__1 = *n - 1;
+	    d__1 = 1. / (*alpha - beta);
+	    dscal_(&i__1, &d__1, &x[1], incx);
+
+/*           If ALPHA is subnormal, it may lose relative accuracy */
+
+	    /* Undo the knt scalings so the returned beta (in *alpha) is on
+	       the original scale. */
+	    *alpha = beta;
+	    i__1 = knt;
+	    for (j = 1; j <= i__1; ++j) {
+		*alpha *= safmin;
+/* L20: */
+	    }
+	} else {
+	    *tau = (beta - *alpha) / beta;
+	    i__1 = *n - 1;
+	    d__1 = 1. / (*alpha - beta);
+	    dscal_(&i__1, &d__1, &x[1], incx);
+	    *alpha = beta;
+	}
+    }
+
+    return 0;
+
+/* End of DLARFG */
+
+} /* dlarfg_ */
+
+/* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer *
+	k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t,
+	integer *ldt)
+{
+    /* System generated locals */
+    integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Local variables */
+    static integer i__, j;
+    static doublereal vii;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), dtrmv_(char *,
+	    char *, char *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DLARFT forms the triangular factor T of a real block reflector H
+    of order n, which is defined as a product of k elementary reflectors.
+
+    If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
+
+    If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
+
+    If STOREV = 'C', the vector which defines the elementary reflector
+    H(i) is stored in the i-th column of the array V, and
+
+       H  =  I - V * T * V'
+
+    If STOREV = 'R', the vector which defines the elementary reflector
+    H(i) is stored in the i-th row of the array V, and
+
+       H  =  I - V' * T * V
+
+    Arguments
+    =========
+
+    DIRECT  (input) CHARACTER*1
+            Specifies the order in which the elementary reflectors are
+            multiplied to form the block reflector:
+            = 'F': H = H(1) H(2) . . . H(k) (Forward)
+            = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+    STOREV  (input) CHARACTER*1
+            Specifies how the vectors which define the elementary
+            reflectors are stored (see also Further Details):
+            = 'C': columnwise
+            = 'R': rowwise
+
+    N       (input) INTEGER
+            The order of the block reflector H. N >= 0.
+
+    K       (input) INTEGER
+            The order of the triangular factor T (= the number of
+            elementary reflectors). K >= 1.
+
+    V       (input/output) DOUBLE PRECISION array, dimension
+                                 (LDV,K) if STOREV = 'C'
+                                 (LDV,N) if STOREV = 'R'
+            The matrix V. See further details.
+
+    LDV     (input) INTEGER
+            The leading dimension of the array V.
+            If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
+
+    TAU     (input) DOUBLE PRECISION array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i).
+
+    T       (output) DOUBLE PRECISION array, dimension (LDT,K)
+            The k by k triangular factor T of the block reflector.
+            If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
+            lower triangular. The rest of the array is not used.
+
+    LDT     (input) INTEGER
+            The leading dimension of the array T. LDT >= K.
+
+    Further Details
+    ===============
+
+    The shape of the matrix V and the storage of the vectors which define
+    the H(i) is best illustrated by the following example with n = 5 and
+    k = 3. The elements equal to 1 are not stored; the corresponding
+    array elements are modified but restored on exit. The rest of the
+    array is not used.
+
+    DIRECT = 'F' and STOREV = 'C':         DIRECT = 'F' and STOREV = 'R':
+
+                 V = (  1       )                 V = (  1 v1 v1 v1 v1 )
+                     ( v1  1    )                     (     1 v2 v2 v2 )
+                     ( v1 v2  1 )                     (        1 v3 v3 )
+                     ( v1 v2 v3 )
+                     ( v1 v2 v3 )
+
+    DIRECT = 'B' and STOREV = 'C':         DIRECT = 'B' and STOREV = 'R':
+
+                 V = ( v1 v2 v3 )                 V = ( v1 v1  1       )
+                     ( v1 v2 v3 )                     ( v2 v2 v2  1    )
+                     (  1 v2 v3 )                     ( v3 v3 v3 v3  1 )
+                     (     1 v3 )
+                     (        1 )
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    /* Shift pointers to allow 1-based Fortran-style indexing below. */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1 * 1;
+    v -= v_offset;
+    --tau;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1 * 1;
+    t -= t_offset;
+
+    /* Function Body */
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (lsame_(direct, "F")) {
+	/* Forward: build the upper-triangular T column by column. */
+	i__1 = *k;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (tau[i__] == 0.) {
+
+/*              H(i)  =  I */
+
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    t[j + i__ * t_dim1] = 0.;
+/* L10: */
+		}
+	    } else {
+
+/*              general case */
+
+		/* Temporarily overwrite the (implicitly 1) diagonal entry
+		   of V with 1 for the matrix-vector product; it is restored
+		   right after (see "modified but restored" note above). */
+		vii = v[i__ + i__ * v_dim1];
+		v[i__ + i__ * v_dim1] = 1.;
+		if (lsame_(storev, "C")) {
+
+/*                 T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
+
+		    i__2 = *n - i__ + 1;
+		    i__3 = i__ - 1;
+		    d__1 = -tau[i__];
+		    dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1],
+			     ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b29, &t[
+			    i__ * t_dim1 + 1], &c__1);
+		} else {
+
+/*                 T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
+
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__ + 1;
+		    d__1 = -tau[i__];
+		    dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ *
+			    v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+			    c_b29, &t[i__ * t_dim1 + 1], &c__1);
+		}
+		v[i__ + i__ * v_dim1] = vii;
+
+/*              T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+			t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+		t[i__ + i__ * t_dim1] = tau[i__];
+	    }
+/* L20: */
+	}
+    } else {
+	/* Backward: build the lower-triangular T from the last column. */
+	for (i__ = *k; i__ >= 1; --i__) {
+	    if (tau[i__] == 0.) {
+
+/*              H(i)  =  I */
+
+		i__1 = *k;
+		for (j = i__; j <= i__1; ++j) {
+		    t[j + i__ * t_dim1] = 0.;
+/* L30: */
+		}
+	    } else {
+
+/*              general case */
+
+		if (i__ < *k) {
+		    if (lsame_(storev, "C")) {
+			/* Same save/restore of the implicit unit entry as in
+			   the forward branch. */
+			vii = v[*n - *k + i__ + i__ * v_dim1];
+			v[*n - *k + i__ + i__ * v_dim1] = 1.;
+
+/*
+                      T(i+1:k,i) :=
+                              - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i)
+*/
+
+			i__1 = *n - *k + i__;
+			i__2 = *k - i__;
+			d__1 = -tau[i__];
+			dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1)
+				* v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], &
+				c__1, &c_b29, &t[i__ + 1 + i__ * t_dim1], &
+				c__1);
+			v[*n - *k + i__ + i__ * v_dim1] = vii;
+		    } else {
+			vii = v[i__ + (*n - *k + i__) * v_dim1];
+			v[i__ + (*n - *k + i__) * v_dim1] = 1.;
+
+/*
+                      T(i+1:k,i) :=
+                              - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)'
+*/
+
+			i__1 = *k - i__;
+			i__2 = *n - *k + i__;
+			d__1 = -tau[i__];
+			dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ +
+				1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, &
+				c_b29, &t[i__ + 1 + i__ * t_dim1], &c__1);
+			v[i__ + (*n - *k + i__) * v_dim1] = vii;
+		    }
+
+/*                 T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+		    i__1 = *k - i__;
+		    dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+			    + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+			     t_dim1], &c__1)
+			    ;
+		}
+		t[i__ + i__ * t_dim1] = tau[i__];
+	    }
+/* L40: */
+	}
+    }
+    return 0;
+
+/* End of DLARFT */
+
+} /* dlarft_ */
+
+/* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal *
+ v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer j;
+ static doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5,
+ v6, v7, v8, v9, t10, v10, sum;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLARFX applies a real elementary reflector H to a real m by n
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a real scalar and v is a real vector.
+
+ If tau = 0, then H is taken to be the unit matrix
+
+ This version uses inline code if H has order < 11.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L'
+ or (N) if SIDE = 'R'
+ The vector v in the representation of H.
+
+ TAU (input) DOUBLE PRECISION
+ The value tau in the representation of H.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDA >= (1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+ WORK is not referenced if H has order < 11.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (*tau == 0.) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/*
+ Code for general M
+
+ w := C'*v
+*/
+
+ dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &c__1, &
+ c_b29, &work[1], &c__1);
+
+/* C := C - tau * v * w' */
+
+ d__1 = -(*tau);
+ dger_(m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc)
+ ;
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1. - *tau * v[1] * v[1];
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1];
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9] + v10 * c__[j * c_dim1 + 10];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+ c__[j * c_dim1 + 10] -= sum * t10;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/*
+ Code for general N
+
+ w := C * v
+*/
+
+ dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &
+ c__1, &c_b29, &work[1], &c__1);
+
+/* C := C - tau * w * v' */
+
+ d__1 = -(*tau);
+ dger_(m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc)
+ ;
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1. - *tau * v[1] * v[1];
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j + c_dim1] = t1 * c__[j + c_dim1];
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
+ v9 * c__[j + c_dim1 * 9];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
+ v9 * c__[j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+ c__[j + c_dim1 * 10] -= sum * t10;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of DLARFX */
+
+} /* dlarfx_ */
+
+/* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs,
+ doublereal *sn, doublereal *r__)
+{
+    /* Initialized data */
+
+    static logical first = TRUE_;
+
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__;
+    static doublereal f1, g1, eps, scale;
+    static integer count;
+    static doublereal safmn2, safmx2;
+
+    static doublereal safmin;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLARTG generates a plane rotation so that
+
+ [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1.
+ [ -SN CS ] [ G ] [ 0 ]
+
+ This is a slower, more accurate version of the BLAS1 routine DROTG,
+ with the following other differences:
+ F and G are unchanged on return.
+ If G=0, then CS=1 and SN=0.
+ If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any
+ floating point operations (saves work in DBDSQR when
+ there are zeros on the diagonal).
+
+ If F exceeds G in magnitude, CS will be positive.
+
+ Arguments
+ =========
+
+ F (input) DOUBLE PRECISION
+ The first component of vector to be rotated.
+
+ G (input) DOUBLE PRECISION
+ The second component of vector to be rotated.
+
+ CS (output) DOUBLE PRECISION
+ The cosine of the rotation.
+
+ SN (output) DOUBLE PRECISION
+ The sine of the rotation.
+
+ R (output) DOUBLE PRECISION
+ The nonzero component of the rotated vector.
+
+ =====================================================================
+*/
+
+
+/*
+   NOTE(review): the f2c translation keeps every local "static", so this
+   routine is not reentrant; the one-time FIRST initialization of the
+   scaling thresholds SAFMN2/SAFMX2 below is likewise unsafe if called
+   concurrently from multiple threads.
+*/
+    if (first) {
+ first = FALSE_;
+ safmin = SAFEMINIMUM;
+ eps = EPSILON;
+ d__1 = BASE;
+ i__1 = (integer) (log(safmin / eps) / log(BASE) /
+ 2.);
+ safmn2 = pow_di(&d__1, &i__1);
+ safmx2 = 1. / safmn2;
+    }
+    if (*g == 0.) {
+ *cs = 1.;
+ *sn = 0.;
+ *r__ = *f;
+    } else if (*f == 0.) {
+ *cs = 0.;
+ *sn = 1.;
+ *r__ = *g;
+    } else {
+ f1 = *f;
+ g1 = *g;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale >= safmx2) {
+ count = 0;
+/* Inputs are huge: scale down by SAFMN2 until in range, then undo on R. */
+L10:
+ ++count;
+ f1 *= safmn2;
+ g1 *= safmn2;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale >= safmx2) {
+ goto L10;
+ }
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmx2;
+/* L20: */
+ }
+ } else if (scale <= safmn2) {
+ count = 0;
+/* Inputs are tiny: scale up by SAFMX2 until in range, then undo on R. */
+L30:
+ ++count;
+ f1 *= safmx2;
+ g1 *= safmx2;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale <= safmn2) {
+ goto L30;
+ }
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmn2;
+/* L40: */
+ }
+ } else {
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ }
+/* Sign convention: make CS positive whenever |F| > |G|. */
+ if ((abs(*f) > abs(*g) && *cs < 0.)) {
+ *cs = -(*cs);
+ *sn = -(*sn);
+ *r__ = -(*r__);
+ }
+    }
+    return 0;
+
+/* End of DLARTG */
+
+} /* dlartg_ */
+
+/* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__,
+ doublereal *ssmin, doublereal *ssmax)
+{
+    /* System generated locals */
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal c__, fa, ga, ha, as, at, au, fhmn, fhmx;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAS2 computes the singular values of the 2-by-2 matrix
+ [ F G ]
+ [ 0 H ].
+ On return, SSMIN is the smaller singular value and SSMAX is the
+ larger singular value.
+
+ Arguments
+ =========
+
+ F (input) DOUBLE PRECISION
+ The (1,1) element of the 2-by-2 matrix.
+
+ G (input) DOUBLE PRECISION
+ The (1,2) element of the 2-by-2 matrix.
+
+ H (input) DOUBLE PRECISION
+ The (2,2) element of the 2-by-2 matrix.
+
+ SSMIN (output) DOUBLE PRECISION
+ The smaller singular value.
+
+ SSMAX (output) DOUBLE PRECISION
+ The larger singular value.
+
+ Further Details
+ ===============
+
+ Barring over/underflow, all output quantities are correct to within
+ a few units in the last place (ulps), even in the absence of a guard
+ digit in addition/subtraction.
+
+ In IEEE arithmetic, the code works correctly if one matrix element is
+ infinite.
+
+ Overflow will not occur unless the largest singular value itself
+ overflows, or is within a few ulps of overflow. (On machines with
+ partial overflow, like the Cray, overflow may occur if the largest
+ singular value is within a factor of 2 of overflow.)
+
+ Underflow is harmless if underflow is gradual. Otherwise, results
+ may correspond to a matrix modified by perturbations of size near
+ the underflow threshold.
+
+ ====================================================================
+*/
+
+
+/* Singular values depend only on the absolute values of the entries. */
+    fa = abs(*f);
+    ga = abs(*g);
+    ha = abs(*h__);
+    fhmn = min(fa,ha);
+    fhmx = max(fa,ha);
+/* If min(|F|,|H|) == 0 the triangular matrix is singular: SSMIN = 0. */
+    if (fhmn == 0.) {
+ *ssmin = 0.;
+ if (fhmx == 0.) {
+ *ssmax = ga;
+ } else {
+/* Computing 2nd power */
+ d__1 = min(fhmx,ga) / max(fhmx,ga);
+ *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.);
+ }
+    } else {
+/* Nonzero diagonal: branch on whether |G| is dominated by max(|F|,|H|),
+   so that every quotient formed below is bounded. */
+ if (ga < fhmx) {
+ as = fhmn / fhmx + 1.;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ d__1 = ga / fhmx;
+ au = d__1 * d__1;
+ c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au));
+ *ssmin = fhmn * c__;
+ *ssmax = fhmx / c__;
+ } else {
+ au = fhmx / ga;
+ if (au == 0.) {
+
+/*
+ Avoid possible harmful underflow if exponent range
+ asymmetric (true SSMIN may not underflow even if
+ AU underflows)
+*/
+
+ *ssmin = fhmn * fhmx / ga;
+ *ssmax = ga;
+ } else {
+ as = fhmn / fhmx + 1.;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ d__1 = as * au;
+/* Computing 2nd power */
+ d__2 = at * au;
+ c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.));
+ *ssmin = fhmn * c__ * au;
+ *ssmin += *ssmin;
+ *ssmax = ga / (c__ + c__);
+ }
+ }
+    }
+    return 0;
+
+/* End of DLAS2 */
+
+} /* dlas2_ */
+
+/* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku,
+ doublereal *cfrom, doublereal *cto, integer *m, integer *n,
+ doublereal *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    static integer i__, j, k1, k2, k3, k4;
+    static doublereal mul, cto1;
+    static logical done;
+    static doublereal ctoc;
+    extern logical lsame_(char *, char *);
+    static integer itype;
+    static doublereal cfrom1;
+
+    static doublereal cfromc;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static doublereal bignum, smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLASCL multiplies the M by N real matrix A by the real scalar
+ CTO/CFROM. This is done without over/underflow as long as the final
+ result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
+ A may be full, upper triangular, lower triangular, upper Hessenberg,
+ or banded.
+
+ Arguments
+ =========
+
+ TYPE (input) CHARACTER*1
+ TYPE indicates the storage type of the input matrix.
+ = 'G': A is a full matrix.
+ = 'L': A is a lower triangular matrix.
+ = 'U': A is an upper triangular matrix.
+ = 'H': A is an upper Hessenberg matrix.
+ = 'B': A is a symmetric band matrix with lower bandwidth KL
+ and upper bandwidth KU and with only the lower
+ half stored.
+ = 'Q': A is a symmetric band matrix with lower bandwidth KL
+ and upper bandwidth KU and with only the upper
+ half stored.
+ = 'Z': A is a band matrix with lower bandwidth KL and upper
+ bandwidth KU.
+
+ KL (input) INTEGER
+ The lower bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ KU (input) INTEGER
+ The upper bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ CFROM (input) DOUBLE PRECISION
+ CTO (input) DOUBLE PRECISION
+ The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
+ without over/underflow if the final result CTO*A(I,J)/CFROM
+ can be represented without over/underflow. CFROM must be
+ nonzero.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,M)
+ The matrix to be multiplied by CTO/CFROM. See TYPE for the
+ storage type.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ INFO (output) INTEGER
+ 0 - successful exit
+ <0 - if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+/* NOTE(review): the f2c-style static locals make this routine non-reentrant. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+
+/* Decode TYPE into a small integer for the dispatch below. */
+    if (lsame_(type__, "G")) {
+ itype = 0;
+    } else if (lsame_(type__, "L")) {
+ itype = 1;
+    } else if (lsame_(type__, "U")) {
+ itype = 2;
+    } else if (lsame_(type__, "H")) {
+ itype = 3;
+    } else if (lsame_(type__, "B")) {
+ itype = 4;
+    } else if (lsame_(type__, "Q")) {
+ itype = 5;
+    } else if (lsame_(type__, "Z")) {
+ itype = 6;
+    } else {
+ itype = -1;
+    }
+
+    if (itype == -1) {
+ *info = -1;
+    } else if (*cfrom == 0.) {
+ *info = -4;
+    } else if (*m < 0) {
+ *info = -6;
+    } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m))
+ {
+ *info = -7;
+    } else if ((itype <= 3 && *lda < max(1,*m))) {
+ *info = -9;
+    } else if (itype >= 4) {
+/* Computing MAX */
+ i__1 = *m - 1;
+ if (*kl < 0 || *kl > max(i__1,0)) {
+ *info = -2;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *n - 1;
+ if (*ku < 0 || *ku > max(i__1,0) || ((itype == 4 || itype == 5) &&
+ *kl != *ku)) {
+ *info = -3;
+ } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda <
+ *ku + 1) || (itype == 6 && *lda < ((*kl) << (1)) + *ku +
+ 1)) {
+ *info = -9;
+ }
+ }
+    }
+
+    if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASCL", &i__1);
+ return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+ return 0;
+    }
+
+/* Get machine parameters */
+
+    smlnum = SAFEMINIMUM;
+    bignum = 1. / smlnum;
+
+    cfromc = *cfrom;
+    ctoc = *cto;
+
+/*
+   Apply the factor CTO/CFROM in safe steps: each pass chooses MUL as
+   SMLNUM, BIGNUM, or the final representable ratio CTOC/CFROMC, and
+   loops back (DONE = FALSE_) until the whole factor has been applied
+   without intermediate over/underflow.
+*/
+L10:
+    cfrom1 = cfromc * smlnum;
+    cto1 = ctoc / bignum;
+    if ((abs(cfrom1) > abs(ctoc) && ctoc != 0.)) {
+ mul = smlnum;
+ done = FALSE_;
+ cfromc = cfrom1;
+    } else if (abs(cto1) > abs(cfromc)) {
+ mul = bignum;
+ done = FALSE_;
+ ctoc = cto1;
+    } else {
+ mul = ctoc / cfromc;
+ done = TRUE_;
+    }
+
+    if (itype == 0) {
+
+/* Full matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L20: */
+ }
+/* L30: */
+ }
+
+    } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L40: */
+ }
+/* L50: */
+ }
+
+    } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L60: */
+ }
+/* L70: */
+ }
+
+    } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L80: */
+ }
+/* L90: */
+ }
+
+    } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+ k3 = *kl + 1;
+ k4 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = k3, i__4 = k4 - j;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L100: */
+ }
+/* L110: */
+ }
+
+    } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+ k1 = *ku + 2;
+ k3 = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k1 - j;
+ i__3 = k3;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L120: */
+ }
+/* L130: */
+ }
+
+    } else if (itype == 6) {
+
+/* Band matrix */
+
+ k1 = *kl + *ku + 2;
+ k2 = *kl + 1;
+ k3 = ((*kl) << (1)) + *ku + 1;
+ k4 = *kl + *ku + 1 + *m;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = k1 - j;
+/* Computing MIN */
+ i__4 = k3, i__5 = k4 - j;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L140: */
+ }
+/* L150: */
+ }
+
+    }
+
+/* More scaling to do? Take another pass with the residual factor. */
+    if (! done) {
+ goto L10;
+    }
+
+    return 0;
+
+/* End of DLASCL */
+
+} /* dlascl_ */
+
+/* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__,
+ doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *
+ ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer *
+ info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf,
+ iwk, lvl, ndb1, nlp1, nrp1;
+    static doublereal beta;
+    static integer idxq, nlvl;
+    static doublereal alpha;
+    static integer inode, ndiml, idxqc, ndimr, itemp, sqrei;
+    extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *, integer *, doublereal *,
+ integer *), dlasdq_(char *, integer *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlasdt_(integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *), xerbla_(
+ char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ Using a divide and conquer approach, DLASD0 computes the singular
+ value decomposition (SVD) of a real upper bidiagonal N-by-M
+ matrix B with diagonal D and offdiagonal E, where M = N + SQRE.
+ The algorithm computes orthogonal matrices U and VT such that
+ B = U * S * VT. The singular values S are overwritten on D.
+
+ A related subroutine, DLASDA, computes only the singular values,
+ and optionally, the singular vectors in compact form.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ On entry, the row dimension of the upper bidiagonal matrix.
+ This is also the dimension of the main diagonal array D.
+
+ SQRE (input) INTEGER
+ Specifies the column dimension of the bidiagonal matrix.
+ = 0: The bidiagonal matrix has column dimension M = N;
+ = 1: The bidiagonal matrix has column dimension M = N+1;
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix.
+ On exit D, if INFO = 0, contains its singular values.
+
+ E (input) DOUBLE PRECISION array, dimension (M-1)
+ Contains the subdiagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ U (output) DOUBLE PRECISION array, dimension at least (LDQ, N)
+ On exit, U contains the left singular vectors.
+
+ LDU (input) INTEGER
+ On entry, leading dimension of U.
+
+ VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M)
+ On exit, VT' contains the right singular vectors.
+
+ LDVT (input) INTEGER
+ On entry, leading dimension of VT.
+
+ SMLSIZ (input) INTEGER
+ On entry, maximum size of the subproblems at the
+ bottom of the computation tree.
+
+ IWORK INTEGER work array.
+ Dimension must be at least (8 * N)
+
+ WORK DOUBLE PRECISION work array.
+ Dimension must be at least (3 * M**2 + 2 * M)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    --iwork;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+ *info = -1;
+    } else if (*sqre < 0 || *sqre > 1) {
+ *info = -2;
+    }
+
+    m = *n + *sqre;
+
+    if (*ldu < *n) {
+ *info = -6;
+    } else if (*ldvt < m) {
+ *info = -8;
+    } else if (*smlsiz < 3) {
+ *info = -9;
+    }
+    if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD0", &i__1);
+ return 0;
+    }
+
+/* If the input matrix is too small, call DLASDQ to find the SVD. */
+
+    if (*n <= *smlsiz) {
+ dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset],
+ ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info);
+ return 0;
+    }
+
+/* Set up the computation tree. */
+
+/* iwork is partitioned into five consecutive length-N segments starting
+   at: inode (tree node centers), ndiml/ndimr (left/right subproblem
+   sizes), idxq (sorting permutations), iwk (scratch for DLASD1). */
+    inode = 1;
+    ndiml = inode + *n;
+    ndimr = ndiml + *n;
+    idxq = ndimr + *n;
+    iwk = idxq + *n;
+    dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ For the nodes on bottom level of the tree, solve
+ their subproblems by DLASDQ.
+*/
+
+    ndb1 = (nd + 1) / 2;
+    ncc = 0;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nrp1 = nr + 1;
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sqrei = 1;
+ dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[
+ nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[
+ nlf + nlf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+/* Leaf subproblem is already sorted: record the identity permutation. */
+ itemp = idxq + nlf - 2;
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j] = j;
+/* L10: */
+ }
+ if (i__ == nd) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ nrp1 = nr + sqrei;
+ dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[
+ nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[
+ nrf + nrf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + ic;
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j - 1] = j;
+/* L20: */
+ }
+/* L30: */
+    }
+
+/* Now conquer each subproblem bottom-up. */
+
+    for (lvl = nlvl; lvl >= 1; --lvl) {
+
+/*
+ Find the first node LF and last node LL on the
+ current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ if ((*sqre == 0 && i__ == ll)) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ idxqc = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+/* Merge the two child SVDs of this node via DLASD1. */
+ dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf *
+ u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[
+ idxqc], &iwork[iwk], &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+    }
+
+    return 0;
+
+/* End of DLASD0 */
+
+} /* dlasd0_ */
+
+/* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre,
+	doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u,
+	integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer *
+	iwork, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    static integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2,
+	    idxc, idxp, ldvt2;
+    extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, integer *,
+	    integer *, integer *, integer *, integer *, integer *), dlasd3_(
+	    integer *, integer *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, integer *, integer *, doublereal *, integer *),
+	    dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, integer *, doublereal *, integer *, integer *),
+	    dlamrg_(integer *, integer *, doublereal *, integer *, integer *,
+	    integer *);
+    static integer isigma;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static doublereal orgnrm;
+    static integer coltyp;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B,
+    where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0.
+
+    A related subroutine DLASD7 handles the case in which the singular
+    values (and the singular vectors in factored form) are desired.
+
+    DLASD1 computes the SVD as follows:
+
+                  ( D1(in)  0    0     0 )
+      B = U(in) * (   Z1'   a   Z2'    b ) * VT(in)
+                  (   0     0   D2(in) 0 )
+
+        = U(out) * ( D(out) 0) * VT(out)
+
+    where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
+    with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros
+    elsewhere; and the entry b is empty if SQRE = 0.
+
+    The left singular vectors of the original matrix are stored in U, and
+    the transpose of the right singular vectors are stored in VT, and the
+    singular values are in D.  The algorithm consists of three stages:
+
+       The first stage consists of deflating the size of the problem
+       when there are multiple singular values or when there are zeros in
+       the Z vector.  For each such occurrence the dimension of the
+       secular equation problem is reduced by one.  This stage is
+       performed by the routine DLASD2.
+
+       The second stage consists of calculating the updated
+       singular values. This is done by finding the square roots of the
+       roots of the secular equation via the routine DLASD4 (as called
+       by DLASD3). This routine also calculates the singular vectors of
+       the current problem.
+
+       The final stage consists of computing the updated singular vectors
+       directly using the updated singular values.  The singular vectors
+       for the current problem are multiplied with the singular vectors
+       from the overall problem.
+
+    Arguments
+    =========
+
+    NL     (input) INTEGER
+           The row dimension of the upper block.  NL >= 1.
+
+    NR     (input) INTEGER
+           The row dimension of the lower block.  NR >= 1.
+
+    SQRE   (input) INTEGER
+           = 0: the lower block is an NR-by-NR square matrix.
+           = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+           The bidiagonal matrix has row dimension N = NL + NR + 1,
+           and column dimension M = N + SQRE.
+
+    D      (input/output) DOUBLE PRECISION array,
+                          dimension (N = NL+NR+1).
+           On entry D(1:NL,1:NL) contains the singular values of the
+           upper block; and D(NL+2:N) contains the singular values of
+           the lower block. On exit D(1:N) contains the singular values
+           of the modified matrix.
+
+    ALPHA  (input) DOUBLE PRECISION
+           Contains the diagonal element associated with the added row.
+
+    BETA   (input) DOUBLE PRECISION
+           Contains the off-diagonal element associated with the added
+           row.
+
+    U      (input/output) DOUBLE PRECISION array, dimension(LDU,N)
+           On entry U(1:NL, 1:NL) contains the left singular vectors of
+           the upper block; U(NL+2:N, NL+2:N) contains the left singular
+           vectors of the lower block. On exit U contains the left
+           singular vectors of the bidiagonal matrix.
+
+    LDU    (input) INTEGER
+           The leading dimension of the array U.  LDU >= max( 1, N ).
+
+    VT     (input/output) DOUBLE PRECISION array, dimension(LDVT,M)
+           where M = N + SQRE.
+           On entry VT(1:NL+1, 1:NL+1)' contains the right singular
+           vectors of the upper block; VT(NL+2:M, NL+2:M)' contains
+           the right singular vectors of the lower block. On exit
+           VT' contains the right singular vectors of the
+           bidiagonal matrix.
+
+    LDVT   (input) INTEGER
+           The leading dimension of the array VT.  LDVT >= max( 1, M ).
+
+    IDXQ  (output) INTEGER array, dimension(N)
+           This contains the permutation which will reintegrate the
+           subproblem just solved back into sorted order, i.e.
+           D( IDXQ( I = 1, N ) ) will be in ascending order.
+
+    IWORK  (workspace) INTEGER array, dimension( 4 * N )
+
+    WORK   (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M )
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  if INFO = 1, a singular value did not converge
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Huan Ren, Computer Science Division, University of
+       California at Berkeley, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    --d__;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    --idxq;
+    --iwork;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*nl < 1) {
+	*info = -1;
+    } else if (*nr < 1) {
+	*info = -2;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASD1", &i__1);
+	return 0;
+    }
+
+    n = *nl + *nr + 1;
+    m = n + *sqre;
+
+/*
+    The following values are for bookkeeping purposes only.  They are
+    integer pointers which indicate the portion of the workspace
+    used by a particular array in DLASD2 and DLASD3.
+*/
+
+    ldu2 = n;
+    ldvt2 = m;
+
+/*  IZ..IQ are 1-based offsets into the doublereal workspace WORK. */
+    iz = 1;
+    isigma = iz + m;
+    iu2 = isigma + n;
+    ivt2 = iu2 + ldu2 * n;
+    iq = ivt2 + ldvt2 * m;
+
+/*  IDX..IDXP are 1-based offsets into the integer workspace IWORK. */
+    idx = 1;
+    idxc = idx + n;
+    coltyp = idxc + n;
+    idxp = coltyp + n;
+
+/*
+    Scale.
+
+    Computing MAX
+*/
+    d__1 = abs(*alpha), d__2 = abs(*beta);
+    orgnrm = max(d__1,d__2);
+    d__[*nl + 1] = 0.;
+    i__1 = n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
+	    orgnrm = (d__1 = d__[i__], abs(d__1));
+	}
+/* L10: */
+    }
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info);
+    *alpha /= orgnrm;
+    *beta /= orgnrm;
+
+/* Deflate singular values. */
+
+/*  On return K is the order of the related secular equation, 1 <= K <= N. */
+    dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset],
+	    ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, &
+	    work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], &
+	    idxq[1], &iwork[coltyp], info);
+
+/* Solve Secular Equation and update singular vectors. */
+
+    ldq = k;
+    dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[
+	    u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[
+	    ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info);
+    if (*info != 0) {
+	return 0;
+    }
+
+/* Unscale. */
+
+    dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+/*
+    Merge the K nondeflated values D(1:K) with the N-K deflated values
+    D(K+1:N); the second list is traversed with stride -1 (C_N1).
+*/
+    n1 = k;
+    n2 = n - k;
+    dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+    return 0;
+
+/* End of DLASD1 */
+
+} /* dlasd1_ */
+
+/* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer
+	*k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal *
+	beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt,
+	doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2,
+	integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer *
+	idxq, integer *coltyp, integer *info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset,
+	    vt2_dim1, vt2_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    static doublereal c__;
+    static integer i__, j, m, n;
+    static doublereal s;
+    static integer k2;
+    static doublereal z1;
+    static integer ct, jp;
+    static doublereal eps, tau, tol;
+    static integer psm[4], nlp1, nlp2, idxi, idxj;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    static integer ctot[4], idxjp;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static integer jprev;
+
+    extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+	    integer *, integer *, integer *), dlacpy_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *), xerbla_(char *,
+	    integer *);
+    static doublereal hlftol;
+
+/*
+    NOTE(review): dlapy2_ is called below but has no extern declaration in
+    this routine; it is presumably declared earlier in this file -- verify.
+*/
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLASD2 merges the two sets of singular values together into a single
+    sorted set.  Then it tries to deflate the size of the problem.
+    There are two ways in which deflation can occur:  when two or more
+    singular values are close together or if there is a tiny entry in the
+    Z vector.  For each such occurrence the order of the related secular
+    equation problem is reduced by one.
+
+    DLASD2 is called from DLASD1.
+
+    Arguments
+    =========
+
+    NL     (input) INTEGER
+           The row dimension of the upper block.  NL >= 1.
+
+    NR     (input) INTEGER
+           The row dimension of the lower block.  NR >= 1.
+
+    SQRE   (input) INTEGER
+           = 0: the lower block is an NR-by-NR square matrix.
+           = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+           The bidiagonal matrix has N = NL + NR + 1 rows and
+           M = N + SQRE >= N columns.
+
+    K      (output) INTEGER
+           Contains the dimension of the non-deflated matrix,
+           This is the order of the related secular equation. 1 <= K <=N.
+
+    D      (input/output) DOUBLE PRECISION array, dimension(N)
+           On entry D contains the singular values of the two submatrices
+           to be combined.  On exit D contains the trailing (N-K) updated
+           singular values (those which were deflated) sorted into
+           increasing order.
+
+    ALPHA  (input) DOUBLE PRECISION
+           Contains the diagonal element associated with the added row.
+
+    BETA   (input) DOUBLE PRECISION
+           Contains the off-diagonal element associated with the added
+           row.
+
+    U      (input/output) DOUBLE PRECISION array, dimension(LDU,N)
+           On entry U contains the left singular vectors of two
+           submatrices in the two square blocks with corners at (1,1),
+           (NL, NL), and (NL+2, NL+2), (N,N).
+           On exit U contains the trailing (N-K) updated left singular
+           vectors (those which were deflated) in its last N-K columns.
+
+    LDU    (input) INTEGER
+           The leading dimension of the array U.  LDU >= N.
+
+    Z      (output) DOUBLE PRECISION array, dimension(N)
+           On exit Z contains the updating row vector in the secular
+           equation.
+
+    DSIGMA (output) DOUBLE PRECISION array, dimension (N)
+           Contains a copy of the diagonal elements (K-1 singular values
+           and one zero) in the secular equation.
+
+    U2     (output) DOUBLE PRECISION array, dimension(LDU2,N)
+           Contains a copy of the first K-1 left singular vectors which
+           will be used by DLASD3 in a matrix multiply (DGEMM) to solve
+           for the new left singular vectors. U2 is arranged into four
+           blocks. The first block contains a column with 1 at NL+1 and
+           zero everywhere else; the second block contains non-zero
+           entries only at and above NL; the third contains non-zero
+           entries only below NL+1; and the fourth is dense.
+
+    LDU2   (input) INTEGER
+           The leading dimension of the array U2.  LDU2 >= N.
+
+    VT     (input/output) DOUBLE PRECISION array, dimension(LDVT,M)
+           On entry VT' contains the right singular vectors of two
+           submatrices in the two square blocks with corners at (1,1),
+           (NL+1, NL+1), and (NL+2, NL+2), (M,M).
+           On exit VT' contains the trailing (N-K) updated right singular
+           vectors (those which were deflated) in its last N-K columns.
+           In case SQRE =1, the last row of VT spans the right null
+           space.
+
+    LDVT   (input) INTEGER
+           The leading dimension of the array VT.  LDVT >= M.
+
+    VT2    (output) DOUBLE PRECISION array, dimension(LDVT2,N)
+           VT2' contains a copy of the first K right singular vectors
+           which will be used by DLASD3 in a matrix multiply (DGEMM) to
+           solve for the new right singular vectors. VT2 is arranged into
+           three blocks. The first block contains a row that corresponds
+           to the special 0 diagonal element in SIGMA; the second block
+           contains non-zeros only at and before NL +1; the third block
+           contains non-zeros only at and after  NL +2.
+
+    LDVT2  (input) INTEGER
+           The leading dimension of the array VT2.  LDVT2 >= M.
+
+    IDXP   (workspace) INTEGER array, dimension(N)
+           This will contain the permutation used to place deflated
+           values of D at the end of the array. On output IDXP(2:K)
+           points to the nondeflated D-values and IDXP(K+1:N)
+           points to the deflated singular values.
+
+    IDX    (workspace) INTEGER array, dimension(N)
+           This will contain the permutation used to sort the contents of
+           D into ascending order.
+
+    IDXC   (output) INTEGER array, dimension(N)
+           This will contain the permutation used to arrange the columns
+           of the deflated U matrix into three groups:  the first group
+           contains non-zero entries only at and above NL, the second
+           contains non-zero entries only below NL+2, and the third is
+           dense.
+
+    COLTYP (workspace/output) INTEGER array, dimension(N)
+           As workspace, this will contain a label which will indicate
+           which of the following types a column in the U2 matrix or a
+           row in the VT2 matrix is:
+           1 : non-zero in the upper half only
+           2 : non-zero in the lower half only
+           3 : dense
+           4 : deflated
+
+           On exit, it is an array of dimension 4, with COLTYP(I) being
+           the dimension of the I-th type columns.
+
+    IDXQ   (input) INTEGER array, dimension(N)
+           This contains the permutation which separately sorts the two
+           sub-problems in D into ascending order.  Note that entries in
+           the first half of this permutation must first be moved one
+           position backward; and entries in the second half
+           must first have NL+1 added to their values.
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Huan Ren, Computer Science Division, University of
+       California at Berkeley, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    --d__;
+    --z__;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    --dsigma;
+    u2_dim1 = *ldu2;
+    u2_offset = 1 + u2_dim1 * 1;
+    u2 -= u2_offset;
+    vt2_dim1 = *ldvt2;
+    vt2_offset = 1 + vt2_dim1 * 1;
+    vt2 -= vt2_offset;
+    --idxp;
+    --idx;
+    --idxc;
+    --idxq;
+    --coltyp;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*nl < 1) {
+	*info = -1;
+    } else if (*nr < 1) {
+	*info = -2;
+    } else if ((*sqre != 1 && *sqre != 0)) {
+	*info = -3;
+    }
+
+    n = *nl + *nr + 1;
+    m = n + *sqre;
+
+    if (*ldu < n) {
+	*info = -10;
+    } else if (*ldvt < m) {
+	*info = -12;
+    } else if (*ldu2 < n) {
+	*info = -15;
+    } else if (*ldvt2 < m) {
+	*info = -17;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASD2", &i__1);
+	return 0;
+    }
+
+    nlp1 = *nl + 1;
+    nlp2 = *nl + 2;
+
+/*
+    Generate the first part of the vector Z; and move the singular
+    values in the first part of D one position backward.
+*/
+
+    z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1];
+    z__[1] = z1;
+    for (i__ = *nl; i__ >= 1; --i__) {
+	z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1];
+	d__[i__ + 1] = d__[i__];
+	idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+    }
+
+/* Generate the second part of the vector Z. */
+
+    i__1 = m;
+    for (i__ = nlp2; i__ <= i__1; ++i__) {
+	z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1];
+/* L20: */
+    }
+
+/* Initialize some reference arrays. */
+
+    i__1 = nlp1;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	coltyp[i__] = 1;
+/* L30: */
+    }
+    i__1 = n;
+    for (i__ = nlp2; i__ <= i__1; ++i__) {
+	coltyp[i__] = 2;
+/* L40: */
+    }
+
+/* Sort the singular values into increasing order */
+
+    i__1 = n;
+    for (i__ = nlp2; i__ <= i__1; ++i__) {
+	idxq[i__] += nlp1;
+/* L50: */
+    }
+
+/*
+    DSIGMA, IDXC, and the first column of U2
+    are used as storage space.
+*/
+
+    i__1 = n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	dsigma[i__] = d__[idxq[i__]];
+	u2[i__ + u2_dim1] = z__[idxq[i__]];
+	idxc[i__] = coltyp[idxq[i__]];
+/* L60: */
+    }
+
+    dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+    i__1 = n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	idxi = idx[i__] + 1;
+	d__[i__] = dsigma[idxi];
+	z__[i__] = u2[idxi + u2_dim1];
+	coltyp[i__] = idxc[idxi];
+/* L70: */
+    }
+
+/* Calculate the allowable deflation tolerance */
+
+    eps = EPSILON;
+/* Computing MAX */
+    d__1 = abs(*alpha), d__2 = abs(*beta);
+    tol = max(d__1,d__2);
+/* Computing MAX */
+    d__2 = (d__1 = d__[n], abs(d__1));
+    tol = eps * 8. * max(d__2,tol);
+
+/*
+    There are 2 kinds of deflation -- first a value in the z-vector
+    is small, second two (or more) singular values are very close
+    together (their difference is small).
+
+    If the value in the z-vector is small, we simply permute the
+    array so that the corresponding singular value is moved to the
+    end.
+
+    If two values in the D-vector are close, we perform a two-sided
+    rotation designed to make one of the corresponding z-vector
+    entries zero, and then permute the array so that the deflated
+    singular value is moved to the end.
+
+    If there are multiple singular values then the problem deflates.
+    Here the number of equal singular values are found.  As each equal
+    singular value is found, an elementary reflector is computed to
+    rotate the corresponding singular subspace so that the
+    corresponding components of Z are zero in this new basis.
+*/
+
+/*
+    K counts nondeflated entries (IDXP filled from the front, slots 2..K);
+    K2 fills deflated entries from the back of IDXP (slots K2..N).
+*/
+    *k = 1;
+    k2 = n + 1;
+    i__1 = n;
+    for (j = 2; j <= i__1; ++j) {
+	if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	    --k2;
+	    idxp[k2] = j;
+	    coltyp[j] = 4;
+	    if (j == n) {
+		goto L120;
+	    }
+	} else {
+	    jprev = j;
+	    goto L90;
+	}
+/* L80: */
+    }
+L90:
+    j = jprev;
+L100:
+    ++j;
+    if (j > n) {
+	goto L110;
+    }
+    if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	--k2;
+	idxp[k2] = j;
+	coltyp[j] = 4;
+    } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+	if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+	    s = z__[jprev];
+	    c__ = z__[j];
+
+/*
+    Find sqrt(a**2+b**2) without overflow or
+    destructive underflow.
+*/
+
+	    tau = dlapy2_(&c__, &s);
+	    c__ /= tau;
+	    s = -s / tau;
+	    z__[j] = tau;
+	    z__[jprev] = 0.;
+
+/*
+    Apply back the Givens rotation to the left and right
+    singular vector matrices.
+*/
+
+	    idxjp = idxq[idx[jprev] + 1];
+	    idxj = idxq[idx[j] + 1];
+	    if (idxjp <= nlp1) {
+		--idxjp;
+	    }
+	    if (idxj <= nlp1) {
+		--idxj;
+	    }
+	    drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], &
+		    c__1, &c__, &s);
+	    drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, &
+		    c__, &s);
+	    if (coltyp[j] != coltyp[jprev]) {
+		coltyp[j] = 3;
+	    }
+	    coltyp[jprev] = 4;
+	    --k2;
+	    idxp[k2] = jprev;
+	    jprev = j;
+	} else {
+	    ++(*k);
+	    u2[*k + u2_dim1] = z__[jprev];
+	    dsigma[*k] = d__[jprev];
+	    idxp[*k] = jprev;
+	    jprev = j;
+	}
+    }
+    goto L100;
+L110:
+
+/* Record the last singular value. */
+
+    ++(*k);
+    u2[*k + u2_dim1] = z__[jprev];
+    dsigma[*k] = d__[jprev];
+    idxp[*k] = jprev;
+
+L120:
+
+/*
+    Count up the total number of the various types of columns, then
+    form a permutation which positions the four column types into
+    four groups of uniform structure (although one or more of these
+    groups may be empty).
+*/
+
+    for (j = 1; j <= 4; ++j) {
+	ctot[j - 1] = 0;
+/* L130: */
+    }
+    i__1 = n;
+    for (j = 2; j <= i__1; ++j) {
+	ct = coltyp[j];
+	++ctot[ct - 1];
+/* L140: */
+    }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+    psm[0] = 2;
+    psm[1] = ctot[0] + 2;
+    psm[2] = psm[1] + ctot[1];
+    psm[3] = psm[2] + ctot[2];
+
+/*
+    Fill out the IDXC array so that the permutation which it induces
+    will place all type-1 columns first, all type-2 columns next,
+    then all type-3's, and finally all type-4's, starting from the
+    second column. This applies similarly to the rows of VT.
+*/
+
+    i__1 = n;
+    for (j = 2; j <= i__1; ++j) {
+	jp = idxp[j];
+	ct = coltyp[jp];
+	idxc[psm[ct - 1]] = j;
+	++psm[ct - 1];
+/* L150: */
+    }
+
+/*
+    Sort the singular values and corresponding singular vectors into
+    DSIGMA, U2, and VT2 respectively.  The singular values/vectors
+    which were not deflated go into the first K slots of DSIGMA, U2,
+    and VT2 respectively, while those which were deflated go into the
+    last N - K slots, except that the first column/row will be treated
+    separately.
+*/
+
+    i__1 = n;
+    for (j = 2; j <= i__1; ++j) {
+	jp = idxp[j];
+	dsigma[j] = d__[jp];
+	idxj = idxq[idx[idxp[idxc[j]]] + 1];
+	if (idxj <= nlp1) {
+	    --idxj;
+	}
+	dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1);
+	dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2);
+/* L160: */
+    }
+
+/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */
+
+    dsigma[1] = 0.;
+    hlftol = tol / 2.;
+    if (abs(dsigma[2]) <= hlftol) {
+	dsigma[2] = hlftol;
+    }
+    if (m > n) {
+	z__[1] = dlapy2_(&z1, &z__[m]);
+	if (z__[1] <= tol) {
+	    c__ = 1.;
+	    s = 0.;
+	    z__[1] = tol;
+	} else {
+	    c__ = z1 / z__[1];
+	    s = z__[m] / z__[1];
+	}
+    } else {
+	if (abs(z1) <= tol) {
+	    z__[1] = tol;
+	} else {
+	    z__[1] = z1;
+	}
+    }
+
+/* Move the rest of the updating row to Z. */
+
+    i__1 = *k - 1;
+    dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1);
+
+/*
+    Determine the first column of U2, the first row of VT2 and the
+    last row of VT.
+*/
+
+    dlaset_("A", &n, &c__1, &c_b29, &c_b29, &u2[u2_offset], ldu2);
+    u2[nlp1 + u2_dim1] = 1.;
+    if (m > n) {
+	i__1 = nlp1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1];
+	    vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1];
+/* L170: */
+	}
+	i__1 = m;
+	for (i__ = nlp2; i__ <= i__1; ++i__) {
+	    vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1];
+	    vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1];
+/* L180: */
+	}
+    } else {
+	dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2);
+    }
+    if (m > n) {
+	dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2);
+    }
+
+/*
+    The deflated singular values and their corresponding vectors go
+    into the back of D, U, and V respectively.
+*/
+
+    if (n > *k) {
+	i__1 = n - *k;
+	dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+	i__1 = n - *k;
+	dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1)
+		 * u_dim1 + 1], ldu);
+	i__1 = n - *k;
+	dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 +
+		vt_dim1], ldvt);
+    }
+
+/* Copy CTOT into COLTYP for referencing in DLASD3. */
+
+    for (j = 1; j <= 4; ++j) {
+	coltyp[j] = ctot[j - 1];
+/* L190: */
+    }
+
+    return 0;
+
+/* End of DLASD2 */
+
+} /* dlasd2_ */
+
+/* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer
+ *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma,
+ doublereal *u, integer *ldu, doublereal *u2, integer *ldu2,
+ doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2,
+ integer *idxc, integer *ctot, doublereal *z__, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1,
+ vt_offset, vt2_dim1, vt2_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j, m, n, jc;
+ static doublereal rho;
+ static integer nlp1, nlp2, nrp1;
+ static doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer ctemp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer ktemp;
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASD3 finds all the square roots of the roots of the secular
+ equation, as defined by the values in D and Z. It makes the
+ appropriate calls to DLASD4 and then updates the singular
+ vectors by matrix multiplication.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ DLASD3 is called from DLASD1.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (input) INTEGER
+ The size of the secular equation, 1 =< K = < N.
+
+ D (output) DOUBLE PRECISION array, dimension(K)
+ On exit the square roots of the roots of the secular equation,
+ in ascending order.
+
+ Q (workspace) DOUBLE PRECISION array,
+ dimension at least (LDQ,K).
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= K.
+
+ DSIGMA (input) DOUBLE PRECISION array, dimension(K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ U (input) DOUBLE PRECISION array, dimension (LDU, N)
+ The last N - K columns of this matrix contain the deflated
+ left singular vectors.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= N.
+
+ U2 (input) DOUBLE PRECISION array, dimension (LDU2, N)
+ The first K columns of this matrix contain the non-deflated
+ left singular vectors for the split problem.
+
+ LDU2 (input) INTEGER
+ The leading dimension of the array U2. LDU2 >= N.
+
+ VT (input) DOUBLE PRECISION array, dimension (LDVT, M)
+ The last M - K columns of VT' contain the deflated
+ right singular vectors.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= N.
+
+ VT2 (input) DOUBLE PRECISION array, dimension (LDVT2, N)
+ The first K columns of VT2' contain the non-deflated
+ right singular vectors for the split problem.
+
+ LDVT2 (input) INTEGER
+ The leading dimension of the array VT2. LDVT2 >= N.
+
+ IDXC (input) INTEGER array, dimension ( N )
+ The permutation used to arrange the columns of U (and rows of
+ VT) into three groups: the first group contains non-zero
+ entries only at and above (or before) NL +1; the second
+ contains non-zero entries only at and below (or after) NL+2;
+ and the third is dense. The first column of U and the row of
+ VT are treated separately, however.
+
+ The rows of the singular vectors found by DLASD4
+ must be likewise permuted before the matrix multiplies can
+ take place.
+
+ CTOT (input) INTEGER array, dimension ( 4 )
+ A count of the total number of the various types of columns
+ in U (or rows in VT), as described in IDXC. The fourth column
+ type is any column which has been deflated.
+
+ Z (input) DOUBLE PRECISION array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1 * 1;
+ q -= q_offset;
+ --dsigma;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1 * 1;
+ u -= u_offset;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1 * 1;
+ u2 -= u2_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1 * 1;
+ vt -= vt_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1 * 1;
+ vt2 -= vt2_offset;
+ --idxc;
+ --ctot;
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if ((*sqre != 1 && *sqre != 0)) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+ if (*k < 1 || *k > n) {
+ *info = -4;
+ } else if (*ldq < *k) {
+ *info = -7;
+ } else if (*ldu < n) {
+ *info = -10;
+ } else if (*ldu2 < n) {
+ *info = -12;
+ } else if (*ldvt < m) {
+ *info = -14;
+ } else if (*ldvt2 < m) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = abs(z__[1]);
+ dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt);
+ if (z__[1] > 0.) {
+ dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ } else {
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ u[i__ + u_dim1] = -u2[i__ + u2_dim1];
+/* L10: */
+ }
+ }
+ return 0;
+ }
+
+/*
+ Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DSIGMA(I) if it is 1; this makes the subsequent
+ subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DSIGMA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DSIGMA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+ 2*DLAMBDA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L20: */
+ }
+
+/* Keep a copy of Z. */
+
+ dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1);
+
+/* Normalize Z. */
+
+ rho = dnrm2_(k, &z__[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Find the new singular values. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j],
+ &vt[j * vt_dim1 + 1], info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+/* L30: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1];
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]);
+/* L40: */
+ }
+ i__2 = *k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]);
+/* L50: */
+ }
+ d__2 = sqrt((d__1 = z__[i__], abs(d__1)));
+ z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]);
+/* L60: */
+ }
+
+/*
+ Compute left singular vectors of the modified diagonal matrix,
+ and store related information for the right singular vectors.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ *
+ vt_dim1 + 1];
+ u[i__ * u_dim1 + 1] = -1.;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__
+ * vt_dim1];
+ u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1];
+/* L70: */
+ }
+ temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1);
+ q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp;
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Update the left singular vector matrix. */
+
+ if (*k == 2) {
+ dgemm_("N", "N", &n, k, k, &c_b15, &u2[u2_offset], ldu2, &q[q_offset],
+ ldq, &c_b29, &u[u_offset], ldu);
+ goto L100;
+ }
+ if (ctot[1] > 0) {
+ dgemm_("N", "N", nl, k, &ctot[1], &c_b15, &u2[((u2_dim1) << (1)) + 1],
+ ldu2, &q[q_dim1 + 2], ldq, &c_b29, &u[u_dim1 + 1], ldu);
+ if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1]
+ , ldu2, &q[ktemp + q_dim1], ldq, &c_b15, &u[u_dim1 + 1],
+ ldu);
+ }
+ } else if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[u_dim1 + 1], ldu);
+ } else {
+ dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu);
+ }
+ dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu);
+ ktemp = ctot[1] + 2;
+ ctemp = ctot[2] + ctot[3];
+ dgemm_("N", "N", nr, k, &ctemp, &c_b15, &u2[nlp2 + ktemp * u2_dim1], ldu2,
+ &q[ktemp + q_dim1], ldq, &c_b29, &u[nlp2 + u_dim1], ldu);
+
+/* Generate the right singular vectors. */
+
+L100:
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1);
+ q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* Update the right singular vector matrix. */
+
+ if (*k == 2) {
+ dgemm_("N", "N", k, &m, k, &c_b15, &q[q_offset], ldq, &vt2[vt2_offset]
+ , ldvt2, &c_b29, &vt[vt_offset], ldvt);
+ return 0;
+ }
+ ktemp = ctot[1] + 1;
+ dgemm_("N", "N", k, &nlp1, &ktemp, &c_b15, &q[q_dim1 + 1], ldq, &vt2[
+ vt2_dim1 + 1], ldvt2, &c_b29, &vt[vt_dim1 + 1], ldvt);
+ ktemp = ctot[1] + 2 + ctot[2];
+ if (ktemp <= *ldvt2) {
+ dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b15, &q[ktemp * q_dim1 + 1],
+ ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b15, &vt[vt_dim1 + 1],
+ ldvt);
+ }
+
+ ktemp = ctot[1] + 1;
+ nrp1 = *nr + *sqre;
+ if (ktemp > 1) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q[i__ + ktemp * q_dim1] = q[i__ + q_dim1];
+/* L130: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1];
+/* L140: */
+ }
+ }
+ ctemp = ctot[2] + 1 + ctot[3];
+ dgemm_("N", "N", k, &nrp1, &ctemp, &c_b15, &q[ktemp * q_dim1 + 1], ldq, &
+ vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b29, &vt[nlp2 * vt_dim1 +
+ 1], ldvt);
+
+ return 0;
+
+/* End of DLASD3 */
+
+} /* dlasd3_ */
+
+/* Subroutine */ int dlasd4_(integer *n, doublereal *i__, doublereal *d__,
+	doublereal *z__, doublereal *delta, doublereal *rho, doublereal *
+	sigma, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal a, b, c__;
+    static integer j;
+    static doublereal w, dd[3];
+    static integer ii;
+    static doublereal dw, zz[3];
+    static integer ip1;
+    static doublereal eta, phi, eps, tau, psi;
+    static integer iim1, iip1;
+    static doublereal dphi, dpsi;
+    static integer iter;
+    static doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq,
+	    dtiip;
+    static integer niter;
+    static doublereal dtisq;
+    static logical swtch;
+    static doublereal dtnsq;
+    extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *)
+	    , dlasd5_(integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *);
+    static doublereal delsq2, dtnsq1;
+    static logical swtch3;
+
+    static logical orgati;
+    static doublereal erretm, dtipsq, rhoinv;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    This subroutine computes the square root of the I-th updated
+    eigenvalue of a positive symmetric rank-one modification to
+    a positive diagonal matrix whose entries are given as the squares
+    of the corresponding entries in the array d, and that
+
+           0 <= D(i) < D(j)  for  i < j
+
+    and that RHO > 0. This is arranged by the calling routine, and is
+    no loss in generality.  The rank-one modified system is thus
+
+           diag( D ) * diag( D ) +  RHO *  Z * Z_transpose.
+
+    where we assume the Euclidean norm of Z is 1.
+
+    The method consists of approximating the rational functions in the
+    secular equation by simpler interpolating rational functions.
+
+    Arguments
+    =========
+
+    N      (input) INTEGER
+           The length of all arrays.
+
+    I      (input) INTEGER
+           The index of the eigenvalue to be computed.  1 <= I <= N.
+
+    D      (input) DOUBLE PRECISION array, dimension ( N )
+           The original eigenvalues.  It is assumed that they are in
+           order, 0 <= D(I) < D(J)  for I < J.
+
+    Z      (input) DOUBLE PRECISION array, dimension ( N )
+           The components of the updating vector.
+
+    DELTA  (output) DOUBLE PRECISION array, dimension ( N )
+           If N .ne. 1, DELTA contains (D(j) - sigma_I) in its  j-th
+           component.  If N = 1, then DELTA(1) = 1.  The vector DELTA
+           contains the information necessary to construct the
+           (singular) eigenvectors.
+
+    RHO    (input) DOUBLE PRECISION
+           The scalar in the symmetric updating formula.
+
+    SIGMA  (output) DOUBLE PRECISION
+           The computed lambda_I, the I-th updated eigenvalue.
+
+    WORK   (workspace) DOUBLE PRECISION array, dimension ( N )
+           If N .ne. 1, WORK contains (D(j) + sigma_I) in its  j-th
+           component.  If N = 1, then WORK( 1 ) = 1.
+
+    INFO   (output) INTEGER
+           = 0:  successful exit
+           > 0:  if INFO = 1, the updating process failed.
+
+    Internal Parameters
+    ===================
+
+    Logical variable ORGATI (origin-at-i?) is used for distinguishing
+    whether D(i) or D(i+1) is treated as the origin.
+
+              ORGATI = .true.    origin at i
+              ORGATI = .false.   origin at i+1
+
+    Logical variable SWTCH3 (switch-for-3-poles?) is for noting
+    if we are working with THREE poles!
+
+    MAXIT is the maximum number of iterations allowed for each
+    eigenvalue.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ren-Cang Li, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+
+
+       Since this routine is called in an inner loop, we do no argument
+       checking.
+
+       Quick return for N=1 and 2.
+*/
+
+    /* NOTE(review): f2c-translated Fortran (CLAPACK).  All local variables
+       are declared `static`, so this routine is non-reentrant and not
+       thread-safe; do not call it concurrently from multiple threads. */
+
+    /* Parameter adjustments */
+    --work;
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n == 1) {
+
+/*        Presumably, I=1 upon entry */
+
+	*sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]);
+	delta[1] = 1.;
+	work[1] = 1.;
+	return 0;
+    }
+    if (*n == 2) {
+	dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]);
+	return 0;
+    }
+
+/* Compute machine epsilon */
+
+    /* EPSILON is the f2c/CLAPACK macro for double-precision machine
+       epsilon (replaces the Fortran call to DLAMCH('Epsilon')). */
+    eps = EPSILON;
+    rhoinv = 1. / *rho;
+
+/* The case I = N */
+
+    if (*i__ == *n) {
+
+/*        Initialize some basic variables */
+
+	ii = *n - 1;
+	niter = 1;
+
+/*        Calculate initial guess */
+
+	temp = *rho / 2.;
+
+/*
+          If ||Z||_2 is not one, then TEMP should be set to
+          RHO * ||Z||_2^2 / TWO
+*/
+
+	temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp));
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    work[j] = d__[j] + d__[*n] + temp1;
+	    delta[j] = d__[j] - d__[*n] - temp1;
+/* L10: */
+	}
+
+	psi = 0.;
+	i__1 = *n - 2;
+	for (j = 1; j <= i__1; ++j) {
+	    psi += z__[j] * z__[j] / (delta[j] * work[j]);
+/* L20: */
+	}
+
+	c__ = rhoinv + psi;
+	w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[*
+		n] / (delta[*n] * work[*n]);
+
+	if (w <= 0.) {
+	    temp1 = sqrt(d__[*n] * d__[*n] + *rho);
+	    temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[*
+		    n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] *
+		    z__[*n] / *rho;
+
+/*
+             The following TAU is to approximate
+             SIGMA_n^2 - D( N )*D( N )
+*/
+
+	    if (c__ <= temp) {
+		tau = *rho;
+	    } else {
+		delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+		a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*
+			n];
+		b = z__[*n] * z__[*n] * delsq;
+		/* Pick the quadratic-root formula that avoids cancellation. */
+		if (a < 0.) {
+		    tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+		} else {
+		    tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+		}
+	    }
+
+/*
+             It can be proved that
+                 D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO
+*/
+
+	} else {
+	    delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+	    a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+	    b = z__[*n] * z__[*n] * delsq;
+
+/*
+             The following TAU is to approximate
+             SIGMA_n^2 - D( N )*D( N )
+*/
+
+	    if (a < 0.) {
+		tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+	    } else {
+		tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+	    }
+
+/*
+             It can be proved that
+             D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2
+*/
+
+	}
+
+/*        The following ETA is to approximate SIGMA_n - D( N ) */
+
+	eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau));
+
+	*sigma = d__[*n] + eta;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] = d__[j] - d__[*i__] - eta;
+	    work[j] = d__[j] + d__[*i__] + eta;
+/* L30: */
+	}
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = ii;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (delta[j] * work[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L40: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	temp = z__[*n] / (delta[*n] * work[*n]);
+	phi = z__[*n] * temp;
+	dphi = temp * temp;
+	erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+		+ dphi);
+
+	w = rhoinv + phi + psi;
+
+/*        Test for convergence */
+
+	if (abs(w) <= eps * erretm) {
+	    goto L240;
+	}
+
+/*        Calculate the new step */
+
+	++niter;
+	dtnsq1 = work[*n - 1] * delta[*n - 1];
+	dtnsq = work[*n] * delta[*n];
+	c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+	a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi);
+	b = dtnsq * dtnsq1 * w;
+	if (c__ < 0.) {
+	    c__ = abs(c__);
+	}
+	if (c__ == 0.) {
+	    eta = *rho - *sigma * *sigma;
+	} else if (a >= 0.) {
+	    eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+		    * 2.);
+	} else {
+	    eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+		    );
+	}
+
+/*
+          Note, eta should be positive if w is negative, and
+          eta should be negative otherwise. However,
+          if for some reason caused by roundoff, eta*w > 0,
+          we simply use one Newton step instead. This way
+          will guarantee eta*w < 0.
+*/
+
+	if (w * eta > 0.) {
+	    eta = -w / (dpsi + dphi);
+	}
+	temp = eta - dtnsq;
+	if (temp > *rho) {
+	    eta = *rho + dtnsq;
+	}
+
+	tau += eta;
+	eta /= *sigma + sqrt(eta + *sigma * *sigma);
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] -= eta;
+	    work[j] += eta;
+/* L50: */
+	}
+
+	*sigma += eta;
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = ii;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L60: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	temp = z__[*n] / (work[*n] * delta[*n]);
+	phi = z__[*n] * temp;
+	dphi = temp * temp;
+	erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+		+ dphi);
+
+	w = rhoinv + phi + psi;
+
+/*        Main loop to update the values of the array   DELTA */
+
+	iter = niter + 1;
+
+	/* Iterate at most MAXIT = 20 times (see Internal Parameters). */
+	for (niter = iter; niter <= 20; ++niter) {
+
+/*           Test for convergence */
+
+	    if (abs(w) <= eps * erretm) {
+		goto L240;
+	    }
+
+/*           Calculate the new step */
+
+	    dtnsq1 = work[*n - 1] * delta[*n - 1];
+	    dtnsq = work[*n] * delta[*n];
+	    c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+	    a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi);
+	    b = dtnsq1 * dtnsq * w;
+	    if (a >= 0.) {
+		eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    } else {
+		eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    }
+
+/*
+             Note, eta should be positive if w is negative, and
+             eta should be negative otherwise. However,
+             if for some reason caused by roundoff, eta*w > 0,
+             we simply use one Newton step instead. This way
+             will guarantee eta*w < 0.
+*/
+
+	    if (w * eta > 0.) {
+		eta = -w / (dpsi + dphi);
+	    }
+	    temp = eta - dtnsq;
+	    if (temp <= 0.) {
+		eta /= 2.;
+	    }
+
+	    tau += eta;
+	    eta /= *sigma + sqrt(eta + *sigma * *sigma);
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		delta[j] -= eta;
+		work[j] += eta;
+/* L70: */
+	    }
+
+	    *sigma += eta;
+
+/*           Evaluate PSI and the derivative DPSI */
+
+	    dpsi = 0.;
+	    psi = 0.;
+	    erretm = 0.;
+	    i__1 = ii;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = z__[j] / (work[j] * delta[j]);
+		psi += z__[j] * temp;
+		dpsi += temp * temp;
+		erretm += psi;
+/* L80: */
+	    }
+	    erretm = abs(erretm);
+
+/*           Evaluate PHI and the derivative DPHI */
+
+	    temp = z__[*n] / (work[*n] * delta[*n]);
+	    phi = z__[*n] * temp;
+	    dphi = temp * temp;
+	    erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
+		    dpsi + dphi);
+
+	    w = rhoinv + phi + psi;
+/* L90: */
+	}
+
+/*        Return with INFO = 1, NITER = MAXIT and not converged */
+
+	*info = 1;
+	goto L240;
+
+/*        End for the case I = N */
+
+    } else {
+
+/*        The case for I < N */
+
+	niter = 1;
+	ip1 = *i__ + 1;
+
+/*        Calculate initial guess */
+
+	delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]);
+	delsq2 = delsq / 2.;
+	temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2));
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    work[j] = d__[j] + d__[*i__] + temp;
+	    delta[j] = d__[j] - d__[*i__] - temp;
+/* L100: */
+	}
+
+	psi = 0.;
+	i__1 = *i__ - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    psi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L110: */
+	}
+
+	phi = 0.;
+	i__1 = *i__ + 2;
+	for (j = *n; j >= i__1; --j) {
+	    phi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L120: */
+	}
+	c__ = rhoinv + psi + phi;
+	w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[
+		ip1] * z__[ip1] / (work[ip1] * delta[ip1]);
+
+	if (w > 0.) {
+
+/*
+             d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2
+
+             We choose d(i) as origin.
+*/
+
+	    orgati = TRUE_;
+	    sg2lb = 0.;
+	    sg2ub = delsq2;
+	    a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+	    b = z__[*i__] * z__[*i__] * delsq;
+	    if (a > 0.) {
+		tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    } else {
+		tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    }
+
+/*
+             TAU now is an estimation of SIGMA^2 - D( I )^2. The
+             following, however, is the corresponding estimation of
+             SIGMA - D( I ).
+*/
+
+	    eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau));
+	} else {
+
+/*
+             (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2/2
+
+             We choose d(i+1) as origin.
+*/
+
+	    orgati = FALSE_;
+	    sg2lb = -delsq2;
+	    sg2ub = 0.;
+	    a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+	    b = z__[ip1] * z__[ip1] * delsq;
+	    if (a < 0.) {
+		tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
+			d__1))));
+	    } else {
+		tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
+			(c__ * 2.);
+	    }
+
+/*
+             TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The
+             following, however, is the corresponding estimation of
+             SIGMA - D( IP1 ).
+*/
+
+	    eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau,
+		    abs(d__1))));
+	}
+
+	if (orgati) {
+	    ii = *i__;
+	    *sigma = d__[*i__] + eta;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] = d__[j] + d__[*i__] + eta;
+		delta[j] = d__[j] - d__[*i__] - eta;
+/* L130: */
+	    }
+	} else {
+	    ii = *i__ + 1;
+	    *sigma = d__[ip1] + eta;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] = d__[j] + d__[ip1] + eta;
+		delta[j] = d__[j] - d__[ip1] - eta;
+/* L140: */
+	    }
+	}
+	iim1 = ii - 1;
+	iip1 = ii + 1;
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = iim1;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L150: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	dphi = 0.;
+	phi = 0.;
+	i__1 = iip1;
+	for (j = *n; j >= i__1; --j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    phi += z__[j] * temp;
+	    dphi += temp * temp;
+	    erretm += phi;
+/* L160: */
+	}
+
+	w = rhoinv + phi + psi;
+
+/*
+          W is the value of the secular function with
+          its ii-th element removed.
+*/
+
+	swtch3 = FALSE_;
+	if (orgati) {
+	    if (w < 0.) {
+		swtch3 = TRUE_;
+	    }
+	} else {
+	    if (w > 0.) {
+		swtch3 = TRUE_;
+	    }
+	}
+	if (ii == 1 || ii == *n) {
+	    swtch3 = FALSE_;
+	}
+
+	temp = z__[ii] / (work[ii] * delta[ii]);
+	dw = dpsi + dphi + temp * temp;
+	temp = z__[ii] * temp;
+	w += temp;
+	erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+		abs(tau) * dw;
+
+/*        Test for convergence */
+
+	if (abs(w) <= eps * erretm) {
+	    goto L240;
+	}
+
+	if (w <= 0.) {
+	    sg2lb = max(sg2lb,tau);
+	} else {
+	    sg2ub = min(sg2ub,tau);
+	}
+
+/*        Calculate the new step */
+
+	++niter;
+	if (! swtch3) {
+	    dtipsq = work[ip1] * delta[ip1];
+	    dtisq = work[*i__] * delta[*i__];
+	    if (orgati) {
+/* Computing 2nd power */
+		d__1 = z__[*i__] / dtisq;
+		c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
+	    } else {
+/* Computing 2nd power */
+		d__1 = z__[ip1] / dtipsq;
+		c__ = w - dtisq * dw - delsq * (d__1 * d__1);
+	    }
+	    a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+	    b = dtipsq * dtisq * w;
+	    if (c__ == 0.) {
+		if (a == 0.) {
+		    if (orgati) {
+			a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi +
+				dphi);
+		    } else {
+			a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi +
+				dphi);
+		    }
+		}
+		eta = b / a;
+	    } else if (a <= 0.) {
+		eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    } else {
+		eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    }
+	} else {
+
+/*           Interpolation using THREE most relevant poles */
+
+	    dtiim = work[iim1] * delta[iim1];
+	    dtiip = work[iip1] * delta[iip1];
+	    temp = rhoinv + psi + phi;
+	    if (orgati) {
+		temp1 = z__[iim1] / dtiim;
+		temp1 *= temp1;
+		c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) *
+			 (d__[iim1] + d__[iip1]) * temp1;
+		zz[0] = z__[iim1] * z__[iim1];
+		if (dpsi < temp1) {
+		    zz[2] = dtiip * dtiip * dphi;
+		} else {
+		    zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+		}
+	    } else {
+		temp1 = z__[iip1] / dtiip;
+		temp1 *= temp1;
+		c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) *
+			 (d__[iim1] + d__[iip1]) * temp1;
+		if (dphi < temp1) {
+		    zz[0] = dtiim * dtiim * dpsi;
+		} else {
+		    zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+		}
+		zz[2] = z__[iip1] * z__[iip1];
+	    }
+	    zz[1] = z__[ii] * z__[ii];
+	    dd[0] = dtiim;
+	    dd[1] = delta[ii] * work[ii];
+	    dd[2] = dtiip;
+	    dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+	    if (*info != 0) {
+		goto L240;
+	    }
+	}
+
+/*
+          Note, eta should be positive if w is negative, and
+          eta should be negative otherwise. However,
+          if for some reason caused by roundoff, eta*w > 0,
+          we simply use one Newton step instead. This way
+          will guarantee eta*w < 0.
+*/
+
+	if (w * eta >= 0.) {
+	    eta = -w / dw;
+	}
+	if (orgati) {
+	    temp1 = work[*i__] * delta[*i__];
+	    temp = eta - temp1;
+	} else {
+	    temp1 = work[ip1] * delta[ip1];
+	    temp = eta - temp1;
+	}
+	if (temp > sg2ub || temp < sg2lb) {
+	    if (w < 0.) {
+		eta = (sg2ub - tau) / 2.;
+	    } else {
+		eta = (sg2lb - tau) / 2.;
+	    }
+	}
+
+	tau += eta;
+	eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+	prew = w;
+
+	*sigma += eta;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    work[j] += eta;
+	    delta[j] -= eta;
+/* L170: */
+	}
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = iim1;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L180: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	dphi = 0.;
+	phi = 0.;
+	i__1 = iip1;
+	for (j = *n; j >= i__1; --j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    phi += z__[j] * temp;
+	    dphi += temp * temp;
+	    erretm += phi;
+/* L190: */
+	}
+
+	temp = z__[ii] / (work[ii] * delta[ii]);
+	dw = dpsi + dphi + temp * temp;
+	temp = z__[ii] * temp;
+	w = rhoinv + phi + psi + temp;
+	erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+		abs(tau) * dw;
+
+	if (w <= 0.) {
+	    sg2lb = max(sg2lb,tau);
+	} else {
+	    sg2ub = min(sg2ub,tau);
+	}
+
+	swtch = FALSE_;
+	if (orgati) {
+	    if (-w > abs(prew) / 10.) {
+		swtch = TRUE_;
+	    }
+	} else {
+	    if (w > abs(prew) / 10.) {
+		swtch = TRUE_;
+	    }
+	}
+
+/*        Main loop to update the values of the array   DELTA and WORK */
+
+	iter = niter + 1;
+
+	/* Iterate at most MAXIT = 20 times (see Internal Parameters). */
+	for (niter = iter; niter <= 20; ++niter) {
+
+/*           Test for convergence */
+
+	    if (abs(w) <= eps * erretm) {
+		goto L240;
+	    }
+
+/*           Calculate the new step */
+
+	    if (! swtch3) {
+		dtipsq = work[ip1] * delta[ip1];
+		dtisq = work[*i__] * delta[*i__];
+		if (! swtch) {
+		    if (orgati) {
+/* Computing 2nd power */
+			d__1 = z__[*i__] / dtisq;
+			c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
+		    } else {
+/* Computing 2nd power */
+			d__1 = z__[ip1] / dtipsq;
+			c__ = w - dtisq * dw - delsq * (d__1 * d__1);
+		    }
+		} else {
+		    temp = z__[ii] / (work[ii] * delta[ii]);
+		    if (orgati) {
+			dpsi += temp * temp;
+		    } else {
+			dphi += temp * temp;
+		    }
+		    c__ = w - dtisq * dpsi - dtipsq * dphi;
+		}
+		a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+		b = dtipsq * dtisq * w;
+		if (c__ == 0.) {
+		    if (a == 0.) {
+			if (! swtch) {
+			    if (orgati) {
+				a = z__[*i__] * z__[*i__] + dtipsq * dtipsq *
+					(dpsi + dphi);
+			    } else {
+				a = z__[ip1] * z__[ip1] + dtisq * dtisq * (
+					dpsi + dphi);
+			    }
+			} else {
+			    a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi;
+			}
+		    }
+		    eta = b / a;
+		} else if (a <= 0.) {
+		    eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
+			     / (c__ * 2.);
+		} else {
+		    eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
+			    abs(d__1))));
+		}
+	    } else {
+
+/*              Interpolation using THREE most relevant poles */
+
+		dtiim = work[iim1] * delta[iim1];
+		dtiip = work[iip1] * delta[iip1];
+		temp = rhoinv + psi + phi;
+		if (swtch) {
+		    c__ = temp - dtiim * dpsi - dtiip * dphi;
+		    zz[0] = dtiim * dtiim * dpsi;
+		    zz[2] = dtiip * dtiip * dphi;
+		} else {
+		    if (orgati) {
+			temp1 = z__[iim1] / dtiim;
+			temp1 *= temp1;
+			temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[
+				iip1]) * temp1;
+			c__ = temp - dtiip * (dpsi + dphi) - temp2;
+			zz[0] = z__[iim1] * z__[iim1];
+			if (dpsi < temp1) {
+			    zz[2] = dtiip * dtiip * dphi;
+			} else {
+			    zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+			}
+		    } else {
+			temp1 = z__[iip1] / dtiip;
+			temp1 *= temp1;
+			temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[
+				iip1]) * temp1;
+			c__ = temp - dtiim * (dpsi + dphi) - temp2;
+			if (dphi < temp1) {
+			    zz[0] = dtiim * dtiim * dpsi;
+			} else {
+			    zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+			}
+			zz[2] = z__[iip1] * z__[iip1];
+		    }
+		}
+		dd[0] = dtiim;
+		dd[1] = delta[ii] * work[ii];
+		dd[2] = dtiip;
+		dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+		if (*info != 0) {
+		    goto L240;
+		}
+	    }
+
+/*
+             Note, eta should be positive if w is negative, and
+             eta should be negative otherwise. However,
+             if for some reason caused by roundoff, eta*w > 0,
+             we simply use one Newton step instead. This way
+             will guarantee eta*w < 0.
+*/
+
+	    if (w * eta >= 0.) {
+		eta = -w / dw;
+	    }
+	    if (orgati) {
+		temp1 = work[*i__] * delta[*i__];
+		temp = eta - temp1;
+	    } else {
+		temp1 = work[ip1] * delta[ip1];
+		temp = eta - temp1;
+	    }
+	    if (temp > sg2ub || temp < sg2lb) {
+		if (w < 0.) {
+		    eta = (sg2ub - tau) / 2.;
+		} else {
+		    eta = (sg2lb - tau) / 2.;
+		}
+	    }
+
+	    tau += eta;
+	    eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+	    *sigma += eta;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] += eta;
+		delta[j] -= eta;
+/* L200: */
+	    }
+
+	    prew = w;
+
+/*           Evaluate PSI and the derivative DPSI */
+
+	    dpsi = 0.;
+	    psi = 0.;
+	    erretm = 0.;
+	    i__1 = iim1;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = z__[j] / (work[j] * delta[j]);
+		psi += z__[j] * temp;
+		dpsi += temp * temp;
+		erretm += psi;
+/* L210: */
+	    }
+	    erretm = abs(erretm);
+
+/*           Evaluate PHI and the derivative DPHI */
+
+	    dphi = 0.;
+	    phi = 0.;
+	    i__1 = iip1;
+	    for (j = *n; j >= i__1; --j) {
+		temp = z__[j] / (work[j] * delta[j]);
+		phi += z__[j] * temp;
+		dphi += temp * temp;
+		erretm += phi;
+/* L220: */
+	    }
+
+	    temp = z__[ii] / (work[ii] * delta[ii]);
+	    dw = dpsi + dphi + temp * temp;
+	    temp = z__[ii] * temp;
+	    w = rhoinv + phi + psi + temp;
+	    erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
+		     + abs(tau) * dw;
+	    if ((w * prew > 0. && abs(w) > abs(prew) / 10.)) {
+		swtch = ! swtch;
+	    }
+
+	    if (w <= 0.) {
+		sg2lb = max(sg2lb,tau);
+	    } else {
+		sg2ub = min(sg2ub,tau);
+	    }
+
+/* L230: */
+	}
+
+/*        Return with INFO = 1, NITER = MAXIT and not converged */
+
+	*info = 1;
+
+    }
+
+L240:
+    return 0;
+
+/* End of DLASD4 */
+
+} /* dlasd4_ */
+
+/* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__,
+	doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal *
+	work)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal b, c__, w, del, tau, delsq;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+       Courant Institute, NAG Ltd., and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    This subroutine computes the square root of the I-th eigenvalue
+    of a positive symmetric rank-one modification of a 2-by-2 diagonal
+    matrix
+
+               diag( D ) * diag( D ) +  RHO *  Z * transpose(Z) .
+
+    The diagonal entries in the array D are assumed to satisfy
+
+               0 <= D(i) < D(j)  for  i < j .
+
+    We also assume RHO > 0 and that the Euclidean norm of the vector
+    Z is one.
+
+    Arguments
+    =========
+
+    I      (input) INTEGER
+           The index of the eigenvalue to be computed.  I = 1 or I = 2.
+
+    D      (input) DOUBLE PRECISION array, dimension ( 2 )
+           The original eigenvalues.  We assume 0 <= D(1) < D(2).
+
+    Z      (input) DOUBLE PRECISION array, dimension ( 2 )
+           The components of the updating vector.
+
+    DELTA  (output) DOUBLE PRECISION array, dimension ( 2 )
+           Contains (D(j) - lambda_I) in its  j-th component.
+           The vector DELTA contains the information necessary
+           to construct the eigenvectors.
+
+    RHO    (input) DOUBLE PRECISION
+           The scalar in the symmetric updating formula.
+
+    DSIGMA (output) DOUBLE PRECISION
+           The computed lambda_I, the I-th updated eigenvalue.
+
+    WORK   (workspace) DOUBLE PRECISION array, dimension ( 2 )
+           WORK contains (D(j) + sigma_I) in its  j-th component.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ren-Cang Li, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+*/
+
+
+    /* NOTE(review): f2c-translated Fortran (CLAPACK).  Locals are `static`,
+       so this routine is non-reentrant and not thread-safe. */
+
+    /* Parameter adjustments */
+    --work;
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+    /* DEL = D(2) - D(1) (> 0 by assumption); DELSQ = D(2)^2 - D(1)^2. */
+    del = d__[2] - d__[1];
+    delsq = del * (d__[2] + d__[1]);
+    if (*i__ == 1) {
+	w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] *
+		z__[1] / (d__[1] * 3. + d__[2])) / del + 1.;
+	if (w > 0.) {
+	    b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[1] * z__[1] * delsq;
+
+/*
+             B > ZERO, always
+
+             The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 )
+*/
+
+	    tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
+
+/*           The following TAU is DSIGMA - D( 1 ) */
+
+	    tau /= d__[1] + sqrt(d__[1] * d__[1] + tau);
+	    *dsigma = d__[1] + tau;
+	    delta[1] = -tau;
+	    delta[2] = del - tau;
+	    work[1] = d__[1] * 2. + tau;
+	    work[2] = d__[1] + tau + d__[2];
+/*
+             DELTA( 1 ) = -Z( 1 ) / TAU
+             DELTA( 2 ) = Z( 2 ) / ( DEL-TAU )
+*/
+	} else {
+	    b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[2] * z__[2] * delsq;
+
+/*           The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+	    /* The two branches are algebraically equivalent quadratic roots;
+	       the one chosen avoids subtractive cancellation for each sign
+	       of B. */
+	    if (b > 0.) {
+		tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
+	    } else {
+		tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
+	    }
+
+/*           The following TAU is DSIGMA - D( 2 ) */
+
+	    tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1)));
+	    *dsigma = d__[2] + tau;
+	    delta[1] = -(del + tau);
+	    delta[2] = -tau;
+	    work[1] = d__[1] + tau + d__[2];
+	    work[2] = d__[2] * 2. + tau;
+/*
+             DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
+             DELTA( 2 ) = -Z( 2 ) / TAU
+*/
+	}
+/*
+          TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
+          DELTA( 1 ) = DELTA( 1 ) / TEMP
+          DELTA( 2 ) = DELTA( 2 ) / TEMP
+*/
+    } else {
+
+/*        Now I=2 */
+
+	b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	c__ = *rho * z__[2] * z__[2] * delsq;
+
+/*        The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+	/* Again pick the cancellation-free form of the quadratic root. */
+	if (b > 0.) {
+	    tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
+	} else {
+	    tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
+	}
+
+/*        The following TAU is DSIGMA - D( 2 ) */
+
+	tau /= d__[2] + sqrt(d__[2] * d__[2] + tau);
+	*dsigma = d__[2] + tau;
+	delta[1] = -(del + tau);
+	delta[2] = -tau;
+	work[1] = d__[1] + tau + d__[2];
+	work[2] = d__[2] * 2. + tau;
+/*
+          DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
+          DELTA( 2 ) = -Z( 2 ) / TAU
+          TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
+          DELTA( 1 ) = DELTA( 1 ) / TEMP
+          DELTA( 2 ) = DELTA( 2 ) / TEMP
+*/
+    }
+    return 0;
+
+/* End of DLASD5 */
+
+} /* dlasd5_ */
+
+/* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl,
+ doublereal *alpha, doublereal *beta, integer *idxq, integer *perm,
+ integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
+ integer *ldgnum, doublereal *poles, doublereal *difl, doublereal *
+ difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlasd7_(integer *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), dlasd8_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlamrg_(integer *, integer *,
+ doublereal *, integer *, integer *, integer *);
+ static integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal orgnrm;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD6 computes the SVD of an updated upper bidiagonal matrix B
+ obtained by merging two smaller ones by appending a row. This
+ routine is used only for the problem which requires all singular
+ values and optionally singular vector matrices in factored form.
+ B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE.
+ A related subroutine, DLASD1, handles the case in which all singular
+ values and singular vectors of the bidiagonal matrix are desired.
+
+ DLASD6 computes the SVD as follows:
+
+ ( D1(in) 0 0 0 )
+ B = U(in) * ( Z1' a Z2' b ) * VT(in)
+ ( 0 0 D2(in) 0 )
+
+ = U(out) * ( D(out) 0) * VT(out)
+
+ where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
+ with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros
+ elsewhere; and the entry b is empty if SQRE = 0.
+
+ The singular values of B can be computed using D1, D2, the first
+ components of all the right singular vectors of the lower block, and
+ the last components of all the right singular vectors of the upper
+ block. These components are stored and updated in VF and VL,
+ respectively, in DLASD6. Hence U and VT are not explicitly
+ referenced.
+
+ The singular values are stored in D. The algorithm consists of two
+ stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple singular values or if there is a zero
+ in the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine DLASD7.
+
+ The second stage consists of calculating the updated
+ singular values. This is done by finding the roots of the
+ secular equation via the routine DLASD4 (as called by DLASD8).
+ This routine also updates VF and VL and computes the distances
+ between the updated singular values and the old singular
+ values.
+
+ DLASD6 is called from DLASDA.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors in factored form as well.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ).
+ On entry D(1:NL,1:NL) contains the singular values of the
+ upper block, and D(NL+2:N) contains the singular values
+ of the lower block. On exit D(1:N) contains the singular
+ values of the modified matrix.
+
+ VF (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VF(1:NL+1) contains the first components of all
+ right singular vectors of the upper block; and VF(NL+2:M)
+ contains the first components of all right singular vectors
+ of the lower block. On exit, VF contains the first components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VL (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VL(1:NL+1) contains the last components of all
+ right singular vectors of the upper block; and VL(NL+2:M)
+ contains the last components of all right singular vectors of
+ the lower block. On exit, VL contains the last components of
+ all right singular vectors of the bidiagonal matrix.
+
+ ALPHA (input) DOUBLE PRECISION
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) DOUBLE PRECISION
+ Contains the off-diagonal element associated with the added
+ row.
+
+ IDXQ (output) INTEGER array, dimension ( N )
+ This contains the permutation which will reintegrate the
+ subproblem just solved back into sorted order, i.e.
+ D( IDXQ( I = 1, N ) ) will be in ascending order.
+
+ PERM (output) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) to be applied
+ to each block. Not referenced if ICOMPQ = 0.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem. Not referenced if ICOMPQ = 0.
+
+ GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of columns to take place
+ in a Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGCOL (input) INTEGER
+ leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value to be used in the
+ corresponding Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of GIVNUM and POLES, must be at least N.
+
+ POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ On exit, POLES(1,*) is an array containing the new singular
+ values obtained from solving the secular equation, and
+ POLES(2,*) is an array containing the poles in the secular
+ equation. Not referenced if ICOMPQ = 0.
+
+ DIFL (output) DOUBLE PRECISION array, dimension ( N )
+ On exit, DIFL(I) is the distance between I-th updated
+ (undeflated) singular value and the I-th (undeflated) old
+ singular value.
+
+ DIFR (output) DOUBLE PRECISION array,
+ dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ On exit, DIFR(I, 1) is the distance between I-th updated
+ (undeflated) singular value and the I+1-th (undeflated) old
+ singular value.
+
+ If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
+ normalizing factors for the right singular vector matrix.
+
+ See DLASD8 for details on DIFL and DIFR.
+
+ Z (output) DOUBLE PRECISION array, dimension ( M )
+ The first elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix,
+ This is the order of the related secular equation. 1 <= K <=N.
+
+ C (output) DOUBLE PRECISION
+ C contains garbage if SQRE =0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (output) DOUBLE PRECISION
+ S contains garbage if SQRE =0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M )
+
+ IWORK (workspace) INTEGER array, dimension ( 3 * N )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --vf;
+ --vl;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1 * 1;
+ givcol -= givcol_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1 * 1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1 * 1;
+ givnum -= givnum_offset;
+ --difl;
+ --difr;
+ --z__;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -14;
+ } else if (*ldgnum < n) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD6", &i__1);
+ return 0;
+ }
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in DLASD7 and DLASD8.
+*/
+
+ isigma = 1;
+ iw = isigma + n;
+ ivfw = iw + m;
+ ivlw = ivfw + m;
+
+ idx = 1;
+ idxc = idx + n;
+ idxp = idxc + n;
+
+/*
+ Scale.
+
+ Computing MAX
+*/
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ orgnrm = max(d__1,d__2);
+/* The appended row contributes a zero diagonal entry at position NL+1. */
+ d__[*nl + 1] = 0.;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
+ orgnrm = (d__1 = d__[i__], abs(d__1));
+ }
+/* L10: */
+ }
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
+
+/* Sort and Deflate singular values. */
+
+ dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], &
+ work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], &
+ iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[
+ givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s,
+ info);
+
+/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */
+
+ dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1],
+ ldgnum, &work[isigma], &work[iw], info);
+
+/* Save the poles if ICOMPQ = 1. */
+
+ if (*icompq == 1) {
+ dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1);
+ dcopy_(k, &work[isigma], &c__1, &poles[((poles_dim1) << (1)) + 1], &
+ c__1);
+ }
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+ n1 = *k;
+ n2 = n - *k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of DLASD6 */
+
+} /* dlasd6_ */
+
+/* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *k, doublereal *d__, doublereal *z__,
+ doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl,
+ doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal *
+ dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm,
+ integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
+ integer *ldgnum, doublereal *c__, doublereal *s, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static integer i__, j, m, n, k2;
+ static doublereal z1;
+ static integer jp;
+ static doublereal eps, tau, tol;
+ static integer nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer idxjp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer jprev;
+
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ static doublereal hlftol;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD7 merges the two sets of singular values together into a single
+ sorted set. Then it tries to deflate the size of the problem. There
+ are two ways in which deflation can occur: when two or more singular
+ values are close together or if there is a tiny entry in the Z
+ vector. For each such occurrence the order of the related
+ secular equation problem is reduced by one.
+
+ DLASD7 is called from DLASD6.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed
+ in compact form, as follows:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors of upper
+ bidiagonal matrix in compact form.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has
+ N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix, this is
+ the order of the related secular equation. 1 <= K <=N.
+
+ D (input/output) DOUBLE PRECISION array, dimension ( N )
+ On entry D contains the singular values of the two submatrices
+ to be combined. On exit D contains the trailing (N-K) updated
+ singular values (those which were deflated) sorted into
+ increasing order.
+
+ Z (output) DOUBLE PRECISION array, dimension ( M )
+ On exit Z contains the updating row vector in the secular
+ equation.
+
+ ZW (workspace) DOUBLE PRECISION array, dimension ( M )
+ Workspace for Z.
+
+ VF (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VF(1:NL+1) contains the first components of all
+ right singular vectors of the upper block; and VF(NL+2:M)
+ contains the first components of all right singular vectors
+ of the lower block. On exit, VF contains the first components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VFW (workspace) DOUBLE PRECISION array, dimension ( M )
+ Workspace for VF.
+
+ VL (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VL(1:NL+1) contains the last components of all
+ right singular vectors of the upper block; and VL(NL+2:M)
+ contains the last components of all right singular vectors
+ of the lower block. On exit, VL contains the last components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VLW (workspace) DOUBLE PRECISION array, dimension ( M )
+ Workspace for VL.
+
+ ALPHA (input) DOUBLE PRECISION
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) DOUBLE PRECISION
+ Contains the off-diagonal element associated with the added
+ row.
+
+ DSIGMA (output) DOUBLE PRECISION array, dimension ( N )
+ Contains a copy of the diagonal elements (K-1 singular values
+ and one zero) in the secular equation.
+
+ IDX (workspace) INTEGER array, dimension ( N )
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ IDXP (workspace) INTEGER array, dimension ( N )
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output IDXP(2:K)
+ points to the nondeflated D-values and IDXP(K+1:N)
+ points to the deflated singular values.
+
+ IDXQ (input) INTEGER array, dimension ( N )
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that entries in
+ the first half of this permutation must first be moved one
+ position backward; and entries in the second half
+ must first have NL+1 added to their values.
+
+ PERM (output) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) to be applied
+ to each singular block. Not referenced if ICOMPQ = 0.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem. Not referenced if ICOMPQ = 0.
+
+ GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of columns to take place
+ in a Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value to be used in the
+ corresponding Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of GIVNUM, must be at least N.
+
+ C (output) DOUBLE PRECISION
+ C contains garbage if SQRE =0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (output) DOUBLE PRECISION
+ S contains garbage if SQRE =0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --zw;
+ --vf;
+ --vfw;
+ --vl;
+ --vlw;
+ --dsigma;
+ --idx;
+ --idxp;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1 * 1;
+ givcol -= givcol_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1 * 1;
+ givnum -= givnum_offset;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -22;
+ } else if (*ldgnum < n) {
+ *info = -24;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD7", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+ if (*icompq == 1) {
+ *givptr = 0;
+ }
+
+/*
+ Generate the first part of the vector Z and move the singular
+ values in the first part of D one position backward.
+*/
+
+ z1 = *alpha * vl[nlp1];
+ vl[nlp1] = 0.;
+ tau = vf[nlp1];
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vl[i__];
+ vl[i__] = 0.;
+ vf[i__ + 1] = vf[i__];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+ vf[1] = tau;
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vf[i__];
+ vf[i__] = 0.;
+/* L20: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L30: */
+ }
+
+/* DSIGMA, IDXC, IDXC, and ZW are used as storage space. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ zw[i__] = z__[idxq[i__]];
+ vfw[i__] = vf[idxq[i__]];
+ vlw[i__] = vl[idxq[i__]];
+/* L40: */
+ }
+
+ dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = zw[idxi];
+ vf[i__] = vfw[idxi];
+ vl[i__] = vlw[idxi];
+/* L50: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ eps = EPSILON;
+/* Computing MAX */
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ tol = max(d__1,d__2);
+/* Computing MAX */
+ d__2 = (d__1 = d__[n], abs(d__1));
+ tol = eps * 64. * max(d__2,tol);
+
+/*
+ There are 2 kinds of deflation -- first a value in the z-vector
+ is small, second two (or more) singular values are very close
+ together (their difference is small).
+
+ If the value in the z-vector is small, we simply permute the
+ array so that the corresponding singular value is moved to the
+ end.
+
+ If two values in the D-vector are close, we perform a two-sided
+ rotation designed to make one of the corresponding z-vector
+ entries zero, and then permute the array so that the deflated
+ singular value is moved to the end.
+
+ If there are multiple singular values then the problem deflates.
+ Here the number of equal singular values are found. As each equal
+ singular value is found, an elementary reflector is computed to
+ rotate the corresponding singular subspace so that the
+ corresponding components of Z are zero in this new basis.
+*/
+
+/* K counts the nondeflated values (filled from the front of IDXP); */
+/* K2 marks the deflated values (filled from the back of IDXP). */
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ if (j == n) {
+ goto L100;
+ }
+ } else {
+ jprev = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ j = jprev;
+L80:
+ ++j;
+ if (j > n) {
+ goto L90;
+ }
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ *s = z__[jprev];
+ *c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = dlapy2_(c__, s);
+ z__[j] = tau;
+ z__[jprev] = 0.;
+ *c__ /= tau;
+ *s = -(*s) / tau;
+
+/* Record the appropriate Givens rotation */
+
+ if (*icompq == 1) {
+ ++(*givptr);
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ givcol[*givptr + ((givcol_dim1) << (1))] = idxjp;
+ givcol[*givptr + givcol_dim1] = idxj;
+ givnum[*givptr + ((givnum_dim1) << (1))] = *c__;
+ givnum[*givptr + givnum_dim1] = *s;
+ }
+ drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s);
+ drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s);
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L80;
+L90:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L100:
+
+/*
+ Sort the singular values into DSIGMA. The singular values which
+ were not deflated go into the first K slots of DSIGMA, except
+ that DSIGMA(1) is treated separately.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ vfw[j] = vf[jp];
+ vlw[j] = vl[jp];
+/* L110: */
+ }
+ if (*icompq == 1) {
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ perm[j] = idxq[idx[jp] + 1];
+ if (perm[j] <= nlp1) {
+ --perm[j];
+ }
+/* L120: */
+ }
+ }
+
+/*
+ The deflated singular values go back into the last N - K slots of
+ D.
+*/
+
+ i__1 = n - *k;
+ dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/*
+ Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and
+ VL(M).
+*/
+
+ dsigma[1] = 0.;
+ hlftol = tol / 2.;
+ if (abs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = dlapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ *c__ = 1.;
+ *s = 0.;
+ z__[1] = tol;
+ } else {
+ *c__ = z1 / z__[1];
+ *s = -z__[m] / z__[1];
+ }
+ drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s);
+ drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s);
+ } else {
+ if (abs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Restore Z, VF, and VL. */
+
+ i__1 = *k - 1;
+ dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1);
+ i__1 = n - 1;
+ dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1);
+ i__1 = n - 1;
+ dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1);
+
+ return 0;
+
+/* End of DLASD7 */
+
+} /* dlasd7_ */
+
+/* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__,
+ doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl,
+ doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer difr_dim1, difr_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal dj, rho;
+ static integer iwk1, iwk2, iwk3;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ static integer iwk2i, iwk3i;
+ static doublereal diflj, difrj, dsigj;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ static doublereal dsigjp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD8 finds the square roots of the roots of the secular equation,
+ as defined by the values in DSIGMA and Z. It makes the appropriate
+ calls to DLASD4, and stores, for each element in D, the distance
+ to its two nearest poles (elements in DSIGMA). It also updates
+ the arrays VF and VL, the first and last components of all the
+ right singular vectors of the original bidiagonal matrix.
+
+ DLASD8 is called from DLASD6.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form in the calling routine:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors in factored form as well.
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved
+ by DLASD4. K >= 1.
+
+ D (output) DOUBLE PRECISION array, dimension ( K )
+ On output, D contains the updated singular values.
+
+ Z (input) DOUBLE PRECISION array, dimension ( K )
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ VF (input/output) DOUBLE PRECISION array, dimension ( K )
+ On entry, VF contains information passed through DBEDE8.
+ On exit, VF contains the first K components of the first
+ components of all right singular vectors of the bidiagonal
+ matrix.
+
+ VL (input/output) DOUBLE PRECISION array, dimension ( K )
+ On entry, VL contains information passed through DBEDE8.
+ On exit, VL contains the first K components of the last
+ components of all right singular vectors of the bidiagonal
+ matrix.
+
+ DIFL (output) DOUBLE PRECISION array, dimension ( K )
+ On exit, DIFL(I) = D(I) - DSIGMA(I).
+
+ DIFR (output) DOUBLE PRECISION array,
+ dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and
+ dimension ( K ) if ICOMPQ = 0.
+ On exit, DIFR(I,1) = D(I) - DSIGMA(I+1), DIFR(K,1) is not
+ defined and will not be referenced.
+
+ If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
+ normalizing factors for the right singular vector matrix.
+
+ LDDIFR (input) INTEGER
+ The leading dimension of DIFR, must be at least K.
+
+ DSIGMA (input) DOUBLE PRECISION array, dimension ( K )
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --vf;
+ --vl;
+ --difl;
+ difr_dim1 = *lddifr;
+ difr_offset = 1 + difr_dim1 * 1;
+ difr -= difr_offset;
+ --dsigma;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*k < 1) {
+ *info = -2;
+ } else if (*lddifr < *k) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = abs(z__[1]);
+ difl[1] = d__[1];
+ if (*icompq == 1) {
+ difl[2] = 1.;
+ difr[((difr_dim1) << (1)) + 1] = 1.;
+ }
+ return 0;
+ }
+
+/*
+ Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DSIGMA(I) if it is 1; this makes the subsequent
+ subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DSIGMA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DSIGMA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+ 2*DLAMBDA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L10: */
+ }
+
+/* Book keeping. */
+
+ iwk1 = 1;
+ iwk2 = iwk1 + *k;
+ iwk3 = iwk2 + *k;
+ iwk2i = iwk2 - 1;
+ iwk3i = iwk3 - 1;
+
+/* Normalize Z. */
+
+ rho = dnrm2_(k, &z__[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Initialize WORK(IWK3). */
+
+ dlaset_("A", k, &c__1, &c_b15, &c_b15, &work[iwk3], k);
+
+/*
+ Compute the updated singular values, the arrays DIFL, DIFR,
+ and the updated Z.
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
+ iwk2], info);
+
+/* If the root finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+/* These products are combined below to compute the updated Z. */
+ work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j];
+ difl[j] = -work[j];
+ difr[j + difr_dim1] = -work[j + 1];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L20: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1)));
+ z__[i__] = d_sign(&d__2, &z__[i__]);
+/* L50: */
+ }
+
+/* Update VF and VL. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = d__[j];
+ dsigj = -dsigma[j];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -dsigma[j + 1];
+ }
+ work[j] = -z__[j] / diflj / (dsigma[j] + dj);
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / (
+ dsigma[i__] + dj);
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) /
+ (dsigma[i__] + dj);
+/* L70: */
+ }
+ temp = dnrm2_(k, &work[1], &c__1);
+ work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp;
+ work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp;
+ if (*icompq == 1) {
+ difr[j + ((difr_dim1) << (1))] = temp;
+ }
+/* L80: */
+ }
+
+ dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1);
+ dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1);
+
+ return 0;
+
+/* End of DLASD8 */
+
+} /* dlasd8_ */
+
+/* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n,
+	integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer
+	*ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr,
+	doublereal *z__, doublereal *poles, integer *givptr, integer *givcol,
+	integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__,
+	doublereal *s, doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+	    difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+	    poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+	    z_dim1, z_offset, i__1, i__2;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc,
+	    nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1;
+    static doublereal beta;
+    static integer idxq, nlvl;
+    static doublereal alpha;
+    static integer inode, ndiml, ndimr, idxqi, itemp;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static integer sqrei;
+    extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *);
+    static integer nwork1, nwork2;
+    extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlasdt_(integer *, integer *,
+	    integer *, integer *, integer *, integer *, integer *), dlaset_(
+	    char *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *), xerbla_(char *, integer *);
+    static integer smlszp;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    Using a divide and conquer approach, DLASDA computes the singular
+    value decomposition (SVD) of a real upper bidiagonal N-by-M matrix
+    B with diagonal D and offdiagonal E, where M = N + SQRE. The
+    algorithm computes the singular values in the SVD B = U * S * VT.
+    The orthogonal matrices U and VT are optionally computed in
+    compact form.
+
+    A related subroutine, DLASD0, computes the singular values and
+    the singular vectors in explicit form.
+
+    Arguments
+    =========
+
+    ICOMPQ (input) INTEGER
+           Specifies whether singular vectors are to be computed
+           in compact form, as follows
+           = 0: Compute singular values only.
+           = 1: Compute singular vectors of upper bidiagonal
+                matrix in compact form.
+
+    SMLSIZ (input) INTEGER
+           The maximum size of the subproblems at the bottom of the
+           computation tree.
+
+    N      (input) INTEGER
+           The row dimension of the upper bidiagonal matrix. This is
+           also the dimension of the main diagonal array D.
+
+    SQRE   (input) INTEGER
+           Specifies the column dimension of the bidiagonal matrix.
+           = 0: The bidiagonal matrix has column dimension M = N;
+           = 1: The bidiagonal matrix has column dimension M = N + 1.
+
+    D      (input/output) DOUBLE PRECISION array, dimension ( N )
+           On entry D contains the main diagonal of the bidiagonal
+           matrix. On exit D, if INFO = 0, contains its singular values.
+
+    E      (input) DOUBLE PRECISION array, dimension ( M-1 )
+           Contains the subdiagonal entries of the bidiagonal matrix.
+           On exit, E has been destroyed.
+
+    U      (output) DOUBLE PRECISION array,
+           dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced
+           if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left
+           singular vector matrices of all subproblems at the bottom
+           level.
+
+    LDU    (input) INTEGER, LDU = > N.
+           The leading dimension of arrays U, VT, DIFL, DIFR, POLES,
+           GIVNUM, and Z.
+
+    VT     (output) DOUBLE PRECISION array,
+           dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced
+           if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right
+           singular vector matrices of all subproblems at the bottom
+           level.
+
+    K      (output) INTEGER array,
+           dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0.
+           If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th
+           secular equation on the computation tree.
+
+    DIFL   (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ),
+           where NLVL = floor(log_2 (N/SMLSIZ))).
+
+    DIFR   (output) DOUBLE PRECISION array,
+           dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and
+           dimension ( N ) if ICOMPQ = 0.
+           If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1)
+           record distances between singular values on the I-th
+           level and singular values on the (I -1)-th level, and
+           DIFR(1:N, 2 * I ) contains the normalizing factors for
+           the right singular vector matrix. See DLASD8 for details.
+
+    Z      (output) DOUBLE PRECISION array,
+           dimension ( LDU, NLVL ) if ICOMPQ = 1 and
+           dimension ( N ) if ICOMPQ = 0.
+           The first K elements of Z(1, I) contain the components of
+           the deflation-adjusted updating row vector for subproblems
+           on the I-th level.
+
+    POLES  (output) DOUBLE PRECISION array,
+           dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced
+           if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and
+           POLES(1, 2*I) contain  the new and old singular values
+           involved in the secular equations on the I-th level.
+
+    GIVPTR (output) INTEGER array,
+           dimension ( N ) if ICOMPQ = 1, and not referenced if
+           ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records
+           the number of Givens rotations performed on the I-th
+           problem on the computation tree.
+
+    GIVCOL (output) INTEGER array,
+           dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not
+           referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
+           GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations
+           of Givens rotations performed on the I-th level on the
+           computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL = > N.
+           The leading dimension of arrays GIVCOL and PERM.
+
+    PERM   (output) INTEGER array,
+           dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced
+           if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records
+           permutations done on the I-th level of the computation tree.
+
+    GIVNUM (output) DOUBLE PRECISION array,
+           dimension ( LDU,  2 * NLVL ) if ICOMPQ = 1, and not
+           referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
+           GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S-
+           values of Givens rotations performed on the I-th level on
+           the computation tree.
+
+    C      (output) DOUBLE PRECISION array,
+           dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0.
+           If ICOMPQ = 1 and the I-th subproblem is not square, on exit,
+           C( I ) contains the C-value of a Givens rotation related to
+           the right null space of the I-th subproblem.
+
+    S      (output) DOUBLE PRECISION array, dimension ( N ) if
+           ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1
+           and the I-th subproblem is not square, on exit, S( I )
+           contains the S-value of a Givens rotation related to
+           the right null space of the I-th subproblem.
+
+    WORK   (workspace) DOUBLE PRECISION array, dimension
+           (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)).
+
+    IWORK  (workspace) INTEGER array.
+           Dimension must be at least (7 * N).
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  if INFO = 1, a singular value did not converge
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Huan Ren, Computer Science Division, University of
+       California at Berkeley, USA
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    givnum_dim1 = *ldu;
+    givnum_offset = 1 + givnum_dim1 * 1;
+    givnum -= givnum_offset;
+    poles_dim1 = *ldu;
+    poles_offset = 1 + poles_dim1 * 1;
+    poles -= poles_offset;
+    z_dim1 = *ldu;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+    difr_dim1 = *ldu;
+    difr_offset = 1 + difr_dim1 * 1;
+    difr -= difr_offset;
+    difl_dim1 = *ldu;
+    difl_offset = 1 + difl_dim1 * 1;
+    difl -= difl_offset;
+    vt_dim1 = *ldu;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    --k;
+    --givptr;
+    perm_dim1 = *ldgcol;
+    perm_offset = 1 + perm_dim1 * 1;
+    perm -= perm_offset;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1 * 1;
+    givcol -= givcol_offset;
+    --c__;
+    --s;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*smlsiz < 3) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -4;
+    } else if (*ldu < *n + *sqre) {
+	*info = -8;
+    } else if (*ldgcol < *n) {
+	*info = -17;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASDA", &i__1);
+	return 0;
+    }
+
+    m = *n + *sqre;
+
+/* If the input matrix is too small, call DLASDQ to find the SVD. */
+
+    if (*n <= *smlsiz) {
+	if (*icompq == 0) {
+	    dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+		    vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, &
+		    work[1], info);
+	} else {
+	    dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+		    , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1],
+		    info);
+	}
+	return 0;
+    }
+
+/* Book-keeping and  set up the computation tree. */
+
+/* Integer workspace layout: the tree arrays INODE, NDIML, NDIMR, the
+   sorting permutation IDXQ, and scratch IWK, each of length N. */
+
+    inode = 1;
+    ndiml = inode + *n;
+    ndimr = ndiml + *n;
+    idxq = ndimr + *n;
+    iwk = idxq + *n;
+
+    ncc = 0;
+    nru = 0;
+
+/* Real workspace layout: WORK(VF..) and WORK(VL..) hold the first and
+   last components of the right singular vectors of the subproblems (M
+   entries each, filled from the first/last columns of VT below);
+   NWORK1/NWORK2 are scratch passed to DLASDQ. */
+
+    smlszp = *smlsiz + 1;
+    vf = 1;
+    vl = vf + m;
+    nwork1 = vl + m;
+    nwork2 = nwork1 + smlszp * smlszp;
+
+    dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+	    smlsiz);
+
+/*
+     for the nodes on bottom level of the tree, solve
+     their subproblems by DLASDQ.
+*/
+
+    ndb1 = (nd + 1) / 2;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+        IC : center row of each node
+        NL : number of rows of left  subproblem
+        NR : number of rows of right subproblem
+        NLF: starting row of the left   subproblem
+        NRF: starting row of the right  subproblem
+*/
+
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nlp1 = nl + 1;
+	nr = iwork[ndimr + i1];
+	nlf = ic - nl;
+	nrf = ic + 1;
+	idxqi = idxq + nlf - 2;
+	vfi = vf + nlf - 1;
+	vli = vl + nlf - 1;
+	sqrei = 1;
+	if (*icompq == 0) {
+	    dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &work[nwork1], &smlszp);
+	    dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], &
+		    work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2],
+		    &nl, &work[nwork2], info);
+	    itemp = nwork1 + nl * smlszp;
+	    dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1);
+	} else {
+	    dlaset_("A", &nl, &nl, &c_b29, &c_b15, &u[nlf + u_dim1], ldu);
+	    dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &vt[nlf + vt_dim1],
+		    ldu);
+	    dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &
+		    vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf +
+		    u_dim1], ldu, &work[nwork1], info);
+	    dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1)
+		    ;
+	}
+	if (*info != 0) {
+	    return 0;
+	}
+	i__2 = nl;
+	for (j = 1; j <= i__2; ++j) {
+	    iwork[idxqi + j] = j;
+/* L10: */
+	}
+	if ((i__ == nd && *sqre == 0)) {
+	    sqrei = 0;
+	} else {
+	    sqrei = 1;
+	}
+	idxqi += nlp1;
+	vfi += nlp1;
+	vli += nlp1;
+	nrp1 = nr + sqrei;
+	if (*icompq == 0) {
+	    dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &work[nwork1], &smlszp);
+	    dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], &
+		    work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2],
+		    &nr, &work[nwork2], info);
+	    itemp = nwork1 + (nrp1 - 1) * smlszp;
+	    dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1);
+	} else {
+	    dlaset_("A", &nr, &nr, &c_b29, &c_b15, &u[nrf + u_dim1], ldu);
+	    dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &vt[nrf + vt_dim1],
+		    ldu);
+	    dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &
+		    vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf +
+		    u_dim1], ldu, &work[nwork1], info);
+	    dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1)
+		    ;
+	}
+	if (*info != 0) {
+	    return 0;
+	}
+	i__2 = nr;
+	for (j = 1; j <= i__2; ++j) {
+	    iwork[idxqi + j] = j;
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Now conquer each subproblem bottom-up. */
+
+/* J starts at 2**NLVL and is decremented once per merge when
+   ICOMPQ == 1, so it indexes the per-node output slots K, C, S and
+   GIVPTR in reverse order of the merges. */
+
+    j = pow_ii(&c__2, &nlvl);
+    for (lvl = nlvl; lvl >= 1; --lvl) {
+	lvl2 = ((lvl) << (1)) - 1;
+
+/*
+        Find the first node LF and last node LL on
+        the current level LVL.
+*/
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__1 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__1);
+	    ll = ((lf) << (1)) - 1;
+	}
+	i__1 = ll;
+	for (i__ = lf; i__ <= i__1; ++i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    nrf = ic + 1;
+	    if (i__ == ll) {
+		sqrei = *sqre;
+	    } else {
+		sqrei = 1;
+	    }
+	    vfi = vf + nlf - 1;
+	    vli = vl + nlf - 1;
+	    idxqi = idxq + nlf - 1;
+	    alpha = d__[ic];
+	    beta = e[ic];
+	    if (*icompq == 0) {
+		dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+			work[vli], &alpha, &beta, &iwork[idxqi], &perm[
+			perm_offset], &givptr[1], &givcol[givcol_offset],
+			ldgcol, &givnum[givnum_offset], ldu, &poles[
+			poles_offset], &difl[difl_offset], &difr[difr_offset],
+			&z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
+			&iwork[iwk], info);
+	    } else {
+		--j;
+		dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+			work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
+			lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
+			givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
+			givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
+			difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
+			difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
+			&s[j], &work[nwork1], &iwork[iwk], info);
+	    }
+	    if (*info != 0) {
+		return 0;
+	    }
+/* L40: */
+	}
+/* L50: */
+    }
+
+    return 0;
+
+/* End of DLASDA */
+
+} /* dlasda_ */
+
+/* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer *
+	ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e,
+	doublereal *vt, integer *ldvt, doublereal *u, integer *ldu,
+	doublereal *c__, integer *ldc, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+	    i__2;
+
+    /* Local variables */
+    static integer i__, j;
+    static doublereal r__, cs, sn;
+    static integer np1, isub;
+    static doublereal smin;
+    static integer sqre1;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dswap_(integer *, doublereal *, integer *
+	    , doublereal *, integer *);
+    static integer iuplo;
+    extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *), xerbla_(char *,
+	    integer *), dbdsqr_(char *, integer *, integer *, integer
+	    *, integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    static logical rotate;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLASDQ computes the singular value decomposition (SVD) of a real
+    (upper or lower) bidiagonal matrix with diagonal D and offdiagonal
+    E, accumulating the transformations if desired. Letting B denote
+    the input bidiagonal matrix, the algorithm computes orthogonal
+    matrices Q and P such that B = Q * S * P' (P' denotes the transpose
+    of P). The singular values S are overwritten on D.
+
+    The input matrix U  is changed to U  * Q  if desired.
+    The input matrix VT is changed to P' * VT if desired.
+    The input matrix C  is changed to Q' * C  if desired.
+
+    See "Computing  Small Singular Values of Bidiagonal Matrices With
+    Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
+    LAPACK Working Note #3, for a detailed description of the algorithm.
+
+    Arguments
+    =========
+
+    UPLO  (input) CHARACTER*1
+          On entry, UPLO specifies whether the input bidiagonal matrix
+          is upper or lower bidiagonal, and whether it is square or
+          not.
+             UPLO = 'U' or 'u'   B is upper bidiagonal.
+             UPLO = 'L' or 'l'   B is lower bidiagonal.
+
+    SQRE  (input) INTEGER
+          = 0: then the input matrix is N-by-N.
+          = 1: then the input matrix is N-by-(N+1) if UPLO = 'U' and
+               (N+1)-by-N if UPLO = 'L'.
+
+          The bidiagonal matrix has
+          N = NL + NR + 1 rows and
+          M = N + SQRE >= N columns.
+
+    N     (input) INTEGER
+          On entry, N specifies the number of rows and columns
+          in the matrix. N must be at least 0.
+
+    NCVT  (input) INTEGER
+          On entry, NCVT specifies the number of columns of
+          the matrix VT. NCVT must be at least 0.
+
+    NRU   (input) INTEGER
+          On entry, NRU specifies the number of rows of
+          the matrix U. NRU must be at least 0.
+
+    NCC   (input) INTEGER
+          On entry, NCC specifies the number of columns of
+          the matrix C. NCC must be at least 0.
+
+    D     (input/output) DOUBLE PRECISION array, dimension (N)
+          On entry, D contains the diagonal entries of the
+          bidiagonal matrix whose SVD is desired. On normal exit,
+          D contains the singular values in ascending order.
+
+    E     (input/output) DOUBLE PRECISION array.
+          dimension is (N-1) if SQRE = 0 and N if SQRE = 1.
+          On entry, the entries of E contain the offdiagonal entries
+          of the bidiagonal matrix whose SVD is desired. On normal
+          exit, E will contain 0. If the algorithm does not converge,
+          D and E will contain the diagonal and superdiagonal entries
+          of a bidiagonal matrix orthogonally equivalent to the one
+          given as input.
+
+    VT    (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
+          On entry, contains a matrix which on exit has been
+          premultiplied by P', dimension N-by-NCVT if SQRE = 0
+          and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0).
+
+    LDVT  (input) INTEGER
+          On entry, LDVT specifies the leading dimension of VT as
+          declared in the calling (sub) program. LDVT must be at
+          least 1. If NCVT is nonzero LDVT must also be at least N.
+
+    U     (input/output) DOUBLE PRECISION array, dimension (LDU, N)
+          On entry, contains a  matrix which on exit has been
+          postmultiplied by Q, dimension NRU-by-N if SQRE = 0
+          and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0).
+
+    LDU   (input) INTEGER
+          On entry, LDU  specifies the leading dimension of U as
+          declared in the calling (sub) program. LDU must be at
+          least max( 1, NRU ) .
+
+    C     (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
+          On entry, contains an N-by-NCC matrix which on exit
+          has been premultiplied by Q'  dimension N-by-NCC if SQRE = 0
+          and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0).
+
+    LDC   (input) INTEGER
+          On entry, LDC  specifies the leading dimension of C as
+          declared in the calling (sub) program. LDC must be at
+          least 1. If NCC is nonzero, LDC must also be at least N.
+
+    WORK  (workspace) DOUBLE PRECISION array, dimension (4*N)
+          Workspace. Only referenced if one of NCVT, NRU, or NCC is
+          nonzero, and if N is at least 2.
+
+    INFO  (output) INTEGER
+          On exit, a value of 0 indicates a successful exit.
+          If INFO < 0, argument number -INFO is illegal.
+          If INFO > 0, the algorithm did not converge, and INFO
+          specifies how many superdiagonals did not converge.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Huan Ren, Computer Science Division, University of
+       California at Berkeley, USA
+
+    =====================================================================
+
+
+    Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    iuplo = 0;
+    if (lsame_(uplo, "U")) {
+	iuplo = 1;
+    }
+    if (lsame_(uplo, "L")) {
+	iuplo = 2;
+    }
+    if (iuplo == 0) {
+	*info = -1;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ncvt < 0) {
+	*info = -4;
+    } else if (*nru < 0) {
+	*info = -5;
+    } else if (*ncc < 0) {
+	*info = -6;
+    } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
+	    {
+	*info = -10;
+    } else if (*ldu < max(1,*nru)) {
+	*info = -12;
+    } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
+	*info = -14;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASDQ", &i__1);
+	return 0;
+    }
+    if (*n == 0) {
+	return 0;
+    }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+    rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+    np1 = *n + 1;
+
+/* IUPLO and SQRE1 are working copies: the Givens reductions below turn
+   the matrix into square upper bidiagonal form and update these copies
+   to match, leaving the UPLO/SQRE inputs untouched. */
+
+    sqre1 = *sqre;
+
+/*
+    If matrix non-square upper bidiagonal, rotate to be lower
+    bidiagonal.  The rotations are on the right.
+*/
+
+    if ((iuplo == 1 && sqre1 == 1)) {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (rotate) {
+		work[i__] = cs;
+		work[*n + i__] = sn;
+	    }
+/* L10: */
+	}
+	dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+	d__[*n] = r__;
+	e[*n] = 0.;
+	if (rotate) {
+	    work[*n] = cs;
+	    work[*n + *n] = sn;
+	}
+	iuplo = 2;
+	sqre1 = 0;
+
+/* Update singular vectors if desired. */
+
+	if (*ncvt > 0) {
+	    dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[
+		    vt_offset], ldvt);
+	}
+    }
+
+/*
+    If matrix lower bidiagonal, rotate to be upper bidiagonal
+    by applying Givens rotations on the left.
+*/
+
+    if (iuplo == 2) {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (rotate) {
+		work[i__] = cs;
+		work[*n + i__] = sn;
+	    }
+/* L20: */
+	}
+
+/*
+       If matrix (N+1)-by-N lower bidiagonal, one additional
+       rotation is needed.
+*/
+
+	if (sqre1 == 1) {
+	    dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+	    d__[*n] = r__;
+	    if (rotate) {
+		work[*n] = cs;
+		work[*n + *n] = sn;
+	    }
+	}
+
+/* Update singular vectors if desired. */
+
+	if (*nru > 0) {
+	    if (sqre1 == 0) {
+		dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[
+			u_offset], ldu);
+	    } else {
+		dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[
+			u_offset], ldu);
+	    }
+	}
+	if (*ncc > 0) {
+	    if (sqre1 == 0) {
+		dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[
+			c_offset], ldc);
+	    } else {
+		dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[
+			c_offset], ldc);
+	    }
+	}
+    }
+
+/*
+    Call DBDSQR to compute the SVD of the reduced real
+    N-by-N upper bidiagonal matrix.
+*/
+
+    dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[
+	    u_offset], ldu, &c__[c_offset], ldc, &work[1], info);
+
+/*
+    Sort the singular values into ascending order (insertion sort on
+    singular values, but only one transposition per singular vector)
+*/
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I). */
+
+	isub = i__;
+	smin = d__[i__];
+	i__2 = *n;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    if (d__[j] < smin) {
+		isub = j;
+		smin = d__[j];
+	    }
+/* L30: */
+	}
+	if (isub != i__) {
+
+/* Swap singular values and vectors. */
+
+	    d__[isub] = d__[i__];
+	    d__[i__] = smin;
+	    if (*ncvt > 0) {
+		dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1],
+			ldvt);
+	    }
+	    if (*nru > 0) {
+		dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1]
+			, &c__1);
+	    }
+	    if (*ncc > 0) {
+		dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc)
+			;
+	    }
+	}
+/* L40: */
+    }
+
+    return 0;
+
+/* End of DLASDQ */
+
+} /* dlasdq_ */
+
+/* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer *
+	inode, integer *ndiml, integer *ndimr, integer *msub)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    static integer i__, il, ir, maxn;
+    static doublereal temp;
+    static integer nlvl, llst, ncrnt;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLASDT creates a tree of subproblems for bidiagonal divide and
+    conquer.
+
+    Arguments
+    =========
+
+    N      (input) INTEGER
+           On entry, the number of diagonal elements of the
+           bidiagonal matrix.
+
+    LVL    (output) INTEGER
+           On exit, the number of levels on the computation tree.
+
+    ND     (output) INTEGER
+           On exit, the number of nodes on the tree.
+
+    INODE  (output) INTEGER array, dimension ( N )
+           On exit, centers of subproblems.
+
+    NDIML  (output) INTEGER array, dimension ( N )
+           On exit, row dimensions of left children.
+
+    NDIMR  (output) INTEGER array, dimension ( N )
+           On exit, row dimensions of right children.
+
+    MSUB   (input) INTEGER.
+           On entry, the maximum row dimension each subproblem at the
+           bottom of the tree can be of.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Huan Ren, Computer Science Division, University of
+       California at Berkeley, USA
+
+    =====================================================================
+
+
+    Find the number of levels on the tree.
+*/
+
+    /* Parameter adjustments */
+    --ndimr;
+    --ndiml;
+    --inode;
+
+    /* Function Body */
+
+/* Levels needed so that halving N repeatedly leaves bottom subproblems
+   of no more than MSUB rows; the +1 counts the root level. */
+
+    maxn = max(1,*n);
+    temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.);
+    *lvl = (integer) temp + 1;
+
+/* Root node: center row I+1 splits the N rows into a left block of I
+   rows and a right block of N - I - 1 rows. */
+
+    i__ = *n / 2;
+    inode[1] = i__ + 1;
+    ndiml[1] = i__;
+    ndimr[1] = *n - i__ - 1;
+
+/* IL/IR index the slots where each node's left/right children are
+   stored; LLST is the number of nodes on the current level. */
+
+    il = 0;
+    ir = 1;
+    llst = 1;
+    i__1 = *lvl - 1;
+    for (nlvl = 1; nlvl <= i__1; ++nlvl) {
+
+/*
+       Constructing the tree at (NLVL+1)-st level. The number of
+       nodes created on this level is LLST * 2.
+*/
+
+	i__2 = llst - 1;
+	for (i__ = 0; i__ <= i__2; ++i__) {
+	    il += 2;
+	    ir += 2;
+	    ncrnt = llst + i__;
+	    ndiml[il] = ndiml[ncrnt] / 2;
+	    ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1;
+	    inode[il] = inode[ncrnt] - ndimr[il] - 1;
+	    ndiml[ir] = ndimr[ncrnt] / 2;
+	    ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1;
+	    inode[ir] = inode[ncrnt] + ndiml[ir] + 1;
+/* L10: */
+	}
+	llst <<= 1;
+/* L20: */
+    }
+
+/* A complete binary tree with LLST leaves has 2*LLST - 1 nodes. */
+
+    *nd = ((llst) << (1)) - 1;
+
+    return 0;
+
+/* End of DLASDT */
+
+} /* dlasdt_ */
+
+/* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal *
+	alpha, doublereal *beta, doublereal *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset;
+
+    /* Local variables */
+    static integer row, col, rowlim;
+    extern logical lsame_(char *, char *);
+
+
+/*
+    DLASET initializes the m-by-n matrix A so that every off-diagonal
+    entry equals ALPHA and every diagonal entry equals BETA.
+
+    UPLO = 'U' touches only the strictly upper triangular (or
+    trapezoidal) part, UPLO = 'L' only the strictly lower part, and any
+    other value fills the whole off-diagonal region.  In every case the
+    first min(M,N) diagonal entries are then set to BETA.  LDA is the
+    leading dimension of A, LDA >= max(1,M).
+
+    (LAPACK auxiliary routine DLASET, version 3.0.)
+*/
+
+    /* Shift A to 1-based Fortran-style indexing: a[row + col*a_dim1]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    if (lsame_(uplo, "U")) {
+
+	/* Strictly upper triangle/trapezoid: rows 1 .. min(col-1, m). */
+	for (col = 2; col <= *n; ++col) {
+	    rowlim = min(col - 1,*m);
+	    for (row = 1; row <= rowlim; ++row) {
+		a[row + col * a_dim1] = *alpha;
+	    }
+	}
+
+    } else if (lsame_(uplo, "L")) {
+
+	/* Strictly lower triangle/trapezoid: rows col+1 .. m. */
+	for (col = 1; col <= min(*m,*n); ++col) {
+	    for (row = col + 1; row <= *m; ++row) {
+		a[row + col * a_dim1] = *alpha;
+	    }
+	}
+
+    } else {
+
+	/* Fill the entire leading m-by-n submatrix with ALPHA; the
+	   diagonal is overwritten with BETA just below. */
+	for (col = 1; col <= *n; ++col) {
+	    for (row = 1; row <= *m; ++row) {
+		a[row + col * a_dim1] = *alpha;
+	    }
+	}
+    }
+
+    /* Place BETA on the first min(M,N) diagonal entries. */
+    for (row = 1; row <= min(*m,*n); ++row) {
+	a[row + row * a_dim1] = *beta;
+    }
+
+    return 0;
+
+} /* dlaset_ */
+
+/* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e,
+	doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__;
+    static doublereal eps;
+    extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
+	    *, doublereal *, doublereal *);
+    static doublereal scale;
+    static integer iinfo;
+    static doublereal sigmn;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static doublereal sigmx;
+    extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *);
+
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *);
+    static doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_(
+	    char *, integer *, doublereal *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLASQ1 computes the singular values of a real N-by-N bidiagonal
+    matrix with diagonal D and off-diagonal E. The singular values
+    are computed to high relative accuracy, in the absence of
+    denormalization, underflow and overflow. The algorithm was first
+    presented in
+
+    "Accurate singular values and differential qd algorithms" by K. V.
+    Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230,
+    1994,
+
+    and the present implementation is described in "An implementation of
+    the dqds Algorithm (Positive Case)", LAPACK Working Note.
+
+    Arguments
+    =========
+
+    N     (input) INTEGER
+          The number of rows and columns in the matrix. N >= 0.
+
+    D     (input/output) DOUBLE PRECISION array, dimension (N)
+          On entry, D contains the diagonal elements of the
+          bidiagonal matrix whose SVD is desired. On normal exit,
+          D contains the singular values in decreasing order.
+
+    E     (input/output) DOUBLE PRECISION array, dimension (N)
+          On entry, elements E(1:N-1) contain the off-diagonal elements
+          of the bidiagonal matrix whose SVD is desired.
+          On exit, E is overwritten.
+
+    WORK  (workspace) DOUBLE PRECISION array, dimension (4*N)
+
+    INFO  (output) INTEGER
+          = 0: successful exit
+          < 0: if INFO = -i, the i-th argument had an illegal value
+          > 0: the algorithm failed
+               = 1, a split was marked by a positive value in E
+               = 2, current block of Z not diagonalized after 30*N
+                    iterations (in inner while loop)
+               = 3, termination criterion of outer while loop not met
+                    (program created more than N unreduced blocks)
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    /* f2c idiom: each array pointer is shifted down one element so the
+       translated code can keep Fortran's 1-based subscripts. */
+    --work;
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	/* NOTE(review): N is the first argument, yet INFO is set to -2
+	   here.  This mirrors the original Fortran source; confirm
+	   against upstream LAPACK before changing. */
+	*info = -2;
+	i__1 = -(*info);
+	xerbla_("DLASQ1", &i__1);
+	return 0;
+    } else if (*n == 0) {
+	return 0;
+    } else if (*n == 1) {
+	d__[1] = abs(d__[1]);
+	return 0;
+    } else if (*n == 2) {
+	/* 2-by-2 case: DLAS2 returns the two singular values directly. */
+	dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
+	d__[1] = sigmx;
+	d__[2] = sigmn;
+	return 0;
+    }
+
+/* Estimate the largest singular value. */
+
+    sigmx = 0.;
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* Computing MAX */
+	d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1));
+	sigmx = max(d__2,d__3);
+/* L10: */
+    }
+    d__[*n] = (d__1 = d__[*n], abs(d__1));
+
+/* Early return if SIGMX is zero (matrix is already diagonal). */
+
+    if (sigmx == 0.) {
+	dlasrt_("D", n, &d__[1], &iinfo);
+	return 0;
+    }
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	d__1 = sigmx, d__2 = d__[i__];
+	sigmx = max(d__1,d__2);
+/* L20: */
+    }
+
+/*
+    Copy D and E into WORK (in the Z format) and scale (squaring the
+    input data makes scaling by a power of the radix pointless).
+*/
+
+    eps = PRECISION;
+    safmin = SAFEMINIMUM;
+    scale = sqrt(eps / safmin);
+    /* Interleave D and E into WORK via stride-2 copies: WORK(1,3,5,...)
+       holds D, WORK(2,4,6,...) holds E. */
+    dcopy_(n, &d__[1], &c__1, &work[1], &c__2);
+    i__1 = *n - 1;
+    dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2);
+    i__1 = ((*n) << (1)) - 1;
+    i__2 = ((*n) << (1)) - 1;
+    dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2,
+	    &iinfo);
+
+/* Compute the q's and e's. */
+
+    i__1 = ((*n) << (1)) - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+	d__1 = work[i__];
+	work[i__] = d__1 * d__1;
+/* L30: */
+    }
+    work[*n * 2] = 0.;
+
+    dlasq2_(n, &work[1], info);
+
+    if (*info == 0) {
+	/* Undo the scaling: singular values are sqrt of the computed
+	   eigenvalues, scaled back from SCALE to SIGMX. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    d__[i__] = sqrt(work[i__]);
+/* L40: */
+	}
+	dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
+		iinfo);
+    }
+
+    return 0;
+
+/* End of DLASQ1 */
+
+} /* dlasq1_ */
+
+/* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal d__, e;
+    static integer k;
+    static doublereal s, t;
+    static integer i0, i4, n0, pp;
+    static doublereal eps, tol;
+    static integer ipn4;
+    static doublereal tol2;
+    static logical ieee;
+    static integer nbig;
+    static doublereal dmin__, emin, emax;
+    static integer ndiv, iter;
+    static doublereal qmin, temp, qmax, zmax;
+    static integer splt, nfail;
+    static doublereal desig, trace, sigma;
+    static integer iinfo;
+    extern /* Subroutine */ int dlasq3_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	    integer *, integer *, integer *, logical *);
+
+    static integer iwhila, iwhilb;
+    static doublereal oldemn, safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+	    integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLASQ2 computes all the eigenvalues of the symmetric positive
+    definite tridiagonal matrix associated with the qd array Z to high
+    relative accuracy, in the absence of denormalization, underflow
+    and overflow.
+
+    To see the relation of Z to the tridiagonal matrix, let L be a
+    unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and
+    let U be an upper bidiagonal matrix with 1's above and diagonal
+    Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the
+    symmetric tridiagonal to which it is similar.
+
+    Note : DLASQ2 defines a logical variable, IEEE, which is true
+    on machines which follow ieee-754 floating-point standard in their
+    handling of infinities and NaNs, and false otherwise. This variable
+    is passed to DLASQ3.
+
+    Arguments
+    =========
+
+    N     (input) INTEGER
+          The number of rows and columns in the matrix. N >= 0.
+
+    Z     (workspace) DOUBLE PRECISION array, dimension ( 4*N )
+          On entry Z holds the qd array. On exit, entries 1 to N hold
+          the eigenvalues in decreasing order, Z( 2*N+1 ) holds the
+          trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If
+          N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 )
+          holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of
+          shifts that failed.
+
+    INFO  (output) INTEGER
+          = 0: successful exit
+          < 0: if the i-th argument is a scalar and had an illegal
+               value, then INFO = -i, if the i-th argument is an
+               array and the j-entry had an illegal value, then
+               INFO = -(i*100+j)
+          > 0: the algorithm failed
+               = 1, a split was marked by a positive value in E
+               = 2, current block of Z not diagonalized after 30*N
+                    iterations (in inner while loop)
+               = 3, termination criterion of outer while loop not met
+                    (program created more than N unreduced blocks)
+
+    Further Details
+    ===============
+    Local Variables: I0:N0 defines a current unreduced segment of Z.
+    The shifts are accumulated in SIGMA. Iteration count is in ITER.
+    Ping-pong is controlled by PP (alternates between 0 and 1).
+
+    =====================================================================
+
+
+    Test the input arguments.
+    (in case DLASQ2 is not called by DLASQ1)
+*/
+
+    /* Parameter adjustments */
+    /* Shift to 1-based indexing (f2c idiom). */
+    --z__;
+
+    /* Function Body */
+    *info = 0;
+    eps = PRECISION;
+    safmin = SAFEMINIMUM;
+    tol = eps * 100.;
+/* Computing 2nd power */
+    d__1 = tol;
+    tol2 = d__1 * d__1;
+
+    if (*n < 0) {
+	*info = -1;
+	xerbla_("DLASQ2", &c__1);
+	return 0;
+    } else if (*n == 0) {
+	return 0;
+    } else if (*n == 1) {
+
+/* 1-by-1 case. */
+
+	if (z__[1] < 0.) {
+	    *info = -201;
+	    xerbla_("DLASQ2", &c__2);
+	}
+	return 0;
+    } else if (*n == 2) {
+
+/* 2-by-2 case. */
+
+	if (z__[2] < 0. || z__[3] < 0.) {
+	    *info = -2;
+	    xerbla_("DLASQ2", &c__2);
+	    return 0;
+	} else if (z__[3] > z__[1]) {
+	    /* Swap so the larger q comes first. */
+	    d__ = z__[3];
+	    z__[3] = z__[1];
+	    z__[1] = d__;
+	}
+	z__[5] = z__[1] + z__[2] + z__[3];
+	if (z__[2] > z__[3] * tol2) {
+	    t = (z__[1] - z__[3] + z__[2]) * .5;
+	    s = z__[3] * (z__[2] / t);
+	    if (s <= t) {
+		s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.)));
+	    } else {
+		s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s)));
+	    }
+	    t = z__[1] + (s + z__[2]);
+	    z__[3] *= z__[1] / t;
+	    z__[1] = t;
+	}
+	z__[2] = z__[3];
+	z__[6] = z__[2] + z__[1];
+	return 0;
+    }
+
+/* Check for negative data and compute sums of q's and e's. */
+
+    z__[*n * 2] = 0.;
+    emin = z__[2];
+    qmax = 0.;
+    zmax = 0.;
+    d__ = 0.;
+    e = 0.;
+
+    i__1 = (*n - 1) << (1);
+    for (k = 1; k <= i__1; k += 2) {
+	if (z__[k] < 0.) {
+	    *info = -(k + 200);
+	    xerbla_("DLASQ2", &c__2);
+	    return 0;
+	} else if (z__[k + 1] < 0.) {
+	    *info = -(k + 201);
+	    xerbla_("DLASQ2", &c__2);
+	    return 0;
+	}
+	d__ += z__[k];
+	e += z__[k + 1];
+/* Computing MAX */
+	d__1 = qmax, d__2 = z__[k];
+	qmax = max(d__1,d__2);
+/* Computing MIN */
+	d__1 = emin, d__2 = z__[k + 1];
+	emin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = max(qmax,zmax), d__2 = z__[k + 1];
+	zmax = max(d__1,d__2);
+/* L10: */
+    }
+    if (z__[((*n) << (1)) - 1] < 0.) {
+	*info = -(((*n) << (1)) + 199);
+	xerbla_("DLASQ2", &c__2);
+	return 0;
+    }
+    d__ += z__[((*n) << (1)) - 1];
+/* Computing MAX */
+    d__1 = qmax, d__2 = z__[((*n) << (1)) - 1];
+    qmax = max(d__1,d__2);
+    zmax = max(qmax,zmax);
+
+/* Check for diagonality. */
+
+    if (e == 0.) {
+	i__1 = *n;
+	for (k = 2; k <= i__1; ++k) {
+	    z__[k] = z__[((k) << (1)) - 1];
+/* L20: */
+	}
+	dlasrt_("D", n, &z__[1], &iinfo);
+	z__[((*n) << (1)) - 1] = d__;
+	return 0;
+    }
+
+    trace = d__ + e;
+
+/* Check for zero data. */
+
+    if (trace == 0.) {
+	z__[((*n) << (1)) - 1] = 0.;
+	return 0;
+    }
+
+/* Check whether the machine is IEEE conformable. */
+
+    ieee = (ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (
+	    ftnlen)6, (ftnlen)1) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1,
+	     &c__2, &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1);
+
+/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */
+
+    for (k = (*n) << (1); k >= 2; k += -2) {
+	z__[k * 2] = 0.;
+	z__[((k) << (1)) - 1] = z__[k];
+	z__[((k) << (1)) - 2] = 0.;
+	z__[((k) << (1)) - 3] = z__[k - 1];
+/* L30: */
+    }
+
+    i0 = 1;
+    n0 = *n;
+
+/* Reverse the qd-array, if warranted. */
+
+    if (z__[((i0) << (2)) - 3] * 1.5 < z__[((n0) << (2)) - 3]) {
+	ipn4 = (i0 + n0) << (2);
+	i__1 = (i0 + n0 - 1) << (1);
+	for (i4 = (i0) << (2); i4 <= i__1; i4 += 4) {
+	    temp = z__[i4 - 3];
+	    z__[i4 - 3] = z__[ipn4 - i4 - 3];
+	    z__[ipn4 - i4 - 3] = temp;
+	    temp = z__[i4 - 1];
+	    z__[i4 - 1] = z__[ipn4 - i4 - 5];
+	    z__[ipn4 - i4 - 5] = temp;
+/* L40: */
+	}
+    }
+
+/* Initial split checking via dqd and Li's test. */
+
+    pp = 0;
+
+    for (k = 1; k <= 2; ++k) {
+
+	d__ = z__[((n0) << (2)) + pp - 3];
+	i__1 = ((i0) << (2)) + pp;
+	for (i4 = ((n0 - 1) << (2)) + pp; i4 >= i__1; i4 += -4) {
+	    if (z__[i4 - 1] <= tol2 * d__) {
+		/* NOTE(review): -0. appears to mark a negligible e-value;
+		   presumably the sign flags a split downstream — confirm. */
+		z__[i4 - 1] = -0.;
+		d__ = z__[i4 - 3];
+	    } else {
+		d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
+	    }
+/* L50: */
+	}
+
+/* dqd maps Z to ZZ plus Li's test. */
+
+	emin = z__[((i0) << (2)) + pp + 1];
+	d__ = z__[((i0) << (2)) + pp - 3];
+	i__1 = ((n0 - 1) << (2)) + pp;
+	for (i4 = ((i0) << (2)) + pp; i4 <= i__1; i4 += 4) {
+	    z__[i4 - ((pp) << (1)) - 2] = d__ + z__[i4 - 1];
+	    if (z__[i4 - 1] <= tol2 * d__) {
+		z__[i4 - 1] = -0.;
+		z__[i4 - ((pp) << (1)) - 2] = d__;
+		z__[i4 - ((pp) << (1))] = 0.;
+		d__ = z__[i4 + 1];
+	    } else if ((safmin * z__[i4 + 1] < z__[i4 - ((pp) << (1)) - 2] &&
+		    safmin * z__[i4 - ((pp) << (1)) - 2] < z__[i4 + 1])) {
+		temp = z__[i4 + 1] / z__[i4 - ((pp) << (1)) - 2];
+		z__[i4 - ((pp) << (1))] = z__[i4 - 1] * temp;
+		d__ *= temp;
+	    } else {
+		z__[i4 - ((pp) << (1))] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4
+			- ((pp) << (1)) - 2]);
+		d__ = z__[i4 + 1] * (d__ / z__[i4 - ((pp) << (1)) - 2]);
+	    }
+/* Computing MIN */
+	    d__1 = emin, d__2 = z__[i4 - ((pp) << (1))];
+	    emin = min(d__1,d__2);
+/* L60: */
+	}
+	z__[((n0) << (2)) - pp - 2] = d__;
+
+/* Now find qmax. */
+
+	qmax = z__[((i0) << (2)) - pp - 2];
+	i__1 = ((n0) << (2)) - pp - 2;
+	for (i4 = ((i0) << (2)) - pp + 2; i4 <= i__1; i4 += 4) {
+/* Computing MAX */
+	    d__1 = qmax, d__2 = z__[i4];
+	    qmax = max(d__1,d__2);
+/* L70: */
+	}
+
+/* Prepare for the next iteration on K. */
+
+	pp = 1 - pp;
+/* L80: */
+    }
+
+    iter = 2;
+    nfail = 0;
+    ndiv = (n0 - i0) << (1);
+
+/* Outer loop: process unreduced segments until N0 drops below 1. */
+
+    i__1 = *n + 1;
+    for (iwhila = 1; iwhila <= i__1; ++iwhila) {
+	if (n0 < 1) {
+	    goto L150;
+	}
+
+/*
+          While array unfinished do
+
+          E(N0) holds the value of SIGMA when submatrix in I0:N0
+          splits from the rest of the array, but is negated.
+*/
+
+	desig = 0.;
+	if (n0 == *n) {
+	    sigma = 0.;
+	} else {
+	    sigma = -z__[((n0) << (2)) - 1];
+	}
+	if (sigma < 0.) {
+	    *info = 1;
+	    return 0;
+	}
+
+/*
+          Find last unreduced submatrix's top index I0, find QMAX and
+          EMIN. Find Gershgorin-type bound if Q's much greater than E's.
+*/
+
+	emax = 0.;
+	if (n0 > i0) {
+	    emin = (d__1 = z__[((n0) << (2)) - 5], abs(d__1));
+	} else {
+	    emin = 0.;
+	}
+	qmin = z__[((n0) << (2)) - 3];
+	qmax = qmin;
+	for (i4 = (n0) << (2); i4 >= 8; i4 += -4) {
+	    if (z__[i4 - 5] <= 0.) {
+		goto L100;
+	    }
+	    if (qmin >= emax * 4.) {
+/* Computing MIN */
+		d__1 = qmin, d__2 = z__[i4 - 3];
+		qmin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = emax, d__2 = z__[i4 - 5];
+		emax = max(d__1,d__2);
+	    }
+/* Computing MAX */
+	    d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5];
+	    qmax = max(d__1,d__2);
+/* Computing MIN */
+	    d__1 = emin, d__2 = z__[i4 - 5];
+	    emin = min(d__1,d__2);
+/* L90: */
+	}
+	i4 = 4;
+
+L100:
+	i0 = i4 / 4;
+
+/* Store EMIN for passing to DLASQ3. */
+
+	z__[((n0) << (2)) - 1] = emin;
+
+/*
+          Put -(initial shift) into DMIN.
+
+   Computing MAX
+*/
+	d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax);
+	dmin__ = -max(d__1,d__2);
+
+/* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */
+
+	pp = 0;
+
+	nbig = (n0 - i0 + 1) * 30;
+	i__2 = nbig;
+	for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
+	    if (i0 > n0) {
+		goto L130;
+	    }
+
+/* While submatrix unfinished take a good dqds step. */
+
+	    dlasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &
+		    nfail, &iter, &ndiv, &ieee);
+
+	    pp = 1 - pp;
+
+/* When EMIN is very small check for splits. */
+
+	    if ((pp == 0 && n0 - i0 >= 3)) {
+		if (z__[n0 * 4] <= tol2 * qmax || z__[((n0) << (2)) - 1] <=
+			tol2 * sigma) {
+		    splt = i0 - 1;
+		    qmax = z__[((i0) << (2)) - 3];
+		    emin = z__[((i0) << (2)) - 1];
+		    oldemn = z__[i0 * 4];
+		    i__3 = (n0 - 3) << (2);
+		    for (i4 = (i0) << (2); i4 <= i__3; i4 += 4) {
+			if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <=
+				tol2 * sigma) {
+			    z__[i4 - 1] = -sigma;
+			    splt = i4 / 4;
+			    qmax = 0.;
+			    emin = z__[i4 + 3];
+			    oldemn = z__[i4 + 4];
+			} else {
+/* Computing MAX */
+			    d__1 = qmax, d__2 = z__[i4 + 1];
+			    qmax = max(d__1,d__2);
+/* Computing MIN */
+			    d__1 = emin, d__2 = z__[i4 - 1];
+			    emin = min(d__1,d__2);
+/* Computing MIN */
+			    d__1 = oldemn, d__2 = z__[i4];
+			    oldemn = min(d__1,d__2);
+			}
+/* L110: */
+		    }
+		    z__[((n0) << (2)) - 1] = emin;
+		    z__[n0 * 4] = oldemn;
+		    i0 = splt + 1;
+		}
+	    }
+
+/* L120: */
+	}
+
+	/* Inner loop exhausted its 30*(N0-I0+1) iteration budget. */
+	*info = 2;
+	return 0;
+
+/* end IWHILB */
+
+L130:
+
+/* L140: */
+	;
+    }
+
+    /* Outer loop ran N+1 times without finishing. */
+    *info = 3;
+    return 0;
+
+/* end IWHILA */
+
+L150:
+
+/* Move q's to the front. */
+
+    i__1 = *n;
+    for (k = 2; k <= i__1; ++k) {
+	z__[k] = z__[((k) << (2)) - 3];
+/* L160: */
+    }
+
+/* Sort and compute sum of eigenvalues. */
+
+    dlasrt_("D", n, &z__[1], &iinfo);
+
+    e = 0.;
+    for (k = *n; k >= 1; --k) {
+	e += z__[k];
+/* L170: */
+    }
+
+/* Store trace, sum(eigenvalues) and information on performance. */
+
+    z__[((*n) << (1)) + 1] = trace;
+    z__[((*n) << (1)) + 2] = e;
+    z__[((*n) << (1)) + 3] = (doublereal) iter;
+/* Computing 2nd power */
+    i__1 = *n;
+    z__[((*n) << (1)) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1);
+    z__[((*n) << (1)) + 5] = nfail * 100. / (doublereal) iter;
+    return 0;
+
+/* End of DLASQ2 */
+
+} /* dlasq2_ */
+
+/* Subroutine */ int dlasq3_(integer *i0, integer *n0, doublereal *z__,
+	integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig,
+	 doublereal *qmax, integer *nfail, integer *iter, integer *ndiv,
+	logical *ieee)
+{
+    /* Initialized data */
+    /* These statics translate Fortran SAVE variables: their values
+       persist across calls to dlasq3_ (the shift state carries over
+       from one invocation to the next). */
+
+    static integer ttype = 0;
+    static doublereal dmin1 = 0.;
+    static doublereal dmin2 = 0.;
+    static doublereal dn = 0.;
+    static doublereal dn1 = 0.;
+    static doublereal dn2 = 0.;
+    static doublereal tau = 0.;
+
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal s, t;
+    static integer j4, nn;
+    static doublereal eps, tol;
+    static integer n0in, ipn4;
+    static doublereal tol2, temp;
+    extern /* Subroutine */ int dlasq4_(integer *, integer *, doublereal *,
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *)
+	    , dlasq5_(integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, logical *), dlasq6_(
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *);
+
+    static doublereal safmin;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       May 17, 2000
+
+
+    Purpose
+    =======
+
+    DLASQ3 checks for deflation, computes a shift (TAU) and calls dqds.
+    In case of failure it changes shifts, and tries again until output
+    is positive.
+
+    Arguments
+    =========
+
+    I0     (input) INTEGER
+           First index.
+
+    N0     (input) INTEGER
+           Last index.
+
+    Z      (input) DOUBLE PRECISION array, dimension ( 4*N )
+           Z holds the qd array.
+
+    PP     (input) INTEGER
+           PP=0 for ping, PP=1 for pong.
+
+    DMIN   (output) DOUBLE PRECISION
+           Minimum value of d.
+
+    SIGMA  (output) DOUBLE PRECISION
+           Sum of shifts used in current segment.
+
+    DESIG  (input/output) DOUBLE PRECISION
+           Lower order part of SIGMA
+
+    QMAX   (input) DOUBLE PRECISION
+           Maximum value of q.
+
+    NFAIL  (output) INTEGER
+           Number of times shift was too big.
+
+    ITER   (output) INTEGER
+           Number of iterations.
+
+    NDIV   (output) INTEGER
+           Number of divisions.
+
+    TTYPE  (output) INTEGER
+           Shift type.
+
+    IEEE   (input) LOGICAL
+           Flag for IEEE or non IEEE arithmetic (passed to DLASQ5).
+
+    =====================================================================
+*/
+
+    /* Parameter adjustments */
+    /* Shift to 1-based indexing (f2c idiom). */
+    --z__;
+
+    /* Function Body */
+
+    n0in = *n0;
+    eps = PRECISION;
+    safmin = SAFEMINIMUM;
+    tol = eps * 100.;
+/* Computing 2nd power */
+    d__1 = tol;
+    tol2 = d__1 * d__1;
+
+/* Check for deflation. */
+
+L10:
+
+    if (*n0 < *i0) {
+	return 0;
+    }
+    if (*n0 == *i0) {
+	goto L20;
+    }
+    nn = ((*n0) << (2)) + *pp;
+    if (*n0 == *i0 + 1) {
+	goto L40;
+    }
+
+/* Check whether E(N0-1) is negligible, 1 eigenvalue. */
+
+    if ((z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - ((*pp) << (1)
+	    ) - 4] > tol2 * z__[nn - 7])) {
+	goto L30;
+    }
+
+L20:
+
+    /* Deflate one eigenvalue and shrink the segment. */
+    z__[((*n0) << (2)) - 3] = z__[((*n0) << (2)) + *pp - 3] + *sigma;
+    --(*n0);
+    goto L10;
+
+/* Check whether E(N0-2) is negligible, 2 eigenvalues. */
+
+L30:
+
+    if ((z__[nn - 9] > tol2 * *sigma && z__[nn - ((*pp) << (1)) - 8] > tol2 *
+	    z__[nn - 11])) {
+	goto L50;
+    }
+
+L40:
+
+    /* 2-by-2 deflation: solve the trailing 2-by-2 block directly. */
+    if (z__[nn - 3] > z__[nn - 7]) {
+	s = z__[nn - 3];
+	z__[nn - 3] = z__[nn - 7];
+	z__[nn - 7] = s;
+    }
+    if (z__[nn - 5] > z__[nn - 3] * tol2) {
+	t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5;
+	s = z__[nn - 3] * (z__[nn - 5] / t);
+	if (s <= t) {
+	    s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.)));
+	} else {
+	    s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s)));
+	}
+	t = z__[nn - 7] + (s + z__[nn - 5]);
+	z__[nn - 3] *= z__[nn - 7] / t;
+	z__[nn - 7] = t;
+    }
+    z__[((*n0) << (2)) - 7] = z__[nn - 7] + *sigma;
+    z__[((*n0) << (2)) - 3] = z__[nn - 3] + *sigma;
+    *n0 += -2;
+    goto L10;
+
+L50:
+
+/* Reverse the qd-array, if warranted. */
+
+    if (*dmin__ <= 0. || *n0 < n0in) {
+	if (z__[((*i0) << (2)) + *pp - 3] * 1.5 < z__[((*n0) << (2)) + *pp -
+		3]) {
+	    ipn4 = (*i0 + *n0) << (2);
+	    i__1 = (*i0 + *n0 - 1) << (1);
+	    for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+		temp = z__[j4 - 3];
+		z__[j4 - 3] = z__[ipn4 - j4 - 3];
+		z__[ipn4 - j4 - 3] = temp;
+		temp = z__[j4 - 2];
+		z__[j4 - 2] = z__[ipn4 - j4 - 2];
+		z__[ipn4 - j4 - 2] = temp;
+		temp = z__[j4 - 1];
+		z__[j4 - 1] = z__[ipn4 - j4 - 5];
+		z__[ipn4 - j4 - 5] = temp;
+		temp = z__[j4];
+		z__[j4] = z__[ipn4 - j4 - 4];
+		z__[ipn4 - j4 - 4] = temp;
+/* L60: */
+	    }
+	    if (*n0 - *i0 <= 4) {
+		z__[((*n0) << (2)) + *pp - 1] = z__[((*i0) << (2)) + *pp - 1];
+		z__[((*n0) << (2)) - *pp] = z__[((*i0) << (2)) - *pp];
+	    }
+/* Computing MIN */
+	    d__1 = dmin2, d__2 = z__[((*n0) << (2)) + *pp - 1];
+	    dmin2 = min(d__1,d__2);
+/* Computing MIN */
+	    d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*i0) << (2)) +
+		    *pp - 1], d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2))
+		     + *pp + 3];
+	    z__[((*n0) << (2)) + *pp - 1] = min(d__1,d__2);
+/* Computing MIN */
+	    d__1 = z__[((*n0) << (2)) - *pp], d__2 = z__[((*i0) << (2)) - *pp]
+		    , d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2)) - *pp
+		    + 4];
+	    z__[((*n0) << (2)) - *pp] = min(d__1,d__2);
+/* Computing MAX */
+	    d__1 = *qmax, d__2 = z__[((*i0) << (2)) + *pp - 3], d__1 = max(
+		    d__1,d__2), d__2 = z__[((*i0) << (2)) + *pp + 1];
+	    *qmax = max(d__1,d__2);
+	    *dmin__ = -0.;
+	}
+    }
+
+/*
+   L70:
+
+   Computing MIN
+*/
+    d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*n0) << (2)) + *pp - 9]
+	    , d__1 = min(d__1,d__2), d__2 = dmin2 + z__[((*n0) << (2)) - *pp];
+    if (*dmin__ < 0. || safmin * *qmax < min(d__1,d__2)) {
+
+/* Choose a shift. */
+
+	dlasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, &dmin1, &dmin2, &dn, &dn1,
+		 &dn2, &tau, &ttype);
+
+/* Call dqds until DMIN > 0. */
+
+L80:
+
+	dlasq5_(i0, n0, &z__[1], pp, &tau, dmin__, &dmin1, &dmin2, &dn, &dn1,
+		&dn2, ieee);
+
+	*ndiv += *n0 - *i0 + 2;
+	++(*iter);
+
+/* Check status. */
+
+	if ((*dmin__ >= 0. && dmin1 > 0.)) {
+
+/* Success. */
+
+	    goto L100;
+
+	} else if ((((*dmin__ < 0. && dmin1 > 0.) && z__[((*n0 - 1) << (2)) -
+		*pp] < tol * (*sigma + dn1)) && abs(dn) < tol * *sigma)) {
+
+/* Convergence hidden by negative DN. */
+
+	    z__[((*n0 - 1) << (2)) - *pp + 2] = 0.;
+	    *dmin__ = 0.;
+	    goto L100;
+	} else if (*dmin__ < 0.) {
+
+/* TAU too big. Select new TAU and try again. */
+
+	    ++(*nfail);
+	    if (ttype < -22) {
+
+/* Failed twice. Play it safe. */
+
+		tau = 0.;
+	    } else if (dmin1 > 0.) {
+
+/* Late failure. Gives excellent shift. */
+
+		tau = (tau + *dmin__) * (1. - eps * 2.);
+		ttype += -11;
+	    } else {
+
+/* Early failure. Divide by 4. */
+
+		tau *= .25;
+		ttype += -12;
+	    }
+	    goto L80;
+	} else if (*dmin__ != *dmin__) {
+
+/* NaN.  (x != x is true only for NaN; retry with a zero shift.) */
+
+	    tau = 0.;
+	    goto L80;
+	} else {
+
+/* Possible underflow. Play it safe. */
+
+	    goto L90;
+	}
+    }
+
+/* Risk of underflow. */
+
+L90:
+    dlasq6_(i0, n0, &z__[1], pp, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2);
+    *ndiv += *n0 - *i0 + 2;
+    ++(*iter);
+    tau = 0.;
+
+L100:
+    /* Accumulate TAU into SIGMA in compensated (two-piece) form:
+       DESIG carries the low-order part of the sum. */
+    if (tau < *sigma) {
+	*desig += tau;
+	t = *sigma + *desig;
+	*desig -= t - *sigma;
+    } else {
+	t = *sigma + tau;
+	*desig = *sigma - (t - tau) + *desig;
+    }
+    *sigma = t;
+
+    return 0;
+
+/* End of DLASQ3 */
+
+} /* dlasq3_ */
+
+/* Subroutine */ int dlasq4_(integer *i0, integer *n0, doublereal *z__,
+	integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1,
+	doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2,
+	doublereal *tau, integer *ttype)
+{
+    /* Initialized data */
+    /* Static translation of a Fortran SAVE variable: G persists
+       across calls (used by the case-6 geometric shift sequence). */
+
+    static doublereal g = 0.;
+
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal s, a2, b1, b2;
+    static integer i4, nn, np;
+    static doublereal gam, gap1, gap2;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLASQ4 computes an approximation TAU to the smallest eigenvalue
+    using values of d from the previous transform.
+
+    I0    (input) INTEGER
+          First index.
+
+    N0    (input) INTEGER
+          Last index.
+
+    Z     (input) DOUBLE PRECISION array, dimension ( 4*N )
+          Z holds the qd array.
+
+    PP    (input) INTEGER
+          PP=0 for ping, PP=1 for pong.
+
+    NOIN  (input) INTEGER
+          The value of N0 at start of EIGTEST.
+
+    DMIN  (input) DOUBLE PRECISION
+          Minimum value of d.
+
+    DMIN1 (input) DOUBLE PRECISION
+          Minimum value of d, excluding D( N0 ).
+
+    DMIN2 (input) DOUBLE PRECISION
+          Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+    DN    (input) DOUBLE PRECISION
+          d(N)
+
+    DN1   (input) DOUBLE PRECISION
+          d(N-1)
+
+    DN2   (input) DOUBLE PRECISION
+          d(N-2)
+
+    TAU   (output) DOUBLE PRECISION
+          This is the shift.
+
+    TTYPE (output) INTEGER
+          Shift type.
+
+    Further Details
+    ===============
+    CNST1 = 9/16
+
+    =====================================================================
+*/
+
+    /* Parameter adjustments */
+    /* Shift to 1-based indexing (f2c idiom). */
+    --z__;
+
+    /* Function Body */
+
+/*
+       A negative DMIN forces the shift to take that absolute value
+       TTYPE records the type of shift.
+*/
+
+    if (*dmin__ <= 0.) {
+	*tau = -(*dmin__);
+	*ttype = -1;
+	return 0;
+    }
+
+    nn = ((*n0) << (2)) + *pp;
+    if (*n0in == *n0) {
+
+/* No eigenvalues deflated. */
+
+	if (*dmin__ == *dn || *dmin__ == *dn1) {
+
+	    b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]);
+	    b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]);
+	    a2 = z__[nn - 7] + z__[nn - 5];
+
+/* Cases 2 and 3. */
+
+	    if ((*dmin__ == *dn && *dmin1 == *dn1)) {
+		gap2 = *dmin2 - a2 - *dmin2 * .25;
+		if ((gap2 > 0. && gap2 > b2)) {
+		    gap1 = a2 - *dn - b2 / gap2 * b2;
+		} else {
+		    gap1 = a2 - *dn - (b1 + b2);
+		}
+		if ((gap1 > 0. && gap1 > b1)) {
+/* Computing MAX */
+		    d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5;
+		    s = max(d__1,d__2);
+		    *ttype = -2;
+		} else {
+		    s = 0.;
+		    if (*dn > b1) {
+			s = *dn - b1;
+		    }
+		    if (a2 > b1 + b2) {
+/* Computing MIN */
+			d__1 = s, d__2 = a2 - (b1 + b2);
+			s = min(d__1,d__2);
+		    }
+/* Computing MAX */
+		    d__1 = s, d__2 = *dmin__ * .333;
+		    s = max(d__1,d__2);
+		    *ttype = -3;
+		}
+	    } else {
+
+/* Case 4. */
+
+		*ttype = -4;
+		s = *dmin__ * .25;
+		if (*dmin__ == *dn) {
+		    gam = *dn;
+		    a2 = 0.;
+		    if (z__[nn - 5] > z__[nn - 7]) {
+			return 0;
+		    }
+		    b2 = z__[nn - 5] / z__[nn - 7];
+		    np = nn - 9;
+		} else {
+		    np = nn - ((*pp) << (1));
+		    b2 = z__[np - 2];
+		    gam = *dn1;
+		    if (z__[np - 4] > z__[np - 2]) {
+			return 0;
+		    }
+		    a2 = z__[np - 4] / z__[np - 2];
+		    if (z__[nn - 9] > z__[nn - 11]) {
+			return 0;
+		    }
+		    b2 = z__[nn - 9] / z__[nn - 11];
+		    np = nn - 13;
+		}
+
+/* Approximate contribution to norm squared from I < NN-1. */
+
+		a2 += b2;
+		i__1 = ((*i0) << (2)) - 1 + *pp;
+		for (i4 = np; i4 >= i__1; i4 += -4) {
+		    if (b2 == 0.) {
+			goto L20;
+		    }
+		    b1 = b2;
+		    if (z__[i4] > z__[i4 - 2]) {
+			return 0;
+		    }
+		    b2 *= z__[i4] / z__[i4 - 2];
+		    a2 += b2;
+		    /* .563 ~ CNST1 = 9/16 (see Further Details above). */
+		    if (max(b2,b1) * 100. < a2 || .563 < a2) {
+			goto L20;
+		    }
+/* L10: */
+		}
+L20:
+		a2 *= 1.05;
+
+/* Rayleigh quotient residual bound. */
+
+		if (a2 < .563) {
+		    s = gam * (1. - sqrt(a2)) / (a2 + 1.);
+		}
+	    }
+	} else if (*dmin__ == *dn2) {
+
+/* Case 5. */
+
+	    *ttype = -5;
+	    s = *dmin__ * .25;
+
+/* Compute contribution to norm squared from I > NN-2. */
+
+	    np = nn - ((*pp) << (1));
+	    b1 = z__[np - 2];
+	    b2 = z__[np - 6];
+	    gam = *dn2;
+	    if (z__[np - 8] > b2 || z__[np - 4] > b1) {
+		return 0;
+	    }
+	    a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.);
+
+/* Approximate contribution to norm squared from I < NN-2. */
+
+	    if (*n0 - *i0 > 2) {
+		b2 = z__[nn - 13] / z__[nn - 15];
+		a2 += b2;
+		i__1 = ((*i0) << (2)) - 1 + *pp;
+		for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
+		    if (b2 == 0.) {
+			goto L40;
+		    }
+		    b1 = b2;
+		    if (z__[i4] > z__[i4 - 2]) {
+			return 0;
+		    }
+		    b2 *= z__[i4] / z__[i4 - 2];
+		    a2 += b2;
+		    if (max(b2,b1) * 100. < a2 || .563 < a2) {
+			goto L40;
+		    }
+/* L30: */
+		}
+L40:
+		a2 *= 1.05;
+	    }
+
+	    if (a2 < .563) {
+		s = gam * (1. - sqrt(a2)) / (a2 + 1.);
+	    }
+	} else {
+
+/* Case 6, no information to guide us. */
+
+	    if (*ttype == -6) {
+		g += (1. - g) * .333;
+	    } else if (*ttype == -18) {
+		g = .083250000000000005;
+	    } else {
+		g = .25;
+	    }
+	    s = g * *dmin__;
+	    *ttype = -6;
+	}
+
+    } else if (*n0in == *n0 + 1) {
+
+/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
+
+	if ((*dmin1 == *dn1 && *dmin2 == *dn2)) {
+
+/* Cases 7 and 8. */
+
+	    *ttype = -7;
+	    s = *dmin1 * .333;
+	    if (z__[nn - 5] > z__[nn - 7]) {
+		return 0;
+	    }
+	    b1 = z__[nn - 5] / z__[nn - 7];
+	    b2 = b1;
+	    if (b2 == 0.) {
+		goto L60;
+	    }
+	    i__1 = ((*i0) << (2)) - 1 + *pp;
+	    for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
+		a2 = b1;
+		if (z__[i4] > z__[i4 - 2]) {
+		    return 0;
+		}
+		b1 *= z__[i4] / z__[i4 - 2];
+		b2 += b1;
+		if (max(b1,a2) * 100. < b2) {
+		    goto L60;
+		}
+/* L50: */
+	    }
+L60:
+	    b2 = sqrt(b2 * 1.05);
+/* Computing 2nd power */
+	    d__1 = b2;
+	    a2 = *dmin1 / (d__1 * d__1 + 1.);
+	    gap2 = *dmin2 * .5 - a2;
+	    if ((gap2 > 0. && gap2 > b2 * a2)) {
+/* Computing MAX */
+		d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
+		s = max(d__1,d__2);
+	    } else {
+/* Computing MAX */
+		d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
+		s = max(d__1,d__2);
+		*ttype = -8;
+	    }
+	} else {
+
+/* Case 9. */
+
+	    s = *dmin1 * .25;
+	    if (*dmin1 == *dn1) {
+		s = *dmin1 * .5;
+	    }
+	    *ttype = -9;
+	}
+
+    } else if (*n0in == *n0 + 2) {
+
+/*
+          Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN.
+
+          Cases 10 and 11.
+*/
+
+	if ((*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7])) {
+	    *ttype = -10;
+	    s = *dmin2 * .333;
+	    if (z__[nn - 5] > z__[nn - 7]) {
+		return 0;
+	    }
+	    b1 = z__[nn - 5] / z__[nn - 7];
+	    b2 = b1;
+	    if (b2 == 0.) {
+		goto L80;
+	    }
+	    i__1 = ((*i0) << (2)) - 1 + *pp;
+	    for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
+		if (z__[i4] > z__[i4 - 2]) {
+		    return 0;
+		}
+		b1 *= z__[i4] / z__[i4 - 2];
+		b2 += b1;
+		if (b1 * 100. < b2) {
+		    goto L80;
+		}
+/* L70: */
+	    }
+L80:
+	    b2 = sqrt(b2 * 1.05);
+/* Computing 2nd power */
+	    d__1 = b2;
+	    a2 = *dmin2 / (d__1 * d__1 + 1.);
+	    gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[
+		    nn - 9]) - a2;
+	    if ((gap2 > 0. && gap2 > b2 * a2)) {
+/* Computing MAX */
+		d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
+		s = max(d__1,d__2);
+	    } else {
+/* Computing MAX */
+		d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
+		s = max(d__1,d__2);
+	    }
+	} else {
+	    s = *dmin2 * .25;
+	    *ttype = -11;
+	}
+    } else if (*n0in > *n0 + 2) {
+
+/* Case 12, more than two eigenvalues deflated. No information. */
+
+	s = 0.;
+	*ttype = -12;
+    }
+
+    *tau = s;
+    return 0;
+
+/* End of DLASQ4 */
+
+} /* dlasq4_ */
+
+/* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__,
+	integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1,
+	doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2,
+	logical *ieee)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    static doublereal d__;
+    static integer j4, j4p2;
+    static doublereal emin, temp;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       May 17, 2000
+
+
+    Purpose
+    =======
+
+    DLASQ5 computes one dqds transform in ping-pong form, one
+    version for IEEE machines another for non IEEE machines.
+
+    Arguments
+    =========
+
+    I0    (input) INTEGER
+          First index.
+
+    N0    (input) INTEGER
+          Last index.
+
+    Z     (input) DOUBLE PRECISION array, dimension ( 4*N )
+          Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
+          an extra argument.
+
+    PP    (input) INTEGER
+          PP=0 for ping, PP=1 for pong.
+
+    TAU   (input) DOUBLE PRECISION
+          This is the shift.
+
+    DMIN  (output) DOUBLE PRECISION
+          Minimum value of d.
+
+    DMIN1 (output) DOUBLE PRECISION
+          Minimum value of d, excluding D( N0 ).
+
+    DMIN2 (output) DOUBLE PRECISION
+          Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+    DN    (output) DOUBLE PRECISION
+          d(N0), the last value of d.
+
+    DNM1  (output) DOUBLE PRECISION
+          d(N0-1).
+
+    DNM2  (output) DOUBLE PRECISION
+          d(N0-2).
+
+    IEEE  (input) LOGICAL
+          Flag for IEEE or non IEEE arithmetic.
+
+    =====================================================================
+*/
+
+
+/*
+   NOTE(review): f2c-generated code.  "x << (2)" means 4*x and
+   "x << (1)" means 2*x; the qd array z stores four entries per
+   logical index, so the shifts below compute Fortran-style offsets.
+   The exact order of the floating-point operations is significant
+   and must not be rearranged.
+*/
+
+    /* Parameter adjustments */
+    --z__;			/* shift to 1-based (Fortran) indexing */
+
+    /* Function Body */
+    if (*n0 - *i0 - 1 <= 0) {
+	return 0;		/* fewer than three active elements: nothing to do */
+    }
+
+    j4 = ((*i0) << (2)) + *pp - 3;	/* 4*I0 + PP - 3: first q of the active ping/pong copy */
+    emin = z__[j4 + 4];
+    d__ = z__[j4] - *tau;		/* apply the shift to the leading q */
+    *dmin__ = d__;
+    *dmin1 = -z__[j4];
+
+    if (*ieee) {
+
+/* Code for IEEE arithmetic. */
+
+/*
+   No per-step divide guards here: this path relies on IEEE semantics
+   (Inf/NaN propagate without trapping), as stated in Purpose.
+*/
+	if (*pp == 0) {
+	    i__1 = (*n0 - 3) << (2);
+	    for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+		z__[j4 - 2] = d__ + z__[j4 - 1];
+		temp = z__[j4 + 1] / z__[j4 - 2];
+		d__ = d__ * temp - *tau;
+		*dmin__ = min(*dmin__,d__);
+		z__[j4] = z__[j4 - 1] * temp;
+/* Computing MIN */
+		d__1 = z__[j4];
+		emin = min(d__1,emin);
+/* L10: */
+	    }
+	} else {
+	    i__1 = (*n0 - 3) << (2);
+	    for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+		z__[j4 - 3] = d__ + z__[j4];
+		temp = z__[j4 + 2] / z__[j4 - 3];
+		d__ = d__ * temp - *tau;
+		*dmin__ = min(*dmin__,d__);
+		z__[j4 - 1] = z__[j4] * temp;
+/* Computing MIN */
+		d__1 = z__[j4 - 1];
+		emin = min(d__1,emin);
+/* L20: */
+	    }
+	}
+
+/* Unroll last two steps. */
+
+	*dnm2 = d__;
+	*dmin2 = *dmin__;
+	j4 = ((*n0 - 2) << (2)) - *pp;
+	j4p2 = j4 + ((*pp) << (1)) - 1;
+	z__[j4 - 2] = *dnm2 + z__[j4p2];
+	z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	*dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+	*dmin__ = min(*dmin__,*dnm1);
+
+	*dmin1 = *dmin__;
+	j4 += 4;
+	j4p2 = j4 + ((*pp) << (1)) - 1;
+	z__[j4 - 2] = *dnm1 + z__[j4p2];
+	z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	*dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+	*dmin__ = min(*dmin__,*dn);
+
+    } else {
+
+/* Code for non IEEE arithmetic. */
+
+/*
+   Each step checks d < 0 and returns early, so the divisions below are
+   never reached with a negative running d on machines without IEEE
+   exception semantics.
+*/
+	if (*pp == 0) {
+	    i__1 = (*n0 - 3) << (2);
+	    for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+		z__[j4 - 2] = d__ + z__[j4 - 1];
+		if (d__ < 0.) {
+		    return 0;
+		} else {
+		    z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+		    d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
+		}
+		*dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+		d__1 = emin, d__2 = z__[j4];
+		emin = min(d__1,d__2);
+/* L30: */
+	    }
+	} else {
+	    i__1 = (*n0 - 3) << (2);
+	    for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+		z__[j4 - 3] = d__ + z__[j4];
+		if (d__ < 0.) {
+		    return 0;
+		} else {
+		    z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+		    d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
+		}
+		*dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+		d__1 = emin, d__2 = z__[j4 - 1];
+		emin = min(d__1,d__2);
+/* L40: */
+	    }
+	}
+
+/* Unroll last two steps. */
+
+	*dnm2 = d__;
+	*dmin2 = *dmin__;
+	j4 = ((*n0 - 2) << (2)) - *pp;
+	j4p2 = j4 + ((*pp) << (1)) - 1;
+	z__[j4 - 2] = *dnm2 + z__[j4p2];
+	if (*dnm2 < 0.) {
+	    return 0;
+	} else {
+	    z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	    *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+	}
+	*dmin__ = min(*dmin__,*dnm1);
+
+	*dmin1 = *dmin__;
+	j4 += 4;
+	j4p2 = j4 + ((*pp) << (1)) - 1;
+	z__[j4 - 2] = *dnm1 + z__[j4p2];
+	if (*dnm1 < 0.) {
+	    return 0;
+	} else {
+	    z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	    *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+	}
+	*dmin__ = min(*dmin__,*dn);
+
+    }
+
+    z__[j4 + 2] = *dn;			/* store d(N0) back into the qd array */
+    z__[((*n0) << (2)) - *pp] = emin;	/* EMIN lives in Z(4*N0) (see Purpose) */
+    return 0;
+
+/* End of DLASQ5 */
+
+} /* dlasq5_ */
+
+/* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__,
+	integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2,
+	doublereal *dn, doublereal *dnm1, doublereal *dnm2)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    static doublereal d__;
+    static integer j4, j4p2;
+    static doublereal emin, temp;
+
+    static doublereal safmin;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    DLASQ6 computes one dqd (shift equal to zero) transform in
+    ping-pong form, with protection against underflow and overflow.
+
+    Arguments
+    =========
+
+    I0    (input) INTEGER
+          First index.
+
+    N0    (input) INTEGER
+          Last index.
+
+    Z     (input) DOUBLE PRECISION array, dimension ( 4*N )
+          Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
+          an extra argument.
+
+    PP    (input) INTEGER
+          PP=0 for ping, PP=1 for pong.
+
+    DMIN  (output) DOUBLE PRECISION
+          Minimum value of d.
+
+    DMIN1 (output) DOUBLE PRECISION
+          Minimum value of d, excluding D( N0 ).
+
+    DMIN2 (output) DOUBLE PRECISION
+          Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+    DN    (output) DOUBLE PRECISION
+          d(N0), the last value of d.
+
+    DNM1  (output) DOUBLE PRECISION
+          d(N0-1).
+
+    DNM2  (output) DOUBLE PRECISION
+          d(N0-2).
+
+    =====================================================================
+*/
+
+
+/*
+   NOTE(review): f2c-generated code; "x << (2)" is 4*x, "x << (1)" is
+   2*x.  SAFEMINIMUM is a macro defined elsewhere in this file
+   (presumably dlamch('S'), the safe minimum -- confirm against its
+   definition).  Each transform step below has three cases:
+     1) denominator exactly zero: the transform degenerates,
+     2) both ratios provably far from overflow: one cheap division,
+     3) otherwise: a rescaled two-division form that cannot overflow.
+*/
+
+    /* Parameter adjustments */
+    --z__;			/* shift to 1-based (Fortran) indexing */
+
+    /* Function Body */
+    if (*n0 - *i0 - 1 <= 0) {
+	return 0;		/* fewer than three active elements: nothing to do */
+    }
+
+    safmin = SAFEMINIMUM;
+    j4 = ((*i0) << (2)) + *pp - 3;	/* 4*I0 + PP - 3: first q of the active copy */
+    emin = z__[j4 + 4];
+    d__ = z__[j4];			/* no shift is applied (dqd, shift == 0) */
+    *dmin__ = d__;
+
+    if (*pp == 0) {
+	i__1 = (*n0 - 3) << (2);
+	for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+	    z__[j4 - 2] = d__ + z__[j4 - 1];
+	    if (z__[j4 - 2] == 0.) {
+		/* Degenerate step: new d comes straight from z; emin is 0. */
+		z__[j4] = 0.;
+		d__ = z__[j4 + 1];
+		*dmin__ = d__;
+		emin = 0.;
+	    } else if ((safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4
+		    - 2] < z__[j4 + 1])) {
+		/* Quotient safe in both directions: single division. */
+		temp = z__[j4 + 1] / z__[j4 - 2];
+		z__[j4] = z__[j4 - 1] * temp;
+		d__ *= temp;
+	    } else {
+		/* Guarded form: divide the smaller factors first. */
+		z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+		d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
+	    }
+	    *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+	    d__1 = emin, d__2 = z__[j4];
+	    emin = min(d__1,d__2);
+/* L10: */
+	}
+    } else {
+	i__1 = (*n0 - 3) << (2);
+	for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+	    z__[j4 - 3] = d__ + z__[j4];
+	    if (z__[j4 - 3] == 0.) {
+		z__[j4 - 1] = 0.;
+		d__ = z__[j4 + 2];
+		*dmin__ = d__;
+		emin = 0.;
+	    } else if ((safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4
+		    - 3] < z__[j4 + 2])) {
+		temp = z__[j4 + 2] / z__[j4 - 3];
+		z__[j4 - 1] = z__[j4] * temp;
+		d__ *= temp;
+	    } else {
+		z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+		d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
+	    }
+	    *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+	    d__1 = emin, d__2 = z__[j4 - 1];
+	    emin = min(d__1,d__2);
+/* L20: */
+	}
+    }
+
+/* Unroll last two steps. */
+
+    *dnm2 = d__;
+    *dmin2 = *dmin__;
+    j4 = ((*n0 - 2) << (2)) - *pp;
+    j4p2 = j4 + ((*pp) << (1)) - 1;
+    z__[j4 - 2] = *dnm2 + z__[j4p2];
+    if (z__[j4 - 2] == 0.) {
+	z__[j4] = 0.;
+	*dnm1 = z__[j4p2 + 2];
+	*dmin__ = *dnm1;
+	emin = 0.;
+    } else if ((safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+	     z__[j4p2 + 2])) {
+	temp = z__[j4p2 + 2] / z__[j4 - 2];
+	z__[j4] = z__[j4p2] * temp;
+	*dnm1 = *dnm2 * temp;
+    } else {
+	z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	*dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
+    }
+    *dmin__ = min(*dmin__,*dnm1);
+
+    *dmin1 = *dmin__;
+    j4 += 4;
+    j4p2 = j4 + ((*pp) << (1)) - 1;
+    z__[j4 - 2] = *dnm1 + z__[j4p2];
+    if (z__[j4 - 2] == 0.) {
+	z__[j4] = 0.;
+	*dn = z__[j4p2 + 2];
+	*dmin__ = *dn;
+	emin = 0.;
+    } else if ((safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+	     z__[j4p2 + 2])) {
+	temp = z__[j4p2 + 2] / z__[j4 - 2];
+	z__[j4] = z__[j4p2] * temp;
+	*dn = *dnm1 * temp;
+    } else {
+	z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	*dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
+    }
+    *dmin__ = min(*dmin__,*dn);
+
+    z__[j4 + 2] = *dn;			/* store d(N0) back into the qd array */
+    z__[((*n0) << (2)) - *pp] = emin;	/* EMIN lives in Z(4*N0) (see Purpose) */
+    return 0;
+
+/* End of DLASQ6 */
+
+} /* dlasq6_ */
+
+/* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m,
+	 integer *n, doublereal *c__, doublereal *s, doublereal *a, integer *
+	lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j, info;
+    static doublereal temp;
+    extern logical lsame_(char *, char *);
+    static doublereal ctemp, stemp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLASR   performs the transformation
+
+       A := P*A,   when SIDE = 'L' or 'l'  (  Left-hand side )
+
+       A := A*P',  when SIDE = 'R' or 'r'  ( Right-hand side )
+
+    where A is an m by n real matrix and P is an orthogonal matrix,
+    consisting of a sequence of plane rotations determined by the
+    parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l'
+    and z = n when SIDE = 'R' or 'r' ):
+
+    When  DIRECT = 'F' or 'f'  ( Forward sequence ) then
+
+       P = P( z - 1 )*...*P( 2 )*P( 1 ),
+
+    and when DIRECT = 'B' or 'b'  ( Backward sequence ) then
+
+       P = P( 1 )*P( 2 )*...*P( z - 1 ),
+
+    where  P( k ) is a plane rotation matrix for the following planes:
+
+       when  PIVOT = 'V' or 'v'  ( Variable pivot ),
+          the plane ( k, k + 1 )
+
+       when  PIVOT = 'T' or 't'  ( Top pivot ),
+          the plane ( 1, k + 1 )
+
+       when  PIVOT = 'B' or 'b'  ( Bottom pivot ),
+          the plane ( k, z )
+
+    c( k ) and s( k )  must contain the  cosine and sine that define the
+    matrix  P( k ).  The two by two plane rotation part of the matrix
+    P( k ), R( k ), is assumed to be of the form
+
+       R( k ) = (  c( k )  s( k ) ).
+                ( -s( k )  c( k ) )
+
+    This version vectorises across rows of the array A when SIDE = 'L'.
+
+    NOTE(review): the C signature orders the arguments (side, pivot,
+    direct, ...) while the Arguments section below documents DIRECT
+    before PIVOT; the signature order is authoritative for callers.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            Specifies whether the plane rotation matrix P is applied to
+            A on the left or the right.
+            = 'L':  Left, compute A := P*A
+            = 'R':  Right, compute A:= A*P'
+
+    DIRECT  (input) CHARACTER*1
+            Specifies whether P is a forward or backward sequence of
+            plane rotations.
+            = 'F':  Forward, P = P( z - 1 )*...*P( 2 )*P( 1 )
+            = 'B':  Backward, P = P( 1 )*P( 2 )*...*P( z - 1 )
+
+    PIVOT   (input) CHARACTER*1
+            Specifies the plane for which P(k) is a plane rotation
+            matrix.
+            = 'V':  Variable pivot, the plane (k,k+1)
+            = 'T':  Top pivot, the plane (1,k+1)
+            = 'B':  Bottom pivot, the plane (k,z)
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  If m <= 1, an immediate
+            return is effected.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  If n <= 1, an
+            immediate return is effected.
+
+    C, S    (input) DOUBLE PRECISION arrays, dimension
+                    (M-1) if SIDE = 'L'
+                    (N-1) if SIDE = 'R'
+            c(k) and s(k) contain the cosine and sine that define the
+            matrix P(k).  The two by two plane rotation part of the
+            matrix P(k), R(k), is assumed to be of the form
+            R( k ) = (  c( k )  s( k ) ).
+                     ( -s( k )  c( k ) )
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            The m by n matrix A.  On exit, A is overwritten by P*A if
+            SIDE = 'L' or by A*P' if SIDE = 'R'.
+            (NOTE(review): the original comment had 'L' and 'R' swapped
+            here, contradicting the Purpose section above and the code
+            itself; fixed to match the code.)
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    --c__;			/* shift rotation arrays to 1-based indexing */
+    --s;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;		/* a[i + j*a_dim1] is Fortran A(i,j) */
+
+    /* Function Body */
+    info = 0;
+    if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+	info = 1;
+    } else if (! (lsame_(pivot, "V") || lsame_(pivot,
+	    "T") || lsame_(pivot, "B"))) {
+	info = 2;
+    } else if (! (lsame_(direct, "F") || lsame_(direct,
+	    "B"))) {
+	info = 3;
+    } else if (*m < 0) {
+	info = 4;
+    } else if (*n < 0) {
+	info = 5;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("DLASR ", &info);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+    if (lsame_(side, "L")) {
+
+/* Form  P * A */
+
+	if (lsame_(pivot, "V")) {
+	    if (lsame_(direct, "F")) {
+		i__1 = *m - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    /* Skip identity rotations (c == 1, s == 0). */
+		    if (ctemp != 1. || stemp != 0.) {
+			i__2 = *n;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    temp = a[j + 1 + i__ * a_dim1];
+			    a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+				    a[j + i__ * a_dim1];
+			    a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+				    + i__ * a_dim1];
+/* L10: */
+			}
+		    }
+/* L20: */
+		}
+	    } else if (lsame_(direct, "B")) {
+		for (j = *m - 1; j >= 1; --j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__1 = *n;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    temp = a[j + 1 + i__ * a_dim1];
+			    a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+				    a[j + i__ * a_dim1];
+			    a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+				    + i__ * a_dim1];
+/* L30: */
+			}
+		    }
+/* L40: */
+		}
+	    }
+	} else if (lsame_(pivot, "T")) {
+	    if (lsame_(direct, "F")) {
+		i__1 = *m;
+		for (j = 2; j <= i__1; ++j) {
+		    ctemp = c__[j - 1];
+		    stemp = s[j - 1];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__2 = *n;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    temp = a[j + i__ * a_dim1];
+			    a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+				    i__ * a_dim1 + 1];
+			    a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+				    i__ * a_dim1 + 1];
+/* L50: */
+			}
+		    }
+/* L60: */
+		}
+	    } else if (lsame_(direct, "B")) {
+		for (j = *m; j >= 2; --j) {
+		    ctemp = c__[j - 1];
+		    stemp = s[j - 1];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__1 = *n;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    temp = a[j + i__ * a_dim1];
+			    a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+				    i__ * a_dim1 + 1];
+			    a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+				    i__ * a_dim1 + 1];
+/* L70: */
+			}
+		    }
+/* L80: */
+		}
+	    }
+	} else if (lsame_(pivot, "B")) {
+	    if (lsame_(direct, "F")) {
+		i__1 = *m - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__2 = *n;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    temp = a[j + i__ * a_dim1];
+			    a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+				     + ctemp * temp;
+			    a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+				    a_dim1] - stemp * temp;
+/* L90: */
+			}
+		    }
+/* L100: */
+		}
+	    } else if (lsame_(direct, "B")) {
+		for (j = *m - 1; j >= 1; --j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__1 = *n;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    temp = a[j + i__ * a_dim1];
+			    a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+				     + ctemp * temp;
+			    a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+				    a_dim1] - stemp * temp;
+/* L110: */
+			}
+		    }
+/* L120: */
+		}
+	    }
+	}
+    } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+	if (lsame_(pivot, "V")) {
+	    if (lsame_(direct, "F")) {
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    temp = a[i__ + (j + 1) * a_dim1];
+			    a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+				     a[i__ + j * a_dim1];
+			    a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+				    i__ + j * a_dim1];
+/* L130: */
+			}
+		    }
+/* L140: */
+		}
+	    } else if (lsame_(direct, "B")) {
+		for (j = *n - 1; j >= 1; --j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    temp = a[i__ + (j + 1) * a_dim1];
+			    a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+				     a[i__ + j * a_dim1];
+			    a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+				    i__ + j * a_dim1];
+/* L150: */
+			}
+		    }
+/* L160: */
+		}
+	    }
+	} else if (lsame_(pivot, "T")) {
+	    if (lsame_(direct, "F")) {
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+		    ctemp = c__[j - 1];
+		    stemp = s[j - 1];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    temp = a[i__ + j * a_dim1];
+			    a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+				    i__ + a_dim1];
+			    a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+				    a_dim1];
+/* L170: */
+			}
+		    }
+/* L180: */
+		}
+	    } else if (lsame_(direct, "B")) {
+		for (j = *n; j >= 2; --j) {
+		    ctemp = c__[j - 1];
+		    stemp = s[j - 1];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    temp = a[i__ + j * a_dim1];
+			    a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+				    i__ + a_dim1];
+			    a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+				    a_dim1];
+/* L190: */
+			}
+		    }
+/* L200: */
+		}
+	    }
+	} else if (lsame_(pivot, "B")) {
+	    if (lsame_(direct, "F")) {
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__2 = *m;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    temp = a[i__ + j * a_dim1];
+			    a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+				     + ctemp * temp;
+			    a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+				    a_dim1] - stemp * temp;
+/* L210: */
+			}
+		    }
+/* L220: */
+		}
+	    } else if (lsame_(direct, "B")) {
+		for (j = *n - 1; j >= 1; --j) {
+		    ctemp = c__[j];
+		    stemp = s[j];
+		    if (ctemp != 1. || stemp != 0.) {
+			i__1 = *m;
+			for (i__ = 1; i__ <= i__1; ++i__) {
+			    temp = a[i__ + j * a_dim1];
+			    a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+				     + ctemp * temp;
+			    a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+				    a_dim1] - stemp * temp;
+/* L230: */
+			}
+		    }
+/* L240: */
+		}
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DLASR */
+
+} /* dlasr_ */
+
+/* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer *
+	info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j;
+    static doublereal d1, d2, d3;
+    static integer dir;
+    static doublereal tmp;
+    static integer endd;
+    extern logical lsame_(char *, char *);
+    static integer stack[64]	/* was [2][32] */;
+    static doublereal dmnmx;
+    static integer start;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static integer stkpnt;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    Sort the numbers in D in increasing order (if ID = 'I') or
+    in decreasing order (if ID = 'D' ).
+
+    Use Quick Sort, reverting to Insertion sort on arrays of
+    size <= 20. Dimension of STACK limits N to about 2**32.
+
+    Arguments
+    =========
+
+    ID      (input) CHARACTER*1
+            = 'I': sort D in increasing order;
+            = 'D': sort D in decreasing order.
+
+    N       (input) INTEGER
+            The length of the array D.
+
+    D       (input/output) DOUBLE PRECISION array, dimension (N)
+            On entry, the array to be sorted.
+            On exit, D has been sorted into increasing order
+            (D(1) <= ... <= D(N) ) or into decreasing order
+            (D(1) >= ... >= D(N) ), depending on ID.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input paramters.
+*/
+
+/*
+   NOTE(review): "stack" holds (start, end) index pairs, two ints per
+   entry, 32 entries deep (f2c flattened the Fortran STACK(2,32)).
+   The explicit stack replaces recursion; "(stkpnt) << (1)" is 2*stkpnt.
+*/
+
+    /* Parameter adjustments */
+    --d__;			/* shift to 1-based (Fortran) indexing */
+
+    /* Function Body */
+    *info = 0;
+    dir = -1;			/* -1 = invalid, 0 = decreasing, 1 = increasing */
+    if (lsame_(id, "D")) {
+	dir = 0;
+    } else if (lsame_(id, "I")) {
+	dir = 1;
+    }
+    if (dir == -1) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASRT", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	return 0;
+    }
+
+    stkpnt = 1;
+    stack[0] = 1;		/* seed the stack with the whole range [1, n] */
+    stack[1] = *n;
+L10:
+    start = stack[((stkpnt) << (1)) - 2];
+    endd = stack[((stkpnt) << (1)) - 1];
+    --stkpnt;
+    if ((endd - start <= 20 && endd - start > 0)) {
+
+/* Do Insertion sort on D( START:ENDD ) */
+
+	if (dir == 0) {
+
+/* Sort into decreasing order */
+
+	    i__1 = endd;
+	    for (i__ = start + 1; i__ <= i__1; ++i__) {
+		i__2 = start + 1;
+		/* Bubble d[i] leftwards until it is in order. */
+		for (j = i__; j >= i__2; --j) {
+		    if (d__[j] > d__[j - 1]) {
+			dmnmx = d__[j];
+			d__[j] = d__[j - 1];
+			d__[j - 1] = dmnmx;
+		    } else {
+			goto L30;
+		    }
+/* L20: */
+		}
+L30:
+		;
+	    }
+
+	} else {
+
+/* Sort into increasing order */
+
+	    i__1 = endd;
+	    for (i__ = start + 1; i__ <= i__1; ++i__) {
+		i__2 = start + 1;
+		for (j = i__; j >= i__2; --j) {
+		    if (d__[j] < d__[j - 1]) {
+			dmnmx = d__[j];
+			d__[j] = d__[j - 1];
+			d__[j - 1] = dmnmx;
+		    } else {
+			goto L50;
+		    }
+/* L40: */
+		}
+L50:
+		;
+	    }
+
+	}
+
+    } else if (endd - start > 20) {
+
+/*
+          Partition D( START:ENDD ) and stack parts, largest one first
+
+          Choose partition entry as median of 3
+*/
+
+	d1 = d__[start];
+	d2 = d__[endd];
+	i__ = (start + endd) / 2;
+	d3 = d__[i__];
+	if (d1 < d2) {
+	    if (d3 < d1) {
+		dmnmx = d1;
+	    } else if (d3 < d2) {
+		dmnmx = d3;
+	    } else {
+		dmnmx = d2;
+	    }
+	} else {
+	    if (d3 < d2) {
+		dmnmx = d2;
+	    } else if (d3 < d1) {
+		dmnmx = d3;
+	    } else {
+		dmnmx = d1;
+	    }
+	}
+
+	if (dir == 0) {
+
+/* Sort into decreasing order */
+
+	    i__ = start - 1;
+	    j = endd + 1;
+L60:
+/* Hoare-style partition: scan j down and i up until they cross. */
+L70:
+	    --j;
+	    if (d__[j] < dmnmx) {
+		goto L70;
+	    }
+L80:
+	    ++i__;
+	    if (d__[i__] > dmnmx) {
+		goto L80;
+	    }
+	    if (i__ < j) {
+		tmp = d__[i__];
+		d__[i__] = d__[j];
+		d__[j] = tmp;
+		goto L60;
+	    }
+	    /* Push the larger half first so the smaller half is popped
+	       (and split) next, bounding the stack depth. */
+	    if (j - start > endd - j - 1) {
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = start;
+		stack[((stkpnt) << (1)) - 1] = j;
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = j + 1;
+		stack[((stkpnt) << (1)) - 1] = endd;
+	    } else {
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = j + 1;
+		stack[((stkpnt) << (1)) - 1] = endd;
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = start;
+		stack[((stkpnt) << (1)) - 1] = j;
+	    }
+	} else {
+
+/* Sort into increasing order */
+
+	    i__ = start - 1;
+	    j = endd + 1;
+L90:
+L100:
+	    --j;
+	    if (d__[j] > dmnmx) {
+		goto L100;
+	    }
+L110:
+	    ++i__;
+	    if (d__[i__] < dmnmx) {
+		goto L110;
+	    }
+	    if (i__ < j) {
+		tmp = d__[i__];
+		d__[i__] = d__[j];
+		d__[j] = tmp;
+		goto L90;
+	    }
+	    if (j - start > endd - j - 1) {
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = start;
+		stack[((stkpnt) << (1)) - 1] = j;
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = j + 1;
+		stack[((stkpnt) << (1)) - 1] = endd;
+	    } else {
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = j + 1;
+		stack[((stkpnt) << (1)) - 1] = endd;
+		++stkpnt;
+		stack[((stkpnt) << (1)) - 2] = start;
+		stack[((stkpnt) << (1)) - 1] = j;
+	    }
+	}
+    }
+    if (stkpnt > 0) {
+	goto L10;
+    }
+    return 0;
+
+/* End of DLASRT */
+
+} /* dlasrt_ */
+
+/* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx,
+	doublereal *scale, doublereal *sumsq)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1;
+
+    /* Local variables */
+    static integer ix;
+    static doublereal absxi;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLASSQ  returns the values  scl  and  smsq  such that
+
+       ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
+
+    where  x( i ) = X( 1 + ( i - 1 )*INCX ). The value of  sumsq  is
+    assumed to be non-negative and  scl  returns the value
+
+       scl = max( scale, abs( x( i ) ) ).
+
+    scale and sumsq must be supplied in SCALE and SUMSQ and
+    scl and smsq are overwritten on SCALE and SUMSQ respectively.
+
+    The routine makes only one pass through the vector x.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The number of elements to be used from the vector X.
+
+    X       (input) DOUBLE PRECISION array, dimension (N)
+            The vector for which a scaled sum of squares is computed.
+               x( i )  = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
+
+    INCX    (input) INTEGER
+            The increment between successive values of the vector X.
+            INCX > 0.
+
+    SCALE   (input/output) DOUBLE PRECISION
+            On entry, the value  scale  in the equation above.
+            On exit, SCALE is overwritten with  scl , the scaling factor
+            for the sum of squares.
+
+    SUMSQ   (input/output) DOUBLE PRECISION
+            On entry, the value  sumsq  in the equation above.
+            On exit, SUMSQ is overwritten with  smsq , the basic sum of
+            squares from which  scl  has been factored out.
+
+    =====================================================================
+*/
+
+
+/*
+   NOTE(review): the running pair (scale, sumsq) keeps the squares
+   scaled so that sumsq never overflows: whenever a new |x(ix)| exceeds
+   the current scale, sumsq is rescaled and the scale is raised.  The
+   f2c loop condition also tolerates INCX < 0 (stepping backwards from
+   index 1), although the docstring above requires INCX > 0.
+*/
+
+    /* Parameter adjustments */
+    --x;			/* shift to 1-based (Fortran) indexing */
+
+    /* Function Body */
+    if (*n > 0) {
+	i__1 = (*n - 1) * *incx + 1;
+	i__2 = *incx;
+	for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+	    if (x[ix] != 0.) {	/* exact zeros contribute nothing; skip them */
+		absxi = (d__1 = x[ix], abs(d__1));
+		if (*scale < absxi) {
+		    /* New largest magnitude: rescale sumsq to the new scale. */
+/* Computing 2nd power */
+		    d__1 = *scale / absxi;
+		    *sumsq = *sumsq * (d__1 * d__1) + 1;
+		    *scale = absxi;
+		} else {
+		    /* |x| <= scale: ratio is <= 1, safe to square and add. */
+/* Computing 2nd power */
+		    d__1 = absxi / *scale;
+		    *sumsq += d__1 * d__1;
+		}
+	    }
+/* L10: */
+	}
+    }
+    return 0;
+
+/* End of DLASSQ */
+
+} /* dlassq_ */
+
+/* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__,
+	doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal *
+	csr, doublereal *snl, doublereal *csl)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    static doublereal a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt,
+	     clt, crt, slt, srt;
+    static integer pmax;
+    static doublereal temp;
+    static logical swap;
+    static doublereal tsign;
+
+    static logical gasmal;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLASV2 computes the singular value decomposition of a 2-by-2
+    triangular matrix
+       [  F   G  ]
+       [  0   H  ].
+    On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the
+    smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and
+    right singular vectors for abs(SSMAX), giving the decomposition
+
+       [ CSL  SNL ] [  F   G  ] [ CSR -SNR ]  =  [ SSMAX   0   ]
+       [-SNL  CSL ] [  0   H  ] [ SNR  CSR ]     [  0    SSMIN ].
+
+    Arguments
+    =========
+
+    F       (input) DOUBLE PRECISION
+            The (1,1) element of the 2-by-2 matrix.
+
+    G       (input) DOUBLE PRECISION
+            The (1,2) element of the 2-by-2 matrix.
+
+    H       (input) DOUBLE PRECISION
+            The (2,2) element of the 2-by-2 matrix.
+
+    SSMIN   (output) DOUBLE PRECISION
+            abs(SSMIN) is the smaller singular value.
+
+    SSMAX   (output) DOUBLE PRECISION
+            abs(SSMAX) is the larger singular value.
+
+    SNL     (output) DOUBLE PRECISION
+    CSL     (output) DOUBLE PRECISION
+            The vector (CSL, SNL) is a unit left singular vector for the
+            singular value abs(SSMAX).
+
+    SNR     (output) DOUBLE PRECISION
+    CSR     (output) DOUBLE PRECISION
+            The vector (CSR, SNR) is a unit right singular vector for the
+            singular value abs(SSMAX).
+
+    Further Details
+    ===============
+
+    Any input parameter may be aliased with any output parameter.
+
+    Barring over/underflow and assuming a guard digit in subtraction, all
+    output quantities are correct to within a few units in the last
+    place (ulps).
+
+    In IEEE arithmetic, the code works correctly if one matrix element is
+    infinite.
+
+    Overflow will not occur unless the largest singular value itself
+    overflows or is within a few ulps of overflow. (On machines with
+    partial overflow, like the Cray, overflow may occur if the largest
+    singular value is within a factor of 2 of overflow.)
+
+    Underflow is harmless if underflow is gradual. Otherwise, results
+    may correspond to a matrix modified by perturbations of size near
+    the underflow threshold.
+
+    =====================================================================
+*/
+
+
+    ft = *f;
+    fa = abs(ft);
+    ht = *h__;
+    ha = abs(*h__);
+
+/*
+     PMAX points to the maximum absolute element of matrix
+       PMAX = 1 if F largest in absolute values
+       PMAX = 2 if G largest in absolute values
+       PMAX = 3 if H largest in absolute values
+*/
+
+    pmax = 1;
+    swap = ha > fa;
+    if (swap) {
+	/* Ensure fa >= ha by swapping the roles of F and H; the left and
+	   right singular vectors are un-swapped again at the end. */
+	pmax = 3;
+	temp = ft;
+	ft = ht;
+	ht = temp;
+	temp = fa;
+	fa = ha;
+	ha = temp;
+
+/* Now FA .ge. HA */
+
+    }
+    gt = *g;
+    ga = abs(gt);
+    if (ga == 0.) {
+
+/* Diagonal matrix */
+
+	*ssmin = ha;
+	*ssmax = fa;
+	clt = 1.;
+	crt = 1.;
+	slt = 0.;
+	srt = 0.;
+    } else {
+	gasmal = TRUE_;
+	if (ga > fa) {
+	    pmax = 2;
+	    /* EPSILON is a file-level macro (machine epsilon). */
+	    if (fa / ga < EPSILON) {
+
+/* Case of very large GA */
+
+		gasmal = FALSE_;
+		*ssmax = ga;
+		if (ha > 1.) {
+		    *ssmin = fa / (ga / ha);
+		} else {
+		    *ssmin = fa / ga * ha;
+		}
+		clt = 1.;
+		slt = ht / gt;
+		srt = 1.;
+		crt = ft / gt;
+	    }
+	}
+	if (gasmal) {
+
+/* Normal case */
+
+	    d__ = fa - ha;
+	    if (d__ == fa) {
+
+/* Copes with infinite F or H */
+
+		l = 1.;
+	    } else {
+		l = d__ / fa;
+	    }
+
+/* Note that 0 .le. L .le. 1 */
+
+	    m = gt / ft;
+
+/* Note that abs(M) .le. 1/macheps */
+
+	    t = 2. - l;
+
+/* Note that T .ge. 1 */
+
+	    mm = m * m;
+	    tt = t * t;
+	    s = sqrt(tt + mm);
+
+/* Note that 1 .le. S .le. 1 + 1/macheps */
+
+	    if (l == 0.) {
+		r__ = abs(m);
+	    } else {
+		r__ = sqrt(l * l + mm);
+	    }
+
+/* Note that 0 .le. R .le. 1 + 1/macheps */
+
+	    a = (s + r__) * .5;
+
+/* Note that 1 .le. A .le. 1 + abs(M) */
+
+	    *ssmin = ha / a;
+	    *ssmax = fa * a;
+	    if (mm == 0.) {
+
+/* Note that M is very tiny */
+
+		if (l == 0.) {
+		    /* NOTE(review): c_b2804 and c_b15 are f2c file-level
+		       constants defined elsewhere; this line corresponds to
+		       Fortran T = SIGN( TWO, FT )*SIGN( ONE, GT ), so
+		       presumably c_b2804 = 2.0 and c_b15 = 1.0 -- confirm
+		       against their definitions earlier in the file. */
+		    t = d_sign(&c_b2804, &ft) * d_sign(&c_b15, &gt);
+		} else {
+		    t = gt / d_sign(&d__, &ft) + m / t;
+		}
+	    } else {
+		t = (m / (s + t) + m / (r__ + l)) * (a + 1.);
+	    }
+	    l = sqrt(t * t + 4.);
+	    crt = 2. / l;
+	    srt = t / l;
+	    clt = (crt + srt * m) / a;
+	    slt = ht / ft * srt / a;
+	}
+    }
+    if (swap) {
+	/* Undo the F/H swap: left and right vectors trade places. */
+	*csl = srt;
+	*snl = crt;
+	*csr = slt;
+	*snr = clt;
+    } else {
+	*csl = clt;
+	*snl = slt;
+	*csr = crt;
+	*snr = srt;
+    }
+
+/* Correct signs of SSMAX and SSMIN */
+
+    if (pmax == 1) {
+	tsign = d_sign(&c_b15, csr) * d_sign(&c_b15, csl) * d_sign(&c_b15, f);
+    }
+    if (pmax == 2) {
+	tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, csl) * d_sign(&c_b15, g);
+    }
+    if (pmax == 3) {
+	tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, snl) * d_sign(&c_b15,
+		h__);
+    }
+    *ssmax = d_sign(ssmax, &tsign);
+    d__1 = tsign * d_sign(&c_b15, f) * d_sign(&c_b15, h__);
+    *ssmin = d_sign(ssmin, &d__1);
+    return 0;
+
+/* End of DLASV2 */
+
+} /* dlasv2_ */
+
+/* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer
+	*k1, integer *k2, integer *ipiv, integer *incx)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+    static doublereal temp;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DLASWP performs a series of row interchanges on the matrix A.
+    One row interchange is initiated for each of rows K1 through K2 of A.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the matrix of column dimension N to which the row
+            interchanges will be applied.
+            On exit, the permuted matrix.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+
+    K1      (input) INTEGER
+            The first element of IPIV for which a row interchange will
+            be done.
+
+    K2      (input) INTEGER
+            The last element of IPIV for which a row interchange will
+            be done.
+
+    IPIV    (input) INTEGER array, dimension (M*abs(INCX))
+            The vector of pivot indices.  Only the elements in positions
+            K1 through K2 of IPIV are accessed.
+            IPIV(K) = L implies rows K and L are to be interchanged.
+
+    INCX    (input) INTEGER
+            The increment between successive values of IPIV.  If IPIV
+            is negative, the pivots are applied in reverse order.
+
+    Further Details
+    ===============
+
+    Modified by
+     R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA
+
+    =====================================================================
+
+
+       Interchange row I with row IPIV(I) for each of rows K1 through K2.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;		/* a[i + j*a_dim1] is Fortran A(i,j) */
+    --ipiv;			/* shift pivot vector to 1-based indexing */
+
+    /* Function Body */
+    /* Decide traversal direction from the sign of INCX:
+       forward (K1..K2) for INCX > 0, reverse (K2..K1) for INCX < 0,
+       no-op for INCX == 0. */
+    if (*incx > 0) {
+	ix0 = *k1;
+	i1 = *k1;
+	i2 = *k2;
+	inc = 1;
+    } else if (*incx < 0) {
+	ix0 = (1 - *k2) * *incx + 1;
+	i1 = *k2;
+	i2 = *k1;
+	inc = -1;
+    } else {
+	return 0;
+    }
+
+    /* Process columns in blocks of 32 ((n/32) << 5 == 32*(n/32)),
+       presumably for memory locality; the remainder columns are
+       handled by the second loop below. */
+    n32 = (*n / 32) << (5);
+    if (n32 != 0) {
+	i__1 = n32;
+	for (j = 1; j <= i__1; j += 32) {
+	    ix = ix0;
+	    i__2 = i2;
+	    i__3 = inc;
+	    for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+		    {
+		ip = ipiv[ix];
+		if (ip != i__) {	/* skip self-swaps */
+		    i__4 = j + 31;
+		    for (k = j; k <= i__4; ++k) {
+			temp = a[i__ + k * a_dim1];
+			a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+			a[ip + k * a_dim1] = temp;
+/* L10: */
+		    }
+		}
+		ix += *incx;
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+    if (n32 != *n) {
+	/* Remaining columns n32+1 .. n. */
+	++n32;
+	ix = ix0;
+	i__1 = i2;
+	i__3 = inc;
+	for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+	    ip = ipiv[ix];
+	    if (ip != i__) {
+		i__2 = *n;
+		for (k = n32; k <= i__2; ++k) {
+		    temp = a[i__ + k * a_dim1];
+		    a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+		    a[ip + k * a_dim1] = temp;
+/* L40: */
+		}
+	    }
+	    ix += *incx;
+/* L50: */
+	}
+    }
+
+    return 0;
+
+/* End of DLASWP */
+
+} /* dlaswp_ */
+
+/* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal *
+	a, integer *lda, doublereal *e, doublereal *tau, doublereal *w,
+	integer *ldw)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__, iw;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    static doublereal alpha;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), daxpy_(integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *),
+	    dsymv_(char *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *,
+	    doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DLATRD reduces NB rows and columns of a real symmetric matrix A to
+    symmetric tridiagonal form by an orthogonal similarity
+    transformation Q' * A * Q, and returns the matrices V and W which are
+    needed to apply the transformation to the unreduced part of A.
+
+    If UPLO = 'U', DLATRD reduces the last NB rows and columns of a
+    matrix, of which the upper triangle is supplied;
+    if UPLO = 'L', DLATRD reduces the first NB rows and columns of a
+    matrix, of which the lower triangle is supplied.
+
+    This is an auxiliary routine called by DSYTRD.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER
+            Specifies whether the upper or lower triangular part of the
+            symmetric matrix A is stored:
+            = 'U': Upper triangular
+            = 'L': Lower triangular
+
+    N       (input) INTEGER
+            The order of the matrix A.
+
+    NB      (input) INTEGER
+            The number of rows and columns to be reduced.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the symmetric matrix A.  If UPLO = 'U', the leading
+            n-by-n upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading n-by-n lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+            On exit:
+            if UPLO = 'U', the last NB columns have been reduced to
+              tridiagonal form, with the diagonal elements overwriting
+              the diagonal elements of A; the elements above the diagonal
+              with the array TAU, represent the orthogonal matrix Q as a
+              product of elementary reflectors;
+            if UPLO = 'L', the first NB columns have been reduced to
+              tridiagonal form, with the diagonal elements overwriting
+              the diagonal elements of A; the elements below the diagonal
+              with the array TAU, represent the  orthogonal matrix Q as a
+              product of elementary reflectors.
+            See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= (1,N).
+
+    E       (output) DOUBLE PRECISION array, dimension (N-1)
+            If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal
+            elements of the last NB columns of the reduced matrix;
+            if UPLO = 'L', E(1:nb) contains the subdiagonal elements of
+            the first NB columns of the reduced matrix.
+
+    TAU     (output) DOUBLE PRECISION array, dimension (N-1)
+            The scalar factors of the elementary reflectors, stored in
+            TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'.
+            See Further Details.
+
+    W       (output) DOUBLE PRECISION array, dimension (LDW,NB)
+            The n-by-nb matrix W required to update the unreduced part
+            of A.
+
+    LDW     (input) INTEGER
+            The leading dimension of the array W. LDW >= max(1,N).
+
+    Further Details
+    ===============
+
+    If UPLO = 'U', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(n) H(n-1) . . . H(n-nb+1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
+    and tau in TAU(i-1).
+
+    If UPLO = 'L', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(1) H(2) . . . H(nb).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
+    and tau in TAU(i).
+
+    The elements of the vectors v together form the n-by-nb matrix V
+    which is needed, with W, to apply the transformation to the unreduced
+    part of the matrix, using a symmetric rank-2k update of the form:
+    A := A - V*W' - W*V'.
+
+    The contents of A on exit are illustrated by the following examples
+    with n = 5 and nb = 2:
+
+    if UPLO = 'U':                       if UPLO = 'L':
+
+      (  a   a   a   v4  v5 )              (  d                  )
+      (      a   a   v4  v5 )              (  1   d              )
+      (          a   1   v5 )              (  v1  1   a          )
+      (              d   1  )              (  v1  v2  a   a      )
+      (                  d  )              (  v1  v2  a   a   a  )
+
+    where d denotes a diagonal element of the reduced matrix, a denotes
+    an element of the original matrix that is unchanged, and vi denotes
+    an element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift each base pointer so the 1-based Fortran-style
+       subscripts used below index the C arrays correctly. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --e;
+    --tau;
+    w_dim1 = *ldw;
+    w_offset = 1 + w_dim1 * 1;
+    w -= w_offset;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+	i__1 = *n - *nb + 1;
+	for (i__ = *n; i__ >= i__1; --i__) {
+	    /* iw is the column of W paired with column i of A
+	       (W holds only the NB trailing columns). */
+	    iw = i__ - *n + *nb;
+	    if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+		i__2 = *n - i__;
+		dgemv_("No transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) *
+			a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+			c_b15, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		dgemv_("No transpose", &i__, &i__2, &c_b151, &w[(iw + 1) *
+			w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b15, &a[i__ * a_dim1 + 1], &c__1);
+	    }
+	    if (i__ > 1) {
+
+/*
+                Generate elementary reflector H(i) to annihilate
+                A(1:i-2,i)
+*/
+
+		i__2 = i__ - 1;
+		dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 +
+			1], &c__1, &tau[i__ - 1]);
+		e[i__ - 1] = a[i__ - 1 + i__ * a_dim1];
+		a[i__ - 1 + i__ * a_dim1] = 1.;
+
+/* Compute W(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		dsymv_("Upper", &i__2, &c_b15, &a[a_offset], lda, &a[i__ *
+			a_dim1 + 1], &c__1, &c_b29, &w[iw * w_dim1 + 1], &
+			c__1);
+		if (i__ < *n) {
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[(iw + 1) *
+			    w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, &
+			    c_b29, &w[i__ + 1 + iw * w_dim1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1)
+			    * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+			    c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) *
+			    a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &
+			    c_b29, &w[i__ + 1 + iw * w_dim1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[(iw + 1)
+			    * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+			    c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1);
+		}
+		i__2 = i__ - 1;
+		dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+		i__2 = i__ - 1;
+		alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1],
+			 &c__1, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = i__ - 1;
+		daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+			w_dim1 + 1], &c__1);
+	    }
+
+/* L10: */
+	}
+    } else {
+
+/* Reduce first NB columns of lower triangle */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1],
+		    lda, &w[i__ + w_dim1], ldw, &c_b15, &a[i__ + i__ * a_dim1]
+		    , &c__1);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + w_dim1],
+		    ldw, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1]
+		    , &c__1);
+	    if (i__ < *n) {
+
+/*
+                Generate elementary reflector H(i) to annihilate
+                A(i+2:n,i)
+*/
+
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) +
+			i__ * a_dim1], &c__1, &tau[i__]);
+		e[i__] = a[i__ + 1 + i__ * a_dim1];
+		a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute W(i+1:n,i) */
+
+		i__2 = *n - i__;
+		dsymv_("Lower", &i__2, &c_b15, &a[i__ + 1 + (i__ + 1) *
+			a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b29, &w[i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[i__ + 1 + w_dim1]
+			, ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[
+			i__ * w_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 +
+			a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1]
+			, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[
+			i__ * w_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + 1 +
+			w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ *
+			w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+		i__2 = *n - i__;
+		daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+	    }
+
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of DLATRD */
+
+} /* dlatrd_ */
+
+/* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Local variables */
+    static integer i__, j, l;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dlarf_(char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    DORG2R generates an m by n real matrix Q with orthonormal columns,
+    which is defined as the first n columns of a product of k elementary
+    reflectors of order m
+
+          Q  =  H(1) H(2) . . . H(k)
+
+    as returned by DGEQRF.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix Q. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix Q. M >= N >= 0.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines the
+            matrix Q. N >= K >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the i-th column must contain the vector which
+            defines the elementary reflector H(i), for i = 1,2,...,k, as
+            returned by DGEQRF in the first k columns of its array
+            argument A.
+            On exit, the m-by-n matrix Q.
+
+    LDA     (input) INTEGER
+            The first dimension of the array A. LDA >= max(1,M).
+
+    TAU     (input) DOUBLE PRECISION array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by DGEQRF.
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (N)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument has an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift base pointers for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORG2R", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+    i__1 = *n;
+    for (j = *k + 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (l = 1; l <= i__2; ++l) {
+	    a[l + j * a_dim1] = 0.;
+/* L10: */
+	}
+	a[j + j * a_dim1] = 1.;
+/* L20: */
+    }
+
+/* Accumulate Q by applying H(i), for i = k down to 1, to the columns built above */
+
+    for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+	if (i__ < *n) {
+	    a[i__ + i__ * a_dim1] = 1.;
+	    i__1 = *m - i__ + 1;
+	    i__2 = *n - i__;
+	    dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+		    i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+	}
+	if (i__ < *m) {
+	    i__1 = *m - i__;
+	    d__1 = -tau[i__];
+	    dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+	}
+/* Diagonal entry of H(i) applied to the unit vector e(i): 1 - tau(i) */
+	a[i__ + i__ * a_dim1] = 1. - tau[i__];
+
+/* Set A(1:i-1,i) to zero */
+
+	i__1 = i__ - 1;
+	for (l = 1; l <= i__1; ++l) {
+	    a[l + i__ * a_dim1] = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of DORG2R */
+
+} /* dorg2r_ */
+
+/* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k,
+	doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__, j, nb, mn;
+    extern logical lsame_(char *, char *);
+    static integer iinfo;
+    static logical wantq;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int dorglq_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+	     integer *, doublereal *, doublereal *, integer *, integer *);
+    static integer lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DORGBR generates one of the real orthogonal matrices Q or P**T
+    determined by DGEBRD when reducing a real matrix A to bidiagonal
+    form: A = Q * B * P**T.  Q and P**T are defined as products of
+    elementary reflectors H(i) or G(i) respectively.
+
+    If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q
+    is of order M:
+    if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n
+    columns of Q, where m >= n >= k;
+    if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an
+    M-by-M matrix.
+
+    If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T
+    is of order N:
+    if k < n, P**T = G(k) . . . G(2) G(1) and DORGBR returns the first m
+    rows of P**T, where n >= m >= k;
+    if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as
+    an N-by-N matrix.
+
+    Arguments
+    =========
+
+    VECT    (input) CHARACTER*1
+            Specifies whether the matrix Q or the matrix P**T is
+            required, as defined in the transformation applied by DGEBRD:
+            = 'Q':  generate Q;
+            = 'P':  generate P**T.
+
+    M       (input) INTEGER
+            The number of rows of the matrix Q or P**T to be returned.
+            M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix Q or P**T to be returned.
+            N >= 0.
+            If VECT = 'Q', M >= N >= min(M,K);
+            if VECT = 'P', N >= M >= min(N,K).
+
+    K       (input) INTEGER
+            If VECT = 'Q', the number of columns in the original M-by-K
+            matrix reduced by DGEBRD.
+            If VECT = 'P', the number of rows in the original K-by-N
+            matrix reduced by DGEBRD.
+            K >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the vectors which define the elementary reflectors,
+            as returned by DGEBRD.
+            On exit, the M-by-N matrix Q or P**T.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A. LDA >= max(1,M).
+
+    TAU     (input) DOUBLE PRECISION array, dimension
+                                  (min(M,K)) if VECT = 'Q'
+                                  (min(N,K)) if VECT = 'P'
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i) or G(i), which determines Q or P**T, as
+            returned by DGEBRD in its array argument TAUQ or TAUP.
+
+    WORK    (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK. LWORK >= max(1,min(M,N)).
+            For optimum performance LWORK >= min(M,N)*NB, where NB
+            is the optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift base pointers for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    wantq = lsame_(vect, "Q");
+    mn = min(*m,*n);
+    lquery = *lwork == -1;
+    if ((! wantq && ! lsame_(vect, "P"))) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*n < 0 || (wantq && (*n > *m || *n < min(*m,*k))) || (! wantq
+	    && (*m > *n || *m < min(*n,*k)))) {
+	*info = -3;
+    } else if (*k < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*m)) {
+	*info = -6;
+    } else if ((*lwork < max(1,mn) && ! lquery)) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+	if (wantq) {
+	    nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (
+		    ftnlen)1);
+	} else {
+	    nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (
+		    ftnlen)1);
+	}
+	lwkopt = max(1,mn) * nb;
+	work[1] = (doublereal) lwkopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGBR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    if (wantq) {
+
+/*
+          Form Q, determined by a call to DGEBRD to reduce an m-by-k
+          matrix
+*/
+
+	if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+	    dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+		    iinfo);
+
+	} else {
+
+/*
+             If m < k, assume m = n
+
+             Shift the vectors which define the elementary reflectors one
+             column to the right, and set the first row and column of Q
+             to those of the unit matrix
+*/
+
+	    for (j = *m; j >= 2; --j) {
+		a[j * a_dim1 + 1] = 0.;
+		i__1 = *m;
+		for (i__ = j + 1; i__ <= i__1; ++i__) {
+		    a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L10: */
+		}
+/* L20: */
+	    }
+	    a[a_dim1 + 1] = 1.;
+	    i__1 = *m;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+		a[i__ + a_dim1] = 0.;
+/* L30: */
+	    }
+	    if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+		i__1 = *m - 1;
+		i__2 = *m - 1;
+		i__3 = *m - 1;
+		dorgqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+			tau[1], &work[1], lwork, &iinfo);
+	    }
+	}
+    } else {
+
+/*
+          Form P', determined by a call to DGEBRD to reduce a k-by-n
+          matrix
+*/
+
+	if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+	    dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+		    iinfo);
+
+	} else {
+
+/*
+             If k >= n, assume m = n
+
+             Shift the vectors which define the elementary reflectors one
+             row downward, and set the first row and column of P' to
+             those of the unit matrix
+*/
+
+	    a[a_dim1 + 1] = 1.;
+	    i__1 = *n;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+		a[i__ + a_dim1] = 0.;
+/* L40: */
+	    }
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		for (i__ = j - 1; i__ >= 2; --i__) {
+		    a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1];
+/* L50: */
+		}
+		a[j * a_dim1 + 1] = 0.;
+/* L60: */
+	    }
+	    if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		i__3 = *n - 1;
+		dorglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+			tau[1], &work[1], lwork, &iinfo);
+	    }
+	}
+    }
+/* Restore the optimal-workspace report possibly clobbered by the sub-calls */
+    work[1] = (doublereal) lwkopt;
+    return 0;
+
+/* End of DORGBR */
+
+} /* dorgbr_ */
+
+/* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi,
+	doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    static integer i__, j, nb, nh, iinfo;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *);
+    static integer lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DORGHR generates a real orthogonal matrix Q which is defined as the
+    product of IHI-ILO elementary reflectors of order N, as returned by
+    DGEHRD:
+
+    Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the matrix Q. N >= 0.
+
+    ILO     (input) INTEGER
+    IHI     (input) INTEGER
+            ILO and IHI must have the same values as in the previous call
+            of DGEHRD. Q is equal to the unit matrix except in the
+            submatrix Q(ilo+1:ihi,ilo+1:ihi).
+            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the vectors which define the elementary reflectors,
+            as returned by DGEHRD.
+            On exit, the N-by-N orthogonal matrix Q.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A. LDA >= max(1,N).
+
+    TAU     (input) DOUBLE PRECISION array, dimension (N-1)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by DGEHRD.
+
+    WORK    (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK. LWORK >= IHI-ILO.
+            For optimum performance LWORK >= (IHI-ILO)*NB, where NB is
+            the optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift base pointers for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nh = *ihi - *ilo;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if ((*lwork < max(1,nh) && ! lquery)) {
+	*info = -8;
+    }
+
+    if (*info == 0) {
+	nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, (
+		ftnlen)1);
+	lwkopt = max(1,nh) * nb;
+	work[1] = (doublereal) lwkopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGHR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+/*
+       Shift the vectors which define the elementary reflectors one
+       column to the right, and set the first ilo and the last n-ihi
+       rows and columns to those of the unit matrix
+*/
+
+    i__1 = *ilo + 1;
+    for (j = *ihi; j >= i__1; --j) {
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = 0.;
+/* L10: */
+	}
+	i__2 = *ihi;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L20: */
+	}
+	i__2 = *n;
+	for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+    i__1 = *ilo;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = 0.;
+/* L50: */
+	}
+	a[j + j * a_dim1] = 1.;
+/* L60: */
+    }
+    i__1 = *n;
+    for (j = *ihi + 1; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = 0.;
+/* L70: */
+	}
+	a[j + j * a_dim1] = 1.;
+/* L80: */
+    }
+
+    if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+	dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+		ilo], &work[1], lwork, &iinfo);
+    }
+/* Restore the optimal-workspace report possibly clobbered by dorgqr_ */
+    work[1] = (doublereal) lwkopt;
+    return 0;
+
+/* End of DORGHR */
+
+} /* dorghr_ */
+
+/* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Local variables */
+    static integer i__, j, l;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dlarf_(char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DORGL2 generates an m by n real matrix Q with orthonormal rows,
+    which is defined as the first m rows of a product of k elementary
+    reflectors of order n
+
+          Q  =  H(k) . . . H(2) H(1)
+
+    as returned by DGELQF.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix Q. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix Q. N >= M.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines the
+            matrix Q. M >= K >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the i-th row must contain the vector which defines
+            the elementary reflector H(i), for i = 1,2,...,k, as returned
+            by DGELQF in the first k rows of its array argument A.
+            On exit, the m-by-n matrix Q.
+
+    LDA     (input) INTEGER
+            The first dimension of the array A. LDA >= max(1,M).
+
+    TAU     (input) DOUBLE PRECISION array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by DGELQF.
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (M)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument has an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift base pointers for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGL2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m <= 0) {
+	return 0;
+    }
+
+    if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (l = *k + 1; l <= i__2; ++l) {
+		a[l + j * a_dim1] = 0.;
+/* L10: */
+	    }
+	    if ((j > *k && j <= *m)) {
+		a[j + j * a_dim1] = 1.;
+	    }
+/* L20: */
+	}
+    }
+
+/* Accumulate Q by applying H(i), for i = k down to 1, from the right */
+
+    for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the right */
+
+	if (i__ < *n) {
+	    if (i__ < *m) {
+		a[i__ + i__ * a_dim1] = 1.;
+		i__1 = *m - i__;
+		i__2 = *n - i__ + 1;
+		dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+			tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	    }
+	    i__1 = *n - i__;
+	    d__1 = -tau[i__];
+	    dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+	}
+/* Diagonal entry of H(i) applied to the unit vector e(i): 1 - tau(i) */
+	a[i__ + i__ * a_dim1] = 1. - tau[i__];
+
+/* Set A(i,1:i-1) to zero */
+
+	i__1 = i__ - 1;
+	for (l = 1; l <= i__1; ++l) {
+	    a[i__ + l * a_dim1] = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of DORGL2 */
+
+} /* dorgl2_ */
+
+/* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlarfb_(char *, char *, char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGLQ generates an M-by-N real matrix Q with orthonormal rows,
+ which is defined as the first M rows of a product of K elementary
+ reflectors of order N
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by DGELQF in the first k rows of its array argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*m) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if ((*lwork < max(1,*m) && ! lquery)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if ((nb > 1 && nb < *k)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (((nb >= nbmin && nb < *k) && nx < *k)) {
+
+/*
+ Use blocked code after the last block.
+ The first kk rows are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *n - i__ + 1;
+ dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, &
+ i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DORGLQ */
+
+} /* dorglq_ */
+
+/* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+/* NOTE(review): f2c-generated "static" locals are shared scratch storage,
+ so this routine is not reentrant (not thread-safe). */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlarfb_(char *, char *, char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGQR generates an M-by-N real matrix Q with orthonormal columns,
+ which is defined as the first N columns of a product of K elementary
+ reflectors of order M
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by DGEQRF in the first k columns of its array
+ argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+/* Shift base pointers so that 1-based Fortran-style subscripts
+ a[i + j*a_dim1], tau[i], work[i] can be used directly in C. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*n) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if ((*lwork < max(1,*n) && ! lquery)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if ((nb > 1 && nb < *k)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (((nb >= nbmin && nb < *k) && nx < *k)) {
+
+/*
+ Use blocked code after the last block.
+ The first kk columns are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = kk + 1; j <= i__1; ++j) {
+ i__2 = kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *n) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+/* Loop over the blocks in reverse: i = ki+1, ki+1-nb, ..., down to 1. */
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *m - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__ - ib + 1;
+ dlarfb_("Left", "No transpose", "Forward", "Columnwise", &
+ i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+ work[ib + 1], &ldwork);
+ }
+
+/* Apply H to rows i:m of current block */
+
+ i__2 = *m - i__ + 1;
+ dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+ i__2 = i__ + ib - 1;
+ for (j = i__; j <= i__2; ++j) {
+ i__3 = i__ - 1;
+ for (l = 1; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DORGQR */
+
+} /* dorgqr_ */
+
+/* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+/* NOTE(review): f2c-generated "static" locals are shared scratch storage,
+ so this routine is not reentrant (not thread-safe). */
+ static integer i__, i1, i2, i3, mi, ni, nq;
+ static doublereal aii;
+ static logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORM2L overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQLF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+/* Shift base pointers so that 1-based Fortran-style subscripts
+ a[i + j*a_dim1], c__[i + j*c_dim1], tau[i], work[i] work in C. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+/* Select application order of the reflectors: forward (1..k) when
+ (left && notran) || (!left && !notran), otherwise reverse (k..1). */
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+/* The reflector vector has an implicit unit entry: temporarily
+ overwrite the stored element with 1, apply H(i), then restore it. */
+ aii = a[nq - *k + i__ + i__ * a_dim1];
+ a[nq - *k + i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[nq - *k + i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORM2L */
+
+} /* dorm2l_ */
+
+/* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+/* NOTE(review): f2c-generated "static" locals are shared scratch storage,
+ so this routine is not reentrant (not thread-safe). */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static doublereal aii;
+ static logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORM2R overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+/* Shift base pointers so that 1-based Fortran-style subscripts
+ a[i + j*a_dim1], c__[i + j*c_dim1], tau[i], work[i] work in C. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+/* Select application order of the reflectors: forward (1..k) when
+ (left && !notran) || (!left && notran), otherwise reverse (k..1). */
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+/* The reflector vector has an implicit unit diagonal entry: temporarily
+ overwrite the stored diagonal with 1, apply H(i), then restore it. */
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORM2R */
+
+} /* dorm2r_ */
+
+/* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau,
+ doublereal *c__, integer *ldc, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+/* NOTE(review): f2c-generated "static" locals are shared scratch storage,
+ so this routine is not reentrant (not thread-safe). */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static logical notran;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static logical applyq;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C
+ with
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C
+ with
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': P * C C * P
+ TRANS = 'T': P**T * C C * P**T
+
+ Here Q and P**T are the orthogonal matrices determined by DGEBRD when
+ reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and
+ P**T are defined as products of elementary reflectors H(i) and G(i)
+ respectively.
+
+ Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the
+ order of the orthogonal matrix Q or P**T that is applied.
+
+ If VECT = 'Q', A is assumed to have been an NQ-by-K matrix:
+ if nq >= k, Q = H(1) H(2) . . . H(k);
+ if nq < k, Q = H(1) H(2) . . . H(nq-1).
+
+ If VECT = 'P', A is assumed to have been a K-by-NQ matrix:
+ if k < nq, P = G(1) G(2) . . . G(k);
+ if k >= nq, P = G(1) G(2) . . . G(nq-1).
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ = 'Q': apply Q or Q**T;
+ = 'P': apply P or P**T.
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q, Q**T, P or P**T from the Left;
+ = 'R': apply Q, Q**T, P or P**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q or P;
+ = 'T': Transpose, apply Q**T or P**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original
+ matrix reduced by DGEBRD.
+ If VECT = 'P', the number of rows in the original
+ matrix reduced by DGEBRD.
+ K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,min(nq,K)) if VECT = 'Q'
+ (LDA,nq) if VECT = 'P'
+ The vectors which define the elementary reflectors H(i) and
+ G(i), whose products determine the matrices Q and P, as
+ returned by DGEBRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If VECT = 'Q', LDA >= max(1,nq);
+ if VECT = 'P', LDA >= max(1,min(nq,K)).
+
+ TAU (input) DOUBLE PRECISION array, dimension (min(nq,K))
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i) which determines Q or P, as returned
+ by DGEBRD in the array argument TAUQ or TAUP.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q
+ or P*C or P**T*C or C*P or C*P**T.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+/* Shift base pointers so that 1-based Fortran-style subscripts
+ a[i + j*a_dim1], c__[i + j*c_dim1], tau[i], work[i] work in C. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if ((! applyq && ! lsame_(vect, "P"))) {
+ *info = -1;
+ } else if ((! left && ! lsame_(side, "R"))) {
+ *info = -2;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2)
+ )) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if ((*lwork < max(1,nw) && ! lquery)) {
+ *info = -13;
+ }
+ }
+
+/* Query the blocksize of the underlying DORMQR/DORMLQ; s_cat builds the
+ two-character SIDE//TRANS option string expected by ILAENV. */
+ if (*info == 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ work[1] = 1.;
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Dispatch: VECT='Q' applies Q via DORMQR, VECT='P' applies P (or P**T)
+ via DORMLQ with the transpose flag flipped. */
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to DGEBRD with nq >= k */
+
+ dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to DGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+ , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to DGEBRD with nq > k */
+
+ dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to DGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ dormlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1],
+ lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1],
+ lwork, &iinfo);
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMBR */
+
+} /* dormbr_ */
+
+/* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+/* NOTE(review): f2c-generated "static" locals are shared scratch storage,
+ so this routine is not reentrant (not thread-safe). */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static doublereal aii;
+ static logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORML2 overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+/* Shift base pointers so that 1-based Fortran-style subscripts
+ a[i + j*a_dim1], c__[i + j*c_dim1], tau[i], work[i] work in C. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+/* Select application order of the reflectors: forward (1..k) when
+ (left && notran) || (!left && !notran), otherwise reverse (k..1). */
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+/* Reflector vectors are stored in the rows of A (LQ form), so dlarf_
+ is given increment lda to walk along row i. The implicit unit entry
+ is substituted for the stored diagonal, then restored. */
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORML2 */
+
+} /* dorml2_ */
+
+/*
+   NOTE(review): f2c-generated translation of LAPACK DORMLQ (apply Q or Q**T
+   from an LQ factorization to C).  All locals below are declared `static`
+   (old f2c convention), so this routine is NOT reentrant / thread-safe.
+*/
+/* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ /* t is the triangular factor T of each block reflector: leading
+ dimension 65 (passed as &c__65 to dlarft/dlarfb below), block size
+ capped at 64 (see `i__1 = 64` before the min() that sets nb). */
+ static doublereal t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMLQ overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* Shift the base pointers back by one row/column so the 1-based
+ Fortran-style indexing a[i + j*a_dim1], tau[i], work[i] used
+ throughout this translation stays valid. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if ((*lwork < max(1,nw) && ! lquery)) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if ((nb > 1 && nb < *k)) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ /* WORK is too small for the chosen block size: shrink NB to
+ what fits, and look up the minimum worthwhile block size. */
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ /* For the LQ case dlarfb must be invoked with the opposite
+ transpose option to the one requested (see LAPACK DORMLQ). */
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMLQ */
+
+} /* dormlq_ */
+
+/*
+   NOTE(review): f2c-generated translation of LAPACK DORMQL (apply Q or Q**T
+   from a QL factorization to C).  All locals below are declared `static`
+   (old f2c convention), so this routine is NOT reentrant / thread-safe.
+*/
+/* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ /* t is the triangular factor T of each block reflector: leading
+ dimension 65 (passed as &c__65 below), block size capped at 64. */
+ static doublereal t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMQL overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQLF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* Shift the base pointers back by one row/column so the 1-based
+ Fortran-style indexing used throughout this translation stays valid. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if ((*lwork < max(1,nw) && ! lquery)) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if ((nb > 1 && nb < *k)) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ /* WORK is too small for the chosen block size: shrink NB to
+ what fits, and look up the minimum worthwhile block size. */
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i+ib-1) . . . H(i+1) H(i)
+ (QL stores reflectors columnwise and accumulates backward,
+ hence the "Backward"/"Columnwise" options below.)
+*/
+
+ i__4 = nq - *k + i__ + ib - 1;
+ dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+ , lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMQL */
+
+} /* dormql_ */
+
+/*
+   NOTE(review): f2c-generated translation of LAPACK DORMQR (apply Q or Q**T
+   from a QR factorization to C).  All locals below are declared `static`
+   (old f2c convention), so this routine is NOT reentrant / thread-safe.
+*/
+/* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ /* t is the triangular factor T of each block reflector: leading
+ dimension 65 (passed as &c__65 below), block size capped at 64. */
+ static doublereal t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMQR overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* Shift the base pointers back by one row/column so the 1-based
+ Fortran-style indexing used throughout this translation stays valid. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "T"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if ((*lwork < max(1,nw) && ! lquery)) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if ((nb > 1 && nb < *k)) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ /* WORK is too small for the chosen block size: shrink NB to
+ what fits, and look up the minimum worthwhile block size. */
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/*
+ Use blocked code
+ (note: the forward/backward condition is the opposite of the
+ LQ/QL routines because here Q = H(1)..H(k), per DORMQR)
+*/
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMQR */
+
+} /* dormqr_ */
+
+/*
+   NOTE(review): f2c-generated translation of LAPACK DORMTR (apply Q or Q**T
+   from a DSYTRD tridiagonal reduction to C).  Dispatches to dormql_ for
+   UPLO='U' and dormqr_ for UPLO='L'.  All locals below are declared
+   `static` (old f2c convention), so the routine is NOT reentrant.
+*/
+/* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dormqr_(char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMTR overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix of order nq, with nq = m if
+ SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of
+ nq-1 elementary reflectors, as returned by DSYTRD:
+
+ if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1);
+
+ if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1).
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A contains elementary reflectors
+ from DSYTRD;
+ = 'L': Lower triangle of A contains elementary reflectors
+ from DSYTRD.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,M) if SIDE = 'L'
+ (LDA,N) if SIDE = 'R'
+ The vectors which define the elementary reflectors, as
+ returned by DSYTRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'.
+
+ TAU (input) DOUBLE PRECISION array, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DSYTRD.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* Shift the base pointers back by one row/column so the 1-based
+ Fortran-style indexing used throughout this translation stays valid. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! upper && ! lsame_(uplo, "L"))) {
+ *info = -2;
+ } else if ((! lsame_(trans, "N") && ! lsame_(trans,
+ "T"))) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if ((*lwork < max(1,nw) && ! lquery)) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ /* Query the block size of the routine that will actually do the
+ work (DORMQL for UPLO='U', DORMQR for UPLO='L'), with the
+ Q dimension reduced by one as in the real calls below. */
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("DORMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nq == 1) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to DSYTRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ /* &a[((a_dim1) << (1)) + 1] is &A(1,2) in 1-based Fortran terms:
+ for UPLO='U' the reflector vectors start in column 2. */
+ dormql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda,
+ &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to DSYTRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ /* &a[a_dim1 + 2] is &A(2,1): for UPLO='L' the reflector vectors
+ start below the diagonal in column 1; C is offset by one
+ row (left) or one column (right) via (i1,i2). */
+ dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMTR */
+
+} /* dormtr_ */
+
+/* Subroutine */ int dpotf2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer j;
+ static doublereal ajj;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DPOTF2 computes the Cholesky factorization of a real symmetric
+ positive definite matrix A.
+
+ The factorization has the form
+ A = U' * U , if UPLO = 'U', or
+ A = L * L', if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the unblocked version of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n by n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U'*U or A = L*L'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, the leading minor of order k is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if ((! upper && ! lsame_(uplo, "L"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1,
+ &a[j * a_dim1 + 1], &c__1);
+ if (ajj <= 0.) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(j + 1) *
+ a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b15, &
+ a[j + (j + 1) * a_dim1], lda);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j
+ + a_dim1], lda);
+ if (ajj <= 0.) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[j + 1 +
+ a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + 1 +
+ j * a_dim1], &c__1);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of DPOTF2 */
+
+} /* dpotf2_ */
+
+/* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer *
+	lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    static integer j, jb, nb;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static logical upper;
+    extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *), dpotf2_(char *, integer *,
+	    doublereal *, integer *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       March 31, 1993
+
+
+    Purpose
+    =======
+
+    DPOTRF computes the Cholesky factorization of a real symmetric
+    positive definite matrix A.
+
+    The factorization has the form
+       A = U**T * U,  if UPLO = 'U', or
+       A = L  * L**T,  if UPLO = 'L',
+    where U is an upper triangular matrix and L is lower triangular.
+
+    This is the block version of the algorithm, calling Level 3 BLAS.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            = 'U':  Upper triangle of A is stored;
+            = 'L':  Lower triangle of A is stored.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the symmetric matrix A.  If UPLO = 'U', the leading
+            N-by-N upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading N-by-N lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+
+            On exit, if INFO = 0, the factor U or L from the Cholesky
+            factorization A = U**T*U or A = L*L**T.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, the leading minor of order i is not
+                  positive definite, and the factorization could not be
+                  completed.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift the base pointer so 1-based Fortran indexing a[i + j*a_dim1]
+       works directly on the C array. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPOTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+	    ftnlen)1);
+    if (nb <= 1 || nb >= *n) {
+
+/*        Use unblocked code. */
+
+	dpotf2_(uplo, n, &a[a_offset], lda, info);
+    } else {
+
+/*        Use blocked code. */
+
+	if (upper) {
+
+/*           Compute the Cholesky factorization A = U'*U. */
+
+	    i__1 = *n;
+	    i__2 = nb;
+/*           The i__2 < 0 test is the literal translation of a Fortran DO
+             with stride NB; NB > 0 here, so j simply steps up by NB. */
+	    for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+                Update and factorize the current diagonal block and test
+                for non-positive-definiteness.
+
+   Computing MIN
+*/
+		i__3 = nb, i__4 = *n - j + 1;
+		jb = min(i__3,i__4);
+		i__3 = j - 1;
+		dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b151, &a[j *
+			a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda);
+		dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+		if (*info != 0) {
+		    goto L30;
+		}
+		if (j + jb <= *n) {
+
+/*                 Compute the current block row. */
+
+		    i__3 = *n - j - jb + 1;
+		    i__4 = j - 1;
+		    dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, &
+			    c_b151, &a[j * a_dim1 + 1], lda, &a[(j + jb) *
+			    a_dim1 + 1], lda, &c_b15, &a[j + (j + jb) *
+			    a_dim1], lda);
+		    i__3 = *n - j - jb + 1;
+		    dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, &
+			    i__3, &c_b15, &a[j + j * a_dim1], lda, &a[j + (j
+			    + jb) * a_dim1], lda);
+		}
+/* L10: */
+	    }
+
+	} else {
+
+/*           Compute the Cholesky factorization A = L*L'. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+                Update and factorize the current diagonal block and test
+                for non-positive-definiteness.
+
+   Computing MIN
+*/
+		i__3 = nb, i__4 = *n - j + 1;
+		jb = min(i__3,i__4);
+		i__3 = j - 1;
+		dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b151, &a[j +
+			a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda);
+		dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+		if (*info != 0) {
+		    goto L30;
+		}
+		if (j + jb <= *n) {
+
+/*                 Compute the current block column. */
+
+		    i__3 = *n - j - jb + 1;
+		    i__4 = j - 1;
+		    dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &
+			    c_b151, &a[j + jb + a_dim1], lda, &a[j + a_dim1],
+			    lda, &c_b15, &a[j + jb + j * a_dim1], lda);
+		    i__3 = *n - j - jb + 1;
+		    dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, &
+			    jb, &c_b15, &a[j + j * a_dim1], lda, &a[j + jb +
+			    j * a_dim1], lda);
+		}
+/* L20: */
+	    }
+	}
+    }
+    goto L40;
+
+/*  DPOTF2 reported a non-positive-definite minor inside the block that
+    starts at column j; convert its block-local index into the order of
+    the global leading minor. */
+L30:
+    *info = *info + j - 1;
+
+L40:
+    return 0;
+
+/*     End of DPOTRF */
+
+} /* dpotrf_ */
+
+/* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__,
+	doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+    integer pow_ii(integer *, integer *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__, j, k, m;
+    static doublereal p;
+    static integer ii, end, lgn;
+    static doublereal eps, tiny;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    static integer lwmin;
+    extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    static integer start;
+
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dlacpy_(char *, integer *, integer
+	    *, doublereal *, integer *, doublereal *, integer *),
+	    dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *), dlasrt_(char *, integer *, doublereal *, integer *);
+    static integer liwmin, icompz;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    static doublereal orgnrm;
+    static logical lquery;
+    static integer smlsiz, dtrtrw, storez;
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DSTEDC computes all eigenvalues and, optionally, eigenvectors of a
+    symmetric tridiagonal matrix using the divide and conquer method.
+    The eigenvectors of a full or band real symmetric matrix can also be
+    found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this
+    matrix to tridiagonal form.
+
+    This code makes very mild assumptions about floating point
+    arithmetic. It will work on machines with a guard digit in
+    add/subtract, or on those binary machines without guard digits
+    which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+    It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.  See DLAED3 for details.
+
+    Arguments
+    =========
+
+    COMPZ   (input) CHARACTER*1
+            = 'N':  Compute eigenvalues only.
+            = 'I':  Compute eigenvectors of tridiagonal matrix also.
+            = 'V':  Compute eigenvectors of original dense symmetric
+                    matrix also.  On entry, Z contains the orthogonal
+                    matrix used to reduce the original matrix to
+                    tridiagonal form.
+
+    N       (input) INTEGER
+            The dimension of the symmetric tridiagonal matrix.  N >= 0.
+
+    D       (input/output) DOUBLE PRECISION array, dimension (N)
+            On entry, the diagonal elements of the tridiagonal matrix.
+            On exit, if INFO = 0, the eigenvalues in ascending order.
+
+    E       (input/output) DOUBLE PRECISION array, dimension (N-1)
+            On entry, the subdiagonal elements of the tridiagonal matrix.
+            On exit, E has been destroyed.
+
+    Z       (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
+            On entry, if COMPZ = 'V', then Z contains the orthogonal
+            matrix used in the reduction to tridiagonal form.
+            On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+            orthonormal eigenvectors of the original symmetric matrix,
+            and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+            of the symmetric tridiagonal matrix.
+            If  COMPZ = 'N', then Z is not referenced.
+
+    LDZ     (input) INTEGER
+            The leading dimension of the array Z.  LDZ >= 1.
+            If eigenvectors are desired, then LDZ >= max(1,N).
+
+    WORK    (workspace/output) DOUBLE PRECISION array,
+                                           dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.
+            If COMPZ = 'N' or N <= 1 then LWORK must be at least 1.
+            If COMPZ = 'V' and N > 1 then LWORK must be at least
+                           ( 1 + 3*N + 2*N*lg N + 3*N**2 ),
+                           where lg( N ) = smallest integer k such
+                           that 2**k >= N.
+            If COMPZ = 'I' and N > 1 then LWORK must be at least
+                           ( 1 + 4*N + N**2 ).
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    IWORK   (workspace/output) INTEGER array, dimension (LIWORK)
+            On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+    LIWORK  (input) INTEGER
+            The dimension of the array IWORK.
+            If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1.
+            If COMPZ = 'V' and N > 1 then LIWORK must be at least
+                           ( 6 + 6*N + 5*N*lg N ).
+            If COMPZ = 'I' and N > 1 then LIWORK must be at least
+                           ( 3 + 5*N ).
+
+            If LIWORK = -1, then a workspace query is assumed; the
+            routine only calculates the optimal size of the IWORK array,
+            returns this value as the first entry of the IWORK array, and
+            no error message related to LIWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+            > 0:  The algorithm failed to compute an eigenvalue while
+                  working on the submatrix lying in rows and columns
+                  INFO/(N+1) through mod(INFO,N+1).
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+    Modified by Francoise Tisseur, University of Tennessee.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so the 1-based Fortran indexing below is valid. */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1 || *liwork == -1;
+
+    if (lsame_(compz, "N")) {
+	icompz = 0;
+    } else if (lsame_(compz, "V")) {
+	icompz = 1;
+    } else if (lsame_(compz, "I")) {
+	icompz = 2;
+    } else {
+	icompz = -1;
+    }
+    if (*n <= 1 || icompz <= 0) {
+	liwmin = 1;
+	lwmin = 1;
+    } else {
+	/* lg N, rounded up so that 2**lgn >= N (two correction steps
+	   guard against floating-point log inaccuracy). */
+	lgn = (integer) (log((doublereal) (*n)) / log(2.));
+	if (pow_ii(&c__2, &lgn) < *n) {
+	    ++lgn;
+	}
+	if (pow_ii(&c__2, &lgn) < *n) {
+	    ++lgn;
+	}
+	if (icompz == 1) {
+/*           Computing 2nd power */
+	    i__1 = *n;
+	    lwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3;
+	    liwmin = *n * 6 + 6 + *n * 5 * lgn;
+	} else if (icompz == 2) {
+/*           Computing 2nd power */
+	    i__1 = *n;
+	    lwmin = ((*n) << (2)) + 1 + i__1 * i__1;
+	    liwmin = *n * 5 + 3;
+	}
+    }
+    if (icompz < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
+	*info = -6;
+    } else if ((*lwork < lwmin && ! lquery)) {
+	*info = -8;
+    } else if ((*liwork < liwmin && ! lquery)) {
+	*info = -10;
+    }
+
+    if (*info == 0) {
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEDC", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*n == 1) {
+	if (icompz != 0) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+    /* Minimum divide size, from the tuning oracle ILAENV (ISPEC = 9). */
+    smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0, (
+	    ftnlen)6, (ftnlen)1);
+
+/*
+       If the following conditional clause is removed, then the routine
+       will use the Divide and Conquer routine to compute only the
+       eigenvalues, which requires (3N + 3N**2) real workspace and
+       (2 + 5N + 2N lg(N)) integer workspace.
+       Since on many architectures DSTERF is much faster than any other
+       algorithm for finding eigenvalues only, it is used here
+       as the default.
+
+       If COMPZ = 'N', use DSTERF to compute the eigenvalues.
+*/
+
+    if (icompz == 0) {
+	dsterf_(n, &d__[1], &e[1], info);
+	return 0;
+    }
+
+/*
+       If N is smaller than the minimum divide size (SMLSIZ+1), then
+       solve the problem with another solver.
+*/
+
+    if (*n <= smlsiz) {
+	if (icompz == 0) {
+	    dsterf_(n, &d__[1], &e[1], info);
+	    return 0;
+	} else if (icompz == 2) {
+	    dsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
+		    info);
+	    return 0;
+	} else {
+	    dsteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
+		    info);
+	    return 0;
+	}
+    }
+
+/*
+       If COMPZ = 'V', the Z matrix must be stored elsewhere for later
+       use.
+*/
+
+    if (icompz == 1) {
+	storez = *n * *n + 1;
+    } else {
+	storez = 1;
+    }
+
+    if (icompz == 2) {
+	dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz);
+    }
+
+/*     Scale. */
+
+    orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (orgnrm == 0.) {
+	return 0;
+    }
+
+    /* EPSILON is assumed to be a machine-precision macro defined earlier
+       in this translated file -- TODO confirm. */
+    eps = EPSILON;
+
+    start = 1;
+
+/*     while ( START <= N ) */
+
+L10:
+    if (start <= *n) {
+
+/*
+       Let END be the position of the next subdiagonal entry such that
+       E( END ) <= TINY or END = N if no such subdiagonal exists.  The
+       matrix identified by the elements between START and END
+       constitutes an independent sub-problem.
+*/
+
+	end = start;
+L20:
+	if (end < *n) {
+	    tiny = eps * sqrt((d__1 = d__[end], abs(d__1))) * sqrt((d__2 =
+		    d__[end + 1], abs(d__2)));
+	    if ((d__1 = e[end], abs(d__1)) > tiny) {
+		++end;
+		goto L20;
+	    }
+	}
+
+/*        (Sub) Problem determined.  Compute its size and solve it. */
+
+	m = end - start + 1;
+	if (m == 1) {
+	    start = end + 1;
+	    goto L10;
+	}
+	if (m > smlsiz) {
+/*           NOTE(review): seeding *INFO with SMLSIZ here looks odd but is
+             preserved from the original translation -- confirm against the
+             reference Fortran DSTEDC before changing. */
+	    *info = smlsiz;
+
+/*           Scale. */
+
+	    orgnrm = dlanst_("M", &m, &d__[start], &e[start]);
+	    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &m, &c__1, &d__[start]
+		    , &m, info);
+	    i__1 = m - 1;
+	    i__2 = m - 1;
+	    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &i__1, &c__1, &e[
+		    start], &i__2, info);
+
+	    if (icompz == 1) {
+		dtrtrw = 1;
+	    } else {
+		dtrtrw = start;
+	    }
+	    dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[dtrtrw +
+		    start * z_dim1], ldz, &work[1], n, &work[storez], &iwork[
+		    1], info);
+	    if (*info != 0) {
+		/* Encode (row, column) of the failing submatrix into a
+		   single integer, as documented for INFO > 0 above. */
+		*info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m
+			+ 1) + start - 1;
+		return 0;
+	    }
+
+/*           Scale back. */
+
+	    dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &m, &c__1, &d__[start]
+		    , &m, info);
+
+	} else {
+	    if (icompz == 1) {
+
+/*
+                Since QR won't update a Z matrix which is larger than the
+                length of D, we must solve the sub-problem in a workspace and
+                then multiply back into Z.
+*/
+
+		dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &work[
+			m * m + 1], info);
+		dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[
+			storez], n);
+/*              NOTE(review): the copy above stored the Z panel into
+                WORK(STOREZ) with leading dimension N, but this DGEMM reads
+                it back with LDA = LDZ; the two agree only when LDZ == N --
+                verify against the reference DSTEDC. */
+		dgemm_("N", "N", n, &m, &m, &c_b15, &work[storez], ldz, &work[
+			1], &m, &c_b29, &z__[start * z_dim1 + 1], ldz);
+	    } else if (icompz == 2) {
+		dsteqr_("I", &m, &d__[start], &e[start], &z__[start + start *
+			z_dim1], ldz, &work[1], info);
+	    } else {
+		dsterf_(&m, &d__[start], &e[start], info);
+	    }
+	    if (*info != 0) {
+		*info = start * (*n + 1) + end;
+		return 0;
+	    }
+	}
+
+	start = end + 1;
+	goto L10;
+    }
+
+/*
+       endwhile
+
+       If the problem split any number of times, then the eigenvalues
+       will not be properly ordered.  Here we permute the eigenvalues
+       (and the associated eigenvectors) into ascending order.
+*/
+
+    if (m != *n) {
+	if (icompz == 0) {
+
+/*           Use Quick Sort */
+
+	    dlasrt_("I", n, &d__[1], info);
+
+	} else {
+
+/*           Use Selection Sort to minimize swaps of eigenvectors */
+
+	    i__1 = *n;
+	    for (ii = 2; ii <= i__1; ++ii) {
+		i__ = ii - 1;
+		k = i__;
+		p = d__[i__];
+		i__2 = *n;
+		for (j = ii; j <= i__2; ++j) {
+		    if (d__[j] < p) {
+			k = j;
+			p = d__[j];
+		    }
+/* L30: */
+		}
+		if (k != i__) {
+		    d__[k] = d__[i__];
+		    d__[i__] = p;
+		    dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1
+			    + 1], &c__1);
+		}
+/* L40: */
+	    }
+	}
+    }
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/*     End of DSTEDC */
+
+} /* dstedc_ */
+
+/* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__,
+	doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
+	integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    static doublereal b, c__, f, g;
+    static integer i__, j, k, l, m;
+    static doublereal p, r__, s;
+    static integer l1, ii, mm, lm1, mm1, nm1;
+    static doublereal rt1, rt2, eps;
+    static integer lsv;
+    static doublereal tst, eps2;
+    static integer lend, jtot;
+    extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+	    *, doublereal *, doublereal *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *);
+    static doublereal anorm;
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlaev2_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *);
+    static integer lendm1, lendp1;
+
+    static integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dlaset_(char *, integer *, integer
+	    *, doublereal *, doublereal *, doublereal *, integer *);
+    static doublereal safmin;
+    extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *);
+    static doublereal safmax;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+	    integer *);
+    static integer lendsv;
+    static doublereal ssfmin;
+    static integer nmaxit, icompz;
+    static doublereal ssfmax;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    DSTEQR computes all eigenvalues and, optionally, eigenvectors of a
+    symmetric tridiagonal matrix using the implicit QL or QR method.
+    The eigenvectors of a full or band symmetric matrix can also be found
+    if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to
+    tridiagonal form.
+
+    Arguments
+    =========
+
+    COMPZ   (input) CHARACTER*1
+            = 'N':  Compute eigenvalues only.
+            = 'V':  Compute eigenvalues and eigenvectors of the original
+                    symmetric matrix.  On entry, Z must contain the
+                    orthogonal matrix used to reduce the original matrix
+                    to tridiagonal form.
+            = 'I':  Compute eigenvalues and eigenvectors of the
+                    tridiagonal matrix.  Z is initialized to the identity
+                    matrix.
+
+    N       (input) INTEGER
+            The order of the matrix.  N >= 0.
+
+    D       (input/output) DOUBLE PRECISION array, dimension (N)
+            On entry, the diagonal elements of the tridiagonal matrix.
+            On exit, if INFO = 0, the eigenvalues in ascending order.
+
+    E       (input/output) DOUBLE PRECISION array, dimension (N-1)
+            On entry, the (n-1) subdiagonal elements of the tridiagonal
+            matrix.
+            On exit, E has been destroyed.
+
+    Z       (input/output) DOUBLE PRECISION array, dimension (LDZ, N)
+            On entry, if  COMPZ = 'V', then Z contains the orthogonal
+            matrix used in the reduction to tridiagonal form.
+            On exit, if INFO = 0, then if  COMPZ = 'V', Z contains the
+            orthonormal eigenvectors of the original symmetric matrix,
+            and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+            of the symmetric tridiagonal matrix.
+            If COMPZ = 'N', then Z is not referenced.
+
+    LDZ     (input) INTEGER
+            The leading dimension of the array Z.  LDZ >= 1, and if
+            eigenvectors are desired, then  LDZ >= max(1,N).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2))
+            If COMPZ = 'N', then WORK is not referenced.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  the algorithm has failed to find all the eigenvalues in
+                  a total of 30*N iterations; if INFO = i, then i
+                  elements of E have not converged to zero; on exit, D
+                  and E contain the elements of a symmetric tridiagonal
+                  matrix which is orthogonally similar to the original
+                  matrix.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift base pointers so the 1-based Fortran indexing below is valid. */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (lsame_(compz, "N")) {
+	icompz = 0;
+    } else if (lsame_(compz, "V")) {
+	icompz = 1;
+    } else if (lsame_(compz, "I")) {
+	icompz = 2;
+    } else {
+	icompz = -1;
+    }
+    if (icompz < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEQR", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (icompz == 2) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/*     Determine the unit roundoff and over/underflow thresholds. */
+
+    /* EPSILON and SAFEMINIMUM are assumed to be macros defined earlier in
+       this translated file -- TODO confirm. */
+    eps = EPSILON;
+/*     Computing 2nd power */
+    d__1 = eps;
+    eps2 = d__1 * d__1;
+    safmin = SAFEMINIMUM;
+    safmax = 1. / safmin;
+    ssfmax = sqrt(safmax) / 3.;
+    ssfmin = sqrt(safmin) / eps2;
+
+/*
+       Compute the eigenvalues and eigenvectors of the tridiagonal
+       matrix.
+*/
+
+    if (icompz == 2) {
+	dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz);
+    }
+
+    /* Iteration budget: at most 30 iterations per eigenvalue, per the
+       INFO documentation above. */
+    nmaxit = *n * 30;
+    jtot = 0;
+
+/*
+       Determine where the matrix splits and choose QL or QR iteration
+       for each block, according to whether top or bottom diagonal
+       element is smaller.
+*/
+
+    l1 = 1;
+    nm1 = *n - 1;
+
+L10:
+    if (l1 > *n) {
+	goto L160;
+    }
+    if (l1 > 1) {
+	e[l1 - 1] = 0.;
+    }
+    if (l1 <= nm1) {
+	i__1 = nm1;
+	for (m = l1; m <= i__1; ++m) {
+	    tst = (d__1 = e[m], abs(d__1));
+	    if (tst == 0.) {
+		goto L30;
+	    }
+	    if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m
+		    + 1], abs(d__2))) * eps) {
+		e[m] = 0.;
+		goto L30;
+	    }
+/* L20: */
+	}
+    }
+    m = *n;
+
+L30:
+    l = l1;
+    lsv = l;
+    lend = m;
+    lendsv = lend;
+    l1 = m + 1;
+    if (lend == l) {
+	goto L10;
+    }
+
+/*     Scale submatrix in rows and columns L to LEND */
+
+    i__1 = lend - l + 1;
+    anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+    iscale = 0;
+    if (anorm == 0.) {
+	goto L10;
+    }
+    if (anorm > ssfmax) {
+	iscale = 1;
+	i__1 = lend - l + 1;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+		info);
+	i__1 = lend - l;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+		info);
+    } else if (anorm < ssfmin) {
+	iscale = 2;
+	i__1 = lend - l + 1;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+		info);
+	i__1 = lend - l;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+		info);
+    }
+
+/*     Choose between QL and QR iteration */
+
+    if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+	lend = lsv;
+	l = lendsv;
+    }
+
+    if (lend > l) {
+
+/*
+          QL Iteration
+
+          Look for small subdiagonal element.
+*/
+
+L40:
+	if (l != lend) {
+	    lendm1 = lend - 1;
+	    i__1 = lendm1;
+	    for (m = l; m <= i__1; ++m) {
+/*              Computing 2nd power */
+		d__2 = (d__1 = e[m], abs(d__1));
+		tst = d__2 * d__2;
+		if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+			+ 1], abs(d__2)) + safmin) {
+		    goto L60;
+		}
+/* L50: */
+	    }
+	}
+
+	m = lend;
+
+L60:
+	if (m < lend) {
+	    e[m] = 0.;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L80;
+	}
+
+/*
+          If remaining matrix is 2-by-2, use DLAE2 or SLAEV2
+          to compute its eigensystem.
+*/
+
+	if (m == l + 1) {
+	    if (icompz > 0) {
+		dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+		work[l] = c__;
+		work[*n - 1 + l] = s;
+		dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+			z__[l * z_dim1 + 1], ldz);
+	    } else {
+		dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+	    }
+	    d__[l] = rt1;
+	    d__[l + 1] = rt2;
+	    e[l] = 0.;
+	    l += 2;
+	    if (l <= lend) {
+		goto L40;
+	    }
+	    goto L140;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L140;
+	}
+	++jtot;
+
+/*        Form shift. */
+
+	g = (d__[l + 1] - p) / (e[l] * 2.);
+	/* dlapy2_ is declared elsewhere in this file. */
+	r__ = dlapy2_(&g, &c_b15);
+	g = d__[m] - p + e[l] / (g + d_sign(&r__, &g));
+
+	s = 1.;
+	c__ = 1.;
+	p = 0.;
+
+/*        Inner loop */
+
+	mm1 = m - 1;
+	i__1 = l;
+	for (i__ = mm1; i__ >= i__1; --i__) {
+	    f = s * e[i__];
+	    b = c__ * e[i__];
+	    dlartg_(&g, &f, &c__, &s, &r__);
+	    if (i__ != m - 1) {
+		e[i__ + 1] = r__;
+	    }
+	    g = d__[i__ + 1] - p;
+	    r__ = (d__[i__] - g) * s + c__ * 2. * b;
+	    p = s * r__;
+	    d__[i__ + 1] = g + p;
+	    g = c__ * r__ - b;
+
+/*           If eigenvectors are desired, then save rotations. */
+
+	    if (icompz > 0) {
+		work[i__] = c__;
+		work[*n - 1 + i__] = -s;
+	    }
+
+/* L70: */
+	}
+
+/*        If eigenvectors are desired, then apply saved rotations. */
+
+	if (icompz > 0) {
+	    mm = m - l + 1;
+	    dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+		    * z_dim1 + 1], ldz);
+	}
+
+	d__[l] -= p;
+	e[l] = g;
+	goto L40;
+
+/*        Eigenvalue found. */
+
+L80:
+	d__[l] = p;
+
+	++l;
+	if (l <= lend) {
+	    goto L40;
+	}
+	goto L140;
+
+    } else {
+
+/*
+          QR Iteration
+
+          Look for small superdiagonal element.
+*/
+
+L90:
+	if (l != lend) {
+	    lendp1 = lend + 1;
+	    i__1 = lendp1;
+	    for (m = l; m >= i__1; --m) {
+/*              Computing 2nd power */
+		d__2 = (d__1 = e[m - 1], abs(d__1));
+		tst = d__2 * d__2;
+		if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+			- 1], abs(d__2)) + safmin) {
+		    goto L110;
+		}
+/* L100: */
+	    }
+	}
+
+	m = lend;
+
+L110:
+	if (m > lend) {
+	    e[m - 1] = 0.;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L130;
+	}
+
+/*
+          If remaining matrix is 2-by-2, use DLAE2 or SLAEV2
+          to compute its eigensystem.
+*/
+
+	if (m == l - 1) {
+	    if (icompz > 0) {
+		dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+			;
+		work[m] = c__;
+		work[*n - 1 + m] = s;
+		dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+			z__[(l - 1) * z_dim1 + 1], ldz);
+	    } else {
+		dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+	    }
+	    d__[l - 1] = rt1;
+	    d__[l] = rt2;
+	    e[l - 1] = 0.;
+	    l += -2;
+	    if (l >= lend) {
+		goto L90;
+	    }
+	    goto L140;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L140;
+	}
+	++jtot;
+
+/*        Form shift. */
+
+	g = (d__[l - 1] - p) / (e[l - 1] * 2.);
+	r__ = dlapy2_(&g, &c_b15);
+	g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g));
+
+	s = 1.;
+	c__ = 1.;
+	p = 0.;
+
+/*        Inner loop */
+
+	lm1 = l - 1;
+	i__1 = lm1;
+	for (i__ = m; i__ <= i__1; ++i__) {
+	    f = s * e[i__];
+	    b = c__ * e[i__];
+	    dlartg_(&g, &f, &c__, &s, &r__);
+	    if (i__ != m) {
+		e[i__ - 1] = r__;
+	    }
+	    g = d__[i__] - p;
+	    r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
+	    p = s * r__;
+	    d__[i__] = g + p;
+	    g = c__ * r__ - b;
+
+/*           If eigenvectors are desired, then save rotations. */
+
+	    if (icompz > 0) {
+		work[i__] = c__;
+		work[*n - 1 + i__] = s;
+	    }
+
+/* L120: */
+	}
+
+/*        If eigenvectors are desired, then apply saved rotations. */
+
+	if (icompz > 0) {
+	    mm = l - m + 1;
+	    dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+		    * z_dim1 + 1], ldz);
+	}
+
+	d__[l] -= p;
+	e[lm1] = g;
+	goto L90;
+
+/*        Eigenvalue found. */
+
+L130:
+	d__[l] = p;
+
+	--l;
+	if (l >= lend) {
+	    goto L90;
+	}
+	goto L140;
+
+    }
+
+/*     Undo scaling if necessary */
+
+L140:
+    if (iscale == 1) {
+	i__1 = lendsv - lsv + 1;
+	dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+		n, info);
+	i__1 = lendsv - lsv;
+	dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+		info);
+    } else if (iscale == 2) {
+	i__1 = lendsv - lsv + 1;
+	dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+		n, info);
+	i__1 = lendsv - lsv;
+	dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+		info);
+    }
+
+/*
+       Check for no convergence to an eigenvalue after a total
+       of N*MAXIT iterations.
+*/
+
+    if (jtot < nmaxit) {
+	goto L10;
+    }
+    /* Budget exhausted: count the off-diagonal entries that failed to
+       converge to zero and report that count through INFO. */
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (e[i__] != 0.) {
+	    ++(*info);
+	}
+/* L150: */
+    }
+    goto L190;
+
+/*     Order eigenvalues and eigenvectors. */
+
+L160:
+    if (icompz == 0) {
+
+/*        Use Quick Sort */
+
+	dlasrt_("I", n, &d__[1], info);
+
+    } else {
+
+/*        Use Selection Sort to minimize swaps of eigenvectors */
+
+	i__1 = *n;
+	for (ii = 2; ii <= i__1; ++ii) {
+	    i__ = ii - 1;
+	    k = i__;
+	    p = d__[i__];
+	    i__2 = *n;
+	    for (j = ii; j <= i__2; ++j) {
+		if (d__[j] < p) {
+		    k = j;
+		    p = d__[j];
+		}
+/* L170: */
+	    }
+	    if (k != i__) {
+		d__[k] = d__[i__];
+		d__[i__] = p;
+		dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+			 &c__1);
+	    }
+/* L180: */
+	}
+    }
+
+L190:
+    return 0;
+
+/*     End of DSTEQR */
+
+} /* dsteqr_ */
+
+/* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e,
+	integer *info)
+{
+/*
+   NOTE(review): machine-generated C (f2c translation of Fortran LAPACK
+   DSTERF).  The `static` storage class on the locals below is an f2c
+   artifact; it makes this routine non-reentrant and not thread-safe.
+   Do not hand-edit the numerical logic -- it must track reference LAPACK.
+*/
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    static doublereal c__;
+    static integer i__, l, m;
+    static doublereal p, r__, s;
+    static integer l1;
+    static doublereal bb, rt1, rt2, eps, rte;
+    static integer lsv;
+    static doublereal eps2, oldc;
+    static integer lend, jtot;
+    extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+	    *, doublereal *, doublereal *);
+    static doublereal gamma, alpha, sigma, anorm;
+
+    static integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *);
+    static doublereal oldgam, safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static doublereal safmax;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+	    integer *);
+    static integer lendsv;
+    static doublereal ssfmin;
+    static integer nmaxit;
+    static doublereal ssfmax;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DSTERF computes all eigenvalues of a symmetric tridiagonal matrix
+    using the Pal-Walker-Kahan variant of the QL or QR algorithm.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the matrix.  N >= 0.
+
+    D       (input/output) DOUBLE PRECISION array, dimension (N)
+            On entry, the n diagonal elements of the tridiagonal matrix.
+            On exit, if INFO = 0, the eigenvalues in ascending order.
+
+    E       (input/output) DOUBLE PRECISION array, dimension (N-1)
+            On entry, the (n-1) subdiagonal elements of the tridiagonal
+            matrix.
+            On exit, E has been destroyed.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  the algorithm failed to find all of the eigenvalues in
+                  a total of 30*N iterations; if INFO = i, then i
+                  elements of E have not converged to zero.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Shift the C pointers so the code below can use Fortran-style
+       1-based indexing (d__[1..n], e[1..n-1]). */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+
+/* Quick return if possible */
+
+    if (*n < 0) {
+	*info = -1;
+	i__1 = -(*info);
+	xerbla_("DSTERF", &i__1);
+	return 0;
+    }
+    if (*n <= 1) {
+	return 0;
+    }
+
+/* Determine the unit roundoff for this environment. */
+
+    /* EPSILON and SAFEMINIMUM are file-level macros (defined elsewhere in
+       this file) standing in for DLAMCH('E') and DLAMCH('S'). */
+    eps = EPSILON;
+/* Computing 2nd power */
+    d__1 = eps;
+    eps2 = d__1 * d__1;
+    safmin = SAFEMINIMUM;
+    safmax = 1. / safmin;
+    /* ssfmax/ssfmin bound the norm of each submatrix: blocks outside
+       [ssfmin, ssfmax] are rescaled below to avoid overflow/underflow. */
+    ssfmax = sqrt(safmax) / 3.;
+    ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues of the tridiagonal matrix. */
+
+    /* Give up after 30 iterations per eigenvalue (see INFO > 0 above). */
+    nmaxit = *n * 30;
+    sigma = 0.;
+    jtot = 0;
+
+/*
+       Determine where the matrix splits and choose QL or QR iteration
+       for each block, according to whether top or bottom diagonal
+       element is smaller.
+*/
+
+    l1 = 1;
+
+L10:
+    if (l1 > *n) {
+	goto L170;
+    }
+    if (l1 > 1) {
+	e[l1 - 1] = 0.;
+    }
+    /* Find the next negligible subdiagonal entry; it delimits the current
+       unreduced block [l1..m]. */
+    i__1 = *n - 1;
+    for (m = l1; m <= i__1; ++m) {
+	if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) *
+		sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) {
+	    e[m] = 0.;
+	    goto L30;
+	}
+/* L20: */
+    }
+    m = *n;
+
+L30:
+    l = l1;
+    lsv = l;
+    lend = m;
+    lendsv = lend;
+    l1 = m + 1;
+    if (lend == l) {
+	goto L10;
+    }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+    i__1 = lend - l + 1;
+    anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+    iscale = 0;
+    if (anorm > ssfmax) {
+	iscale = 1;
+	i__1 = lend - l + 1;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+		info);
+	i__1 = lend - l;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+		info);
+    } else if (anorm < ssfmin) {
+	iscale = 2;
+	i__1 = lend - l + 1;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+		info);
+	i__1 = lend - l;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+		info);
+    }
+
+    /* The Pal-Walker-Kahan variant iterates on the SQUARES of the
+       subdiagonal elements; from here on e(i) holds e(i)**2. */
+    i__1 = lend - 1;
+    for (i__ = l; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+	d__1 = e[i__];
+	e[i__] = d__1 * d__1;
+/* L40: */
+    }
+
+/* Choose between QL and QR iteration */
+
+    if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+	lend = lsv;
+	l = lendsv;
+    }
+
+    if (lend >= l) {
+
+/*
+          QL Iteration
+
+          Look for small subdiagonal element.
+*/
+
+L50:
+	if (l != lend) {
+	    i__1 = lend - 1;
+	    for (m = l; m <= i__1; ++m) {
+		if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m
+			+ 1], abs(d__1))) {
+		    goto L70;
+		}
+/* L60: */
+	    }
+	}
+	m = lend;
+
+L70:
+	if (m < lend) {
+	    e[m] = 0.;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L90;
+	}
+
+/*
+          If remaining matrix is 2 by 2, use DLAE2 to compute its
+          eigenvalues.
+*/
+
+	if (m == l + 1) {
+	    /* e[l] stores a square (see loop L40), so recover |e| first. */
+	    rte = sqrt(e[l]);
+	    dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
+	    d__[l] = rt1;
+	    d__[l + 1] = rt2;
+	    e[l] = 0.;
+	    l += 2;
+	    if (l <= lend) {
+		goto L50;
+	    }
+	    goto L150;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L150;
+	}
+	++jtot;
+
+/* Form shift. */
+
+	/* Wilkinson-style shift; c_b15 is a file-level f2c constant
+	   (presumably 1.0 -- defined elsewhere in this file). */
+	rte = sqrt(e[l]);
+	sigma = (d__[l + 1] - p) / (rte * 2.);
+	r__ = dlapy2_(&sigma, &c_b15);
+	sigma = p - rte / (sigma + d_sign(&r__, &sigma));
+
+	c__ = 1.;
+	s = 0.;
+	gamma = d__[m] - sigma;
+	p = gamma * gamma;
+
+/* Inner loop */
+
+	i__1 = l;
+	for (i__ = m - 1; i__ >= i__1; --i__) {
+	    bb = e[i__];
+	    r__ = p + bb;
+	    if (i__ != m - 1) {
+		e[i__ + 1] = s * r__;
+	    }
+	    oldc = c__;
+	    c__ = p / r__;
+	    s = bb / r__;
+	    oldgam = gamma;
+	    alpha = d__[i__];
+	    gamma = c__ * (alpha - sigma) - s * oldgam;
+	    d__[i__ + 1] = oldgam + (alpha - gamma);
+	    if (c__ != 0.) {
+		p = gamma * gamma / c__;
+	    } else {
+		/* c == 0 means the previous rotation was through 90
+		   degrees; fall back to the alternate recurrence. */
+		p = oldc * bb;
+	    }
+/* L80: */
+	}
+
+	e[l] = s * p;
+	d__[l] = sigma + gamma;
+	goto L50;
+
+/* Eigenvalue found. */
+
+L90:
+	d__[l] = p;
+
+	++l;
+	if (l <= lend) {
+	    goto L50;
+	}
+	goto L150;
+
+    } else {
+
+/*
+          QR Iteration
+
+          Look for small superdiagonal element.
+*/
+
+L100:
+	i__1 = lend + 1;
+	for (m = l; m >= i__1; --m) {
+	    if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m
+		    - 1], abs(d__1))) {
+		goto L120;
+	    }
+/* L110: */
+	}
+	m = lend;
+
+L120:
+	if (m > lend) {
+	    e[m - 1] = 0.;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L140;
+	}
+
+/*
+          If remaining matrix is 2 by 2, use DLAE2 to compute its
+          eigenvalues.
+*/
+
+	if (m == l - 1) {
+	    rte = sqrt(e[l - 1]);
+	    dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
+	    d__[l] = rt1;
+	    d__[l - 1] = rt2;
+	    e[l - 1] = 0.;
+	    l += -2;
+	    if (l >= lend) {
+		goto L100;
+	    }
+	    goto L150;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L150;
+	}
+	++jtot;
+
+/* Form shift. */
+
+	rte = sqrt(e[l - 1]);
+	sigma = (d__[l - 1] - p) / (rte * 2.);
+	r__ = dlapy2_(&sigma, &c_b15);
+	sigma = p - rte / (sigma + d_sign(&r__, &sigma));
+
+	c__ = 1.;
+	s = 0.;
+	gamma = d__[m] - sigma;
+	p = gamma * gamma;
+
+/* Inner loop */
+
+	/* Mirror image of the QL inner loop above, sweeping upward. */
+	i__1 = l - 1;
+	for (i__ = m; i__ <= i__1; ++i__) {
+	    bb = e[i__];
+	    r__ = p + bb;
+	    if (i__ != m) {
+		e[i__ - 1] = s * r__;
+	    }
+	    oldc = c__;
+	    c__ = p / r__;
+	    s = bb / r__;
+	    oldgam = gamma;
+	    alpha = d__[i__ + 1];
+	    gamma = c__ * (alpha - sigma) - s * oldgam;
+	    d__[i__] = oldgam + (alpha - gamma);
+	    if (c__ != 0.) {
+		p = gamma * gamma / c__;
+	    } else {
+		p = oldc * bb;
+	    }
+/* L130: */
+	}
+
+	e[l - 1] = s * p;
+	d__[l] = sigma + gamma;
+	goto L100;
+
+/* Eigenvalue found. */
+
+L140:
+	d__[l] = p;
+
+	--l;
+	if (l >= lend) {
+	    goto L100;
+	}
+	goto L150;
+
+    }
+
+/* Undo scaling if necessary */
+
+L150:
+    if (iscale == 1) {
+	i__1 = lendsv - lsv + 1;
+	dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+		n, info);
+    }
+    if (iscale == 2) {
+	i__1 = lendsv - lsv + 1;
+	dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+		n, info);
+    }
+
+/*
+       Check for no convergence to an eigenvalue after a total
+       of N*MAXIT iterations.
+*/
+
+    if (jtot < nmaxit) {
+	goto L10;
+    }
+    /* Non-convergence: count the subdiagonals that failed to vanish. */
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (e[i__] != 0.) {
+	    ++(*info);
+	}
+/* L160: */
+    }
+    goto L180;
+
+/* Sort eigenvalues in increasing order. */
+
+L170:
+    dlasrt_("I", n, &d__[1], info);
+
+L180:
+    return 0;
+
+/* End of DSTERF */
+
+} /* dsterf_ */
+
+/* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal *
+	a, integer *lda, doublereal *w, doublereal *work, integer *lwork,
+	integer *iwork, integer *liwork, integer *info)
+{
+/*
+   NOTE(review): machine-generated C (f2c translation of LAPACK DSYEVD).
+   `static` locals are an f2c artifact; the routine is non-reentrant.
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal eps;
+    static integer inde;
+    static doublereal anrm, rmin, rmax;
+    static integer lopt;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    static doublereal sigma;
+    extern logical lsame_(char *, char *);
+    static integer iinfo, lwmin, liopt;
+    static logical lower, wantz;
+    static integer indwk2, llwrk2;
+
+    static integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dstedc_(char *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	     integer *, integer *, integer *, integer *), dlacpy_(
+	    char *, integer *, integer *, doublereal *, integer *, doublereal
+	    *, integer *);
+    static doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static doublereal bignum;
+    static integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *);
+    extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+	    integer *, doublereal *);
+    static integer indwrk, liwmin;
+    extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	     integer *);
+    static integer llwork;
+    static doublereal smlnum;
+    static logical lquery;
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    DSYEVD computes all eigenvalues and, optionally, eigenvectors of a
+    real symmetric matrix A. If eigenvectors are desired, it uses a
+    divide and conquer algorithm.
+
+    The divide and conquer algorithm makes very mild assumptions about
+    floating point arithmetic. It will work on machines with a guard
+    digit in add/subtract, or on those binary machines without guard
+    digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+    Cray-2. It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.
+
+    Because of large use of BLAS of level 3, DSYEVD needs N**2 more
+    workspace than DSYEVX.
+
+    Arguments
+    =========
+
+    JOBZ    (input) CHARACTER*1
+            = 'N':  Compute eigenvalues only;
+            = 'V':  Compute eigenvalues and eigenvectors.
+
+    UPLO    (input) CHARACTER*1
+            = 'U':  Upper triangle of A is stored;
+            = 'L':  Lower triangle of A is stored.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA, N)
+            On entry, the symmetric matrix A.  If UPLO = 'U', the
+            leading N-by-N upper triangular part of A contains the
+            upper triangular part of the matrix A.  If UPLO = 'L',
+            the leading N-by-N lower triangular part of A contains
+            the lower triangular part of the matrix A.
+            On exit, if JOBZ = 'V', then if INFO = 0, A contains the
+            orthonormal eigenvectors of the matrix A.
+            If JOBZ = 'N', then on exit the lower triangle (if UPLO='L')
+            or the upper triangle (if UPLO='U') of A, including the
+            diagonal, is destroyed.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    W       (output) DOUBLE PRECISION array, dimension (N)
+            If INFO = 0, the eigenvalues in ascending order.
+
+    WORK    (workspace/output) DOUBLE PRECISION array,
+                                           dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.
+            If N <= 1,               LWORK must be at least 1.
+            If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1.
+            If JOBZ = 'V' and N > 1, LWORK must be at least
+                                                  1 + 6*N + 2*N**2.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    IWORK   (workspace/output) INTEGER array, dimension (LIWORK)
+            On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+    LIWORK  (input) INTEGER
+            The dimension of the array IWORK.
+            If N <= 1,                LIWORK must be at least 1.
+            If JOBZ  = 'N' and N > 1, LIWORK must be at least 1.
+            If JOBZ  = 'V' and N > 1, LIWORK must be at least 3 + 5*N.
+
+            If LIWORK = -1, then a workspace query is assumed; the
+            routine only calculates the optimal size of the IWORK array,
+            returns this value as the first entry of the IWORK array, and
+            no error message related to LIWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, the algorithm failed to converge; i
+                  off-diagonal elements of an intermediate tridiagonal
+                  form did not converge to zero.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+    Modified by Francoise Tisseur, University of Tennessee.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Offset the pointers so Fortran-style 1-based, column-major
+       indexing (a[i + j*a_dim1], w[1], ...) works below. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --w;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (*n <= 1) {
+	liwmin = 1;
+	lwmin = 1;
+	lopt = lwmin;
+	liopt = liwmin;
+    } else {
+	if (wantz) {
+	    liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+	    i__1 = *n;
+	    /* lwmin = 1 + 6*N + 2*N**2 (the << 1 is the f2c rendering
+	       of multiplication by 2); matches the LWORK doc above. */
+	    lwmin = *n * 6 + 1 + ((i__1 * i__1) << (1));
+	} else {
+	    /* Eigenvalues only: lwmin = 2*N + 1. */
+	    liwmin = 1;
+	    lwmin = ((*n) << (1)) + 1;
+	}
+	lopt = lwmin;
+	liopt = liwmin;
+    }
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if ((*lwork < lwmin && ! lquery)) {
+	*info = -8;
+    } else if ((*liwork < liwmin && ! lquery)) {
+	*info = -10;
+    }
+
+    if (*info == 0) {
+	work[1] = (doublereal) lopt;
+	iwork[1] = liopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	/* Workspace query: optimal sizes already stored in work[1]/iwork[1]. */
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	w[1] = a[a_dim1 + 1];
+	if (wantz) {
+	    a[a_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    /* SAFEMINIMUM / PRECISION are file-level macros replacing DLAMCH calls. */
+    safmin = SAFEMINIMUM;
+    eps = PRECISION;
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    iscale = 0;
+    if ((anrm > 0. && anrm < rmin)) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	dlascl_(uplo, &c__0, &c__0, &c_b15, &sigma, n, n, &a[a_offset], lda,
+		info);
+    }
+
+/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+    /* WORK layout (1-based): [inde .. inde+n-1]   tridiagonal E,
+       [indtau .. indtau+n-1] Householder scalars TAU,
+       [indwrk ..]            n-by-n eigenvector matrix for DSTEDC
+                              (also DSYTRD scratch),
+       [indwk2 ..]            remaining scratch of length llwrk2. */
+    inde = 1;
+    indtau = inde + *n;
+    indwrk = indtau + *n;
+    llwork = *lwork - indwrk + 1;
+    indwk2 = indwrk + *n * *n;
+    llwrk2 = *lwork - indwk2 + 1;
+
+    dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+	    work[indwrk], &llwork, &iinfo);
+    lopt = (integer) (((*n) << (1)) + work[indwrk]);
+
+/*
+       For eigenvalues only, call DSTERF.  For eigenvectors, first call
+       DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the
+       tridiagonal matrix, then call DORMTR to multiply it by the
+       Householder transformations stored in A.
+*/
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+		llwrk2, &iwork[1], liwork, info);
+	dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+		indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+	dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+/*
+   Computing MAX
+   Computing 2nd power
+*/
+	i__3 = *n;
+	i__1 = lopt, i__2 = *n * 6 + 1 + ((i__3 * i__3) << (1));
+	lopt = max(i__1,i__2);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	d__1 = 1. / sigma;
+	dscal_(n, &d__1, &w[1], &c__1);
+    }
+
+    work[1] = (doublereal) lopt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/* End of DSYEVD */
+
+} /* dsyevd_ */
+
+/* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer *
+	lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info)
+{
+/*
+   NOTE(review): machine-generated C (f2c translation of LAPACK DSYTD2,
+   the unblocked tridiagonal reduction).  `static` locals are an f2c
+   artifact; the routine is non-reentrant.
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    static doublereal taui;
+    extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    static doublereal alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *);
+    static logical upper;
+    extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *), dlarfg_(integer *, doublereal *,
+	     doublereal *, integer *, doublereal *), xerbla_(char *, integer *
+	    );
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal
+    form T by an orthogonal similarity transformation: Q' * A * Q = T.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            Specifies whether the upper or lower triangular part of the
+            symmetric matrix A is stored:
+            = 'U':  Upper triangular
+            = 'L':  Lower triangular
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+            On entry, the symmetric matrix A.  If UPLO = 'U', the leading
+            n-by-n upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading n-by-n lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+            On exit, if UPLO = 'U', the diagonal and first superdiagonal
+            of A are overwritten by the corresponding elements of the
+            tridiagonal matrix T, and the elements above the first
+            superdiagonal, with the array TAU, represent the orthogonal
+            matrix Q as a product of elementary reflectors; if UPLO
+            = 'L', the diagonal and first subdiagonal of A are over-
+            written by the corresponding elements of the tridiagonal
+            matrix T, and the elements below the first subdiagonal, with
+            the array TAU, represent the orthogonal matrix Q as a product
+            of elementary reflectors. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    D       (output) DOUBLE PRECISION array, dimension (N)
+            The diagonal elements of the tridiagonal matrix T:
+            D(i) = A(i,i).
+
+    E       (output) DOUBLE PRECISION array, dimension (N-1)
+            The off-diagonal elements of the tridiagonal matrix T:
+            E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+    TAU     (output) DOUBLE PRECISION array, dimension (N-1)
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    If UPLO = 'U', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(n-1) . . . H(2) H(1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+    A(1:i-1,i+1), and tau in TAU(i).
+
+    If UPLO = 'L', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(1) H(2) . . . H(n-1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a real scalar, and v is a real vector with
+    v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+    and tau in TAU(i).
+
+    The contents of A on exit are illustrated by the following examples
+    with n = 5:
+
+    if UPLO = 'U':                       if UPLO = 'L':
+
+      (  d   e   v2  v3  v4 )              (  d                  )
+      (      d   e   v3  v4 )              (  e   d              )
+      (          d   e   v4 )              (  v1  e   d          )
+      (              d   e  )              (  v1  v2  e   d      )
+      (                  d  )              (  v1  v2  v3  e   d  )
+
+    where d and e denote diagonal and off-diagonal elements of T, and vi
+    denotes an element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    /* Offset the pointers for Fortran-style 1-based, column-major
+       indexing (a[i + j*a_dim1], d__[1], ...). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYTD2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A */
+
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/*
+             Generate elementary reflector H(i) = I - tau * v * v'
+             to annihilate A(1:i-1,i+1)
+*/
+
+	    dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1
+		    + 1], &c__1, &taui);
+	    e[i__] = a[i__ + (i__ + 1) * a_dim1];
+
+	    if (taui != 0.) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+		/* Temporarily set v(i) = 1 so the BLAS calls below can
+		   use the column of A as the full reflector vector v;
+		   restored from e[i__] after the update. */
+		a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Compute  x := tau * A * v  storing x in TAU(1:i) */
+
+		/* c_b29 is a file-level f2c constant (presumably the
+		   beta = 0.0 argument for DSYMV -- defined elsewhere). */
+		dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+			a_dim1 + 1], &c__1, &c_b29, &tau[1], &c__1)
+			;
+
+/* Compute  w := x - 1/2 * tau * (x'*v) * v */
+
+		alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1)
+			* a_dim1 + 1], &c__1);
+		daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+			1], &c__1);
+
+/*
+                Apply the transformation as a rank-2 update:
+                   A := A - v * w' - w * v'
+*/
+
+		/* c_b151 is a file-level f2c constant (presumably
+		   alpha = -1.0 for the rank-2 update). */
+		dsyr2_(uplo, &i__, &c_b151, &a[(i__ + 1) * a_dim1 + 1], &c__1,
+			 &tau[1], &c__1, &a[a_offset], lda);
+
+		a[i__ + (i__ + 1) * a_dim1] = e[i__];
+	    }
+	    d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1];
+	    tau[i__] = taui;
+/* L10: */
+	}
+	d__[1] = a[a_dim1 + 1];
+    } else {
+
+/* Reduce the lower triangle of A */
+
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+             Generate elementary reflector H(i) = I - tau * v * v'
+             to annihilate A(i+2:n,i)
+*/
+
+	    i__2 = *n - i__;
+/* Computing MIN */
+	    /* min(i+2, n) guards the pointer when i == n-1 (reflector of
+	       length 1 has no tail below the subdiagonal). */
+	    i__3 = i__ + 2;
+	    dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+		     a_dim1], &c__1, &taui);
+	    e[i__] = a[i__ + 1 + i__ * a_dim1];
+
+	    if (taui != 0.) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+		a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute  x := tau * A * v  storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &tau[
+			i__], &c__1);
+
+/* Compute  w := x - 1/2 * tau * (x'*v) * v */
+
+		i__2 = *n - i__;
+		alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ +
+			1 + i__ * a_dim1], &c__1);
+		i__2 = *n - i__;
+		daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+			i__], &c__1);
+
+/*
+                Apply the transformation as a rank-2 update:
+                   A := A - v * w' - w * v'
+*/
+
+		i__2 = *n - i__;
+		dsyr2_(uplo, &i__2, &c_b151, &a[i__ + 1 + i__ * a_dim1], &
+			c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) *
+			a_dim1], lda);
+
+		a[i__ + 1 + i__ * a_dim1] = e[i__];
+	    }
+	    d__[i__] = a[i__ + i__ * a_dim1];
+	    tau[i__] = taui;
+/* L20: */
+	}
+	d__[*n] = a[*n + *n * a_dim1];
+    }
+
+    return 0;
+
+/* End of DSYTD2 */
+
+} /* dsytd2_ */
+
+/* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ static logical upper;
+ extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal
+ *, doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlatrd_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DSYTRD reduces a real symmetric matrix A to real symmetric
+ tridiagonal form T by an orthogonal similarity transformation:
+ Q**T * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the orthogonal
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the orthogonal matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) DOUBLE PRECISION array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if ((! upper && ! lsame_(uplo, "L"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if ((*lwork < 1 && ! lquery)) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6,
+ (ftnlen)1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if ((nb > 1 && nb < *n)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code by setting NX = N.
+
+ Computing MAX
+*/
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/*
+ Reduce the upper triangle of A.
+ Columns 1:kk are handled by the unblocked method.
+*/
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = i__ + nb - 1;
+ dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(1:i-1,1:i-1), using an
+ update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = i__ - 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ *
+ a_dim1 + 1], lda, &work[1], &ldwork, &c_b15, &a[a_offset],
+ lda);
+
+/*
+ Copy superdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j - 1 + j * a_dim1] = e[j - 1];
+ d__[j] = a[j + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = *n - i__ + 1;
+ dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(i+ib:n,i+ib:n), using
+ an update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = *n - i__ - nb + 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b15, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/*
+ Copy subdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + 1 + j * a_dim1] = e[j];
+ d__[j] = a[j + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DSYTRD */
+
+} /* dsytrd_ */
+
+/* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select,
+ integer *n, doublereal *t, integer *ldt, doublereal *vl, integer *
+ ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k;
+ static doublereal x[4] /* was [2][2] */;
+ static integer j1, j2, n2, ii, ki, ip, is;
+ static doublereal wi, wr, rec, ulp, beta, emax;
+ static logical pair;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static logical allv;
+ static integer ierr;
+ static doublereal unfl, ovfl, smin;
+ static logical over;
+ static doublereal vmax;
+ static integer jnxt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ static doublereal scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static doublereal remax;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical leftv, bothv;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ static doublereal vcrit;
+ static logical somev;
+ static doublereal xnorm;
+ extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *
+ , doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlabad_(doublereal *, doublereal *);
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal bignum;
+ static logical rightv;
+ static doublereal smlnum;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DTREVC computes some or all of the right and/or left eigenvectors of
+ a real upper quasi-triangular matrix T.
+
+ The right eigenvector x and the left eigenvector y of T corresponding
+ to an eigenvalue w are defined by:
+
+ T*x = w*x, y'*T = w*y'
+
+ where y' denotes the conjugate transpose of the vector y.
+
+ If all eigenvectors are requested, the routine may either return the
+ matrices X and/or Y of right or left eigenvectors of T, or the
+ products Q*X and/or Q*Y, where Q is an input orthogonal
+ matrix. If T was obtained from the real-Schur factorization of an
+ original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of
+ right or left eigenvectors of A.
+
+ T must be in Schur canonical form (as returned by DHSEQR), that is,
+ block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each
+ 2-by-2 diagonal block has its diagonal elements equal and its
+ off-diagonal elements of opposite sign. Corresponding to each 2-by-2
+ diagonal block is a complex conjugate pair of eigenvalues and
+ eigenvectors; only one eigenvector of the pair is computed, namely
+ the one corresponding to the eigenvalue with positive imaginary part.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'R': compute right eigenvectors only;
+ = 'L': compute left eigenvectors only;
+ = 'B': compute both right and left eigenvectors.
+
+ HOWMNY (input) CHARACTER*1
+ = 'A': compute all right and/or left eigenvectors;
+ = 'B': compute all right and/or left eigenvectors,
+ and backtransform them using the input matrices
+ supplied in VR and/or VL;
+ = 'S': compute selected right and/or left eigenvectors,
+ specified by the logical array SELECT.
+
+ SELECT (input/output) LOGICAL array, dimension (N)
+ If HOWMNY = 'S', SELECT specifies the eigenvectors to be
+ computed.
+ If HOWMNY = 'A' or 'B', SELECT is not referenced.
+ To select the real eigenvector corresponding to a real
+ eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select
+ the complex eigenvector corresponding to a complex conjugate
+ pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be
+ set to .TRUE.; then on exit SELECT(j) is .TRUE. and
+ SELECT(j+1) is .FALSE..
+
+ N (input) INTEGER
+ The order of the matrix T. N >= 0.
+
+ T (input) DOUBLE PRECISION array, dimension (LDT,N)
+ The upper quasi-triangular matrix T in Schur canonical form.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= max(1,N).
+
+ VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM)
+ On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must
+ contain an N-by-N matrix Q (usually the orthogonal matrix Q
+ of Schur vectors returned by DHSEQR).
+ On exit, if SIDE = 'L' or 'B', VL contains:
+ if HOWMNY = 'A', the matrix Y of left eigenvectors of T;
+ VL has the same quasi-lower triangular form
+ as T'. If T(i,i) is a real eigenvalue, then
+ the i-th column VL(i) of VL is its
+ corresponding eigenvector. If T(i:i+1,i:i+1)
+ is a 2-by-2 block whose eigenvalues are
+ complex-conjugate eigenvalues of T, then
+ VL(i)+sqrt(-1)*VL(i+1) is the complex
+ eigenvector corresponding to the eigenvalue
+ with positive real part.
+ if HOWMNY = 'B', the matrix Q*Y;
+ if HOWMNY = 'S', the left eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VL, in the same order as their
+ eigenvalues.
+ A complex eigenvector corresponding to a complex eigenvalue
+ is stored in two consecutive columns, the first holding the
+ real part, and the second the imaginary part.
+ If SIDE = 'R', VL is not referenced.
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= max(1,N) if
+ SIDE = 'L' or 'B'; LDVL >= 1 otherwise.
+
+ VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM)
+ On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must
+ contain an N-by-N matrix Q (usually the orthogonal matrix Q
+ of Schur vectors returned by DHSEQR).
+ On exit, if SIDE = 'R' or 'B', VR contains:
+ if HOWMNY = 'A', the matrix X of right eigenvectors of T;
+ VR has the same quasi-upper triangular form
+ as T. If T(i,i) is a real eigenvalue, then
+ the i-th column VR(i) of VR is its
+ corresponding eigenvector. If T(i:i+1,i:i+1)
+ is a 2-by-2 block whose eigenvalues are
+ complex-conjugate eigenvalues of T, then
+ VR(i)+sqrt(-1)*VR(i+1) is the complex
+ eigenvector corresponding to the eigenvalue
+ with positive real part.
+ if HOWMNY = 'B', the matrix Q*X;
+ if HOWMNY = 'S', the right eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VR, in the same order as their
+ eigenvalues.
+ A complex eigenvector corresponding to a complex eigenvalue
+ is stored in two consecutive columns, the first holding the
+ real part and the second the imaginary part.
+ If SIDE = 'L', VR is not referenced.
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= max(1,N) if
+ SIDE = 'R' or 'B'; LDVR >= 1 otherwise.
+
+ MM (input) INTEGER
+ The number of columns in the arrays VL and/or VR. MM >= M.
+
+ M (output) INTEGER
+ The number of columns in the arrays VL and/or VR actually
+ used to store the eigenvectors.
+ If HOWMNY = 'A' or 'B', M is set to N.
+ Each selected real eigenvector occupies one column and each
+ selected complex eigenvector occupies two columns.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (3*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The algorithm used in this program is basically backward (forward)
+ substitution, with scaling to make the code robust against
+ possible overflow.
+
+ Each eigenvector is normalized so that the element of largest
+ magnitude has magnitude 1; here the magnitude of a complex number
+ (x,y) is taken to be |x| + |y|.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1 * 1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1 * 1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1 * 1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+ *info = 0;
+ if ((! rightv && ! leftv)) {
+ *info = -1;
+ } else if (((! allv && ! over) && ! somev)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || (leftv && *ldvl < *n)) {
+ *info = -8;
+ } else if (*ldvr < 1 || (rightv && *ldvr < *n)) {
+ *info = -10;
+ } else {
+
+/*
+ Set M to the number of columns required to store the selected
+ eigenvectors, standardize the array SELECT if necessary, and
+ test MM.
+*/
+
+ if (somev) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (pair) {
+ pair = FALSE_;
+ select[j] = FALSE_;
+ } else {
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] == 0.) {
+ if (select[j]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[j] || select[j + 1]) {
+ select[j] = TRUE_;
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (*n / ulp);
+ bignum = (1. - ulp) / smlnum;
+
+/*
+ Compute 1-norm of each column of strictly upper triangular
+ part of T to control overflow in triangular solver.
+*/
+
+ work[1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ work[j] = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1));
+/* L20: */
+ }
+/* L30: */
+ }
+
+/*
+ Index IP is used to specify the real or complex eigenvalue:
+ IP = 0, real eigenvalue,
+ 1, first of conjugate complex pair: (wr,wi)
+ -1, second of conjugate complex pair: (wr,wi)
+*/
+
+ n2 = (*n) << (1);
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ ip = 0;
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (ip == 1) {
+ goto L130;
+ }
+ if (ki == 1) {
+ goto L40;
+ }
+ if (t[ki + (ki - 1) * t_dim1] == 0.) {
+ goto L40;
+ }
+ ip = -1;
+
+L40:
+ if (somev) {
+ if (ip == 0) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ } else {
+ if (! select[ki - 1]) {
+ goto L130;
+ }
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.;
+ if (ip != 0) {
+ wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) *
+ sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2)));
+ }
+/* Computing MAX */
+ d__1 = ulp * (abs(wr) + abs(wi));
+ smin = max(d__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real right eigenvector */
+
+ work[ki + *n] = 1.;
+
+/* Form right-hand side */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -t[k + ki * t_dim1];
+/* L50: */
+ }
+
+/*
+ Solve the upper quasi-triangular system:
+ (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK.
+*/
+
+ jnxt = ki - 1;
+ for (j = ki - 1; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L60;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j +
+ j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
+ n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/*
+ Scale X(1,1) to avoid overflow when updating
+ the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+
+/* Update right-hand side */
+
+ i__1 = j - 1;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b15, &t[j -
+ 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, &
+ work[j - 1 + *n], n, &wr, &c_b29, x, &c__2, &
+ scale, &xnorm, &ierr);
+
+/*
+ Scale X(1,1) and X(2,1) to avoid overflow when
+ updating the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+/* Computing MAX */
+ d__1 = work[j - 1], d__2 = work[j];
+ beta = max(d__1,d__2);
+ if (beta > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[1] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+
+/* Update right-hand side */
+
+ i__1 = j - 2;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[1];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ }
+L60:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vr[ii + is * vr_dim1], abs(d__1));
+ dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + is * vr_dim1] = 0.;
+/* L70: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, &
+ work[*n + 1], &c__1, &work[ki + *n], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ }
+
+ ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1));
+ dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ } else {
+
+/*
+ Complex right eigenvector.
+
+ Initial solve
+ [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0.
+ [ (T(KI,KI-1) T(KI,KI) ) ]
+*/
+
+ if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[
+ ki + (ki - 1) * t_dim1], abs(d__2))) {
+ work[ki - 1 + *n] = 1.;
+ work[ki + n2] = wi / t[ki - 1 + ki * t_dim1];
+ } else {
+ work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1];
+ work[ki + n2] = 1.;
+ }
+ work[ki + *n] = 0.;
+ work[ki - 1 + n2] = 0.;
+
+/* Form right-hand side */
+
+ i__1 = ki - 2;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) *
+ t_dim1];
+ work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1];
+/* L80: */
+ }
+
+/*
+ Solve upper quasi-triangular system:
+ (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2)
+*/
+
+ jnxt = ki - 2;
+ for (j = ki - 2; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L90;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j +
+ j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
+ n], n, &wr, &wi, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/*
+ Scale X(1,1) and X(1,2) to avoid overflow when
+ updating the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[2] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ dscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+
+/* Update the right-hand side */
+
+ i__1 = j - 1;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 1;
+ d__1 = -x[2];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b15, &t[j -
+ 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, &
+ work[j - 1 + *n], n, &wr, &wi, x, &c__2, &
+ scale, &xnorm, &ierr);
+
+/*
+ Scale X to avoid overflow when updating
+ the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+/* Computing MAX */
+ d__1 = work[j - 1], d__2 = work[j];
+ beta = max(d__1,d__2);
+ if (beta > bignum / xnorm) {
+ rec = 1. / xnorm;
+ x[0] *= rec;
+ x[2] *= rec;
+ x[1] *= rec;
+ x[3] *= rec;
+ scale *= rec;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ dscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+ work[j - 1 + n2] = x[2];
+ work[j + n2] = x[3];
+
+/* Update the right-hand side */
+
+ i__1 = j - 2;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[1];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[2];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[n2 + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[3];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+ }
+L90:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1
+ + 1], &c__1);
+ dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ emax = 0.;
+ i__1 = ki;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1]
+ , abs(d__1)) + (d__2 = vr[k + is * vr_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L100: */
+ }
+
+ remax = 1. / emax;
+ dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1);
+ dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + (is - 1) * vr_dim1] = 0.;
+ vr[k + is * vr_dim1] = 0.;
+/* L110: */
+ }
+
+ } else {
+
+ if (ki > 2) {
+ i__1 = ki - 2;
+ dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, &
+ work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[(
+ ki - 1) * vr_dim1 + 1], &c__1);
+ i__1 = ki - 2;
+ dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, &
+ work[n2 + 1], &c__1, &work[ki + n2], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ } else {
+ dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1
+ + 1], &c__1);
+ dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], &
+ c__1);
+ }
+
+ emax = 0.;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1]
+ , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L120: */
+ }
+ remax = 1. / emax;
+ dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1);
+ dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+ }
+
+ --is;
+ if (ip != 0) {
+ --is;
+ }
+L130:
+ if (ip == 1) {
+ ip = 0;
+ }
+ if (ip == -1) {
+ ip = 1;
+ }
+/* L140: */
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ ip = 0;
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (ip == -1) {
+ goto L250;
+ }
+ if (ki == *n) {
+ goto L150;
+ }
+ if (t[ki + 1 + ki * t_dim1] == 0.) {
+ goto L150;
+ }
+ ip = 1;
+
+L150:
+ if (somev) {
+ if (! select[ki]) {
+ goto L250;
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.;
+ if (ip != 0) {
+ wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) *
+ sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2)));
+ }
+/* Computing MAX */
+ d__1 = ulp * (abs(wr) + abs(wi));
+ smin = max(d__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real left eigenvector. */
+
+ work[ki + *n] = 1.;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ work[k + *n] = -t[ki + k * t_dim1];
+/* L160: */
+ }
+
+/*
+ Solve the quasi-triangular system:
+ (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK
+*/
+
+ vmax = 1.;
+ vcrit = bignum;
+
+ jnxt = ki + 1;
+ i__2 = *n;
+ for (j = ki + 1; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L170;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/*
+ 1-by-1 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side.
+*/
+
+ if (work[j] > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve (T(J,J)-WR)'*X = WORK */
+
+ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j +
+ j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
+ n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+/* Computing MAX */
+ d__2 = (d__1 = work[j + *n], abs(d__1));
+ vmax = max(d__2,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/*
+ 2-by-2 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side.
+
+ Computing MAX
+*/
+ d__1 = work[j], d__2 = work[j + 1];
+ beta = max(d__1,d__2);
+ if (beta > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+ i__3 = j - ki - 1;
+ work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 1 + *n], &c__1);
+
+/*
+ Solve
+ [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 )
+ [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 )
+*/
+
+ dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b15, &t[j +
+ j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
+ n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + 1 + *n] = x[1];
+
+/* Computing MAX */
+ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
+ = work[j + 1 + *n], abs(d__2)), d__3 = max(
+ d__3,d__4);
+ vmax = max(d__3,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L170:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+
+ i__2 = *n - ki + 1;
+ ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki -
+ 1;
+ remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1));
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.;
+/* L180: */
+ }
+
+ } else {
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 1) * vl_dim1
+ + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[
+ ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+ ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vl[ii + ki * vl_dim1], abs(d__1));
+ dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+
+ }
+
+ } else {
+
+/*
+ Complex left eigenvector.
+
+ Initial solve:
+ ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0.
+ ((T(KI+1,KI) T(KI+1,KI+1)) )
+*/
+
+ if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 =
+ t[ki + 1 + ki * t_dim1], abs(d__2))) {
+ work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1];
+ work[ki + 1 + n2] = 1.;
+ } else {
+ work[ki + *n] = 1.;
+ work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1];
+ }
+ work[ki + 1 + *n] = 0.;
+ work[ki + n2] = 0.;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 2; k <= i__2; ++k) {
+ work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1];
+ work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1]
+ ;
+/* L190: */
+ }
+
+/*
+ Solve complex quasi-triangular system:
+ ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2
+*/
+
+ vmax = 1.;
+ vcrit = bignum;
+
+ jnxt = ki + 2;
+ i__2 = *n;
+ for (j = ki + 2; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L200;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/*
+ 1-by-1 diagonal block
+
+ Scale if necessary to avoid overflow when
+ forming the right-hand side elements.
+*/
+
+ if (work[j] > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+ i__3 = j - ki - 2;
+ work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
+
+ d__1 = -wi;
+ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j +
+ j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
+ n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+/* Computing MAX */
+ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
+ = work[j + n2], abs(d__2)), d__3 = max(d__3,
+ d__4);
+ vmax = max(d__3,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/*
+ 2-by-2 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side elements.
+
+ Computing MAX
+*/
+ d__1 = work[j], d__2 = work[j + 1];
+ beta = max(d__1,d__2);
+ if (beta > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + n2], &c__1);
+
+/*
+ Solve 2-by-2 complex linear equation
+ ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B
+ ([T(j+1,j) T(j+1,j+1)] )
+*/
+
+ d__1 = -wi;
+ dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b15, &t[j +
+ j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
+ n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+ work[j + 1 + *n] = x[1];
+ work[j + 1 + n2] = x[3];
+/* Computing MAX */
+ d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1,
+ d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2)
+ , d__2 = abs(x[3]), d__1 = max(d__1,d__2);
+ vmax = max(d__1,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L200:
+ ;
+ }
+
+/*
+ Copy the vector x or Q*x to VL and normalize.
+
+ L210:
+*/
+ if (! over) {
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) *
+ vl_dim1], &c__1);
+
+ emax = 0.;
+ i__2 = *n;
+ for (k = ki; k <= i__2; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs(
+ d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L220: */
+ }
+ remax = 1. / emax;
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1)
+ ;
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.;
+ vl[k + (is + 1) * vl_dim1] = 0.;
+/* L230: */
+ }
+ } else {
+ if (ki < *n - 1) {
+ i__2 = *n - ki - 1;
+ dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1
+ + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[
+ ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = *n - ki - 1;
+ dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1
+ + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[
+ ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], &
+ c__1);
+ } else {
+ dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], &
+ c__1);
+ dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1
+ + 1], &c__1);
+ }
+
+ emax = 0.;
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs(
+ d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L240: */
+ }
+ remax = 1. / emax;
+ dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);
+
+ }
+
+ }
+
+ ++is;
+ if (ip != 0) {
+ ++is;
+ }
+L250:
+ if (ip == -1) {
+ ip = 0;
+ }
+ if (ip == 1) {
+ ip = -1;
+ }
+
+/* L260: */
+ }
+
+ }
+
+ return 0;
+
+/* End of DTREVC */
+
+} /* dtrevc_ */
+
+integer ieeeck_(integer *ispec, real *zero, real *one)
+{
+    /* System generated locals */
+    integer ret_val;
+
+    /* Local variables */
+    static real nan1, nan2, nan3, nan4, nan5, nan6, neginf, posinf, negzro,
+	     newzro;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1998
+
+
+    Purpose
+    =======
+
+    IEEECK is called from the ILAENV to verify that Infinity and
+    possibly NaN arithmetic is safe (i.e. will not trap).
+
+    Arguments
+    =========
+
+    ISPEC   (input) INTEGER
+            Specifies whether to test just for infinity arithmetic
+            or whether to test for infinity and NaN arithmetic.
+            = 0: Verify infinity arithmetic only.
+            = 1: Verify infinity and NaN arithmetic.
+
+    ZERO    (input) REAL
+            Must contain the value 0.0
+            This is passed to prevent the compiler from optimizing
+            away this code.
+
+    ONE     (input) REAL
+            Must contain the value 1.0
+            This is passed to prevent the compiler from optimizing
+            away this code.
+
+    RETURN VALUE:  INTEGER
+            = 0:  Arithmetic failed to produce the correct answers
+            = 1:  Arithmetic produced the correct answers
+*/
+
+    ret_val = 1;
+
+    posinf = *one / *zero; /* 1/0 -> +Inf if IEEE division does not trap */
+    if (posinf <= *one) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    neginf = -(*one) / *zero; /* -1/0 -> -Inf */
+    if (neginf >= *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    negzro = *one / (neginf + *one); /* 1/(-Inf + 1) -> negative zero */
+    if (negzro != *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    neginf = *one / negzro; /* 1/(-0) must reproduce -Inf */
+    if (neginf >= *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    newzro = negzro + *zero; /* -0 + 0 must give +0 */
+    if (newzro != *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    posinf = *one / newzro;
+    if (posinf <= *one) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    neginf *= posinf; /* -Inf * +Inf -> -Inf */
+    if (neginf >= *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    posinf *= posinf; /* +Inf * +Inf -> +Inf */
+    if (posinf <= *one) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+
+/* Return if we were only asked to check infinity arithmetic */
+
+    if (*ispec == 0) {
+	return ret_val;
+    }
+
+    nan1 = posinf + neginf; /* Inf + (-Inf) -> NaN */
+
+    nan2 = posinf / neginf; /* Inf / Inf -> NaN */
+
+    nan3 = posinf / posinf; /* Inf / Inf -> NaN */
+
+    nan4 = posinf * *zero; /* Inf * 0 -> NaN */
+
+    nan5 = neginf * negzro; /* -Inf * -0 -> NaN */
+
+    nan6 = nan5 * 0.f; /* NaN propagates through multiplication */
+
+    if (nan1 == nan1) { /* IEEE NaN must compare unequal to itself */
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan2 == nan2) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan3 == nan3) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan4 == nan4) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan5 == nan5) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan6 == nan6) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    return ret_val;
+} /* ieeeck_ */
+
+integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1,
+	integer *n2, integer *n3, integer *n4, ftnlen name_len, ftnlen
+	opts_len)
+{
+    /* System generated locals */
+    integer ret_val;
+
+    /* Builtin functions */
+    /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
+    integer s_cmp(char *, char *, ftnlen, ftnlen);
+
+    /* Local variables */
+    static integer i__;
+    static char c1[1], c2[2], c3[3], c4[2];
+    static integer ic, nb, iz, nx;
+    static logical cname, sname;
+    static integer nbmin;
+    extern integer ieeeck_(integer *, real *, real *);
+    static char subnam[6];
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ILAENV is called from the LAPACK routines to choose problem-dependent
+    parameters for the local environment.  See ISPEC for a description of
+    the parameters.
+
+    This version provides a set of parameters which should give good,
+    but not optimal, performance on many of the currently available
+    computers.  Users are encouraged to modify this subroutine to set
+    the tuning parameters for their particular machine using the option
+    and problem size information in the arguments.
+
+    This routine will not function correctly if it is converted to all
+    lower case.  Converting it to all upper case is allowed.
+
+    Arguments
+    =========
+
+    ISPEC   (input) INTEGER
+            Specifies the parameter to be returned as the value of
+            ILAENV.
+            = 1: the optimal blocksize; if this value is 1, an unblocked
+                 algorithm will give the best performance.
+            = 2: the minimum block size for which the block routine
+                 should be used; if the usable block size is less than
+                 this value, an unblocked routine should be used.
+            = 3: the crossover point (in a block routine, for N less
+                 than this value, an unblocked routine should be used)
+            = 4: the number of shifts, used in the nonsymmetric
+                 eigenvalue routines
+            = 5: the minimum column dimension for blocking to be used;
+                 rectangular blocks must have dimension at least k by m,
+                 where k is given by ILAENV(2,...) and m by ILAENV(5,...)
+            = 6: the crossover point for the SVD (when reducing an m by n
+                 matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds
+                 this value, a QR factorization is used first to reduce
+                 the matrix to a triangular form.)
+            = 7: the number of processors
+            = 8: the crossover point for the multishift QR and QZ methods
+                 for nonsymmetric eigenvalue problems.
+            = 9: maximum size of the subproblems at the bottom of the
+                 computation tree in the divide-and-conquer algorithm
+                 (used by xGELSD and xGESDD)
+            =10: ieee NaN arithmetic can be trusted not to trap
+            =11: infinity arithmetic can be trusted not to trap
+
+    NAME    (input) CHARACTER*(*)
+            The name of the calling subroutine, in either upper case or
+            lower case.
+
+    OPTS    (input) CHARACTER*(*)
+            The character options to the subroutine NAME, concatenated
+            into a single character string.  For example, UPLO = 'U',
+            TRANS = 'T', and DIAG = 'N' for a triangular routine would
+            be specified as OPTS = 'UTN'.
+
+    N1      (input) INTEGER
+    N2      (input) INTEGER
+    N3      (input) INTEGER
+    N4      (input) INTEGER
+            Problem dimensions for the subroutine NAME; these may not all
+            be required.
+
+   (ILAENV) (output) INTEGER
+            >= 0: the value of the parameter specified by ISPEC
+            < 0:  if ILAENV = -k, the k-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    The following conventions have been used when calling ILAENV from the
+    LAPACK routines:
+    1)  OPTS is a concatenation of all of the character options to
+        subroutine NAME, in the same order that they appear in the
+        argument list for NAME, even if they are not used in determining
+        the value of the parameter specified by ISPEC.
+    2)  The problem dimensions N1, N2, N3, N4 are specified in the order
+        that they appear in the argument list for NAME.  N1 is used
+        first, N2 second, and so on, and unused problem dimensions are
+        passed a value of -1.
+    3)  The parameter value returned by ILAENV is checked for validity in
+        the calling subroutine.  For example, ILAENV is used to retrieve
+        the optimal blocksize for STRTRI as follows:
+
+        NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 )
+        IF( NB.LE.1 ) NB = MAX( 1, N )
+
+    =====================================================================
+*/
+
+
+    switch (*ispec) {
+	case 1:  goto L100;
+	case 2:  goto L100;
+	case 3:  goto L100;
+	case 4:  goto L400;
+	case 5:  goto L500;
+	case 6:  goto L600;
+	case 7:  goto L700;
+	case 8:  goto L800;
+	case 9:  goto L900;
+	case 10:  goto L1000;
+	case 11:  goto L1100;
+    }
+
+/* Invalid value for ISPEC */
+
+    ret_val = -1;
+    return ret_val;
+
+L100:
+
+/* Convert NAME to upper case if the first character is lower case. */
+
+    ret_val = 1;
+    s_copy(subnam, name__, (ftnlen)6, name_len); /* blank-pads if NAME < 6 chars */
+    ic = *(unsigned char *)subnam;
+    iz = 'Z'; /* probe the host character set via the code of 'Z' */
+    if (iz == 90 || iz == 122) {
+
+/* ASCII character set */
+
+	if ((ic >= 97 && ic <= 122)) {
+	    *(unsigned char *)subnam = (char) (ic - 32);
+	    for (i__ = 2; i__ <= 6; ++i__) {
+		ic = *(unsigned char *)&subnam[i__ - 1];
+		if ((ic >= 97 && ic <= 122)) {
+		    *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
+		}
+/* L10: */
+	    }
+	}
+
+    } else if (iz == 233 || iz == 169) {
+
+/* EBCDIC character set */
+
+	if ((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153) || (ic >=
+		162 && ic <= 169)) {
+	    *(unsigned char *)subnam = (char) (ic + 64);
+	    for (i__ = 2; i__ <= 6; ++i__) {
+		ic = *(unsigned char *)&subnam[i__ - 1];
+		if ((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153) || (
+			ic >= 162 && ic <= 169)) {
+		    *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64);
+		}
+/* L20: */
+	    }
+	}
+
+    } else if (iz == 218 || iz == 250) {
+
+/* Prime machines:  ASCII+128 */
+
+	if ((ic >= 225 && ic <= 250)) {
+	    *(unsigned char *)subnam = (char) (ic - 32);
+	    for (i__ = 2; i__ <= 6; ++i__) {
+		ic = *(unsigned char *)&subnam[i__ - 1];
+		if ((ic >= 225 && ic <= 250)) {
+		    *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
+		}
+/* L30: */
+	    }
+	}
+    }
+
+    *(unsigned char *)c1 = *(unsigned char *)subnam; /* precision letter: S/D real, C/Z complex */
+    sname = *(unsigned char *)c1 == 'S' || *(unsigned char *)c1 == 'D';
+    cname = *(unsigned char *)c1 == 'C' || *(unsigned char *)c1 == 'Z';
+    if (! (cname || sname)) {
+	return ret_val;
+    }
+    s_copy(c2, subnam + 1, (ftnlen)2, (ftnlen)2); /* matrix type, e.g. "GE" */
+    s_copy(c3, subnam + 3, (ftnlen)3, (ftnlen)3); /* operation, e.g. "TRF" */
+    s_copy(c4, c3 + 1, (ftnlen)2, (ftnlen)2);
+
+    switch (*ispec) {
+	case 1:  goto L110;
+	case 2:  goto L200;
+	case 3:  goto L300;
+    }
+
+L110:
+
+/*
+       ISPEC = 1:  block size
+
+       In these examples, separate code is provided for setting NB for
+       real and complex.  We assume that NB will take the same value in
+       single or double precision.
+*/
+
+    nb = 1;
+
+    if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 64;
+	    } else {
+		nb = 64;
+	    }
+	} else if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3,
+		"RQF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)
+		3, (ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3)
+		== 0) {
+	    if (sname) {
+		nb = 32;
+	    } else {
+		nb = 32;
+	    }
+	} else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 32;
+	    } else {
+		nb = 32;
+	    }
+	} else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 32;
+	    } else {
+		nb = 32;
+	    }
+	} else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 64;
+	    } else {
+		nb = 64;
+	    }
+	}
+    } else if (s_cmp(c2, "PO", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 64;
+	    } else {
+		nb = 64;
+	    }
+	}
+    } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 64;
+	    } else {
+		nb = 64;
+	    }
+	} else if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) {
+	    nb = 32;
+	} else if ((sname && s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0)) {
+	    nb = 64;
+	}
+    } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    nb = 64;
+	} else if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    nb = 32;
+	} else if (s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) {
+	    nb = 64;
+	}
+    } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (*(unsigned char *)c3 == 'G') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nb = 32;
+	    }
+	} else if (*(unsigned char *)c3 == 'M') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nb = 32;
+	    }
+	}
+    } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (*(unsigned char *)c3 == 'G') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nb = 32;
+	    }
+	} else if (*(unsigned char *)c3 == 'M') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nb = 32;
+	    }
+	}
+    } else if (s_cmp(c2, "GB", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		if (*n4 <= 64) {
+		    nb = 1;
+		} else {
+		    nb = 32;
+		}
+	    } else {
+		if (*n4 <= 64) {
+		    nb = 1;
+		} else {
+		    nb = 32;
+		}
+	    }
+	}
+    } else if (s_cmp(c2, "PB", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		if (*n2 <= 64) {
+		    nb = 1;
+		} else {
+		    nb = 32;
+		}
+	    } else {
+		if (*n2 <= 64) {
+		    nb = 1;
+		} else {
+		    nb = 32;
+		}
+	    }
+	}
+    } else if (s_cmp(c2, "TR", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 64;
+	    } else {
+		nb = 64;
+	    }
+	}
+    } else if (s_cmp(c2, "LA", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "UUM", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nb = 64;
+	    } else {
+		nb = 64;
+	    }
+	}
+    } else if ((sname && s_cmp(c2, "ST", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (s_cmp(c3, "EBZ", (ftnlen)3, (ftnlen)3) == 0) {
+	    nb = 1;
+	}
+    }
+    ret_val = nb;
+    return ret_val;
+
+L200:
+
+/* ISPEC = 2:  minimum block size */
+
+    nbmin = 2;
+    if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (
+		ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, (
+		ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0)
+		 {
+	    if (sname) {
+		nbmin = 2;
+	    } else {
+		nbmin = 2;
+	    }
+	} else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nbmin = 2;
+	    } else {
+		nbmin = 2;
+	    }
+	} else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nbmin = 2;
+	    } else {
+		nbmin = 2;
+	    }
+	} else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nbmin = 2;
+	    } else {
+		nbmin = 2;
+	    }
+	}
+    } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nbmin = 8;
+	    } else {
+		nbmin = 8;
+	    }
+	} else if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) {
+	    nbmin = 2;
+	}
+    } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    nbmin = 2;
+	}
+    } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (*(unsigned char *)c3 == 'G') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nbmin = 2;
+	    }
+	} else if (*(unsigned char *)c3 == 'M') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nbmin = 2;
+	    }
+	}
+    } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (*(unsigned char *)c3 == 'G') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nbmin = 2;
+	    }
+	} else if (*(unsigned char *)c3 == 'M') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nbmin = 2;
+	    }
+	}
+    }
+    ret_val = nbmin;
+    return ret_val;
+
+L300:
+
+/* ISPEC = 3:  crossover point */
+
+    nx = 0;
+    if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
+	if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (
+		ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, (
+		ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0)
+		 {
+	    if (sname) {
+		nx = 128;
+	    } else {
+		nx = 128;
+	    }
+	} else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nx = 128;
+	    } else {
+		nx = 128;
+	    }
+	} else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    if (sname) {
+		nx = 128;
+	    } else {
+		nx = 128;
+	    }
+	}
+    } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
+	if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) {
+	    nx = 32;
+	}
+    } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+	    nx = 32;
+	}
+    } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (*(unsigned char *)c3 == 'G') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nx = 128;
+	    }
+	}
+    } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) {
+	if (*(unsigned char *)c3 == 'G') {
+	    if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+		     (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
+		    ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
+		     0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
+		    c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+		    ftnlen)2, (ftnlen)2) == 0) {
+		nx = 128;
+	    }
+	}
+    }
+    ret_val = nx;
+    return ret_val;
+
+L400:
+
+/* ISPEC = 4:  number of shifts (used by xHSEQR) */
+
+    ret_val = 6;
+    return ret_val;
+
+L500:
+
+/* ISPEC = 5:  minimum column dimension (not used) */
+
+    ret_val = 2;
+    return ret_val;
+
+L600:
+
+/* ISPEC = 6:  crossover point for SVD (used by xGELSS and xGESVD) */
+
+    ret_val = (integer) ((real) min(*n1,*n2) * 1.6f);
+    return ret_val;
+
+L700:
+
+/* ISPEC = 7:  number of processors (not used) */
+
+    ret_val = 1;
+    return ret_val;
+
+L800:
+
+/* ISPEC = 8:  crossover point for multishift (used by xHSEQR) */
+
+    ret_val = 50;
+    return ret_val;
+
+L900:
+
+/*
+       ISPEC = 9:  maximum size of the subproblems at the bottom of the
+                   computation tree in the divide-and-conquer algorithm
+                   (used by xGELSD and xGESDD)
+*/
+
+    ret_val = 25;
+    return ret_val;
+
+L1000:
+
+/*
+       ISPEC = 10: ieee NaN arithmetic can be trusted not to trap
+
+       ILAENV = 0
+*/
+    ret_val = 1;
+    if (ret_val == 1) { /* always true; guard kept from the reference Fortran */
+	ret_val = ieeeck_(&c__0, &c_b3825, &c_b3826);
+    }
+    return ret_val;
+
+L1100:
+
+/*
+       ISPEC = 11: infinity arithmetic can be trusted not to trap
+
+       ILAENV = 0
+*/
+    ret_val = 1;
+    if (ret_val == 1) { /* always true; guard kept from the reference Fortran */
+	ret_val = ieeeck_(&c__1, &c_b3825, &c_b3826);
+    }
+    return ret_val;
+
+/* End of ILAENV */
+
+} /* ilaenv_ */
+
diff --git a/numpy/corelinalg/f2c.h b/numpy/corelinalg/f2c.h
new file mode 100644
index 000000000..e27d7ae57
--- /dev/null
+++ b/numpy/corelinalg/f2c.h
@@ -0,0 +1,217 @@
+/* f2c.h -- Standard Fortran to C header file */
+
+/** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed."
+
+ - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */
+
+#ifndef F2C_INCLUDE
+#define F2C_INCLUDE
+
+typedef int integer;
+typedef char *address;
+typedef short int shortint;
+typedef float real;
+typedef double doublereal;
+typedef struct { real r, i; } complex;
+typedef struct { doublereal r, i; } doublecomplex;
+typedef int logical;
+typedef short int shortlogical;
+typedef char logical1;
+typedef char integer1;
+
+#define TRUE_ (1)
+#define FALSE_ (0)
+
+/* Extern is for use with -E */
+#ifndef Extern
+#define Extern extern
+#endif
+
+/* I/O stuff */
+
+#ifdef f2c_i2
+/* for -i2 */
+typedef short flag;
+typedef short ftnlen;
+typedef short ftnint;
+#else
+typedef int flag;
+typedef int ftnlen;
+typedef int ftnint;
+#endif
+
+/*external read, write*/
+typedef struct
+{ flag cierr;
+ ftnint ciunit;
+ flag ciend;
+ char *cifmt;
+ ftnint cirec;
+} cilist;
+
+/*internal read, write*/
+typedef struct
+{ flag icierr;
+ char *iciunit;
+ flag iciend;
+ char *icifmt;
+ ftnint icirlen;
+ ftnint icirnum;
+} icilist;
+
+/*open*/
+typedef struct
+{ flag oerr;
+ ftnint ounit;
+ char *ofnm;
+ ftnlen ofnmlen;
+ char *osta;
+ char *oacc;
+ char *ofm;
+ ftnint orl;
+ char *oblnk;
+} olist;
+
+/*close*/
+typedef struct
+{ flag cerr;
+ ftnint cunit;
+ char *csta;
+} cllist;
+
+/*rewind, backspace, endfile*/
+typedef struct
+{ flag aerr;
+ ftnint aunit;
+} alist;
+
+/* inquire */
+typedef struct
+{ flag inerr;
+ ftnint inunit;
+ char *infile;
+ ftnlen infilen;
+ ftnint *inex; /*parameters in standard's order*/
+ ftnint *inopen;
+ ftnint *innum;
+ ftnint *innamed;
+ char *inname;
+ ftnlen innamlen;
+ char *inacc;
+ ftnlen inacclen;
+ char *inseq;
+ ftnlen inseqlen;
+ char *indir;
+ ftnlen indirlen;
+ char *infmt;
+ ftnlen infmtlen;
+ char *inform;
+ ftnint informlen;
+ char *inunf;
+ ftnlen inunflen;
+ ftnint *inrecl;
+ ftnint *innrec;
+ char *inblank;
+ ftnlen inblanklen;
+} inlist;
+
+#define VOID void
+
+union Multitype { /* for multiple entry points */
+ shortint h;
+ integer i;
+ real r;
+ doublereal d;
+ complex c;
+ doublecomplex z;
+ };
+
+typedef union Multitype Multitype;
+
+typedef long Long; /* No longer used; formerly in Namelist */
+
+struct Vardesc { /* for Namelist */
+ char *name;
+ char *addr;
+ ftnlen *dims;
+ int type;
+ };
+typedef struct Vardesc Vardesc;
+
+struct Namelist {
+ char *name;
+ Vardesc **vars;
+ int nvars;
+ };
+typedef struct Namelist Namelist;
+
+#ifndef abs
+#define abs(x) ((x) >= 0 ? (x) : -(x))
+#endif
+#define dabs(x) (doublereal)abs(x)
+#ifndef min
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#endif
+#ifndef max
+#define max(a,b) ((a) >= (b) ? (a) : (b))
+#endif
+#define dmin(a,b) (doublereal)min(a,b)
+#define dmax(a,b) (doublereal)max(a,b)
+
+/* procedure parameter types for -A and -C++ */
+
+#define F2C_proc_par_types 1
+#ifdef __cplusplus
+typedef int /* Unknown procedure type */ (*U_fp)(...);
+typedef shortint (*J_fp)(...);
+typedef integer (*I_fp)(...);
+typedef real (*R_fp)(...);
+typedef doublereal (*D_fp)(...), (*E_fp)(...);
+typedef /* Complex */ VOID (*C_fp)(...);
+typedef /* Double Complex */ VOID (*Z_fp)(...);
+typedef logical (*L_fp)(...);
+typedef shortlogical (*K_fp)(...);
+typedef /* Character */ VOID (*H_fp)(...);
+typedef /* Subroutine */ int (*S_fp)(...);
+#else
+typedef int /* Unknown procedure type */ (*U_fp)(void);
+typedef shortint (*J_fp)(void);
+typedef integer (*I_fp)(void);
+typedef real (*R_fp)(void);
+typedef doublereal (*D_fp)(void), (*E_fp)(void);
+typedef /* Complex */ VOID (*C_fp)(void);
+typedef /* Double Complex */ VOID (*Z_fp)(void);
+typedef logical (*L_fp)(void);
+typedef shortlogical (*K_fp)(void);
+typedef /* Character */ VOID (*H_fp)(void);
+typedef /* Subroutine */ int (*S_fp)(void);
+#endif
+/* E_fp is for real functions when -R is not specified */
+typedef VOID C_f; /* complex function */
+typedef VOID H_f; /* character function */
+typedef VOID Z_f; /* double complex function */
+typedef doublereal E_f; /* real function with -R not specified */
+
+/* undef any lower-case symbols that your C compiler predefines, e.g.: */
+
+#ifndef Skip_f2c_Undefs
+#undef cray
+#undef gcos
+#undef mc68010
+#undef mc68020
+#undef mips
+#undef pdp11
+#undef sgi
+#undef sparc
+#undef sun
+#undef sun2
+#undef sun3
+#undef sun4
+#undef u370
+#undef u3b
+#undef u3b2
+#undef u3b5
+#undef unix
+#undef vax
+#endif
+#endif
diff --git a/numpy/corelinalg/f2c_lite.c b/numpy/corelinalg/f2c_lite.c
new file mode 100644
index 000000000..6402271c9
--- /dev/null
+++ b/numpy/corelinalg/f2c_lite.c
@@ -0,0 +1,492 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "f2c.h"
+
+
+extern void s_wsfe(cilist *f) {;} /* no-op stub: Fortran formatted-write open, unused here */
+extern void e_wsfe(void) {;} /* no-op stub: Fortran formatted-write close */
+extern void do_fio(integer *c, char *s, ftnlen l) {;} /* no-op stub: Fortran formatted I/O item */
+
+/* You'll want this if you redo the *_lite.c files with the -C option
+ * to f2c for checking array subscripts. (It's not suggested you do that
+ * for production use, of course.) */
+extern int
+s_rnge(char *var, int index, char *routine, int lineno)
+{
+  fprintf(stderr, "array index out-of-bounds for %s[%d] in routine %s:%d\n",
+          var, index, routine, lineno);
+  fflush(stderr); /* make sure the diagnostic is visible before aborting */
+  abort();
+}
+
+
+#ifdef KR_headers
+extern double sqrt();
+double f__cabs(real, imag) double real, imag;
+#else
+#undef abs
+
+double f__cabs(double real, double imag)
+#endif
+{
+double temp;
+
+/* Complex magnitude sqrt(real^2 + imag^2), scaled to reduce overflow. */
+if(real < 0)
+	real = -real;
+if(imag < 0)
+	imag = -imag;
+if(imag > real){
+	temp = real;
+	real = imag;
+	imag = temp;
+}
+if((imag+real) == real)
+	return((double)real); /* imag negligible (or zero) relative to real */
+
+temp = imag/real;
+temp = real*sqrt(1.0 + temp*temp);  /* ratio <= 1, so the sqrt cannot overflow; real* may still */
+return(temp);
+}
+
+
+	VOID
+#ifdef KR_headers
+d_cnjg(r, z) doublecomplex *r, *z;
+#else
+d_cnjg(doublecomplex *r, doublecomplex *z)
+#endif
+{ /* r = conjugate(z) */
+r->r = z->r;
+r->i = - z->i;
+}
+
+
+#ifdef KR_headers
+double d_imag(z) doublecomplex *z;
+#else
+double d_imag(doublecomplex *z)
+#endif
+{ /* imaginary part of z */
+return(z->i);
+}
+
+
+#define log10e 0.43429448190325182765 /* 1/ln(10) */
+
+#ifdef KR_headers
+double log();
+double d_lg10(x) doublereal *x;
+#else
+#undef abs
+
+double d_lg10(doublereal *x)
+#endif
+{ /* base-10 logarithm via natural log */
+return( log10e * log(*x) );
+}
+
+
+#ifdef KR_headers
+double d_sign(a,b) doublereal *a, *b;
+#else
+double d_sign(doublereal *a, doublereal *b)
+#endif
+{ /* Fortran SIGN(a,b): |a| with the sign of b */
+double x;
+x = (*a >= 0 ? *a : - *a);
+return( *b >= 0 ? x : -x);
+}
+
+
+#ifdef KR_headers
+double floor();
+integer i_dnnt(x) doublereal *x;
+#else
+#undef abs
+
+integer i_dnnt(doublereal *x)
+#endif
+{ /* Fortran NINT: round to nearest integer, halves away from zero */
+return( (*x)>=0 ?
+	floor(*x + .5) : -floor(.5 - *x) );
+}
+
+
+#ifdef KR_headers
+double pow();
+double pow_dd(ap, bp) doublereal *ap, *bp;
+#else
+#undef abs
+
+double pow_dd(doublereal *ap, doublereal *bp)
+#endif
+{ /* double ** double, via libm pow */
+return(pow(*ap, *bp) );
+}
+
+
+#ifdef KR_headers
+double pow_di(ap, bp) doublereal *ap; integer *bp;
+#else
+double pow_di(doublereal *ap, integer *bp)
+#endif
+{ /* double ** integer by binary (square-and-multiply) exponentiation */
+double pow, x;
+integer n;
+unsigned long u;
+
+pow = 1;
+x = *ap;
+n = *bp;
+
+if(n != 0)
+	{
+	if(n < 0)
+		{
+		n = -n;
+		x = 1/x; /* negative exponent: invert the base */
+		}
+	for(u = n; ; )
+		{
+		if(u & 01)
+			pow *= x;
+		if(u >>= 1)
+			x *= x;
+		else
+			break;
+		}
+	}
+return(pow);
+}
+/* Unless compiled with -DNO_OVERWRITE, this variant of s_cat allows the
+ * target of a concatenation to appear on its right-hand side (contrary
+ * to the Fortran 77 Standard, but in accordance with Fortran 90).
+ */
+#define NO_OVERWRITE
+
+
+#ifndef NO_OVERWRITE
+
+#undef abs
+#ifdef KR_headers
+ extern char *F77_aloc();
+ extern void free();
+ extern void exit_();
+#else
+
+ extern char *F77_aloc(ftnlen, char*);
+#endif
+
+#endif /* NO_OVERWRITE */
+
+	VOID
+#ifdef KR_headers
+s_cat(lp, rpp, rnp, np, ll) char *lp, *rpp[]; ftnlen rnp[], *np, ll;
+#else
+s_cat(char *lp, char *rpp[], ftnlen rnp[], ftnlen *np, ftnlen ll)
+#endif
+{ /* Fortran concatenation: copy *np source strings rpp (lengths rnp) into lp, blank-padding to ll */
+	ftnlen i, nc;
+	char *rp;
+	ftnlen n = *np;
+#ifndef NO_OVERWRITE
+	ftnlen L, m;
+	char *lp0, *lp1;
+
+	lp0 = 0;
+	lp1 = lp;
+	L = ll;
+	i = 0;
+	while(i < n) { /* detect any source overlapping the target; if so, build in a temp buffer */
+		rp = rpp[i];
+		m = rnp[i++];
+		if (rp >= lp1 || rp + m <= lp) {
+			if ((L -= m) <= 0) {
+				n = i;
+				break;
+				}
+			lp1 += m;
+			continue;
+			}
+		lp0 = lp;
+		lp = lp1 = F77_aloc(L = ll, "s_cat");
+		break;
+		}
+	lp1 = lp;
+#endif /* NO_OVERWRITE */
+	for(i = 0 ; i < n ; ++i) {
+		nc = ll;
+		if(rnp[i] < nc)
+			nc = rnp[i];
+		ll -= nc;
+		rp = rpp[i];
+		while(--nc >= 0)
+			*lp++ = *rp++;
+		}
+	while(--ll >= 0)
+		*lp++ = ' '; /* Fortran semantics: pad the remainder with blanks */
+#ifndef NO_OVERWRITE
+	if (lp0) {
+		memmove(lp0, lp1, L);
+		free(lp1);
+		}
+#endif
+	}
+
+
+/* compare two strings (Fortran semantics: the shorter is blank-padded) */
+
+#ifdef KR_headers
+integer s_cmp(a0, b0, la, lb) char *a0, *b0; ftnlen la, lb;
+#else
+integer s_cmp(char *a0, char *b0, ftnlen la, ftnlen lb)
+#endif
+{
+register unsigned char *a, *aend, *b, *bend;
+a = (unsigned char *)a0;
+b = (unsigned char *)b0;
+aend = a + la;
+bend = b + lb;
+
+if(la <= lb)
+	{
+	while(a < aend)
+		if(*a != *b)
+			return( *a - *b );
+		else
+			{ ++a; ++b; }
+
+	while(b < bend) /* remainder of b compared against implicit blanks in a */
+		if(*b != ' ')
+			return( ' ' - *b );
+		else	++b;
+	}
+
+else
+	{
+	while(b < bend)
+		if(*a == *b)
+			{ ++a; ++b; }
+		else
+			return( *a - *b );
+	while(a < aend) /* remainder of a compared against implicit blanks in b */
+		if(*a != ' ')
+			return(*a - ' ');
+		else	++a;
+	}
+return(0);
+}
+/* Unless compiled with -DNO_OVERWRITE, this variant of s_copy allows the
+ * target of an assignment to appear on its right-hand side (contrary
+ * to the Fortran 77 Standard, but in accordance with Fortran 90),
+ * as in  a(2:5) = a(4:7) .
+ */
+
+
+
+/* assign strings:  a = b  (Fortran semantics: truncate or blank-pad to la) */
+
+#ifdef KR_headers
+VOID s_copy(a, b, la, lb) register char *a, *b; ftnlen la, lb;
+#else
+void s_copy(register char *a, register char *b, ftnlen la, ftnlen lb)
+#endif
+{
+	register char *aend, *bend;
+
+	aend = a + la;
+
+	if(la <= lb)
+#ifndef NO_OVERWRITE
+		if (a <= b || a >= b + la)
+#endif
+			while(a < aend)
+				*a++ = *b++;
+#ifndef NO_OVERWRITE
+		else
+			for(b += la; a < aend; ) /* overlapping: copy backwards */
+				*--aend = *--b;
+#endif
+
+	else {
+		bend = b + lb;
+#ifndef NO_OVERWRITE
+		if (a <= b || a >= bend)
+#endif
+			while(b < bend)
+				*a++ = *b++;
+#ifndef NO_OVERWRITE
+		else {
+			a += lb;
+			while(b < bend)
+				*--a = *--bend;
+			a += lb;
+			}
+#endif
+		while(a < aend) /* pad the tail with blanks */
+			*a++ = ' ';
+		}
+	}
+
+
+#ifdef KR_headers
+double f__cabs();
+double z_abs(z) doublecomplex *z;
+#else
+double f__cabs(double, double);
+double z_abs(doublecomplex *z)
+#endif
+{ /* |z| for double complex, delegated to the scaled magnitude helper */
+return( f__cabs( z->r, z->i ) );
+}
+
+
+#ifdef KR_headers
+extern void sig_die();
+VOID z_div(c, a, b) doublecomplex *a, *b, *c;
+#else
+extern void sig_die(char*, int);
+void z_div(doublecomplex *c, doublecomplex *a, doublecomplex *b)
+#endif
+{ /* c = a / b using Smith's algorithm (scale by the larger of |Re b|, |Im b|) */
+double ratio, den;
+double abr, abi;
+
+if( (abr = b->r) < 0.)
+	abr = - abr;
+if( (abi = b->i) < 0.)
+	abi = - abi;
+if( abr <= abi )
+	{
+	  /* Let IEEE Infinities handle division by zero ;( */
+	  /*if(abi == 0)
+		sig_die("complex division by zero", 1);*/
+	ratio = b->r / b->i ;
+	den = b->i * (1 + ratio*ratio);
+	c->r = (a->r*ratio + a->i) / den;
+	c->i = (a->i*ratio - a->r) / den;
+	}
+
+else
+	{
+	ratio = b->i / b->r ;
+	den = b->r * (1 + ratio*ratio);
+	c->r = (a->r + a->i*ratio) / den;
+	c->i = (a->i - a->r*ratio) / den;
+	}
+
+}
+
+
+#ifdef KR_headers
+double sqrt(), f__cabs();
+VOID z_sqrt(r, z) doublecomplex *r, *z;
+#else
+#undef abs
+
+extern double f__cabs(double, double);
+void z_sqrt(doublecomplex *r, doublecomplex *z)
+#endif
+{ /* principal square root of z; branch on sign(Re z) to keep the division well-conditioned */
+double mag;
+
+if( (mag = f__cabs(z->r, z->i)) == 0.)
+	r->r = r->i = 0.;
+else if(z->r > 0)
+	{
+	r->r = sqrt(0.5 * (mag + z->r) );
+	r->i = z->i / r->r / 2;
+	}
+else
+	{
+	r->i = sqrt(0.5 * (mag - z->r) );
+	if(z->i < 0)
+		r->i = - r->i; /* choose the root with Re >= 0 */
+	r->r = z->i / r->i / 2;
+	}
+}
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer pow_ii(ap, bp) integer *ap, *bp;
+#else
+integer pow_ii(integer *ap, integer *bp)
+#endif
+{ /* integer ** integer by binary exponentiation; Fortran truncation rules for n < 0 */
+	integer pow, x, n;
+	unsigned long u;
+
+	x = *ap;
+	n = *bp;
+
+	if (n <= 0) {
+		if (n == 0 || x == 1)
+			return 1;
+		if (x != -1)
+			return x == 0 ? 1/x : 0; /* 0**negative: deliberate divide-by-zero, as f2c emits */
+		n = -n;
+		}
+	u = n;
+	for(pow = 1; ; )
+		{
+		if(u & 01)
+			pow *= x;
+		if(u >>= 1)
+			x *= x;
+		else
+			break;
+		}
+	return(pow);
+	}
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef KR_headers
+extern void f_exit();
+VOID s_stop(s, n) char *s; ftnlen n;
+#else
+#undef abs
+#undef min
+#undef max
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef __cplusplus
+extern "C" { /* NOTE(review): extern "C" opened twice; balanced by the two closing blocks below */
+#endif
+void f_exit(void);
+
+int s_stop(char *s, ftnlen n)
+#endif
+{ /* Fortran STOP statement: optionally print the stop message, then exit(0) */
+int i;
+
+if(n > 0)
+	{
+	fprintf(stderr, "STOP ");
+	for(i = 0; i<n ; ++i)
+		putc(*s++, stderr);
+	fprintf(stderr, " statement executed\n");
+	}
+#ifdef NO_ONEXIT
+f_exit();
+#endif
+exit(0);
+
+/* We cannot avoid (useless) compiler diagnostics here:		*/
+/* some compilers complain if there is no return statement,	*/
+/* and others complain that this one cannot be reached.		*/
+
+return 0; /* NOT REACHED */
+}
+#ifdef __cplusplus
+}
+#endif
+#ifdef __cplusplus
+}
+#endif
diff --git a/numpy/corelinalg/info.py b/numpy/corelinalg/info.py
new file mode 100644
index 000000000..740dfcc1a
--- /dev/null
+++ b/numpy/corelinalg/info.py
@@ -0,0 +1,25 @@
"""\
Core Linear Algebra Tools
=========================

 Linear Algebra Basics:

   inv        --- Find the inverse of a square matrix
   solve      --- Solve a linear system of equations
   det        --- Find the determinant of a square matrix
   lstsq      --- Solve linear least-squares problem
   pinv       --- Pseudo-inverse (Moore-Penrose) using lstsq

 Eigenvalues and Decompositions:

   eig        --- Find the eigenvalues and vectors of a square matrix
   eigh       --- Find the eigenvalues and eigenvectors of a Hermitian matrix
   eigvals    --- Find the eigenvalues of a square matrix
   eigvalsh   --- Find the eigenvalues of a Hermitian matrix.
   svd        --- Singular value decomposition of a matrix
   cholesky   --- Cholesky decomposition of a matrix

"""

# Subpackages that must be configured before this one; consumed by the
# package import machinery, not by user code.
depends = ['base']
+
diff --git a/numpy/corelinalg/lapack_litemodule.c b/numpy/corelinalg/lapack_litemodule.c
new file mode 100644
index 000000000..bf7170fbc
--- /dev/null
+++ b/numpy/corelinalg/lapack_litemodule.c
@@ -0,0 +1,692 @@
+/*This module contributed by Doug Heisterkamp
+Modified by Jim Hugunin
+More modifications by Jeff Whitaker
+*/
+
+#include "Python.h"
+#include "scipy/arrayobject.h"
+
+#ifdef NO_APPEND_FORTRAN
+# define FNAME(x) x
+#else
+# define FNAME(x) x##_
+#endif
+
+typedef struct { float r, i; } f2c_complex;
+typedef struct { double r, i; } f2c_doublecomplex;
+/* typedef long int (*L_fp)(); */
+
+extern void FNAME(dgeev)(char *jobvl, char *jobvr, int *n,
+ double a[], int *lda, double wr[], double wi[],
+ double vl[], int *ldvl, double vr[], int *ldvr,
+ double work[], int lwork[], int *info);
+extern void FNAME(zgeev)(char *jobvl, char *jobvr, int *n,
+ f2c_doublecomplex a[], int *lda,
+ f2c_doublecomplex w[],
+ f2c_doublecomplex vl[], int *ldvl,
+ f2c_doublecomplex vr[], int *ldvr,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[], int *info);
+
+extern void FNAME(dsyevd)(char *jobz, char *uplo, int *n,
+ double a[], int *lda, double w[], double work[],
+ int *lwork, int iwork[], int *liwork, int *info);
+extern void FNAME(zheevd)(char *jobz, char *uplo, int *n,
+ f2c_doublecomplex a[], int *lda,
+ double w[], f2c_doublecomplex work[],
+ int *lwork, double rwork[], int *lrwork, int iwork[],
+ int *liwork, int *info);
+
+extern void FNAME(dgelsd)(int *m, int *n, int *nrhs,
+ double a[], int *lda, double b[], int *ldb,
+ double s[], double *rcond, int *rank,
+ double work[], int *lwork, int iwork[], int *info);
+extern void FNAME(zgelsd)(int *m, int *n, int *nrhs,
+ f2c_doublecomplex a[], int *lda,
+ f2c_doublecomplex b[], int *ldb,
+ double s[], double *rcond, int *rank,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[], int iwork[], int *info);
+
+extern void FNAME(dgesv)(int *n, int *nrhs,
+ double a[], int *lda, int ipiv[],
+ double b[], int *ldb, int *info);
+extern void FNAME(zgesv)(int *n, int *nrhs,
+ f2c_doublecomplex a[], int *lda, int ipiv[],
+ f2c_doublecomplex b[], int *ldb, int *info);
+
+extern void FNAME(dgetrf)(int *m, int *n,
+ double a[], int *lda, int ipiv[], int *info);
+extern void FNAME(zgetrf)(int *m, int *n,
+ f2c_doublecomplex a[], int *lda, int ipiv[],
+ int *info);
+
+extern void FNAME(dpotrf)(char *uplo, int *n, double a[], int *lda, int *info);
+extern void FNAME(zpotrf)(char *uplo, int *n,
+ f2c_doublecomplex a[], int *lda, int *info);
+
+extern void FNAME(dgesdd)(char *jobz, int *m, int *n,
+ double a[], int *lda, double s[], double u[],
+ int *ldu, double vt[], int *ldvt, double work[],
+ int *lwork, int iwork[], int *info);
+extern void FNAME(zgesdd)(char *jobz, int *m, int *n,
+ f2c_doublecomplex a[], int *lda,
+ double s[], f2c_doublecomplex u[], int *ldu,
+ f2c_doublecomplex vt[], int *ldvt,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[], int iwork[], int *info);
+
+static PyObject *LapackError;
+
+#define TRY(E) if (!(E)) return NULL
+
+static int
+check_object(PyObject *ob, int t, char *obname,
+ char *tname, char *funname)
+{
+ if (!PyArray_Check(ob)) {
+ PyErr_Format(LapackError,
+ "Expected an array for parameter %s in lapack_lite.%s",
+ obname, funname);
+ return 0;
+ } else if (!(((PyArrayObject *)ob)->flags & CONTIGUOUS)) {
+ PyErr_Format(LapackError,
+ "Parameter %s is not contiguous in lapack_lite.%s",
+ obname, funname);
+ return 0;
+ } else if (!(((PyArrayObject *)ob)->descr->type_num == t)) {
+ PyErr_Format(LapackError,
+ "Parameter %s is not of type %s in lapack_lite.%s",
+ obname, tname, funname);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+#define CHDATA(p) ((char *) (((PyArrayObject *)p)->data))
+#define SHDATA(p) ((short int *) (((PyArrayObject *)p)->data))
+#define DDATA(p) ((double *) (((PyArrayObject *)p)->data))
+#define FDATA(p) ((float *) (((PyArrayObject *)p)->data))
+#define CDATA(p) ((f2c_complex *) (((PyArrayObject *)p)->data))
+#define ZDATA(p) ((f2c_doublecomplex *) (((PyArrayObject *)p)->data))
+#define IDATA(p) ((int *) (((PyArrayObject *)p)->data))
+
/* Python wrapper for LAPACK dgeev: eigenvalues and (optionally) the left and
 * right eigenvectors of a general real matrix.  All array arguments must be
 * contiguous PyArray_DOUBLE; the dimension/workspace scalars are passed to
 * Fortran as given, unchecked.  Returns a dict echoing the scalar arguments,
 * including "info" (0 ok, <0 illegal argument, >0 QR failed to converge). */
static PyObject *
lapack_lite_dgeev(PyObject *self, PyObject *args)
{
    int lapack_lite_status__;
    char jobvl;
    char jobvr;
    int n;
    PyObject *a;
    int lda;
    PyObject *wr;    /* real parts of the eigenvalues */
    PyObject *wi;    /* imaginary parts of the eigenvalues */
    PyObject *vl;
    int ldvl;
    PyObject *vr;
    int ldvr;
    PyObject *work;
    int lwork;
    int info;
    TRY(PyArg_ParseTuple(args,"cciOiOOOiOiOii",
                         &jobvl,&jobvr,&n,&a,&lda,&wr,&wi,&vl,&ldvl,
                         &vr,&ldvr,&work,&lwork,&info));

    TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgeev"));
    TRY(check_object(wr,PyArray_DOUBLE,"wr","PyArray_DOUBLE","dgeev"));
    TRY(check_object(wi,PyArray_DOUBLE,"wi","PyArray_DOUBLE","dgeev"));
    TRY(check_object(vl,PyArray_DOUBLE,"vl","PyArray_DOUBLE","dgeev"));
    TRY(check_object(vr,PyArray_DOUBLE,"vr","PyArray_DOUBLE","dgeev"));
    TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgeev"));

    lapack_lite_status__ = 0;
    FNAME(dgeev)(&jobvl,&jobvr,&n,DDATA(a),&lda,DDATA(wr),DDATA(wi),
                 DDATA(vl),&ldvl,DDATA(vr),&ldvr,DDATA(work),&lwork,&info);

    return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","dgeev_",
                         lapack_lite_status__,"jobvl",jobvl,"jobvr",jobvr,
                         "n",n,"lda",lda,"ldvl",ldvl,"ldvr",ldvr,
                         "lwork",lwork,"info",info);
}
+
/* Python wrapper for LAPACK dsyevd: all eigenvalues and, optionally,
 * eigenvectors of a real symmetric matrix (divide and conquer). */
static PyObject *
lapack_lite_dsyevd(PyObject *self, PyObject *args)
{
    char jobz;        /* 'N': eigenvalues only; 'V': also eigenvectors */

    char uplo;        /* 'U'/'L': which triangle of a is stored */

    int n;            /* order of the matrix A, n >= 0 */

    PyObject *a;      /* (lda,n) symmetric matrix; on exit holds the
                         orthonormal eigenvectors when jobz == 'V',
                         otherwise its stored triangle is destroyed */

    int lda;          /* leading dimension of a, >= max(1,n) */

    PyObject *w;      /* (n,) output: eigenvalues in ascending order */

    PyObject *work;   /* double workspace; work[0] gets the optimal lwork */

    int lwork;        /* length of work (query with lwork == -1) */

    PyObject *iwork;  /* integer workspace */
    int liwork;       /* length of iwork */

    int info;         /* 0: ok; <0: argument -info illegal; >0: the
                         algorithm failed to converge */

    int lapack_lite_status__;

    TRY(PyArg_ParseTuple(args,"cciOiOOiOii",
                         &jobz,&uplo,&n,&a,&lda,&w,&work,&lwork,
                         &iwork,&liwork,&info));

    TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dsyevd"));
    TRY(check_object(w,PyArray_DOUBLE,"w","PyArray_DOUBLE","dsyevd"));
    TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dsyevd"));
    TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","dsyevd"));

    lapack_lite_status__ = 0;
    FNAME(dsyevd)(&jobz,&uplo,&n,DDATA(a),&lda,DDATA(w),DDATA(work),
                  &lwork,IDATA(iwork),&liwork,&info);

    return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i}","dsyevd_",
                         lapack_lite_status__,"jobz",jobz,"uplo",uplo,
                         "n",n,"lda",lda,"lwork",lwork,"liwork",liwork,"info",info);
}
+
+static PyObject *
+lapack_lite_zheevd(PyObject *self, PyObject *args)
+{
+ /* Arguments */
+ /* ========= */
+
+ char jobz;
+ /* JOBZ (input) CHARACTER*1 */
+ /* = 'N': Compute eigenvalues only; */
+ /* = 'V': Compute eigenvalues and eigenvectors. */
+
+ char uplo;
+ /* UPLO (input) CHARACTER*1 */
+ /* = 'U': Upper triangle of A is stored; */
+ /* = 'L': Lower triangle of A is stored. */
+
+ int n;
+ /* N (input) INTEGER */
+ /* The order of the matrix A. N >= 0. */
+
+ PyObject *a;
+ /* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+ /* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+ /* leading N-by-N upper triangular part of A contains the */
+ /* upper triangular part of the matrix A. If UPLO = 'L', */
+ /* the leading N-by-N lower triangular part of A contains */
+ /* the lower triangular part of the matrix A. */
+ /* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+ /* orthonormal eigenvectors of the matrix A. */
+ /* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+ /* or the upper triangle (if UPLO='U') of A, including the */
+ /* diagonal, is destroyed. */
+
+ int lda;
+ /* LDA (input) INTEGER */
+ /* The leading dimension of the array A. LDA >= max(1,N). */
+
+ PyObject *w;
+ /* W (output) DOUBLE PRECISION array, dimension (N) */
+ /* If INFO = 0, the eigenvalues in ascending order. */
+
+ PyObject *work;
+ /* WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) */
+ /* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+ int lwork;
+ /* LWORK (input) INTEGER */
+ /* The length of the array WORK. LWORK >= max(1,3*N-1). */
+ /* For optimal efficiency, LWORK >= (NB+2)*N, */
+ /* where NB is the blocksize for DSYTRD returned by ILAENV. */
+
+ PyObject *rwork;
+ /* RWORK (workspace) DOUBLE PRECISION array, dimension (max(1, 3*N-2)) */
+ int lrwork;
+
+ PyObject *iwork;
+ int liwork;
+
+ int info;
+ /* INFO (output) INTEGER */
+ /* = 0: successful exit */
+ /* < 0: if INFO = -i, the i-th argument had an illegal value */
+ /* > 0: if INFO = i, the algorithm failed to converge; i */
+ /* off-diagonal elements of an intermediate tridiagonal */
+ /* form did not converge to zero. */
+
+ int lapack_lite_status__;
+
+ TRY(PyArg_ParseTuple(args,"cciOiOOiOiOii",
+ &jobz,&uplo,&n,&a,&lda,&w,&work,&lwork,&rwork,
+ &lrwork,&iwork,&liwork,&info));
+
+ TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zheevd"));
+ TRY(check_object(w,PyArray_DOUBLE,"w","PyArray_DOUBLE","zheevd"));
+ TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zheevd"));
+ TRY(check_object(w,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zheevd"));
+ TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","zheevd"));
+
+ lapack_lite_status__ = 0;
+ FNAME(zheevd)(&jobz,&uplo,&n,ZDATA(a),&lda,DDATA(w),ZDATA(work),
+ &lwork,DDATA(rwork),&lrwork,IDATA(iwork),&liwork,&info);
+
+ return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","zheevd_",
+ lapack_lite_status__,"jobz",jobz,"uplo",uplo,"n",n,
+ "lda",lda,"lwork",lwork,"lrwork",lrwork,
+ "liwork",liwork,"info",info);
+}
+
/* Python wrapper for LAPACK dgelsd: minimum-norm least-squares solution of
 * a real system, via SVD (divide and conquer).  b is overwritten with the
 * solution, s with the singular values.  Arrays must be contiguous and of
 * the checked dtypes; scalars are forwarded unchecked.  Returns a dict of
 * the scalar arguments including "info". */
static PyObject *
lapack_lite_dgelsd(PyObject *self, PyObject *args)
{
    int lapack_lite_status__;
    int m;
    int n;
    int nrhs;
    PyObject *a;
    int lda;
    PyObject *b;
    int ldb;
    PyObject *s;       /* singular values of a, descending */
    double rcond;      /* singular values <= rcond*s[0] treated as zero */
    int rank;
    PyObject *work;
    PyObject *iwork;
    int lwork;
    int info;
    TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOi",
                         &m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
                         &rank,&work,&lwork,&iwork,&info));

    TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgelsd"));
    TRY(check_object(b,PyArray_DOUBLE,"b","PyArray_DOUBLE","dgelsd"));
    TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","dgelsd"));
    TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgelsd"));
    TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","dgelsd"));

    lapack_lite_status__ = 0;
    FNAME(dgelsd)(&m,&n,&nrhs,DDATA(a),&lda,DDATA(b),&ldb,
                  DDATA(s),&rcond,&rank,DDATA(work),&lwork,IDATA(iwork),&info);

    return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:d,s:i,s:i,s:i}","dgelsd_",
                         lapack_lite_status__,"m",m,"n",n,"nrhs",nrhs,
                         "lda",lda,"ldb",ldb,"rcond",rcond,"rank",rank,
                         "lwork",lwork,"info",info);
}
+
+static PyObject *
+lapack_lite_dgesv(PyObject *self, PyObject *args)
+{
+ int lapack_lite_status__;
+ int n;
+ int nrhs;
+ PyObject *a;
+ int lda;
+ PyObject *ipiv;
+ PyObject *b;
+ int ldb;
+ int info;
+ TRY(PyArg_ParseTuple(args,"iiOiOOii",&n,&nrhs,&a,&lda,&ipiv,&b,&ldb,&info));
+
+ TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgesv"));
+ TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","dgesv"));
+ TRY(check_object(b,PyArray_DOUBLE,"b","PyArray_DOUBLE","dgesv"));
+
+ lapack_lite_status__ = 0;
+ FNAME(dgesv)(&n,&nrhs,DDATA(a),&lda,IDATA(ipiv),DDATA(b),&ldb,&info);
+
+ return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgesv_",
+ lapack_lite_status__,"n",n,"nrhs",nrhs,"lda",lda,
+ "ldb",ldb,"info",info);
+}
+
/* Python wrapper for LAPACK dgesdd: singular value decomposition of a real
 * matrix (divide and conquer).  s receives the singular values, u/vt the
 * singular vectors per jobz.  Arrays must be contiguous and of the checked
 * dtypes.  Returns a dict of the scalar arguments including "info". */
static PyObject *
lapack_lite_dgesdd(PyObject *self, PyObject *args)
{
    int lapack_lite_status__;
    char jobz;       /* 'A'/'S'/'N': how much of u/vt to compute */
    int m;
    int n;
    PyObject *a;
    int lda;
    PyObject *s;
    PyObject *u;
    int ldu;
    PyObject *vt;
    int ldvt;
    PyObject *work;
    int lwork;
    PyObject *iwork;
    int info;
    TRY(PyArg_ParseTuple(args,"ciiOiOOiOiOiOi",
                         &jobz,&m,&n,&a,&lda,&s,&u,&ldu,&vt,&ldvt,
                         &work,&lwork,&iwork,&info));

    TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgesdd"));
    TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","dgesdd"));
    TRY(check_object(u,PyArray_DOUBLE,"u","PyArray_DOUBLE","dgesdd"));
    TRY(check_object(vt,PyArray_DOUBLE,"vt","PyArray_DOUBLE","dgesdd"));
    TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgesdd"));
    TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","dgesdd"));

    lapack_lite_status__ = 0;
    FNAME(dgesdd)(&jobz,&m,&n,DDATA(a),&lda,DDATA(s),DDATA(u),&ldu,
                  DDATA(vt),&ldvt,DDATA(work),&lwork,IDATA(iwork),&info);

    return Py_BuildValue("{s:i,s:c,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","dgesdd_",
                         lapack_lite_status__,"jobz",jobz,"m",m,"n",n,
                         "lda",lda,"ldu",ldu,"ldvt",ldvt,"lwork",lwork,
                         "info",info);
}
+
+static PyObject *
+lapack_lite_dgetrf(PyObject *self, PyObject *args)
+{
+ int lapack_lite_status__;
+ int m;
+ int n;
+ PyObject *a;
+ int lda;
+ PyObject *ipiv;
+ int info;
+ TRY(PyArg_ParseTuple(args,"iiOiOi",&m,&n,&a,&lda,&ipiv,&info));
+
+ TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgetrf"));
+ TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","dgetrf"));
+
+ lapack_lite_status__ = 0;
+ FNAME(dgetrf)(&m,&n,DDATA(a),&lda,IDATA(ipiv),&info);
+
+ return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i}","dgetrf_",lapack_lite_status__,
+ "m",m,"n",n,"lda",lda,"info",info);
+}
+
+static PyObject *
+lapack_lite_dpotrf(PyObject *self, PyObject *args)
+{
+ int lapack_lite_status__;
+ int n;
+ PyObject *a;
+ int lda;
+ char uplo;
+ int info;
+
+ TRY(PyArg_ParseTuple(args,"ciOii",&uplo,&n,&a,&lda,&info));
+ TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dpotrf"));
+
+ lapack_lite_status__ = 0;
+ FNAME(dpotrf)(&uplo,&n,DDATA(a),&lda,&info);
+
+ return Py_BuildValue("{s:i,s:i,s:i,s:i}","dpotrf_",lapack_lite_status__,
+ "n",n,"lda",lda,"info",info);
+}
+
/* Python wrapper for LAPACK zgeev: eigenvalues and (optionally) the left
 * and right eigenvectors of a general complex matrix.  Arrays must be
 * contiguous and of the checked dtypes.  Returns a dict of the scalar
 * arguments including "info". */
static PyObject *
lapack_lite_zgeev(PyObject *self, PyObject *args)
{
    int lapack_lite_status__;
    char jobvl;
    char jobvr;
    int n;
    PyObject *a;
    int lda;
    PyObject *w;      /* complex eigenvalues */
    PyObject *vl;
    int ldvl;
    PyObject *vr;
    int ldvr;
    PyObject *work;
    int lwork;
    PyObject *rwork;
    int info;
    TRY(PyArg_ParseTuple(args,"cciOiOOiOiOiOi",
                         &jobvl,&jobvr,&n,&a,&lda,&w,&vl,&ldvl,
                         &vr,&ldvr,&work,&lwork,&rwork,&info));

    TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgeev"));
    TRY(check_object(w,PyArray_CDOUBLE,"w","PyArray_CDOUBLE","zgeev"));
    TRY(check_object(vl,PyArray_CDOUBLE,"vl","PyArray_CDOUBLE","zgeev"));
    TRY(check_object(vr,PyArray_CDOUBLE,"vr","PyArray_CDOUBLE","zgeev"));
    TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgeev"));
    TRY(check_object(rwork,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zgeev"));

    lapack_lite_status__ = 0;
    FNAME(zgeev)(&jobvl,&jobvr,&n,ZDATA(a),&lda,ZDATA(w),ZDATA(vl),&ldvl,
                 ZDATA(vr),&ldvr,ZDATA(work),&lwork,DDATA(rwork),&info);

    return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","zgeev_",
                         lapack_lite_status__,"jobvl",jobvl,"jobvr",jobvr,
                         "n",n,"lda",lda,"ldvl",ldvl,"ldvr",ldvr,
                         "lwork",lwork,"info",info);
}
+
/* Python wrapper for LAPACK zgelsd: minimum-norm least-squares solution of
 * a complex system, via SVD (divide and conquer).  b is overwritten with
 * the solution, s with the (real) singular values.  Returns a dict of the
 * scalar arguments including "info". */
static PyObject *
lapack_lite_zgelsd(PyObject *self, PyObject *args)
{
    int lapack_lite_status__;
    int m;
    int n;
    int nrhs;
    PyObject *a;
    int lda;
    PyObject *b;
    int ldb;
    PyObject *s;       /* singular values of a, descending */
    double rcond;      /* singular values <= rcond*s[0] treated as zero */
    int rank;
    PyObject *work;
    int lwork;
    PyObject *rwork;
    PyObject *iwork;
    int info;
    TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOOi",
                         &m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
                         &rank,&work,&lwork,&rwork,&iwork,&info));

    TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgelsd"));
    TRY(check_object(b,PyArray_CDOUBLE,"b","PyArray_CDOUBLE","zgelsd"));
    TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","zgelsd"));
    TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgelsd"));
    TRY(check_object(rwork,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zgelsd"));
    TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","zgelsd"));

    lapack_lite_status__ = 0;
    FNAME(zgelsd)(&m,&n,&nrhs,ZDATA(a),&lda,ZDATA(b),&ldb,DDATA(s),&rcond,
                  &rank,ZDATA(work),&lwork,DDATA(rwork),IDATA(iwork),&info);

    return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgelsd_",
                         lapack_lite_status__,"m",m,"n",n,"nrhs",nrhs,"lda",lda,
                         "ldb",ldb,"rank",rank,"lwork",lwork,"info",info);
}
+
+static PyObject *
+lapack_lite_zgesv(PyObject *self, PyObject *args)
+{
+ int lapack_lite_status__;
+ int n;
+ int nrhs;
+ PyObject *a;
+ int lda;
+ PyObject *ipiv;
+ PyObject *b;
+ int ldb;
+ int info;
+ TRY(PyArg_ParseTuple(args,"iiOiOOii",&n,&nrhs,&a,&lda,&ipiv,&b,&ldb,&info));
+
+ TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgesv"));
+ TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","zgesv"));
+ TRY(check_object(b,PyArray_CDOUBLE,"b","PyArray_CDOUBLE","zgesv"));
+
+ lapack_lite_status__ = 0;
+ FNAME(zgesv)(&n,&nrhs,ZDATA(a),&lda,IDATA(ipiv),ZDATA(b),&ldb,&info);
+
+ return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgesv_",
+ lapack_lite_status__,"n",n,"nrhs",nrhs,"lda",lda,
+ "ldb",ldb,"info",info);
+}
+
/* Python wrapper for LAPACK zgesdd: singular value decomposition of a
 * complex matrix (divide and conquer).  s receives the (real) singular
 * values, u/vt the singular vectors per jobz.  Returns a dict of the
 * scalar arguments including "info". */
static PyObject *
lapack_lite_zgesdd(PyObject *self, PyObject *args)
{
    int lapack_lite_status__;
    char jobz;       /* 'A'/'S'/'N': how much of u/vt to compute */
    int m;
    int n;
    PyObject *a;
    int lda;
    PyObject *s;
    PyObject *u;
    int ldu;
    PyObject *vt;
    int ldvt;
    PyObject *work;
    int lwork;
    PyObject *rwork;
    PyObject *iwork;
    int info;
    TRY(PyArg_ParseTuple(args,"ciiOiOOiOiOiOOi",
                         &jobz,&m,&n,&a,&lda,&s,&u,&ldu,
                         &vt,&ldvt,&work,&lwork,&rwork,&iwork,&info));

    TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgesdd"));
    TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","zgesdd"));
    TRY(check_object(u,PyArray_CDOUBLE,"u","PyArray_CDOUBLE","zgesdd"));
    TRY(check_object(vt,PyArray_CDOUBLE,"vt","PyArray_CDOUBLE","zgesdd"));
    TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgesdd"));
    TRY(check_object(rwork,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zgesdd"));
    TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","zgesdd"));

    lapack_lite_status__ = 0;
    FNAME(zgesdd)(&jobz,&m,&n,ZDATA(a),&lda,DDATA(s),ZDATA(u),&ldu,
                  ZDATA(vt),&ldvt,ZDATA(work),&lwork,DDATA(rwork),
                  IDATA(iwork),&info);

    return Py_BuildValue("{s:i,s:c,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgesdd_",
                         lapack_lite_status__,"jobz",jobz,"m",m,"n",n,
                         "lda",lda,"ldu",ldu,"ldvt",ldvt,"lwork",lwork,
                         "info",info);
}
+
+static PyObject *
+lapack_lite_zgetrf(PyObject *self, PyObject *args)
+{
+ int lapack_lite_status__;
+ int m;
+ int n;
+ PyObject *a;
+ int lda;
+ PyObject *ipiv;
+ int info;
+ TRY(PyArg_ParseTuple(args,"iiOiOi",&m,&n,&a,&lda,&ipiv,&info));
+
+ TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgetrf"));
+ TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","zgetrf"));
+
+ lapack_lite_status__ = 0;
+ FNAME(zgetrf)(&m,&n,ZDATA(a),&lda,IDATA(ipiv),&info);
+
+ return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i}","zgetrf_",
+ lapack_lite_status__,"m",m,"n",n,"lda",lda,"info",info);
+}
+
+static PyObject *
+lapack_lite_zpotrf(PyObject *self, PyObject *args)
+{
+ int lapack_lite_status__;
+ int n;
+ PyObject *a;
+ int lda;
+ char uplo;
+ int info;
+
+ TRY(PyArg_ParseTuple(args,"ciOii",&uplo,&n,&a,&lda,&info));
+ TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zpotrf"));
+ lapack_lite_status__ = 0;
+ FNAME(zpotrf)(&uplo,&n,ZDATA(a),&lda,&info);
+
+ return Py_BuildValue("{s:i,s:i,s:i,s:i}","zpotrf_",
+ lapack_lite_status__,"n",n,"lda",lda,"info",info);
+}
+
/* Method table: one METH_VARARGS entry per wrapped LAPACK routine.
 * lameth(name) expands to {"name", lapack_lite_name, METH_VARARGS, NULL}. */
#define STR(x) #x
#define lameth(name) {STR(name), lapack_lite_##name, METH_VARARGS, NULL}
static struct PyMethodDef lapack_lite_module_methods[] = {
    lameth(zheevd),
    lameth(dsyevd),
    lameth(dgeev),
    lameth(dgelsd),
    lameth(dgesv),
    lameth(dgesdd),
    lameth(dgetrf),
    lameth(dpotrf),
    lameth(zgeev),
    lameth(zgelsd),
    lameth(zgesv),
    lameth(zgesdd),
    lameth(zgetrf),
    lameth(zpotrf),
    { NULL,NULL,0}          /* sentinel */
};

static char lapack_lite_module_documentation[] = "";

/* Python 2 module entry point: register the methods, initialize the
 * numpy C API (import_array), and publish the LapackError exception in
 * the module namespace. */
DL_EXPORT(void)
initlapack_lite(void)
{
    PyObject *m,*d;
    m = Py_InitModule4("lapack_lite", lapack_lite_module_methods,
                       lapack_lite_module_documentation,
                       (PyObject*)NULL,PYTHON_API_VERSION);
    import_array();
    d = PyModule_GetDict(m);
    LapackError = PyErr_NewException("lapack_lite.LapackError", NULL, NULL);
    PyDict_SetItemString(d, "LapackError", LapackError);

    if (PyErr_Occurred())
        Py_FatalError("can't initialize module lapack_lite");
}
diff --git a/numpy/corelinalg/linalg.py b/numpy/corelinalg/linalg.py
new file mode 100644
index 000000000..9629b60f2
--- /dev/null
+++ b/numpy/corelinalg/linalg.py
@@ -0,0 +1,556 @@
+"""Lite version of scipy.linalg.
+"""
+# This module is a lite version of LinAlg.py module which contains
+# high-level Python interface to the LAPACK library. The lite version
+# only accesses the following LAPACK functions: dgesv, zgesv, dgeev,
+# zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, dpotrf.
+
+__all__ = ['LinAlgError', 'solve_linear_equations', 'solve',
+ 'inverse', 'inv', 'cholesky_decomposition', 'cholesky', 'eigenvalues',
+ 'eigvals', 'Heigenvalues', 'eigvalsh', 'generalized_inverse', 'pinv',
+ 'determinant', 'det', 'singular_value_decomposition', 'svd',
+ 'eigenvectors', 'eig', 'Heigenvectors', 'eigh','lstsq', 'linear_least_squares'
+ ]
+
+from scipy.base import *
+import lapack_lite
+import math
+
+# Error object
class LinAlgError(Exception):
    """Raised by the routines in this module when a computation fails
    (singular matrix, non-convergence, incompatible dimensions, ...)."""
    pass
+
+# Helper routines
+_lapack_type = {'f': 0, 'd': 1, 'F': 2, 'D': 3}
+_lapack_letter = ['s', 'd', 'c', 'z']
+_array_kind = {'i':0, 'l': 0, 'f': 0, 'd': 0, 'F': 1, 'D': 1}
+_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
+_array_type = [['f', 'd'], ['F', 'D']]
+
+def _commonType(*arrays):
+ kind = 0
+# precision = 0
+# force higher precision in lite version
+ precision = 1
+ for a in arrays:
+ t = a.dtypechar
+ kind = max(kind, _array_kind[t])
+ precision = max(precision, _array_precision[t])
+ return _array_type[kind][precision]
+
def _castCopyAndTranspose(type, *arrays):
    """Transpose each array and cast the copy to typecode `type`.

    Returns a single array for one input, otherwise a tuple of arrays.
    """
    converted = [transpose(a).astype(type) for a in arrays]
    if len(converted) == 1:
        return converted[0]
    return tuple(converted)
+
# _fastCopyAndTranspose is an optimized version of _castCopyAndTranspose.
# It assumes the input is 2D (as all the calls in here are).

_fastCT = fastCopyAndTranspose

def _fastCopyAndTranspose(type, *arrays):
    """Copy-transpose each array to typecode `type`.

    Skips the astype() call when an array already has that typecode.
    Returns a single array for one input, otherwise a tuple.
    """
    converted = []
    for a in arrays:
        if a.dtypechar == type:
            converted.append(_fastCT(a))
        else:
            converted.append(_fastCT(a.astype(type)))
    if len(converted) == 1:
        return converted[0]
    return tuple(converted)
+
+def _assertRank2(*arrays):
+ for a in arrays:
+ if len(a.shape) != 2:
+ raise LinAlgError, 'Array must be two-dimensional'
+
+def _assertSquareness(*arrays):
+ for a in arrays:
+ if max(a.shape) != min(a.shape):
+ raise LinAlgError, 'Array must be square'
+
+
+# Linear equations
+
def solve_linear_equations(a, b):
    """Solve the linear system a x = b.

    b may be a vector (1-D) or a matrix of right-hand sides (2-D); the
    result has the same number of dimensions as b.  Raises LinAlgError
    for incompatible shapes or a singular a.
    """
    one_eq = len(b.shape) == 1
    if one_eq:
        # Promote a vector RHS to a one-column matrix for LAPACK.
        b = b[:, NewAxis]
    _assertRank2(a, b)
    _assertSquareness(a)
    n_eq = a.shape[0]
    n_rhs = b.shape[1]
    if n_eq != b.shape[0]:
        raise LinAlgError, 'Incompatible dimensions'
    t =_commonType(a, b)
#    lapack_routine = _findLapackRoutine('gesv', t)
    if _array_kind[t] == 1: # Complex routines take different arguments
        lapack_routine = lapack_lite.zgesv
    else:
        lapack_routine = lapack_lite.dgesv
    # LAPACK is column-major: copy-transpose both operands first.
    a, b = _fastCopyAndTranspose(t, a, b)
    pivots = zeros(n_eq, 'i')
    results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
    if results['info'] > 0:
        raise LinAlgError, 'Singular matrix'
    if one_eq:
        return ravel(b) # I see no need to copy here
    else:
        return transpose(b) # no need to copy
+
+
+# Matrix inversion
+
def inverse(a):
    """Return the inverse of the square matrix a.

    Computed by solving a x = I; raises LinAlgError if a is singular.
    """
    n = a.shape[0]
    return solve_linear_equations(a, identity(n))
+
+
+# Cholesky decomposition
+
def cholesky_decomposition(a):
    """Return the lower-triangular Cholesky factor L of a, with a = L L^H.

    a must be square and positive definite; raises LinAlgError otherwise.
    """
    _assertRank2(a)
    _assertSquareness(a)
    t =_commonType(a)
    a = _castCopyAndTranspose(t, a)
    m = a.shape[0]
    n = a.shape[1]
    if _array_kind[t] == 1:
        lapack_routine = lapack_lite.zpotrf
    else:
        lapack_routine = lapack_lite.dpotrf
    # 'L' on the transposed (column-major) copy corresponds to the upper
    # triangle of the original; transpose(triu(...)) maps it back to L.
    results = lapack_routine('L', n, a, m, 0)
    if results['info'] > 0:
        raise LinAlgError, 'Matrix is not positive definite - Cholesky decomposition cannot be computed'
    return transpose(triu(a,k=0)).copy()
+
+
+# Eigenvalues
+
def eigenvalues(a):
    """Return the eigenvalues of the square matrix a.

    The result is real when all imaginary parts are exactly zero,
    complex otherwise.  Raises LinAlgError on non-convergence.
    """
    _assertRank2(a)
    _assertSquareness(a)
    t =_commonType(a)
    real_t = _array_type[0][_array_precision[t]]
    a = _fastCopyAndTranspose(t, a)
    n = a.shape[0]
    dummy = zeros((1,), t)
    if _array_kind[t] == 1: # Complex routines take different arguments
        lapack_routine = lapack_lite.zgeev
        w = zeros((n,), t)
        rwork = zeros((n,),real_t)
        # First call with lwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine('N', 'N', n, a, n, w,
                                 dummy, 1, dummy, 1, work, -1, rwork, 0)
        lwork = int(abs(work[0]))
        work = zeros((lwork,), t)
        results = lapack_routine('N', 'N', n, a, n, w,
                                 dummy, 1, dummy, 1, work, lwork, rwork, 0)
    else:
        lapack_routine = lapack_lite.dgeev
        wr = zeros((n,), t)
        wi = zeros((n,), t)
        # First call with lwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine('N', 'N', n, a, n, wr, wi,
                                 dummy, 1, dummy, 1, work, -1, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine('N', 'N', n, a, n, wr, wi,
                                 dummy, 1, dummy, 1, work, lwork, 0)
        # Collapse to a real result only if no eigenvalue has an
        # imaginary part.
        if logical_and.reduce(equal(wi, 0.)):
            w = wr
        else:
            w = wr+1j*wi
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    return w
+
+
def Heigenvalues(a, UPLO='L'):
    """Return the (real) eigenvalues of the Hermitian/symmetric matrix a.

    Only the triangle selected by UPLO ('L' or 'U') is referenced.
    Raises LinAlgError on non-convergence.
    """
    _assertRank2(a)
    _assertSquareness(a)
    t =_commonType(a)
    real_t = _array_type[0][_array_precision[t]]
    a = _castCopyAndTranspose(t, a)
    n = a.shape[0]
    liwork = 5*n+3
    iwork = zeros((liwork,),'i')
    if _array_kind[t] == 1: # Complex routines take different arguments
        lapack_routine = lapack_lite.zheevd
        w = zeros((n,), real_t)
        # First call with lwork == lrwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,), t)
        lrwork = 1
        rwork = zeros((lrwork,),real_t)
        results = lapack_routine('N', UPLO, n, a, n,w, work, -1, rwork, -1, iwork, liwork, 0)
        lwork = int(abs(work[0]))
        work = zeros((lwork,), t)
        lrwork = int(rwork[0])
        rwork = zeros((lrwork,),real_t)
        results = lapack_routine('N', UPLO, n, a, n,w, work, lwork, rwork, lrwork, iwork, liwork, 0)
    else:
        lapack_routine = lapack_lite.dsyevd
        w = zeros((n,), t)
        # First call with lwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine('N', UPLO, n, a, n,w, work, -1, iwork, liwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine('N', UPLO, n, a, n,w, work, lwork, iwork, liwork, 0)
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    return w
+
def _convertarray(a):
    """Promote a to double precision ('D' if complex, else 'd') and return
    a copy-transposed version together with its typecode character."""
    if issubclass(a.dtype, complexfloating):
        target = 'D'
    else:
        target = 'd'
    if a.dtypechar == target:
        a = _fastCT(a)
    else:
        a = _fastCT(a.astype(target))
    return a, a.dtypechar
+
+# Eigenvectors
+
def eig(a):
    """eig(a) returns u,v where u is the eigenvalues and
v is a matrix of eigenvectors with vector v[:,i] corresponds to
eigenvalue u[i]. Satisfies the equation dot(a, v[:,i]) = u[i]*v[:,i]
"""
    a = asarray(a)
    _assertRank2(a)
    _assertSquareness(a)
    a,t = _convertarray(a) # convert to float_ or complex_ type
    wrap = a.__array_wrap__
    real_t = 'd'
    n = a.shape[0]
    dummy = zeros((1,), t)
    if t == 'D': # Complex routines take different arguments
        lapack_routine = lapack_lite.zgeev
        w = zeros((n,), t)
        v = zeros((n,n), t)
        # First call with lwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,),t)
        rwork = zeros((2*n,),real_t)
        results = lapack_routine('N', 'V', n, a, n, w,
                                 dummy, 1, v, n, work, -1, rwork, 0)
        lwork = int(abs(work[0]))
        work = zeros((lwork,),t)
        results = lapack_routine('N', 'V', n, a, n, w,
                                 dummy, 1, v, n, work, lwork, rwork, 0)
    else:
        lapack_routine = lapack_lite.dgeev
        wr = zeros((n,), t)
        wi = zeros((n,), t)
        vr = zeros((n,n), t)
        # First call with lwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,),t)
        results = lapack_routine('N', 'V', n, a, n, wr, wi,
                                 dummy, 1, vr, n, work, -1, 0)
        lwork = int(work[0])
        work = zeros((lwork,),t)
        results = lapack_routine('N', 'V', n, a, n, wr, wi,
                                 dummy, 1, vr, n, work, lwork, 0)
        if logical_and.reduce(equal(wi, 0.)):
            # All eigenvalues real: use the real results directly.
            w = wr
            v = vr
        else:
            w = wr+1j*wi
            v = array(vr,Complex)
            ind = nonzero(
                          equal(
                              equal(wi,0.0) # true for real e-vals
                                       ,0)          # true for complex e-vals
                                 )                  # indices of complex e-vals
            # dgeev packs each conjugate pair of eigenvectors as
            # (real part, imaginary part) in consecutive rows; unpack.
            # NOTE: `/` is integer division here (Python 2 semantics).
            for i in range(len(ind)/2):
                v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
                v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    return w,wrap(v.transpose())
+
+
def eigh(a, UPLO='L'):
    """Return (w, v): eigenvalues and eigenvectors of the Hermitian or
    symmetric matrix a.

    w is real and ascending; column v[:,i] belongs to w[i].  Only the
    triangle selected by UPLO is referenced.  Raises LinAlgError on
    non-convergence.
    """
    _assertRank2(a)
    _assertSquareness(a)
    t =_commonType(a)
    real_t = _array_type[0][_array_precision[t]]
    a = _castCopyAndTranspose(t, a)
    wrap = a.__array_wrap__
    n = a.shape[0]
    liwork = 5*n+3
    iwork = zeros((liwork,),'i')
    if _array_kind[t] == 1: # Complex routines take different arguments
        lapack_routine = lapack_lite.zheevd
        w = zeros((n,), real_t)
        # First call with lwork == lrwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,), t)
        lrwork = 1
        rwork = zeros((lrwork,),real_t)
        results = lapack_routine('V', UPLO, n, a, n,w, work, -1, rwork, -1, iwork, liwork, 0)
        lwork = int(abs(work[0]))
        work = zeros((lwork,), t)
        lrwork = int(rwork[0])
        rwork = zeros((lrwork,),real_t)
        results = lapack_routine('V', UPLO, n, a, n,w, work, lwork, rwork, lrwork, iwork, liwork, 0)
    else:
        lapack_routine = lapack_lite.dsyevd
        w = zeros((n,), t)
        # First call with lwork == -1 is a workspace-size query.
        lwork = 1
        work = zeros((lwork,),t)
        results = lapack_routine('V', UPLO, n, a, n,w, work, -1, iwork, liwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,),t)
        results = lapack_routine('V', UPLO, n, a, n,w, work, lwork, iwork, liwork, 0)
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    # a now holds the eigenvectors (LAPACK wrote them in place).
    return w,wrap(a.transpose())
+
+
+# Singular value decomposition
+
+def svd(a, full_matrices = 1):
+ _assertRank2(a)
+ n = a.shape[1]
+ m = a.shape[0]
+ t =_commonType(a)
+ real_t = _array_type[0][_array_precision[t]]
+ a = _fastCopyAndTranspose(t, a)
+ wrap = a.__array_wrap__
+ if full_matrices:
+ nu = m
+ nvt = n
+ option = 'A'
+ else:
+ nu = min(n,m)
+ nvt = min(n,m)
+ option = 'S'
+ s = zeros((min(n,m),), real_t)
+ u = zeros((nu, m), t)
+ vt = zeros((n, nvt), t)
+ iwork = zeros((8*min(m,n),), 'i')
+ if _array_kind[t] == 1: # Complex routines take different arguments
+ lapack_routine = lapack_lite.zgesdd
+ rwork = zeros((5*min(m,n)*min(m,n) + 5*min(m,n),), real_t)
+ lwork = 1
+ work = zeros((lwork,), t)
+ results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
+ work, -1, rwork, iwork, 0)
+ lwork = int(abs(work[0]))
+ work = zeros((lwork,), t)
+ results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
+ work, lwork, rwork, iwork, 0)
+ else:
+ lapack_routine = lapack_lite.dgesdd
+ lwork = 1
+ work = zeros((lwork,), t)
+ results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
+ work, -1, iwork, 0)
+ lwork = int(work[0])
+ work = zeros((lwork,), t)
+ results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
+ work, lwork, iwork, 0)
+ if results['info'] > 0:
+ raise LinAlgError, 'SVD did not converge'
+ return wrap(transpose(u)), s, \
+ wrap(transpose(vt)) # why copy here?
+
+
+# Generalized inverse
+
+def generalized_inverse(a, rcond = 1.e-10):
+ a = array(a, copy=0)
+ if a.dtypechar in typecodes['Complex']:
+ a = conjugate(a)
+ u, s, vt = svd(a, 0)
+ m = u.shape[0]
+ n = vt.shape[1]
+ cutoff = rcond*maximum.reduce(s)
+ for i in range(min(n,m)):
+ if s[i] > cutoff:
+ s[i] = 1./s[i]
+ else:
+ s[i] = 0.;
+ wrap = a.__array_wrap__
+ return wrap(dot(transpose(vt),
+ multiply(s[:, NewAxis],transpose(u))))
+
+# Determinant
+
+def determinant(a):
+ _assertRank2(a)
+ _assertSquareness(a)
+ t =_commonType(a)
+ a = _fastCopyAndTranspose(t, a)
+ n = a.shape[0]
+ if _array_kind[t] == 1:
+ lapack_routine = lapack_lite.zgetrf
+ else:
+ lapack_routine = lapack_lite.dgetrf
+ pivots = zeros((n,), 'i')
+ results = lapack_routine(n, n, a, n, pivots, 0)
+ sign = add.reduce(not_equal(pivots,
+ arrayrange(1, n+1))) % 2
+ return (1.-2.*sign)*multiply.reduce(diagonal(a),axis=-1)
+
+# Linear Least Squares
+
+def linear_least_squares(a, b, rcond=1.e-10):
+ """returns x,resids,rank,s
+where x minimizes 2-norm(|b - Ax|)
+ resids is the sum square residuals
+ rank is the rank of A
+ s is the rank of the singular values of A in descending order
+
+If b is a matrix then x is also a matrix with corresponding columns.
+If the rank of A is less than the number of columns of A or greater than
+the number of rows, then residuals will be returned as an empty array
+otherwise resids = sum((b-dot(A,x)**2).
+Singular values less than s[0]*rcond are treated as zero.
+"""
+ a = asarray(a)
+ b = asarray(b)
+ one_eq = len(b.shape) == 1
+ if one_eq:
+ b = b[:, NewAxis]
+ _assertRank2(a, b)
+ m = a.shape[0]
+ n = a.shape[1]
+ n_rhs = b.shape[1]
+ ldb = max(n,m)
+ if m != b.shape[0]:
+ raise LinAlgError, 'Incompatible dimensions'
+ t =_commonType(a, b)
+ real_t = _array_type[0][_array_precision[t]]
+ bstar = zeros((ldb,n_rhs),t)
+ bstar[:b.shape[0],:n_rhs] = b.copy()
+ a,bstar = _castCopyAndTranspose(t, a, bstar)
+ s = zeros((min(m,n),),real_t)
+ nlvl = max( 0, int( math.log( float(min( m,n ))/2. ) ) + 1 )
+ iwork = zeros((3*min(m,n)*nlvl+11*min(m,n),), 'i')
+ if _array_kind[t] == 1: # Complex routines take different arguments
+ lapack_routine = lapack_lite.zgelsd
+ lwork = 1
+ rwork = zeros((lwork,), real_t)
+ work = zeros((lwork,),t)
+ results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
+ 0,work,-1,rwork,iwork,0 )
+ lwork = int(abs(work[0]))
+ rwork = zeros((lwork,),real_t)
+ a_real = zeros((m,n),real_t)
+ bstar_real = zeros((ldb,n_rhs,),real_t)
+ results = lapack_lite.dgelsd( m, n, n_rhs, a_real, m, bstar_real,ldb , s, rcond,
+ 0,rwork,-1,iwork,0 )
+ lrwork = int(rwork[0])
+ work = zeros((lwork,), t)
+ rwork = zeros((lrwork,), real_t)
+ results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
+ 0,work,lwork,rwork,iwork,0 )
+ else:
+ lapack_routine = lapack_lite.dgelsd
+ lwork = 1
+ work = zeros((lwork,), t)
+ results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
+ 0,work,-1,iwork,0 )
+ lwork = int(work[0])
+ work = zeros((lwork,), t)
+ results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
+ 0,work,lwork,iwork,0 )
+ if results['info'] > 0:
+ raise LinAlgError, 'SVD did not converge in Linear Least Squares'
+ resids = array([],t)
+ if one_eq:
+ x = ravel(bstar)[:n].copy()
+ if (results['rank']==n) and (m>n):
+ resids = array([sum((ravel(bstar)[n:])**2)])
+ else:
+ x = transpose(bstar)[:n,:].copy()
+ if (results['rank']==n) and (m>n):
+ resids = sum((transpose(bstar)[n:,:])**2).copy()
+ return x,resids,results['rank'],s[:min(n,m)].copy()
+
+def singular_value_decomposition(A, full_matrices=0):
+ return svd(A, 0)
+
+def eigenvectors(A):
+ w, v = eig(A)
+ return w, transpose(v)
+
+def Heigenvectors(A):
+ w, v = eigh(A)
+ return w, transpose(v)
+
+inv = inverse
+solve = solve_linear_equations
+cholesky = cholesky_decomposition
+eigvals = eigenvalues
+eigvalsh = Heigenvalues
+pinv = generalized_inverse
+det = determinant
+lstsq = linear_least_squares
+
+if __name__ == '__main__':
+ def test(a, b):
+
+ print "All numbers printed should be (almost) zero:"
+
+ x = solve_linear_equations(a, b)
+ check = b - matrixmultiply(a, x)
+ print check
+
+
+ a_inv = inverse(a)
+ check = matrixmultiply(a, a_inv)-identity(a.shape[0])
+ print check
+
+
+ ev = eigenvalues(a)
+
+ evalues, evectors = eig(a)
+ check = ev-evalues
+ print check
+
+ evectors = transpose(evectors)
+ check = matrixmultiply(a, evectors)-evectors*evalues
+ print check
+
+
+ u, s, vt = svd(a,0)
+ check = a - matrixmultiply(u*s, vt)
+ print check
+
+
+ a_ginv = generalized_inverse(a)
+ check = matrixmultiply(a, a_ginv)-identity(a.shape[0])
+ print check
+
+
+ det = determinant(a)
+ check = det-multiply.reduce(evalues)
+ print check
+
+ x, residuals, rank, sv = linear_least_squares(a, b)
+ check = b - matrixmultiply(a, x)
+ print check
+ print rank-a.shape[0]
+ print sv-s
+
+ a = array([[1.,2.], [3.,4.]])
+ b = array([2., 1.])
+ test(a, b)
+
+ a = a+0j
+ b = b+0j
+ test(a, b)
+
+
diff --git a/numpy/corelinalg/setup.py b/numpy/corelinalg/setup.py
new file mode 100644
index 000000000..b1a8c6c67
--- /dev/null
+++ b/numpy/corelinalg/setup.py
@@ -0,0 +1,31 @@
+
+from os.path import join
+
+def configuration(parent_package='',top_path=None):
+ from scipy.distutils.misc_util import Configuration
+ from scipy.distutils.system_info import get_info
+ config = Configuration('corelinalg',parent_package,top_path)
+
+ # Configure lapack_lite
+ lapack_info = get_info('lapack_opt',0)
+ def get_lapack_lite_sources(ext, build_dir):
+ if not lapack_info:
+ print "### Warning: Using unoptimized lapack ###"
+ return ext.depends[:-1]
+ else:
+ return ext.depends[:1]
+
+ config.add_extension('lapack_lite',
+ sources = [get_lapack_lite_sources],
+ depends= ['lapack_litemodule.c',
+ 'zlapack_lite.c', 'dlapack_lite.c',
+ 'blas_lite.c', 'dlamch.c',
+ 'f2c_lite.c','f2c.h'],
+ extra_info = lapack_info
+ )
+
+ return config
+
+if __name__ == '__main__':
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/numpy/corelinalg/zlapack_lite.c b/numpy/corelinalg/zlapack_lite.c
new file mode 100644
index 000000000..4549f68b5
--- /dev/null
+++ b/numpy/corelinalg/zlapack_lite.c
@@ -0,0 +1,26018 @@
+/*
+NOTE: This is generated code. Look in Misc/lapack_lite for information on
+ remaking this file.
+*/
+#include "f2c.h"
+
+#ifdef HAVE_CONFIG
+#include "config.h"
+#else
+extern doublereal dlamch_(char *);
+#define EPSILON dlamch_("Epsilon")
+#define SAFEMINIMUM dlamch_("Safe minimum")
+#define PRECISION dlamch_("Precision")
+#define BASE dlamch_("Base")
+#endif
+
+extern doublereal dlapy2_(doublereal *x, doublereal *y);
+
+
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublecomplex c_b59 = {0.,0.};
+static doublecomplex c_b60 = {1.,0.};
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__0 = 0;
+static integer c__8 = 8;
+static integer c__4 = 4;
+static integer c__65 = 65;
+static integer c__6 = 6;
+static integer c__9 = 9;
+static doublereal c_b324 = 0.;
+static doublereal c_b1015 = 1.;
+static integer c__15 = 15;
+static logical c_false = FALSE_;
+static doublereal c_b1294 = -1.;
+static doublereal c_b2210 = .5;
+
+/* Subroutine */ int zdrot_(integer *n, doublecomplex *cx, integer *incx,
+ doublecomplex *cy, integer *incy, doublereal *c__, doublereal *s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static doublecomplex ctemp;
+
+
+/*
+ applies a plane rotation, where the cos and sin (c and s) are real
+ and the vectors cx and cy are complex.
+ jack dongarra, linpack, 3/11/78.
+
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if ((*incx == 1 && *incy == 1)) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i;
+ i__3 = iy;
+ z__3.r = *s * cy[i__3].r, z__3.i = *s * cy[i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i;
+ i__4 = ix;
+ z__3.r = *s * cx[i__4].r, z__3.i = *s * cx[i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = ix;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i;
+ i__3 = i__;
+ z__3.r = *s * cy[i__3].r, z__3.i = *s * cy[i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i;
+ i__4 = i__;
+ z__3.r = *s * cx[i__4].r, z__3.i = *s * cx[i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = i__;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* zdrot_ */
+
+/* Subroutine */ int zgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, doublereal *scale, integer *m, doublecomplex *v,
+ integer *ldv, integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ static integer i__, k;
+ static doublereal s;
+ static integer ii;
+ extern logical lsame_(char *, char *);
+ static logical leftv;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), xerbla_(char *, integer *),
+ zdscal_(integer *, doublereal *, doublecomplex *, integer *);
+ static logical rightv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZGEBAK forms the right or left eigenvectors of a complex general
+ matrix by backward transformation on the computed eigenvectors of the
+ balanced matrix output by ZGEBAL.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the type of backward transformation required:
+ = 'N', do nothing, return immediately;
+ = 'P', do backward transformation for permutation only;
+ = 'S', do backward transformation for scaling only;
+ = 'B', do backward transformations for both permutation and
+ scaling.
+ JOB must be the same as the argument JOB supplied to ZGEBAL.
+
+ SIDE (input) CHARACTER*1
+ = 'R': V contains right eigenvectors;
+ = 'L': V contains left eigenvectors.
+
+ N (input) INTEGER
+ The number of rows of the matrix V. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ The integers ILO and IHI determined by ZGEBAL.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ SCALE (input) DOUBLE PRECISION array, dimension (N)
+ Details of the permutation and scaling factors, as returned
+ by ZGEBAL.
+
+ M (input) INTEGER
+ The number of columns of the matrix V. M >= 0.
+
+ V (input/output) COMPLEX*16 array, dimension (LDV,M)
+ On entry, the matrix of right or left eigenvectors to be
+ transformed, as returned by ZHSEIN or ZTREVC.
+ On exit, V is overwritten by the transformed eigenvectors.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V. LDV >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Decode and Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1 * 1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
+ && ! lsame_(job, "B"))) {
+ *info = -1;
+ } else if ((! rightv && ! leftv)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if (lsame_(job, "S") || lsame_(job, "B")) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ zdscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1. / scale[i__];
+ zdscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/*
+ Backward permutation
+
+ For I = ILO-1 step -1 until 1,
+ IHI+1 step 1 until N do --
+*/
+
+L30:
+ if (lsame_(job, "P") || lsame_(job, "B")) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if ((i__ >= *ilo && i__ <= *ihi)) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if ((i__ >= *ilo && i__ <= *ihi)) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGEBAK */
+
+} /* zgebak_ */
+
+/* Subroutine */ int zgebal_(char *job, integer *n, doublecomplex *a, integer
+ *lda, integer *ilo, integer *ihi, doublereal *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), z_abs(doublecomplex *);
+
+ /* Local variables */
+ static doublereal c__, f, g;
+ static integer i__, j, k, l, m;
+ static doublereal r__, s, ca, ra;
+ static integer ica, ira, iexc;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ static doublereal sfmin1, sfmin2, sfmax1, sfmax2;
+
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ static logical noconv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZGEBAL balances a general complex matrix A. This involves, first,
+ permuting A by a similarity transformation to isolate eigenvalues
+ in the first 1 to ILO-1 and last IHI+1 to N elements on the
+ diagonal; and second, applying a diagonal similarity transformation
+ to rows and columns ILO to IHI to make the rows and columns as
+ close in norm as possible. Both steps are optional.
+
+ Balancing may reduce the 1-norm of the matrix, and improve the
+ accuracy of the computed eigenvalues and/or eigenvectors.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the operations to be performed on A:
+ = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0
+ for i = 1,...,N;
+ = 'P': permute only;
+ = 'S': scale only;
+ = 'B': both permute and scale.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the input matrix A.
+ On exit, A is overwritten by the balanced matrix.
+ If JOB = 'N', A is not referenced.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ ILO (output) INTEGER
+ IHI (output) INTEGER
+ ILO and IHI are set to integers such that on exit
+ A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N.
+ If JOB = 'N' or 'S', ILO = 1 and IHI = N.
+
+ SCALE (output) DOUBLE PRECISION array, dimension (N)
+ Details of the permutations and scaling factors applied to
+ A. If P(j) is the index of the row and column interchanged
+ with row and column j and D(j) is the scaling factor
+ applied to row and column j, then
+ SCALE(j) = P(j) for j = 1,...,ILO-1
+ = D(j) for j = ILO,...,IHI
+ = P(j) for j = IHI+1,...,N.
+ The order in which the interchanges are made is N to IHI+1,
+ then 1 to ILO-1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The permutations consist of row and column interchanges which put
+ the matrix in the form
+
+ ( T1 X Y )
+ P A P = ( 0 B Z )
+ ( 0 0 T2 )
+
+ where T1 and T2 are upper triangular matrices whose eigenvalues lie
+ along the diagonal. The column indices ILO and IHI mark the starting
+ and ending columns of the submatrix B. Balancing consists of applying
+ a diagonal similarity transformation inv(D) * B * D to make the
+ 1-norms of each row of B and its corresponding column nearly equal.
+ The output matrix is
+
+ ( T1 X*D Y )
+ ( 0 inv(D)*B*D inv(D)*Z ).
+ ( 0 0 T2 )
+
+ Information about the permutations P and the diagonal matrix D is
+ returned in the vector SCALE.
+
+ This subroutine is based on the EISPACK routine CBAL.
+
+ Modified by Tzu-Yi Chen, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --scale;
+
+ /* Function Body */
+ *info = 0;
+ if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
+ && ! lsame_(job, "B"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEBAL", &i__1);
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+
+ if (*n == 0) {
+ goto L210;
+ }
+
+ if (lsame_(job, "N")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scale[i__] = 1.;
+/* L10: */
+ }
+ goto L210;
+ }
+
+ if (lsame_(job, "S")) {
+ goto L120;
+ }
+
+/* Permutation to isolate eigenvalues if possible */
+
+ goto L50;
+
+/* Row and column exchange. */
+
+L20:
+ scale[m] = (doublereal) j;
+ if (j == m) {
+ goto L30;
+ }
+
+ zswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ i__1 = *n - k + 1;
+ zswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+ switch (iexc) {
+ case 1: goto L40;
+ case 2: goto L80;
+ }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+ if (l == 1) {
+ goto L210;
+ }
+ --l;
+
+L50:
+ for (j = l; j >= 1; --j) {
+
+ i__1 = l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ == j) {
+ goto L60;
+ }
+ i__2 = j + i__ * a_dim1;
+ if (a[i__2].r != 0. || d_imag(&a[j + i__ * a_dim1]) != 0.) {
+ goto L70;
+ }
+L60:
+ ;
+ }
+
+ m = l;
+ iexc = 1;
+ goto L20;
+L70:
+ ;
+ }
+
+ goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+ ++k;
+
+L90:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+
+ i__2 = l;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (i__ == j) {
+ goto L100;
+ }
+ i__3 = i__ + j * a_dim1;
+ if (a[i__3].r != 0. || d_imag(&a[i__ + j * a_dim1]) != 0.) {
+ goto L110;
+ }
+L100:
+ ;
+ }
+
+ m = k;
+ iexc = 2;
+ goto L20;
+L110:
+ ;
+ }
+
+L120:
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ scale[i__] = 1.;
+/* L130: */
+ }
+
+ if (lsame_(job, "P")) {
+ goto L210;
+ }
+
+/*
+ Balance the submatrix in rows K to L.
+
+ Iterative loop for norm reduction
+*/
+
+ sfmin1 = SAFEMINIMUM / PRECISION;
+ sfmax1 = 1. / sfmin1;
+ sfmin2 = sfmin1 * 8.;
+ sfmax2 = 1. / sfmin2;
+L140:
+ noconv = FALSE_;
+
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ c__ = 0.;
+ r__ = 0.;
+
+ i__2 = l;
+ for (j = k; j <= i__2; ++j) {
+ if (j == i__) {
+ goto L150;
+ }
+ i__3 = j + i__ * a_dim1;
+ c__ += (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j + i__ *
+ a_dim1]), abs(d__2));
+ i__3 = i__ + j * a_dim1;
+ r__ += (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + j *
+ a_dim1]), abs(d__2));
+L150:
+ ;
+ }
+ ica = izamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+ ca = z_abs(&a[ica + i__ * a_dim1]);
+ i__2 = *n - k + 1;
+ ira = izamax_(&i__2, &a[i__ + k * a_dim1], lda);
+ ra = z_abs(&a[i__ + (ira + k - 1) * a_dim1]);
+
+/* Guard against zero C or R due to underflow. */
+
+ if (c__ == 0. || r__ == 0.) {
+ goto L200;
+ }
+ g = r__ / 8.;
+ f = 1.;
+ s = c__ + r__;
+L160:
+/* Computing MAX */
+ d__1 = max(f,c__);
+/* Computing MIN */
+ d__2 = min(r__,g);
+ if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) {
+ goto L170;
+ }
+ f *= 8.;
+ c__ *= 8.;
+ ca *= 8.;
+ r__ /= 8.;
+ g /= 8.;
+ ra /= 8.;
+ goto L160;
+
+L170:
+ g = c__ / 8.;
+L180:
+/* Computing MIN */
+ d__1 = min(f,c__), d__1 = min(d__1,g);
+ if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) {
+ goto L190;
+ }
+ f /= 8.;
+ c__ /= 8.;
+ g /= 8.;
+ ca /= 8.;
+ r__ *= 8.;
+ ra *= 8.;
+ goto L180;
+
+/* Now balance. */
+
+L190:
+ if (c__ + r__ >= s * .95) {
+ goto L200;
+ }
+ if ((f < 1. && scale[i__] < 1.)) {
+ if (f * scale[i__] <= sfmin1) {
+ goto L200;
+ }
+ }
+ if ((f > 1. && scale[i__] > 1.)) {
+ if (scale[i__] >= sfmax1 / f) {
+ goto L200;
+ }
+ }
+ g = 1. / f;
+ scale[i__] *= f;
+ noconv = TRUE_;
+
+ i__2 = *n - k + 1;
+ zdscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+ zdscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+ ;
+ }
+
+ if (noconv) {
+ goto L140;
+ }
+
+L210:
+ *ilo = k;
+ *ihi = l;
+
+ return 0;
+
+/* End of ZGEBAL */
+
+} /* zgebal_ */
+
+/* Subroutine */ int zgebd2_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublereal *d__, doublereal *e, doublecomplex *tauq,
+ doublecomplex *taup, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__;
+ static doublecomplex alpha;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZGEBD2 reduces a complex general m by n matrix A to upper or lower
+ real bidiagonal form B by a unitary transformation: Q' * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the unitary matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the unitary matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) COMPLEX*16 array dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix Q. See Further Details.
+
+ TAUP (output) COMPLEX*16 array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix P. See Further Details.
+
+ WORK (workspace) COMPLEX*16 array, dimension (max(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, and v and u are complex
+ vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in
+ A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in
+ A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, v and u are complex vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEBD2", &i__1);
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, &
+ tauq[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ d_cnjg(&z__1, &tauq[i__]);
+ zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &z__1,
+ &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = d__[i__3], a[i__2].i = 0.;
+
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector G(i) to annihilate
+ A(i,i+2:n)
+*/
+
+ i__2 = *n - i__;
+ zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ zlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ i__2 = *n - i__;
+ zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.;
+ } else {
+ i__2 = i__;
+ taup[i__2].r = 0., taup[i__2].i = 0.;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+/* Computing MIN */
+ i__4 = i__ + 1;
+ zlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[
+ i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]);
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = d__[i__3], a[i__2].i = 0.;
+
+ if (i__ < *m) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:m,i)
+*/
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1,
+ &tauq[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply H(i)' to A(i+1:m,i+1:n) from the left */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ d_cnjg(&z__1, &tauq[i__]);
+ zlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &z__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &
+ work[1]);
+ i__2 = i__ + 1 + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.;
+ } else {
+ i__2 = i__;
+ tauq[i__2].r = 0., tauq[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of ZGEBD2 */
+
+} /* zgebd2_ */
+
+/* Subroutine */ int zgebrd_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tauq,
+	doublecomplex *taup, doublecomplex *work, integer *lwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__, j, nb, nx;
+    static doublereal ws;
+    static integer nbmin, iinfo, minmn;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zgebd2_(integer *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublereal *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *),
+	    xerbla_(char *, integer *), zlabrd_(integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublereal *, doublereal *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static integer ldwrkx, ldwrky, lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZGEBRD reduces a general complex M-by-N matrix A to upper or lower
+    bidiagonal form B by a unitary transformation: Q**H * A * P = B.
+
+    If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows in the matrix A. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns in the matrix A. N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the M-by-N general matrix to be reduced.
+            On exit,
+            if m >= n, the diagonal and the first superdiagonal are
+              overwritten with the upper bidiagonal matrix B; the
+              elements below the diagonal, with the array TAUQ, represent
+              the unitary matrix Q as a product of elementary
+              reflectors, and the elements above the first superdiagonal,
+              with the array TAUP, represent the unitary matrix P as
+              a product of elementary reflectors;
+            if m < n, the diagonal and the first subdiagonal are
+              overwritten with the lower bidiagonal matrix B; the
+              elements below the first subdiagonal, with the array TAUQ,
+              represent the unitary matrix Q as a product of
+              elementary reflectors, and the elements above the diagonal,
+              with the array TAUP, represent the unitary matrix P as
+              a product of elementary reflectors.
+            See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A. LDA >= max(1,M).
+
+    D       (output) DOUBLE PRECISION array, dimension (min(M,N))
+            The diagonal elements of the bidiagonal matrix B:
+            D(i) = A(i,i).
+
+    E       (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
+            The off-diagonal elements of the bidiagonal matrix B:
+            if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+            if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+    TAUQ    (output) COMPLEX*16 array dimension (min(M,N))
+            The scalar factors of the elementary reflectors which
+            represent the unitary matrix Q. See Further Details.
+
+    TAUP    (output) COMPLEX*16 array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors which
+            represent the unitary matrix P. See Further Details.
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The length of the array WORK. LWORK >= max(1,M,N).
+            For optimum performance LWORK >= (M+N)*NB, where NB
+            is the optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0: successful exit.
+            < 0: if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    The matrices Q and P are represented as products of elementary
+    reflectors:
+
+    If m >= n,
+
+       Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+    Each H(i) and G(i) has the form:
+
+       H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+    where tauq and taup are complex scalars, and v and u are complex
+    vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in
+    A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in
+    A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+    If m < n,
+
+       Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+    Each H(i) and G(i) has the form:
+
+       H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+    where tauq and taup are complex scalars, and v and u are complex
+    vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in
+    A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in
+    A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+    The contents of A on exit are illustrated by the following examples:
+
+    m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+      ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+      ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+      ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+      ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+      ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+      ( v1 v2 v3 v4 v5 )
+
+    where d and e denote diagonal and off-diagonal elements of B, vi
+    denotes an element of the vector defining H(i), and ui an element of
+    the vector defining G(i).
+
+    =====================================================================
+
+
+    Test the input parameters
+*/
+
+/*
+   NOTE(review): f2c-generated translation of Fortran ZGEBRD. The pointer
+   shifts below emulate Fortran 1-based indexing; the i__N temporaries are
+   reused in a specific order -- do not reorder or "simplify" statements.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* Computing MAX */
+    i__1 = 1, i__2 = ilaenv_(&c__1, "ZGEBRD", " ", m, n, &c_n1, &c_n1, (
+	    ftnlen)6, (ftnlen)1);
+    nb = max(i__1,i__2);
+    lwkopt = (*m + *n) * nb;
+    d__1 = (doublereal) lwkopt;
+    work[1].r = d__1, work[1].i = 0.;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m);
+	if ((*lwork < max(i__1,*n) && ! lquery)) {
+	    *info = -10;
+	}
+    }
+    if (*info < 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEBRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    minmn = min(*m,*n);
+    if (minmn == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+    ws = (doublereal) max(*m,*n);
+    ldwrkx = *m;
+    ldwrky = *n;
+
+    if ((nb > 1 && nb < minmn)) {
+
+/*
+       Set the crossover point NX.
+
+       Computing MAX
+*/
+	i__1 = nb, i__2 = ilaenv_(&c__3, "ZGEBRD", " ", m, n, &c_n1, &c_n1, (
+		ftnlen)6, (ftnlen)1);
+	nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+	if (nx < minmn) {
+	    ws = (doublereal) ((*m + *n) * nb);
+	    if ((doublereal) (*lwork) < ws) {
+
+/*
+             Not enough work space for the optimal NB, consider using
+             a smaller block size.
+*/
+
+		nbmin = ilaenv_(&c__2, "ZGEBRD", " ", m, n, &c_n1, &c_n1, (
+			ftnlen)6, (ftnlen)1);
+		if (*lwork >= (*m + *n) * nbmin) {
+		    nb = *lwork / (*m + *n);
+		} else {
+		    nb = 1;
+		    nx = minmn;
+		}
+	    }
+	}
+    } else {
+	nx = minmn;
+    }
+
+/*
+   Blocked phase: reduce NB-wide panels until only MINMN-NX columns remain.
+   The "i__2 < 0 ? ..." test is f2c's generic DO-loop direction check; NB is
+   at least 1 here, so the loop always counts upward.
+*/
+
+    i__1 = minmn - nx;
+    i__2 = nb;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/*
+       Reduce rows and columns i:i+ib-1 to bidiagonal form and return
+       the matrices X and Y which are needed to update the unreduced
+       part of the matrix
+*/
+
+	i__3 = *m - i__ + 1;
+	i__4 = *n - i__ + 1;
+	zlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+		i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+		* nb + 1], &ldwrky);
+
+/*
+       Update the trailing submatrix A(i+ib:m,i+ib:n), using
+       an update of the form A := A - V*Y' - X*U'
+*/
+
+	i__3 = *m - i__ - nb + 1;
+	i__4 = *n - i__ - nb + 1;
+	z__1.r = -1., z__1.i = -0.;
+	zgemm_("No transpose", "Conjugate transpose", &i__3, &i__4, &nb, &
+		z__1, &a[i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb +
+		nb + 1], &ldwrky, &c_b60, &a[i__ + nb + (i__ + nb) * a_dim1],
+		lda);
+	i__3 = *m - i__ - nb + 1;
+	i__4 = *n - i__ - nb + 1;
+	z__1.r = -1., z__1.i = -0.;
+	zgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &z__1, &
+		work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+		c_b60, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+	if (*m >= *n) {
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = d__[i__5], a[i__4].i = 0.;
+		i__4 = j + (j + 1) * a_dim1;
+		i__5 = j;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+/* L10: */
+	    }
+	} else {
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = d__[i__5], a[i__4].i = 0.;
+		i__4 = j + 1 + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+/* L20: */
+	    }
+	}
+/* L30: */
+    }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+    i__2 = *m - i__ + 1;
+    i__1 = *n - i__ + 1;
+    zgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+	    tauq[i__], &taup[i__], &work[1], &iinfo);
+    work[1].r = ws, work[1].i = 0.;
+    return 0;
+
+/* End of ZGEBRD */
+
+} /* zgebrd_ */
+
+/* Subroutine */ int zgeev_(char *jobvl, char *jobvr, integer *n,
+	doublecomplex *a, integer *lda, doublecomplex *w, doublecomplex *vl,
+	integer *ldvl, doublecomplex *vr, integer *ldvr, doublecomplex *work,
+	integer *lwork, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+	    i__2, i__3, i__4;
+    doublereal d__1, d__2;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, k, ihi;
+    static doublereal scl;
+    static integer ilo;
+    static doublereal dum[1], eps;
+    static doublecomplex tmp;
+    static integer ibal;
+    static char side[1];
+    static integer maxb;
+    static doublereal anrm;
+    static integer ierr, itau, iwrk, nout;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
+    static logical scalea;
+
+    static doublereal cscale;
+    extern /* Subroutine */ int zgebak_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublecomplex *, integer *,
+	    integer *), zgebal_(char *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublereal *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static logical select[1];
+    extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+	    doublecomplex *, integer *);
+    static doublereal bignum;
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+	    integer *, integer *), zlacpy_(char *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+    static integer minwrk, maxwrk;
+    static logical wantvl;
+    static doublereal smlnum;
+    static integer hswork, irwork;
+    extern /* Subroutine */ int zhseqr_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *), ztrevc_(char *, char *, logical *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublecomplex *,
+	    doublereal *, integer *);
+    static logical lquery, wantvr;
+    extern /* Subroutine */ int zunghr_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *);
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZGEEV computes for an N-by-N complex nonsymmetric matrix A, the
+    eigenvalues and, optionally, the left and/or right eigenvectors.
+
+    The right eigenvector v(j) of A satisfies
+                     A * v(j) = lambda(j) * v(j)
+    where lambda(j) is its eigenvalue.
+    The left eigenvector u(j) of A satisfies
+                  u(j)**H * A = lambda(j) * u(j)**H
+    where u(j)**H denotes the conjugate transpose of u(j).
+
+    The computed eigenvectors are normalized to have Euclidean norm
+    equal to 1 and largest component real.
+
+    Arguments
+    =========
+
+    JOBVL   (input) CHARACTER*1
+            = 'N': left eigenvectors of A are not computed;
+            = 'V': left eigenvectors of A are computed.
+
+    JOBVR   (input) CHARACTER*1
+            = 'N': right eigenvectors of A are not computed;
+            = 'V': right eigenvectors of A are computed.
+
+    N       (input) INTEGER
+            The order of the matrix A. N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the N-by-N matrix A.
+            On exit, A has been overwritten.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A. LDA >= max(1,N).
+
+    W       (output) COMPLEX*16 array, dimension (N)
+            W contains the computed eigenvalues.
+
+    VL      (output) COMPLEX*16 array, dimension (LDVL,N)
+            If JOBVL = 'V', the left eigenvectors u(j) are stored one
+            after another in the columns of VL, in the same order
+            as their eigenvalues.
+            If JOBVL = 'N', VL is not referenced.
+            u(j) = VL(:,j), the j-th column of VL.
+
+    LDVL    (input) INTEGER
+            The leading dimension of the array VL. LDVL >= 1; if
+            JOBVL = 'V', LDVL >= N.
+
+    VR      (output) COMPLEX*16 array, dimension (LDVR,N)
+            If JOBVR = 'V', the right eigenvectors v(j) are stored one
+            after another in the columns of VR, in the same order
+            as their eigenvalues.
+            If JOBVR = 'N', VR is not referenced.
+            v(j) = VR(:,j), the j-th column of VR.
+
+    LDVR    (input) INTEGER
+            The leading dimension of the array VR. LDVR >= 1; if
+            JOBVR = 'V', LDVR >= N.
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK. LWORK >= max(1,2*N).
+            For good performance, LWORK must generally be larger.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    RWORK   (workspace) DOUBLE PRECISION array, dimension (2*N)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value.
+            > 0: if INFO = i, the QR algorithm failed to compute all the
+                 eigenvalues, and no eigenvectors have been computed;
+                 elements i+1:N of W contain eigenvalues which have
+                 converged.
+
+    =====================================================================
+
+
+    Test the input arguments
+*/
+
+/*
+   NOTE(review): f2c-generated translation of the LAPACK ZGEEV driver.
+   'side' is set to 'L', 'R' or 'B' below and tells ZTREVC which set of
+   eigenvectors to form; the pointer shifts emulate 1-based indexing.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --w;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1 * 1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1 * 1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    wantvl = lsame_(jobvl, "V");
+    wantvr = lsame_(jobvr, "V");
+    if ((! wantvl && ! lsame_(jobvl, "N"))) {
+	*info = -1;
+    } else if ((! wantvr && ! lsame_(jobvr, "N"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldvl < 1 || (wantvl && *ldvl < *n)) {
+	*info = -8;
+    } else if (*ldvr < 1 || (wantvr && *ldvr < *n)) {
+	*info = -10;
+    }
+
+/*
+    Compute workspace
+     (Note: Comments in the code beginning "Workspace:" describe the
+      minimal amount of workspace needed at that point in the code,
+      as well as the preferred amount for good performance.
+      CWorkspace refers to complex workspace, and RWorkspace to real
+      workspace. NB refers to the optimal block size for the
+      immediately following subroutine, as returned by ILAENV.
+      HSWORK refers to the workspace preferred by ZHSEQR, as
+      calculated below. HSWORK is computed assuming ILO=1 and IHI=N,
+      the worst case.)
+*/
+
+    minwrk = 1;
+    if ((*info == 0 && (*lwork >= 1 || lquery))) {
+	maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &c__0, (
+		ftnlen)6, (ftnlen)1);
+	if ((! wantvl && ! wantvr)) {
+/* Computing MAX */
+	    i__1 = 1, i__2 = (*n) << (1);
+	    minwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = ilaenv_(&c__8, "ZHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen)
+		    6, (ftnlen)2);
+	    maxb = max(i__1,2);
+/*
+          Computing MIN
+          Computing MAX
+*/
+	    i__3 = 2, i__4 = ilaenv_(&c__4, "ZHSEQR", "EN", n, &c__1, n, &
+		    c_n1, (ftnlen)6, (ftnlen)2);
+	    i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+	    k = min(i__1,i__2);
+/* Computing MAX */
+	    i__1 = k * (k + 2), i__2 = (*n) << (1);
+	    hswork = max(i__1,i__2);
+	    maxwrk = max(maxwrk,hswork);
+	} else {
+/* Computing MAX */
+	    i__1 = 1, i__2 = (*n) << (1);
+	    minwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR",
+		    " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = ilaenv_(&c__8, "ZHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen)
+		    6, (ftnlen)2);
+	    maxb = max(i__1,2);
+/*
+          Computing MIN
+          Computing MAX
+*/
+	    i__3 = 2, i__4 = ilaenv_(&c__4, "ZHSEQR", "SV", n, &c__1, n, &
+		    c_n1, (ftnlen)6, (ftnlen)2);
+	    i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+	    k = min(i__1,i__2);
+/* Computing MAX */
+	    i__1 = k * (k + 2), i__2 = (*n) << (1);
+	    hswork = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = max(maxwrk,hswork), i__2 = (*n) << (1);
+	    maxwrk = max(i__1,i__2);
+	}
+	work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+    }
+    if ((*lwork < minwrk && ! lquery)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = PRECISION;
+    smlnum = SAFEMINIMUM;
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = zlange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if ((anrm > 0. && anrm < smlnum)) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	zlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/*
+    Balance the matrix
+    (CWorkspace: none)
+    (RWorkspace: need N)
+*/
+
+    ibal = 1;
+    zgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/*
+    Reduce to upper Hessenberg form
+    (CWorkspace: need 2*N, prefer N+N*NB)
+    (RWorkspace: none)
+*/
+
+    itau = 1;
+    iwrk = itau + *n;
+    i__1 = *lwork - iwrk + 1;
+    zgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+	     &ierr);
+
+    if (wantvl) {
+
+/*
+       Want left eigenvectors
+       Copy Householder vectors to VL
+*/
+
+	*(unsigned char *)side = 'L';
+	zlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+		;
+
+/*
+       Generate unitary matrix in VL
+       (CWorkspace: need 2*N-1, prefer N+(N-1)*NB)
+       (RWorkspace: none)
+*/
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*
+       Perform QR iteration, accumulating Schur vectors in VL
+       (CWorkspace: need 1, prefer HSWORK (see comments) )
+       (RWorkspace: none)
+*/
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vl[
+		vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+	if (wantvr) {
+
+/*
+          Want left and right eigenvectors
+          Copy Schur vectors to VR
+*/
+
+	    *(unsigned char *)side = 'B';
+	    zlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+	}
+
+    } else if (wantvr) {
+
+/*
+       Want right eigenvectors
+       Copy Householder vectors to VR
+*/
+
+	*(unsigned char *)side = 'R';
+	zlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+		;
+
+/*
+       Generate unitary matrix in VR
+       (CWorkspace: need 2*N-1, prefer N+(N-1)*NB)
+       (RWorkspace: none)
+*/
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*
+       Perform QR iteration, accumulating Schur vectors in VR
+       (CWorkspace: need 1, prefer HSWORK (see comments) )
+       (RWorkspace: none)
+*/
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+    } else {
+
+/*
+       Compute eigenvalues only
+       (CWorkspace: need 1, prefer HSWORK (see comments) )
+       (RWorkspace: none)
+*/
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+    }
+
+/* If INFO > 0 from ZHSEQR, then quit */
+
+    if (*info > 0) {
+	goto L50;
+    }
+
+    if (wantvl || wantvr) {
+
+/*
+       Compute left and/or right eigenvectors
+       (CWorkspace: need 2*N)
+       (RWorkspace: need 2*N)
+*/
+
+	irwork = ibal + *n;
+	ztrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[irwork],
+		&ierr);
+    }
+
+    if (wantvl) {
+
+/*
+       Undo balancing of left eigenvectors
+       (CWorkspace: none)
+       (RWorkspace: need N)
+*/
+
+	zgebak_("B", "L", n, &ilo, &ihi, &rwork[ibal], n, &vl[vl_offset],
+		ldvl, &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1. / dznrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+	    zdscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vl_dim1;
+/* Computing 2nd power */
+		d__1 = vl[i__3].r;
+/* Computing 2nd power */
+		d__2 = d_imag(&vl[k + i__ * vl_dim1]);
+		rwork[irwork + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+	    }
+	    k = idamax_(n, &rwork[irwork], &c__1);
+	    d_cnjg(&z__2, &vl[k + i__ * vl_dim1]);
+	    d__1 = sqrt(rwork[irwork + k - 1]);
+	    z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+	    tmp.r = z__1.r, tmp.i = z__1.i;
+	    zscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vl_dim1;
+	    i__3 = k + i__ * vl_dim1;
+	    d__1 = vl[i__3].r;
+	    z__1.r = d__1, z__1.i = 0.;
+	    vl[i__2].r = z__1.r, vl[i__2].i = z__1.i;
+/* L20: */
+	}
+    }
+
+    if (wantvr) {
+
+/*
+       Undo balancing of right eigenvectors
+       (CWorkspace: none)
+       (RWorkspace: need N)
+*/
+
+	zgebak_("B", "R", n, &ilo, &ihi, &rwork[ibal], n, &vr[vr_offset],
+		ldvr, &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1. / dznrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+	    zdscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vr_dim1;
+/* Computing 2nd power */
+		d__1 = vr[i__3].r;
+/* Computing 2nd power */
+		d__2 = d_imag(&vr[k + i__ * vr_dim1]);
+		rwork[irwork + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+	    }
+	    k = idamax_(n, &rwork[irwork], &c__1);
+	    d_cnjg(&z__2, &vr[k + i__ * vr_dim1]);
+	    d__1 = sqrt(rwork[irwork + k - 1]);
+	    z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+	    tmp.r = z__1.r, tmp.i = z__1.i;
+	    zscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vr_dim1;
+	    i__3 = k + i__ * vr_dim1;
+	    d__1 = vr[i__3].r;
+	    z__1.r = d__1, z__1.i = 0.;
+	    vr[i__2].r = z__1.r, vr[i__2].i = z__1.i;
+/* L40: */
+	}
+    }
+
+/* Undo scaling if necessary */
+
+L50:
+    if (scalea) {
+	i__1 = *n - *info;
+/* Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
+		, &i__2, &ierr);
+	if (*info > 0) {
+	    i__1 = ilo - 1;
+	    zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n,
+		     &ierr);
+	}
+    }
+
+    work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+    return 0;
+
+/* End of ZGEEV */
+
+} /* zgeev_ */
+
+/* Subroutine */ int zgehd2_(integer *n, integer *ilo, integer *ihi,
+	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__;
+    static doublecomplex alpha;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZGEHD2 reduces a complex general matrix A to upper Hessenberg form H
+    by a unitary similarity transformation: Q' * A * Q = H .
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the matrix A. N >= 0.
+
+    ILO     (input) INTEGER
+    IHI     (input) INTEGER
+            It is assumed that A is already upper triangular in rows
+            and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+            set by a previous call to ZGEBAL; otherwise they should be
+            set to 1 and N respectively. See Further Details.
+            1 <= ILO <= IHI <= max(1,N).
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the n by n general matrix to be reduced.
+            On exit, the upper triangle and the first subdiagonal of A
+            are overwritten with the upper Hessenberg matrix H, and the
+            elements below the first subdiagonal, with the array TAU,
+            represent the unitary matrix Q as a product of elementary
+            reflectors. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A. LDA >= max(1,N).
+
+    TAU     (output) COMPLEX*16 array, dimension (N-1)
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace) COMPLEX*16 array, dimension (N)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of (ihi-ilo) elementary
+    reflectors
+
+       Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+    exit in A(i+2:ihi,i), and tau in TAU(i).
+
+    The contents of A are illustrated by the following example, with
+    n = 7, ilo = 2 and ihi = 6:
+
+    on entry, on exit,
+
+    ( a a a a a a a ) ( a a h h h h a )
+    ( a a a a a a ) ( a h h h h a )
+    ( a a a a a a ) ( h h h h h h )
+    ( a a a a a a ) ( v2 h h h h h )
+    ( a a a a a a ) ( v2 v3 h h h h )
+    ( a a a a a a ) ( v2 v3 v4 h h h )
+    ( a ) ( a )
+
+    where a denotes an element of the original matrix A, h denotes a
+    modified element of the upper Hessenberg matrix H, and vi denotes an
+    element of the vector defining H(i).
+
+    =====================================================================
+
+
+    Test the input parameters
+*/
+
+/*
+   NOTE(review): f2c-generated unblocked Hessenberg reduction. The pointer
+   shifts below emulate Fortran 1-based indexing.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEHD2", &i__1);
+	return 0;
+    }
+
+/*
+   For each column, the subdiagonal entry A(i+1,i) is temporarily saved in
+   'alpha' and overwritten with 1 so the Householder vector can be applied
+   in place; it is restored from 'alpha' at the bottom of the loop.
+*/
+
+    i__1 = *ihi - 1;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+	i__2 = i__ + 1 + i__ * a_dim1;
+	alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	i__2 = *ihi - i__;
+/* Computing MIN */
+	i__3 = i__ + 2;
+	zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[
+		i__]);
+	i__2 = i__ + 1 + i__ * a_dim1;
+	a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+	i__2 = *ihi - i__;
+	zlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i)' to A(i+1:ihi,i+1:n) from the left */
+
+	i__2 = *ihi - i__;
+	i__3 = *n - i__;
+	d_cnjg(&z__1, &tau[i__]);
+	zlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &z__1,
+		 &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+	i__2 = i__ + 1 + i__ * a_dim1;
+	a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+/* L10: */
+    }
+
+    return 0;
+
+/* End of ZGEHD2 */
+
+} /* zgehd2_ */
+
+/* Subroutine */ int zgehrd_(integer *n, integer *ilo, integer *ihi,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1;
+
+ /* Local variables */
+ static integer i__;
+ static doublecomplex t[4160] /* was [65][64] */;
+ static integer ib;
+ static doublecomplex ei;
+ static integer nb, nh, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zgehd2_(integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ zlahrd_(integer *, integer *, integer *, doublecomplex *, integer
+ *, doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZGEHRD reduces a complex general matrix A to upper Hessenberg form H
+ by a unitary similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to ZGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the N-by-N general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the unitary matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) COMPLEX*16 array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to
+ zero.
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+ on entry, on exit,
+
+ ( a a a a a a a ) ( a a h h h h a )
+ ( a a a a a a ) ( a h h h h a )
+ ( a a a a a a ) ( h h h h h h )
+ ( a a a a a a ) ( v2 h h h h h )
+ ( a a a a a a ) ( v2 v3 h h h h )
+ ( a a a a a a ) ( v2 v3 v4 h h h )
+ ( a ) ( a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MIN */
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ lwkopt = *n * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -2;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if ((*lwork < max(1,*n) && ! lquery)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEHRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+ i__2 = i__;
+ tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L20: */
+ }
+
+/* Quick return if possible */
+
+ nh = *ihi - *ilo + 1;
+ if (nh <= 1) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ iws = 1;
+ if ((nb > 1 && nb < nh)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "ZGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ iws = *n * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEHRD", " ", n, ilo, ihi, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ if (*lwork >= *n * nbmin) {
+ nb = *lwork / *n;
+ } else {
+ nb = 1;
+ }
+ }
+ }
+ }
+ ldwork = *n;
+
+ if (nb < nbmin || nb >= nh) {
+
+/* Use unblocked code below */
+
+ i__ = *ilo;
+
+ } else {
+
+/* Use blocked code */
+
+ i__1 = *ihi - 1 - nx;
+ i__2 = nb;
+ for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *ihi - i__;
+ ib = min(i__3,i__4);
+
+/*
+ Reduce columns i:i+ib-1 to Hessenberg form, returning the
+ matrices V and T of the block reflector H = I - V*T*V'
+ which performs the reduction, and also the matrix Y = A*V*T
+*/
+
+ zlahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+ c__65, &work[1], &ldwork);
+
+/*
+ Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
+ right, computing A := A - Y * V'. V(i+ib,ib-1) must be set
+ to 1.
+*/
+
+ i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+ ei.r = a[i__3].r, ei.i = a[i__3].i;
+ i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+ a[i__3].r = 1., a[i__3].i = 0.;
+ i__3 = *ihi - i__ - ib + 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", ihi, &i__3, &ib, &
+ z__1, &work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda,
+ &c_b60, &a[(i__ + ib) * a_dim1 + 1], lda);
+ i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+ a[i__3].r = ei.r, a[i__3].i = ei.i;
+
+/*
+ Apply the block reflector H to A(i+1:ihi,i+ib:n) from the
+ left
+*/
+
+ i__3 = *ihi - i__;
+ i__4 = *n - i__ - ib + 1;
+ zlarfb_("Left", "Conjugate transpose", "Forward", "Columnwise", &
+ i__3, &i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &
+ c__65, &a[i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &
+ ldwork);
+/* L30: */
+ }
+ }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+ zgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGEHRD */
+
+} /* zgehrd_ */
+
+/* Subroutine */ int zgelq2_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static doublecomplex alpha;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZGELQ2 computes an LQ factorization of a complex m by n matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m by min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX*16 array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) COMPLEX*16 array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)', where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in
+ A(i,i+1:n), and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+/* f2c-translated code: the pointer shifts below convert the C arrays to
+ the 1-based, Fortran-style indexing used throughout the routine. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+/* Conjugate row A(i,i:n) in place so zlarfg_ can build the reflector
+ from it; the conjugation is reversed after H(i) has been applied. */
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+/* min(i+1,n) keeps the column index in range on the last step (i == n). */
+ i__3 = i__ + 1;
+ zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &tau[i__]
+ );
+ if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+/* Temporarily set A(i,i) = 1 so row A(i,i:n) holds the complete
+ reflector vector v while zlarf_ runs; alpha is restored below. */
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ zlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+/* L10: */
+ }
+ return 0;
+
+/* End of ZGELQ2 */
+
+} /* zgelq2_ */
+
+/* Subroutine */ int zgelqf_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zgelq2_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZGELQF computes an LQ factorization of a complex M-by-N matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m-by-min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX*16 array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)', where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in
+ A(i,i+1:n), and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+/* f2c-translated code: the pointer shifts below convert the C arrays to
+ the 1-based, Fortran-style indexing used throughout the routine. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* nb: preferred block size for ZGELQF as reported by ILAENV. */
+ nb = ilaenv_(&c__1, "ZGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *m * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if ((*lwork < max(1,*m) && ! lquery)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if ((nb > 1 && nb < k)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+/* nx: crossover point; columns beyond k - nx are handled unblocked. */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZGELQF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZGELQF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (((nb >= nbmin && nb < k) && nx < k)) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+/* ib: width of the current panel (last panel may be narrower than nb). */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the LQ factorization of the current block
+ A(i:i+ib-1,i:n)
+*/
+
+ i__3 = *n - i__ + 1;
+ zgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *n - i__ + 1;
+ zlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+ i__3 = *m - i__ - ib + 1;
+ i__4 = *n - i__ + 1;
+ zlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+ &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+/* Blocked path not taken: the unblocked call below factors everything
+ starting from the first column. */
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ zgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+/* WORK(1) reports the workspace size actually used (iws). */
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZGELQF */
+
+} /* zgelqf_ */
+
+/* Subroutine */ int zgelsd_(integer *m, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublereal *s, doublereal *rcond, integer *rank, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ static integer ie, il, mm;
+ static doublereal eps, anrm, bnrm;
+ static integer itau, iascl, ibscl;
+ static doublereal sfmin;
+ static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), zgebrd_(integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ static doublereal bignum;
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+ ), zlalsd_(char *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *,
+ doublecomplex *, doublereal *, integer *, integer *),
+ zlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublecomplex *, integer *, integer *), zgeqrf_(integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ static integer minwrk, maxwrk;
+ static doublereal smlnum;
+ extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+ );
+ static logical lquery;
+ static integer nrwork, smlsiz;
+ extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ ZGELSD computes the minimum-norm solution to a real linear least
+ squares problem:
+ minimize 2-norm(| b - A*x |)
+ using the singular value decomposition (SVD) of A. A is an M-by-N
+ matrix which may be rank-deficient.
+
+ Several right hand side vectors b and solution vectors x can be
+ handled in a single call; they are stored as the columns of the
+ M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+ matrix X.
+
+ The problem is solved in three steps:
+ (1) Reduce the coefficient matrix A to bidiagonal form with
+ Householder transformations, reducing the original problem
+ into a "bidiagonal least squares problem" (BLS)
+ (2) Solve the BLS using a divide and conquer approach.
+ (3) Apply back all the Householder transformations to solve
+ the original least squares problem.
+
+ The effective rank of A is determined by treating as zero those
+ singular values which are less than RCOND times the largest singular
+ value.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrices B and X. NRHS >= 0.
+
+ A (input) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, A has been destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (input/output) COMPLEX*16 array, dimension (LDB,NRHS)
+ On entry, the M-by-NRHS right hand side matrix B.
+ On exit, B is overwritten by the N-by-NRHS solution matrix X.
+ If m >= n and RANK = n, the residual sum-of-squares for
+ the solution in the i-th column is given by the sum of
+ squares of elements n+1:m in that column.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M,N).
+
+ S (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The singular values of A in decreasing order.
+ The condition number of A in the 2-norm = S(1)/S(min(m,n)).
+
+ RCOND (input) DOUBLE PRECISION
+ RCOND is used to determine the effective rank of A.
+ Singular values S(i) <= RCOND*S(1) are treated as zero.
+ If RCOND < 0, machine precision is used instead.
+
+ RANK (output) INTEGER
+ The effective rank of A, i.e., the number of singular values
+ which are greater than RCOND*S(1).
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK must be at least 1.
+ The exact minimum amount of workspace needed depends on M,
+ N and NRHS. As long as LWORK is at least
+ 2 * N + N * NRHS
+ if M is greater than or equal to N or
+ 2 * M + M * NRHS
+ if M is less than N, the code will execute correctly.
+ For good performance, LWORK should generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ RWORK (workspace) DOUBLE PRECISION array, dimension at least
+ 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS +
+ (SMLSIZ+1)**2
+ if M is greater than or equal to N or
+ 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS +
+ (SMLSIZ+1)**2
+ if M is less than N, the code will execute correctly.
+ SMLSIZ is returned by ILAENV and is equal to the maximum
+ size of the subproblems at the bottom of the computation
+ tree (usually about 25), and
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+
+ IWORK (workspace) INTEGER array, dimension (LIWORK)
+ LIWORK >= 3 * MINMN * NLVL + 11 * MINMN,
+ where MINMN = MIN( M,N ).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: the algorithm for computing the SVD failed to converge;
+ if INFO = i, i off-diagonal elements of an intermediate
+ bidiagonal form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input arguments.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ --s;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ mnthr = ilaenv_(&c__6, "ZGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+ smlsiz = ilaenv_(&c__9, "ZGELSD", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Compute workspace.
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ minwrk = 1;
+ if (*info == 0) {
+ maxwrk = 0;
+ mm = *m;
+ if ((*m >= *n && *m >= mnthr)) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *nrhs * ilaenv_(&c__1, "ZUNMQR", "LC", m,
+ nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/*
+ Path 1 - overdetermined or exactly determined.
+
+ Computing MAX
+*/
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + (mm + *n) * ilaenv_(&c__1,
+ "ZGEBRD", " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1)
+ ;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *nrhs * ilaenv_(&c__1,
+ "ZUNMBR", "QLC", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)
+ 3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ((*n) << (1)) + mm, i__2 = ((*n) << (1)) + *n * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+ if (*n >= mnthr) {
+
+/*
+ Path 2a - underdetermined, with many more columns
+ than rows.
+*/
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &c_n1,
+ &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1))
+ * ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs *
+ ilaenv_(&c__1, "ZUNMBR", "QLC", m, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) *
+ ilaenv_(&c__1, "ZUNMLQ", "LC", n, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1));
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined. */
+
+ maxwrk = ((*m) << (1)) + (*n + *m) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *nrhs * ilaenv_(&c__1,
+ "ZUNMBR", "QLC", m, nrhs, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = ((*m) << (1)) + *n, i__2 = ((*m) << (1)) + *m * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ minwrk = min(minwrk,maxwrk);
+ d__1 = (doublereal) maxwrk;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ if ((*lwork < minwrk && ! lquery)) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ goto L10;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = PRECISION;
+ sfmin = SAFEMINIMUM;
+ smlnum = sfmin / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if ((anrm > 0. && anrm < smlnum)) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b59, &c_b59, &b[b_offset], ldb);
+ dlaset_("F", &minmn, &c__1, &c_b324, &c_b324, &s[1], &c__1)
+ ;
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if ((bnrm > 0. && bnrm < smlnum)) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure B(M+1:N,:) = 0 */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ zlaset_("F", &i__1, nrhs, &c_b59, &c_b59, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R.
+ (RWorkspace: need N)
+ (CWorkspace: need N, prefer N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/*
+ Multiply B by transpose(Q).
+ (RWorkspace: need N)
+ (CWorkspace: need NRHS, prefer NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b59, &c_b59, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+ ie = 1;
+ nrwork = ie + *n;
+
+/*
+ Bidiagonalize R in A.
+ (RWorkspace: need N)
+ (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of R.
+ (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ zlalsd_("U", &smlsiz, n, nrhs, &s[1], &rwork[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
+ max(i__1,*nrhs), i__2 = *n - *m * 3;
+ if ((*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2)
+ )) {
+
+/*
+ Path 2a - underdetermined, with many more columns than rows
+ and sufficient workspace for an efficient algorithm.
+*/
+
+ ldwork = *m;
+/*
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * *
+ lda + *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/*
+ Compute A=L*Q.
+ (CWorkspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__1, &i__2, &c_b59, &c_b59, &work[il + ldwork], &
+ ldwork);
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/*
+ Bidiagonalize L in WORK(IL).
+ (RWorkspace: need M)
+ (CWorkspace: need M*M+4*M, prefer M*M+4*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of L.
+ (CWorkspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ zlalsd_("U", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ zlaset_("F", &i__1, nrhs, &c_b59, &c_b59, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/*
+ Multiply transpose(Q) by B.
+ (CWorkspace: need NRHS, prefer NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/*
+ Bidiagonalize A.
+ (RWorkspace: need M)
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors.
+ (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ zlalsd_("L", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ zlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ d__1 = (doublereal) maxwrk;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ return 0;
+
+/* End of ZGELSD */
+
+} /* zgelsd_ */
+
+/* Subroutine */ int zgeqr2_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+/*
+   NOTE(review): f2c-translated Fortran.  The "static" local variables
+   below are an f2c translation artifact; they make this routine
+   non-reentrant (not thread-safe).
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, k;
+    static doublecomplex alpha;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZGEQR2 computes a QR factorization of a complex m by n matrix A:
+    A = Q * R.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the m by n matrix A.
+            On exit, the elements on and above the diagonal of the array
+            contain the min(m,n) by n upper trapezoidal matrix R (R is
+            upper triangular if m >= n); the elements below the diagonal,
+            with the array TAU, represent the unitary matrix Q as a
+            product of elementary reflectors (see Further Details).
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    TAU     (output) COMPLEX*16 array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace) COMPLEX*16 array, dimension (N)
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of elementary reflectors
+
+       Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+    and tau in TAU(i).
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+/*
+   Shift the array pointers so that the Fortran-style 1-based,
+   column-major indexing a[i + j*a_dim1] used below is valid in C.
+*/
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEQR2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+	i__2 = *m - i__ + 1;
+/*
+   min(i+1,m) keeps the vector pointer in bounds when i == m (the
+   reflector tail is then empty).
+*/
+/* Computing MIN */
+	i__3 = i__ + 1;
+	zlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+		, &c__1, &tau[i__]);
+	if (i__ < *n) {
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+/*
+   Temporarily set A(i,i) = 1 so A(i:m,i) holds the reflector vector v
+   with unit first element; the saved diagonal (alpha) is restored below.
+*/
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = 1., a[i__2].i = 0.;
+	    i__2 = *m - i__ + 1;
+	    i__3 = *n - i__;
+	    d_cnjg(&z__1, &tau[i__]);
+	    zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &z__1,
+		     &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of ZGEQR2 */
+
+} /* zgeqr2_ */
+
+/* Subroutine */ int zgeqrf_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+	 integer *info)
+{
+/*
+   NOTE(review): f2c-translated Fortran.  The "static" local variables
+   below are an f2c translation artifact; they make this routine
+   non-reentrant (not thread-safe).
+*/
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int zgeqr2_(integer *, integer *, doublecomplex *,
+	     integer *, doublecomplex *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    static integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *);
+    static integer lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZGEQRF computes a QR factorization of a complex M-by-N matrix A:
+    A = Q * R.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the M-by-N matrix A.
+            On exit, the elements on and above the diagonal of the array
+            contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+            upper triangular if m >= n); the elements below the diagonal,
+            with the array TAU, represent the unitary matrix Q as a
+            product of min(m,n) elementary reflectors (see Further
+            Details).
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    TAU     (output) COMPLEX*16 array, dimension (min(M,N))
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.  LWORK >= max(1,N).
+            For optimum performance LWORK >= N*NB, where NB is
+            the optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of elementary reflectors
+
+       Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+    and tau in TAU(i).
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+/*
+   Shift the array pointers so that the Fortran-style 1-based,
+   column-major indexing a[i + j*a_dim1] used below is valid in C.
+*/
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+	    1);
+    lwkopt = *n * nb;
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+/*
+   A workspace query (LWORK = -1) only reports the optimal LWORK in
+   WORK(1) and returns without factorizing.
+*/
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if ((*lwork < max(1,*n) && ! lquery)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEQRF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if ((nb > 1 && nb < k)) {
+
+/*
+          Determine when to cross over from blocked to unblocked code.
+
+   Computing MAX
+*/
+	i__1 = 0, i__2 = ilaenv_(&c__3, "ZGEQRF", " ", m, n, &c_n1, &c_n1, (
+		ftnlen)6, (ftnlen)1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/*
+                Not enough workspace to use optimal NB:  reduce NB and
+                determine the minimum value of NB.
+*/
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEQRF", " ", m, n, &c_n1, &
+			c_n1, (ftnlen)6, (ftnlen)1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (((nb >= nbmin && nb < k) && nx < k)) {
+
+/* Use blocked code initially */
+
+	i__1 = k - nx;
+	i__2 = nb;
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/*
+             Compute the QR factorization of the current block
+             A(i:m,i:i+ib-1)
+*/
+
+	    i__3 = *m - i__ + 1;
+	    zgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *n) {
+
+/*
+                Form the triangular factor of the block reflector
+                H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+		i__3 = *m - i__ + 1;
+		zlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+		i__3 = *m - i__ + 1;
+		i__4 = *n - i__ - ib + 1;
+		zlarfb_("Left", "Conjugate transpose", "Forward", "Columnwise"
+			, &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &
+			work[1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda,
+			&work[ib + 1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+/* Blocked code not used: the unblocked call below factors everything. */
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	zgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+		, &iinfo);
+    }
+
+/* Report the workspace actually used by the path taken (IWS). */
+    work[1].r = (doublereal) iws, work[1].i = 0.;
+    return 0;
+
+/* End of ZGEQRF */
+
+} /* zgeqrf_ */
+
+/* Subroutine */ int zgesdd_(char *jobz, integer *m, integer *n,
+ doublecomplex *a, integer *lda, doublereal *s, doublecomplex *u,
+ integer *ldu, doublecomplex *vt, integer *ldvt, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, ie, il, ir, iu, blk;
+ static doublereal dum[1], eps;
+ static integer iru, ivt, iscl;
+ static doublereal anrm;
+ static integer idum[1], ierr, itau, irvt;
+ extern logical lsame_(char *, char *);
+ static integer chunk, minmn;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ static integer wrkbl, itaup, itauq;
+ static logical wntqa;
+ static integer nwork;
+ static logical wntqn, wntqo, wntqs;
+ extern /* Subroutine */ int zlacp2_(char *, integer *, integer *,
+ doublereal *, integer *, doublecomplex *, integer *);
+ static integer mnthr1, mnthr2;
+ extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), xerbla_(char *, integer *),
+ zgebrd_(integer *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static doublereal bignum;
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+ ), zlacrm_(integer *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *, doublecomplex *, integer *, doublereal *)
+ , zlarcm_(integer *, integer *, doublereal *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *), zlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ integer *), zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+ );
+ static integer ldwrkl;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ static integer ldwrkr, minwrk, ldwrku, maxwrk;
+ extern /* Subroutine */ int zungbr_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ static integer ldwkvt;
+ static doublereal smlnum;
+ static logical wntqas;
+ extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+ ), zunglq_(integer *, integer *, integer *
+ , doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ static logical lquery;
+ static integer nrwork;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ ZGESDD computes the singular value decomposition (SVD) of a complex
+ M-by-N matrix A, optionally computing the left and/or right singular
+ vectors, by using divide-and-conquer method. The SVD is written
+
+ A = U * SIGMA * conjugate-transpose(V)
+
+ where SIGMA is an M-by-N matrix which is zero except for its
+ min(m,n) diagonal elements, U is an M-by-M unitary matrix, and
+ V is an N-by-N unitary matrix. The diagonal elements of SIGMA
+ are the singular values of A; they are real and non-negative, and
+ are returned in descending order. The first min(m,n) columns of
+ U and V are the left and right singular vectors of A.
+
+ Note that the routine returns VT = V**H, not V.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ Specifies options for computing all or part of the matrix U:
+ = 'A': all M columns of U and all N rows of V**H are
+ returned in the arrays U and VT;
+ = 'S': the first min(M,N) columns of U and the first
+ min(M,N) rows of V**H are returned in the arrays U
+ and VT;
+ = 'O': If M >= N, the first N columns of U are overwritten
+ on the array A and all rows of V**H are returned in
+ the array VT;
+ otherwise, all columns of U are returned in the
+ array U and the first M rows of V**H are overwritten
+ in the array VT;
+ = 'N': no columns of U or rows of V**H are computed.
+
+ M (input) INTEGER
+ The number of rows of the input matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the input matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit,
+ if JOBZ = 'O', A is overwritten with the first N columns
+ of U (the left singular vectors, stored
+ columnwise) if M >= N;
+ A is overwritten with the first M rows
+ of V**H (the right singular vectors, stored
+ rowwise) otherwise.
+ if JOBZ .ne. 'O', the contents of A are destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ S (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The singular values of A, sorted so that S(i) >= S(i+1).
+
+ U (output) COMPLEX*16 array, dimension (LDU,UCOL)
+ UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N;
+ UCOL = min(M,N) if JOBZ = 'S'.
+ If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M
+ unitary matrix U;
+ if JOBZ = 'S', U contains the first min(M,N) columns of U
+ (the left singular vectors, stored columnwise);
+ if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1; if
+ JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M.
+
+ VT (output) COMPLEX*16 array, dimension (LDVT,N)
+ If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the
+ N-by-N unitary matrix V**H;
+ if JOBZ = 'S', VT contains the first min(M,N) rows of
+ V**H (the right singular vectors, stored rowwise);
+ if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1; if
+ JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N;
+ if JOBZ = 'S', LDVT >= min(M,N).
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ if JOBZ = 'N', LWORK >= 2*min(M,N)+max(M,N).
+ if JOBZ = 'O',
+ LWORK >= 2*min(M,N)*min(M,N)+2*min(M,N)+max(M,N).
+ if JOBZ = 'S' or 'A',
+ LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N).
+ For good performance, LWORK should generally be larger.
+ If LWORK < 0 but other input arguments are legal, WORK(1)
+ returns the optimal LWORK.
+
+ RWORK (workspace) DOUBLE PRECISION array, dimension (LRWORK)
+ If JOBZ = 'N', LRWORK >= 7*min(M,N).
+ Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 5*min(M,N)
+
+ IWORK (workspace) INTEGER array, dimension (8*min(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The updating process of DBDSDC did not converge.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1 * 1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1 * 1;
+ vt -= vt_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr1 = (integer) (minmn * 17. / 9.);
+ mnthr2 = (integer) (minmn * 5. / 3.);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = wntqa || wntqs;
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+ lquery = *lwork == -1;
+
+ if (! (wntqa || wntqs || wntqo || wntqn)) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldu < 1 || (wntqas && *ldu < *m) || ((wntqo && *m < *n) && *
+ ldu < *m)) {
+ *info = -8;
+ } else if (*ldvt < 1 || (wntqa && *ldvt < *n) || (wntqs && *ldvt < minmn)
+ || ((wntqo && *m >= *n) && *ldvt < *n)) {
+ *info = -10;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ CWorkspace refers to complex workspace, and RWorkspace to
+ real workspace. NB refers to the optimal block size for the
+ immediately following subroutine, as returned by ILAENV.)
+*/
+
+ if (((*info == 0 && *m > 0) && *n > 0)) {
+ if (*m >= *n) {
+
+/*
+ There is no complex work space needed for bidiagonal SVD
+ The real work space needed for bidiagonal SVD is BDSPAC,
+ BDSPAC = 3*N*N + 4*N
+*/
+
+ if (*m >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl;
+ minwrk = *n * 3;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *n * *n + wrkbl;
+ minwrk = ((*n) << (1)) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + ((*n) << (1)) + *m;
+ }
+ } else if (*m >= mnthr2) {
+
+/* Path 5 (M much larger than N, but not as much as MNTHR1) */
+
+ maxwrk = ((*n) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*n) << (1)) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6 (M at least N, but not much larger) */
+
+ maxwrk = ((*n) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*n) << (1)) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ } else {
+
+/*
+ There is no complex work space needed for bidiagonal SVD
+ The real work space needed for bidiagonal SVD is BDSPAC,
+ BDSPAC = 3*M*M + 4*M
+*/
+
+ if (*n >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *m * *m + wrkbl;
+ minwrk = ((*m) << (1)) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "ZUNGLQ",
+ " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + ((*m) << (1)) + *n;
+ }
+ } else if (*n >= mnthr2) {
+
+/* Path 5t (N much larger than M, but not as much as MNTHR1) */
+
+ maxwrk = ((*m) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*m) << (1)) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, n, m, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, n, m, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, m, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6t (N greater than M, but not much larger) */
+
+ maxwrk = ((*m) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*m) << (1)) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, n, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "PRC", m, n, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "PRC", n, n, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ }
+
+ if ((*lwork < minwrk && ! lquery)) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ if (*lwork >= 1) {
+ work[1].r = 1., work[1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = PRECISION;
+ smlnum = sqrt(SAFEMINIMUM) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if ((anrm > 0. && anrm < smlnum)) {
+ iscl = 1;
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/*
+ A has at least as many rows as columns. If A has sufficiently
+ more rows than columns, first reduce using the QR
+ decomposition (if sufficient workspace available)
+*/
+
+ if (*m >= mnthr1) {
+
+ if (wntqn) {
+
+/*
+ Path 1 (M much larger than N, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: need 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b59, &c_b59, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (CWorkspace: need 3*N, prefer 2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nrwork = ie + *n;
+
+/*
+ Perform bidiagonal SVD, compute singular values only
+ (CWorkspace: 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2 (M much larger than N, JOBZ='O')
+ N left singular vectors to be overwritten on A and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ if (*lwork >= *m * *n + *n * *n + *n * 3) {
+
+/* WORK(IR) is M by N */
+
+ ldwrkr = *m;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (CWorkspace: need N*N+2*N, prefer M*N+N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK( IR ), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b59, &c_b59, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of R in WORK(IRU) and computing right singular vectors
+ of R in WORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by the left singular vectors of R
+ (CWorkspace: need 2*N*N+3*N, prefer M*N+N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by the right singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IU), storing result in WORK(IR) and copying to A
+ (CWorkspace: need 2*N*N, prefer N*N+M*N)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ zgemm_("N", "N", &chunk, n, n, &c_b60, &a[i__ + a_dim1],
+ lda, &work[iu], &ldwrku, &c_b59, &work[ir], &
+ ldwrkr);
+ zlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3 (M much larger than N, JOBZ='S')
+ N left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ zlaset_("L", &i__2, &i__1, &c_b59, &c_b59, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IR), storing result in U
+ (CWorkspace: need N*N)
+ (RWorkspace: 0)
+*/
+
+ zlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ zgemm_("N", "N", m, n, n, &c_b60, &a[a_offset], lda, &work[ir]
+ , &ldwrkr, &c_b59, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/*
+ Path 4 (M much larger than N, JOBZ='A')
+ M left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R, copying result to U
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Generate Q in U
+ (CWorkspace: need N+M, prefer N+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out below it */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ zlaset_("L", &i__2, &i__1, &c_b59, &c_b59, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (CWorkspace: need 3*N, prefer 2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by left singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of R
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in U by left singular vectors of R in
+ WORK(IU), storing result in A
+ (CWorkspace: need N*N)
+ (RWorkspace: 0)
+*/
+
+ zgemm_("N", "N", m, n, n, &c_b60, &u[u_offset], ldu, &work[iu]
+ , &ldwrku, &c_b59, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else if (*m >= mnthr2) {
+
+/*
+ MNTHR2 <= M < MNTHR1
+
+ Path 5 (M much larger than N, but not as much as MNTHR1)
+ Reduce to bidiagonal form without QR decomposition, use
+ ZUNGBR and matrix multiplication to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB)
+ (RWorkspace: need N)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Generate Q in A
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK(IU) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in WORK(IU), copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &work[iu]
+ , &ldwrku, &rwork[nrwork]);
+ zlacpy_("F", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt);
+
+/*
+ Multiply Q in A by real matrix RWORK(IRU), storing the
+ result in WORK(IU), copying to A
+ (CWorkspace: need N*N, prefer M*N)
+ (Rworkspace: need 3*N*N, prefer N*N+2*M*N)
+*/
+
+ nrwork = irvt;
+ i__2 = *m;
+ i__1 = ldwrku;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ zlacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru], n,
+ &work[iu], &ldwrku, &rwork[nrwork]);
+ zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, n, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: need 0)
+ (Rworkspace: need N*N+2*M*N)
+*/
+
+ nrwork = irvt;
+ zlacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ } else {
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ nrwork = irvt;
+ zlacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else {
+
+/*
+ M .LT. MNTHR2
+
+ Path 6 (M at least N, but not much larger)
+ Reduce to bidiagonal form without QR decomposition
+ Use ZUNMBR to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB)
+ (RWorkspace: need N)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK( IU ) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: need 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by left singular vectors of A, copying
+ to A
+ (Cworkspace: need M*N+2*N, prefer M*N+N+N*NB)
+ (Rworkspace: need 0)
+*/
+
+ zlaset_("F", m, n, &c_b59, &c_b59, &work[iu], &ldwrku);
+ zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+ zlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate Q in A
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: need 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__1, &ierr);
+
+/*
+ Multiply Q in A by real matrix RWORK(IRU), storing the
+ result in WORK(IU), copying to A
+ (CWorkspace: need N*N, prefer M*N)
+ (Rworkspace: need 3*N*N, prefer N*N+2*M*N)
+*/
+
+ nrwork = irvt;
+ i__1 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ zlacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru],
+ n, &work[iu], &ldwrku, &rwork[nrwork]);
+ zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L30: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlaset_("F", m, n, &c_b59, &c_b59, &u[u_offset], ldu);
+ zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ } else {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ zlaset_("F", m, m, &c_b59, &c_b59, &u[u_offset], ldu);
+ i__2 = *m - *n;
+ i__1 = *m - *n;
+ zlaset_("F", &i__2, &i__1, &c_b59, &c_b60, &u[*n + 1 + (*n +
+ 1) * u_dim1], ldu);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 2*N+M, prefer 2*N+M*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/*
+ A has more columns than rows. If A has sufficiently more
+ columns than rows, first reduce using the LQ decomposition
+ (if sufficient workspace available)
+*/
+
+ if (*n >= mnthr1) {
+
+ if (wntqn) {
+
+/*
+ Path 1t (N much larger than M, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ zlaset_("U", &i__2, &i__1, &c_b59, &c_b59, &a[((a_dim1) << (1)
+ ) + 1], lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (CWorkspace: need 3*M, prefer 2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ nrwork = ie + *m;
+
+/*
+ Perform bidiagonal SVD, compute singular values only
+ (CWorkspace: 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ dbdsdc_("U", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2t (N much larger than M, JOBZ='O')
+ M right singular vectors to be overwritten on A and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+ ldwkvt = *m;
+
+/* WORK(IVT) is M by M */
+
+ il = ivt + ldwkvt * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3) {
+
+/* WORK(IL) M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+
+/* WORK(IL) is M by CHUNK */
+
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+ itau = il + ldwrkl * chunk;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing about above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ zlaset_("U", &i__2, &i__1, &c_b59, &c_b59, &work[il + ldwrkl],
+ &ldwrkl);
+
+/*
+ Generate Q in A
+ (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by the left singular vectors of L
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT)
+ Overwrite WORK(IVT) by the right singular vectors of L
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IL) by Q
+ in A, storing result in WORK(IL) and copying to A
+ (CWorkspace: need 2*M*M, prefer M*M+M*N))
+ (RWorkspace: 0)
+*/
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ zgemm_("N", "N", m, &blk, m, &c_b60, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b59, &work[il], &
+ ldwrkl);
+ zlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L40: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3t (N much larger than M, JOBZ='S')
+ M right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__1, &i__2, &c_b59, &c_b59, &work[il + ldwrkl],
+ &ldwrkl);
+
+/*
+ Generate Q in A
+ (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of L
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by left singular vectors of L
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Copy VT to WORK(IL), multiply right singular vectors of L
+ in WORK(IL) by Q in A, storing result in VT
+ (CWorkspace: need M*M)
+ (RWorkspace: 0)
+*/
+
+ zlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ zgemm_("N", "N", m, n, m, &c_b60, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b59, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/*
+ Path 9t (N much larger than M, JOBZ='A')
+ N right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q, copying result to VT
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Generate Q in VT
+ (CWorkspace: need M+N, prefer M+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__1, &ierr);
+
+/* Produce L in A, zeroing out above it */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__1, &i__2, &c_b59, &c_b59, &a[((a_dim1) << (1)
+ ) + 1], lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of L
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT)
+ Overwrite WORK(IVT) by right singular vectors of L
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by
+ Q in VT, storing result in A
+ (CWorkspace: need M*M)
+ (RWorkspace: 0)
+*/
+
+ zgemm_("N", "N", m, n, m, &c_b60, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b59, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else if (*n >= mnthr2) {
+
+/*
+ MNTHR2 <= N < MNTHR1
+
+ Path 5t (N much larger than M, but not as much as MNTHR1)
+ Reduce to bidiagonal form without QR decomposition, use
+ ZUNGBR and matrix multiplication to compute singular vectors
+*/
+
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+ (RWorkspace: M)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ dbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ ivt = nwork;
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/*
+ Generate P**H in A
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ nwork], &i__1, &ierr);
+
+ ldwkvt = *m;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ nwork = ivt + ldwkvt * *n;
+ chunk = *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRVT)
+ storing the result in WORK(IVT), copying to U
+ (Cworkspace: need 0)
+ (Rworkspace: need 2*M*M)
+*/
+
+ zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &work[ivt], &
+ ldwkvt, &rwork[nrwork]);
+ zlacpy_("F", m, m, &work[ivt], &ldwkvt, &u[u_offset], ldu);
+
+/*
+ Multiply RWORK(IRVT) by P**H in A, storing the
+ result in WORK(IVT), copying to A
+ (CWorkspace: need M*M, prefer M*N)
+ (Rworkspace: need 2*M*M, prefer 2*M*N)
+*/
+
+ nrwork = iru;
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ zlarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1],
+ lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ zlacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L50: */
+ }
+ } else if (wntqs) {
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", m, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: need 0)
+ (Rworkspace: need 3*M*M)
+*/
+
+ zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need M*M+2*M*N)
+*/
+
+ nrwork = iru;
+ zlarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ } else {
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", n, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: need 0)
+ (Rworkspace: need 3*M*M)
+*/
+
+ zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need M*M+2*M*N)
+*/
+
+ zlarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ }
+
+ } else {
+
+/*
+ N .LT. MNTHR2
+
+ Path 6t (N greater than M, but not much larger)
+ Reduce to bidiagonal form without LQ decomposition
+ Use ZUNMBR to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+ (RWorkspace: M)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ dbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ zlaset_("F", m, n, &c_b59, &c_b59, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: need 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3) {
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT)
+ Overwrite WORK(IVT) by right singular vectors of A,
+ copying to A
+ (Cworkspace: need M*N+2*M, prefer M*N+M+M*NB)
+ (Rworkspace: need 0)
+*/
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+ zlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate P**H in A
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: need 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by real matrix RWORK(IRU), storing the
+ result in WORK(IU), copying to A
+ (CWorkspace: need M*M, prefer M*N)
+ (Rworkspace: need 3*M*M, prefer M*M+2*M*N)
+*/
+
+ nrwork = iru;
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ zlarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1]
+ , lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ zlacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L60: */
+ }
+ }
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: M*M)
+*/
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: M*M)
+*/
+
+ zlaset_("F", m, n, &c_b59, &c_b59, &vt[vt_offset], ldvt);
+ zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: M*M)
+*/
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Set the right corner of VT to identity matrix */
+
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ zlaset_("F", &i__1, &i__2, &c_b59, &c_b60, &vt[*m + 1 + (*m +
+ 1) * vt_dim1], ldvt);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 2*M+N, prefer 2*M+N*NB)
+ (RWorkspace: M*M)
+*/
+
+ zlaset_("F", n, n, &c_b59, &c_b59, &vt[vt_offset], ldvt);
+ zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGESDD */
+
+} /* zgesdd_ */
+
+/* Subroutine */ int zgesv_(integer *n, integer *nrhs, doublecomplex *a,
+	integer *lda, integer *ipiv, doublecomplex *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int xerbla_(char *, integer *), zgetrf_(
+	    integer *, integer *, doublecomplex *, integer *, integer *,
+	    integer *), zgetrs_(char *, integer *, integer *, doublecomplex *,
+	    integer *, integer *, doublecomplex *, integer *, integer *);
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       March 31, 1993
+
+
+    Purpose
+    =======
+
+    ZGESV computes the solution to a complex system of linear equations
+       A * X = B,
+    where A is an N-by-N matrix and X and B are N-by-NRHS matrices.
+
+    The LU decomposition with partial pivoting and row interchanges is
+    used to factor A as
+       A = P * L * U,
+    where P is a permutation matrix, L is unit lower triangular, and U is
+    upper triangular.  The factored form of A is then used to solve the
+    system of equations A * X = B.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The number of linear equations, i.e., the order of the
+            matrix A.  N >= 0.
+
+    NRHS    (input) INTEGER
+            The number of right hand sides, i.e., the number of columns
+            of the matrix B.  NRHS >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the N-by-N coefficient matrix A.
+            On exit, the factors L and U from the factorization
+            A = P*L*U; the unit diagonal elements of L are not stored.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    IPIV    (output) INTEGER array, dimension (N)
+            The pivot indices that define the permutation matrix P;
+            row i of the matrix was interchanged with row IPIV(i).
+
+    B       (input/output) COMPLEX*16 array, dimension (LDB,NRHS)
+            On entry, the N-by-NRHS matrix of right hand side matrix B.
+            On exit, if INFO = 0, the N-by-NRHS solution matrix X.
+
+    LDB     (input) INTEGER
+            The leading dimension of the array B.  LDB >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, U(i,i) is exactly zero.  The factorization
+                  has been completed, but the factor U is exactly
+                  singular, so the solution could not be computed.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+/*
+       f2c translation note: the pointer shifts below convert the C
+       0-based arrays to Fortran-style 1-based indexing, so that
+       a[i + j*a_dim1], ipiv[i] and b[i + j*b_dim1] address element
+       (i,j) / (i) exactly as in the original Fortran source.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+
+    /* Function Body */
+/*     Each negative INFO value identifies the offending argument by
+       its (1-based) position in the Fortran argument list. */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+/*         Report the bad argument number (positive) through XERBLA. */
+	i__1 = -(*info);
+	xerbla_("ZGESV ", &i__1);
+	return 0;
+    }
+
+/* Compute the LU factorization of A. */
+
+    zgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+/*         Only reached when U is nonsingular; a positive INFO from
+           ZGETRF is passed back to the caller unchanged. */
+	zgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+		b_offset], ldb, info);
+    }
+    return 0;
+
+/* End of ZGESV */
+
+} /* zgesv_ */
+
+/* Subroutine */ int zgetf2_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer j, jp;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), zgeru_(integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), zswap_(integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZGETF2 computes an LU factorization of a general m-by-n matrix A
+    using partial pivoting with row interchanges.
+
+    The factorization has the form
+       A = P * L * U
+    where P is a permutation matrix, L is lower triangular with unit
+    diagonal elements (lower trapezoidal if m > n), and U is upper
+    triangular (upper trapezoidal if m < n).
+
+    This is the right-looking Level 2 BLAS version of the algorithm.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the m by n matrix to be factored.
+            On exit, the factors L and U from the factorization
+            A = P*L*U; the unit diagonal elements of L are not stored.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    IPIV    (output) INTEGER array, dimension (min(M,N))
+            The pivot indices; for 1 <= i <= min(M,N), row i of the
+            matrix was interchanged with row IPIV(i).
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -k, the k-th argument had an illegal value
+            > 0: if INFO = k, U(k,k) is exactly zero. The factorization
+                 has been completed, but the factor U is exactly
+                 singular, and division by zero will occur if it is used
+                 to solve a system of equations.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+/*     f2c translation note: shift pointers so a[i + j*a_dim1] and
+       ipiv[j] use Fortran-style 1-based indexing. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGETF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/*     Classical right-looking elimination, one column per iteration. */
+    i__1 = min(*m,*n);
+    for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+/*         izamax_ returns a 1-based offset within the subcolumn
+           A(j:m,j); adding j-1 converts it to a row index of A. */
+	i__2 = *m - j + 1;
+	jp = j - 1 + izamax_(&i__2, &a[j + j * a_dim1], &c__1);
+	ipiv[j] = jp;
+	i__2 = jp + j * a_dim1;
+	if (a[i__2].r != 0. || a[i__2].i != 0.) {
+
+/* Apply the interchange to columns 1:N. */
+
+	    if (jp != j) {
+		zswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+	    }
+
+/* Compute elements J+1:M of J-th column. */
+
+/*             Scale the subcolumn by the pivot reciprocal:
+               z__1 = c_b60 / A(j,j), where c_b60 is the file-scope
+               complex-one constant (also used as the zgemm/ztrsm alpha
+               elsewhere in this file). */
+	    if (j < *m) {
+		i__2 = *m - j;
+		z_div(&z__1, &c_b60, &a[j + j * a_dim1]);
+		zscal_(&i__2, &z__1, &a[j + 1 + j * a_dim1], &c__1);
+	    }
+
+	} else if (*info == 0) {
+
+/*             Zero pivot: record the FIRST such column only; the
+               factorization still runs to completion. */
+	    *info = j;
+	}
+
+	if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+/*             Rank-1 update A(j+1:m,j+1:n) -= A(j+1:m,j)*A(j,j+1:n),
+               with alpha = -1 (+0i). */
+	    i__2 = *m - j;
+	    i__3 = *n - j;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__2, &i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &a[j +
+		    (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda)
+		    ;
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of ZGETF2 */
+
+} /* zgetf2_ */
+
+/* Subroutine */ int zgetrf_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__, j, jb, nb, iinfo;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), ztrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *
+	    , doublecomplex *, integer *),
+	    zgetf2_(integer *, integer *, doublecomplex *, integer *, integer
+	    *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
+	     integer *, integer *, integer *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZGETRF computes an LU factorization of a general M-by-N matrix A
+    using partial pivoting with row interchanges.
+
+    The factorization has the form
+       A = P * L * U
+    where P is a permutation matrix, L is lower triangular with unit
+    diagonal elements (lower trapezoidal if m > n), and U is upper
+    triangular (upper trapezoidal if m < n).
+
+    This is the right-looking Level 3 BLAS version of the algorithm.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the M-by-N matrix to be factored.
+            On exit, the factors L and U from the factorization
+            A = P*L*U; the unit diagonal elements of L are not stored.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    IPIV    (output) INTEGER array, dimension (min(M,N))
+            The pivot indices; for 1 <= i <= min(M,N), row i of the
+            matrix was interchanged with row IPIV(i).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, U(i,i) is exactly zero. The factorization
+                  has been completed, but the factor U is exactly
+                  singular, and division by zero will occur if it is used
+                  to solve a system of equations.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+/*     f2c translation note: shift pointers to Fortran-style 1-based
+       indexing, a[i + j*a_dim1] == A(i,j). */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGETRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "ZGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+	    1);
+    if (nb <= 1 || nb >= min(*m,*n)) {
+
+/* Use unblocked code. */
+
+	zgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+    } else {
+
+/* Use blocked code. */
+
+/*         f2c rendering of "DO J = 1, MIN(M,N), NB": the ternary picks
+           the loop test by the sign of the step; NB is positive here,
+           so effectively j <= min(m,n). */
+	i__1 = min(*m,*n);
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = min(*m,*n) - j + 1;
+	    jb = min(i__3,nb);
+
+/*
+             Factor diagonal and subdiagonal blocks and test for exact
+             singularity.
+*/
+
+	    i__3 = *m - j + 1;
+	    zgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+/*             Keep only the FIRST zero pivot; offset iinfo from the
+               panel's local numbering to the global column index. */
+	    if ((*info == 0 && iinfo > 0)) {
+		*info = iinfo + j - 1;
+	    }
+/* Computing MIN */
+/*             Likewise convert the panel's local pivot rows to global
+               row indices of A. */
+	    i__4 = *m, i__5 = j + jb - 1;
+	    i__3 = min(i__4,i__5);
+	    for (i__ = j; i__ <= i__3; ++i__) {
+		ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+	    }
+
+/* Apply interchanges to columns 1:J-1. */
+
+	    i__3 = j - 1;
+	    i__4 = j + jb - 1;
+	    zlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+	    if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+		i__3 = *n - j - jb + 1;
+		i__4 = j + jb - 1;
+		zlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+			ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+/*                 Triangular solve with the unit-lower panel factor;
+                   c_b60 is the file-scope complex-one alpha. */
+		i__3 = *n - j - jb + 1;
+		ztrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+			c_b60, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+			a_dim1], lda);
+		if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+/*                     Schur-complement update with alpha = -1 (+0i),
+                       beta = 1 (c_b60). */
+		    i__3 = *m - j - jb + 1;
+		    i__4 = *n - j - jb + 1;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+			    &z__1, &a[j + jb + j * a_dim1], lda, &a[j + (j +
+			    jb) * a_dim1], lda, &c_b60, &a[j + jb + (j + jb) *
+			     a_dim1], lda);
+		}
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/* End of ZGETRF */
+
+} /* zgetrf_ */
+
+/* Subroutine */ int zgetrs_(char *trans, integer *n, integer *nrhs,
+	doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b,
+	integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+	     doublecomplex *, integer *),
+	    xerbla_(char *, integer *);
+    static logical notran;
+    extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
+	     integer *, integer *, integer *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZGETRS solves a system of linear equations
+       A * X = B,  A**T * X = B,  or  A**H * X = B
+    with a general N-by-N matrix A using the LU factorization computed
+    by ZGETRF.
+
+    Arguments
+    =========
+
+    TRANS   (input) CHARACTER*1
+            Specifies the form of the system of equations:
+            = 'N':  A * X = B     (No transpose)
+            = 'T':  A**T * X = B  (Transpose)
+            = 'C':  A**H * X = B  (Conjugate transpose)
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    NRHS    (input) INTEGER
+            The number of right hand sides, i.e., the number of columns
+            of the matrix B.  NRHS >= 0.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,N)
+            The factors L and U from the factorization A = P*L*U
+            as computed by ZGETRF.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    IPIV    (input) INTEGER array, dimension (N)
+            The pivot indices from ZGETRF; for 1<=i<=N, row i of the
+            matrix was interchanged with row IPIV(i).
+
+    B       (input/output) COMPLEX*16 array, dimension (LDB,NRHS)
+            On entry, the right hand side matrix B.
+            On exit, the solution matrix X.
+
+    LDB     (input) INTEGER
+            The leading dimension of the array B.  LDB >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+/*     f2c translation note: shift pointers to Fortran-style 1-based
+       indexing for A, IPIV and B. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    notran = lsame_(trans, "N");
+/*     TRANS must be one of 'N', 'T', 'C' (case-insensitive via LSAME). */
+    if (((! notran && ! lsame_(trans, "T")) && ! lsame_(
+	    trans, "C"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGETRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (notran) {
+
+/*
+          Solve A * X = B.
+
+          Apply row interchanges to the right hand sides.
+*/
+
+/*         Forward order (INCX = c__1): mirrors the order in which
+           ZGETRF generated the pivots. */
+	zlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+/*         L has unit diagonal ("Unit"); c_b60 is the complex-one alpha. */
+	ztrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b60, &a[
+		a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b60, &
+		a[a_offset], lda, &b[b_offset], ldb);
+    } else {
+
+/*
+          Solve A**T * X = B or A**H * X = B.
+
+          Solve U'*X = B, overwriting B with X.
+*/
+
+/*         Transposed systems reverse the factor order: U' first, then
+           L', then the interchanges.  TRANS ('T' or 'C') is forwarded
+           to ZTRSM so it selects transpose vs conjugate transpose. */
+	ztrsm_("Left", "Upper", trans, "Non-unit", n, nrhs, &c_b60, &a[
+		a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Lower", trans, "Unit", n, nrhs, &c_b60, &a[a_offset],
+		lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+/*         c_n1 (file-scope constant -1, also used as the ILAENV
+           placeholder elsewhere) applies the interchanges in reverse
+           order, undoing the permutation. */
+	zlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+    }
+
+    return 0;
+
+/* End of ZGETRS */
+
+} /* zgetrs_ */
+
+/*
+   NOTE(review): f2c-translated LAPACK driver (ZHEEVD).  The `static`
+   storage class on the local variables below is f2c output; it makes this
+   routine non-reentrant and not thread-safe.  Constants such as c__0,
+   c__1, c_n1 and c_b1015 are file-level f2c constants defined elsewhere
+   in this file.
+*/
+/* Subroutine */ int zheevd_(char *jobz, char *uplo, integer *n,
+	doublecomplex *a, integer *lda, doublereal *w, doublecomplex *work,
+	integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork,
+	integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    static doublereal eps;
+    static integer inde;
+    static doublereal anrm;
+    static integer imax;
+    static doublereal rmin, rmax;
+    static integer lopt;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    static doublereal sigma;
+    extern logical lsame_(char *, char *);
+    static integer iinfo, lwmin, liopt;
+    static logical lower;
+    static integer llrwk, lropt;
+    static logical wantz;
+    static integer indwk2, llwrk2;
+
+    static integer iscale;
+    static doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static doublereal bignum;
+    extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    static integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *), zlascl_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublecomplex *, integer *,
+	    integer *), zstedc_(char *, integer *, doublereal *,
+	    doublereal *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublereal *, integer *, integer *, integer *, integer
+	    *);
+    static integer indrwk, indwrk, liwmin;
+    extern /* Subroutine */ int zhetrd_(char *, integer *, doublecomplex *,
+	    integer *, doublereal *, doublereal *, doublecomplex *,
+	    doublecomplex *, integer *, integer *), zlacpy_(char *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	     integer *);
+    static integer lrwmin, llwork;
+    static doublereal smlnum;
+    static logical lquery;
+    extern /* Subroutine */ int zunmtr_(char *, char *, char *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/*
+    -- LAPACK driver routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZHEEVD computes all eigenvalues and, optionally, eigenvectors of a
+    complex Hermitian matrix A.  If eigenvectors are desired, it uses a
+    divide and conquer algorithm.
+
+    The divide and conquer algorithm makes very mild assumptions about
+    floating point arithmetic. It will work on machines with a guard
+    digit in add/subtract, or on those binary machines without guard
+    digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+    Cray-2. It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.
+
+    Arguments
+    =========
+
+    JOBZ    (input) CHARACTER*1
+            = 'N':  Compute eigenvalues only;
+            = 'V':  Compute eigenvalues and eigenvectors.
+
+    UPLO    (input) CHARACTER*1
+            = 'U':  Upper triangle of A is stored;
+            = 'L':  Lower triangle of A is stored.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA, N)
+            On entry, the Hermitian matrix A.  If UPLO = 'U', the
+            leading N-by-N upper triangular part of A contains the
+            upper triangular part of the matrix A.  If UPLO = 'L',
+            the leading N-by-N lower triangular part of A contains
+            the lower triangular part of the matrix A.
+            On exit, if JOBZ = 'V', then if INFO = 0, A contains the
+            orthonormal eigenvectors of the matrix A.
+            If JOBZ = 'N', then on exit the lower triangle (if UPLO='L')
+            or the upper triangle (if UPLO='U') of A, including the
+            diagonal, is destroyed.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    W       (output) DOUBLE PRECISION array, dimension (N)
+            If INFO = 0, the eigenvalues in ascending order.
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The length of the array WORK.
+            If N <= 1,                LWORK must be at least 1.
+            If JOBZ  = 'N' and N > 1, LWORK must be at least N + 1.
+            If JOBZ  = 'V' and N > 1, LWORK must be at least 2*N + N**2.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    RWORK   (workspace/output) DOUBLE PRECISION array,
+                                           dimension (LRWORK)
+            On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK.
+
+    LRWORK  (input) INTEGER
+            The dimension of the array RWORK.
+            If N <= 1,                LRWORK must be at least 1.
+            If JOBZ  = 'N' and N > 1, LRWORK must be at least N.
+            If JOBZ  = 'V' and N > 1, LRWORK must be at least
+                           1 + 5*N + 2*N**2.
+
+            If LRWORK = -1, then a workspace query is assumed; the
+            routine only calculates the optimal size of the RWORK array,
+            returns this value as the first entry of the RWORK array, and
+            no error message related to LRWORK is issued by XERBLA.
+
+    IWORK   (workspace/output) INTEGER array, dimension (LIWORK)
+            On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+    LIWORK  (input) INTEGER
+            The dimension of the array IWORK.
+            If N <= 1,                LIWORK must be at least 1.
+            If JOBZ  = 'N' and N > 1, LIWORK must be at least 1.
+            If JOBZ  = 'V' and N > 1, LIWORK must be at least 3 + 5*N.
+
+            If LIWORK = -1, then a workspace query is assumed; the
+            routine only calculates the optimal size of the IWORK array,
+            returns this value as the first entry of the IWORK array, and
+            no error message related to LIWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, the algorithm failed to converge; i
+                  off-diagonal elements of an intermediate tridiagonal
+                  form did not converge to zero.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Jeff Rutter, Computer Science Division, University of California
+       at Berkeley, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --w;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+    *info = 0;
+/*  Minimal/optimal workspace sizes per the LWORK/LRWORK/LIWORK rules
+    documented in the header above. */
+    if (*n <= 1) {
+	lwmin = 1;
+	lrwmin = 1;
+	liwmin = 1;
+	lopt = lwmin;
+	lropt = lrwmin;
+	liopt = liwmin;
+    } else {
+	if (wantz) {
+	    lwmin = ((*n) << (1)) + *n * *n;
+/* Computing 2nd power */
+	    i__1 = *n;
+	    lrwmin = *n * 5 + 1 + ((i__1 * i__1) << (1));
+	    liwmin = *n * 5 + 3;
+	} else {
+	    lwmin = *n + 1;
+	    lrwmin = *n;
+	    liwmin = 1;
+	}
+	lopt = lwmin;
+	lropt = lrwmin;
+	liopt = liwmin;
+    }
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if ((*lwork < lwmin && ! lquery)) {
+	*info = -8;
+    } else if ((*lrwork < lrwmin && ! lquery)) {
+	*info = -10;
+    } else if ((*liwork < liwmin && ! lquery)) {
+	*info = -12;
+    }
+
+/*  Report the optimal workspace sizes (also serves the LWORK=-1 query). */
+    if (*info == 0) {
+	work[1].r = (doublereal) lopt, work[1].i = 0.;
+	rwork[1] = (doublereal) lropt;
+	iwork[1] = liopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHEEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	i__1 = a_dim1 + 1;
+	w[1] = a[i__1].r;
+	if (wantz) {
+	    i__1 = a_dim1 + 1;
+	    a[i__1].r = 1., a[i__1].i = 0.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = SAFEMINIMUM;
+    eps = PRECISION;
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = zlanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+    iscale = 0;
+    if ((anrm > 0. && anrm < rmin)) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	zlascl_(uplo, &c__0, &c__0, &c_b1015, &sigma, n, n, &a[a_offset], lda,
+		 info);
+    }
+
+/* Call ZHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+/*  Workspace is partitioned by 1-based indices into WORK/RWORK:
+    tau at indtau, the eigenvector matrix at indwrk (N*N), secondary
+    complex workspace at indwk2, and real workspace at indrwk. */
+    inde = 1;
+    indtau = 1;
+    indwrk = indtau + *n;
+    indrwk = inde + *n;
+    indwk2 = indwrk + *n * *n;
+    llwork = *lwork - indwrk + 1;
+    llwrk2 = *lwork - indwk2 + 1;
+    llrwk = *lrwork - indrwk + 1;
+    zhetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], &
+	    work[indwrk], &llwork, &iinfo);
+/* Computing MAX */
+    i__1 = indwrk;
+    d__1 = (doublereal) lopt, d__2 = (doublereal) (*n) + work[i__1].r;
+    lopt = (integer) max(d__1,d__2);
+
+/*
+       For eigenvalues only, call DSTERF.  For eigenvectors, first call
+       ZSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the
+       tridiagonal matrix, then call ZUNMTR to multiply it to the
+       Householder transformations represented as Householder vectors in
+       A.
+*/
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	zstedc_("I", n, &w[1], &rwork[inde], &work[indwrk], n, &work[indwk2],
+		&llwrk2, &rwork[indrwk], &llrwk, &iwork[1], liwork, info);
+	zunmtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+		indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+	zlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+/*
+   Computing MAX
+   Computing 2nd power
+*/
+	i__3 = *n;
+	i__4 = indwk2;
+	i__1 = lopt, i__2 = *n + i__3 * i__3 + (integer) work[i__4].r;
+	lopt = max(i__1,i__2);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+    work[1].r = (doublereal) lopt, work[1].i = 0.;
+    rwork[1] = (doublereal) lropt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/* End of ZHEEVD */
+
+} /* zheevd_ */
+
+/*
+   NOTE(review): f2c-translated LAPACK routine (ZHETD2, unblocked
+   Householder tridiagonalization).  The `static` local variables below
+   are f2c output and make this routine non-reentrant / not thread-safe.
+   c__1 and c_b59 are file-level f2c constants defined elsewhere.
+*/
+/* Subroutine */ int zhetd2_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tau,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Local variables */
+    static integer i__;
+    static doublecomplex taui;
+    extern /* Subroutine */ int zher2_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    static doublecomplex alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, doublecomplex *, integer *);
+    static logical upper;
+    extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *), zlarfg_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    ZHETD2 reduces a complex Hermitian matrix A to real symmetric
+    tridiagonal form T by a unitary similarity transformation:
+    Q' * A * Q = T.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            Specifies whether the upper or lower triangular part of the
+            Hermitian matrix A is stored:
+            = 'U':  Upper triangular
+            = 'L':  Lower triangular
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the Hermitian matrix A.  If UPLO = 'U', the leading
+            n-by-n upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading n-by-n lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+            On exit, if UPLO = 'U', the diagonal and first superdiagonal
+            of A are overwritten by the corresponding elements of the
+            tridiagonal matrix T, and the elements above the first
+            superdiagonal, with the array TAU, represent the unitary
+            matrix Q as a product of elementary reflectors; if UPLO
+            = 'L', the diagonal and first subdiagonal of A are over-
+            written by the corresponding elements of the tridiagonal
+            matrix T, and the elements below the first subdiagonal, with
+            the array TAU, represent the unitary matrix Q as a product
+            of elementary reflectors. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    D       (output) DOUBLE PRECISION array, dimension (N)
+            The diagonal elements of the tridiagonal matrix T:
+            D(i) = A(i,i).
+
+    E       (output) DOUBLE PRECISION array, dimension (N-1)
+            The off-diagonal elements of the tridiagonal matrix T:
+            E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+    TAU     (output) COMPLEX*16 array, dimension (N-1)
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    If UPLO = 'U', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(n-1) . . . H(2) H(1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+    A(1:i-1,i+1), and tau in TAU(i).
+
+    If UPLO = 'L', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(1) H(2) . . . H(n-1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+    and tau in TAU(i).
+
+    The contents of A on exit are illustrated by the following examples
+    with n = 5:
+
+    if UPLO = 'U':                       if UPLO = 'L':
+
+      (  d   e   v2  v3  v4 )              (  d                  )
+      (      d   e   v3  v4 )              (  e   d              )
+      (          d   e   v4 )              (  v1  e   d          )
+      (              d   e  )              (  v1  v2  e   d      )
+      (                  d  )              (  v1  v2  v3  e   d  )
+
+    where d and e denote diagonal and off-diagonal elements of T, and vi
+    denotes an element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETD2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A */
+
+/*      Force A(n,n) to be real (drop any imaginary part). */
+	i__1 = *n + *n * a_dim1;
+	i__2 = *n + *n * a_dim1;
+	d__1 = a[i__2].r;
+	a[i__1].r = d__1, a[i__1].i = 0.;
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/*
+             Generate elementary reflector H(i) = I - tau * v * v'
+             to annihilate A(1:i-1,i+1)
+*/
+
+	    i__1 = i__ + (i__ + 1) * a_dim1;
+	    alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+	    zlarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui);
+	    i__1 = i__;
+	    e[i__1] = alpha.r;
+
+	    if (taui.r != 0. || taui.i != 0.) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+		i__1 = i__ + (i__ + 1) * a_dim1;
+		a[i__1].r = 1., a[i__1].i = 0.;
+
+/* Compute  x := tau * A * v  storing x in TAU(1:i) */
+
+		zhemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+			a_dim1 + 1], &c__1, &c_b59, &tau[1], &c__1)
+			;
+
+/* Compute  w := x - 1/2 * tau * (x'*v) * v */
+
+		z__3.r = -.5, z__3.i = -0.;
+		z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r *
+			taui.i + z__3.i * taui.r;
+		zdotc_(&z__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1]
+			, &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		zaxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+			1], &c__1);
+
+/*
+                Apply the transformation as a rank-2 update:
+                   A := A - v * w' - w * v'
+*/
+
+		z__1.r = -1., z__1.i = -0.;
+		zher2_(uplo, &i__, &z__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, &
+			tau[1], &c__1, &a[a_offset], lda);
+
+	    } else {
+/*              tau == 0: H(i) is the identity; just keep A(i,i) real. */
+		i__1 = i__ + i__ * a_dim1;
+		i__2 = i__ + i__ * a_dim1;
+		d__1 = a[i__2].r;
+		a[i__1].r = d__1, a[i__1].i = 0.;
+	    }
+	    i__1 = i__ + (i__ + 1) * a_dim1;
+	    i__2 = i__;
+	    a[i__1].r = e[i__2], a[i__1].i = 0.;
+	    i__1 = i__ + 1;
+	    i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+	    d__[i__1] = a[i__2].r;
+	    i__1 = i__;
+	    tau[i__1].r = taui.r, tau[i__1].i = taui.i;
+/* L10: */
+	}
+	i__1 = a_dim1 + 1;
+	d__[1] = a[i__1].r;
+    } else {
+
+/* Reduce the lower triangle of A */
+
+/*      Force A(1,1) to be real (drop any imaginary part). */
+	i__1 = a_dim1 + 1;
+	i__2 = a_dim1 + 1;
+	d__1 = a[i__2].r;
+	a[i__1].r = d__1, a[i__1].i = 0.;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+             Generate elementary reflector H(i) = I - tau * v * v'
+             to annihilate A(i+2:n,i)
+*/
+
+	    i__2 = i__ + 1 + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *n - i__;
+/* Computing MIN */
+	    i__3 = i__ + 2;
+	    zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &
+		    taui);
+	    i__2 = i__;
+	    e[i__2] = alpha.r;
+
+	    if (taui.r != 0. || taui.i != 0.) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute  x := tau * A * v  storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		zhemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b59, &tau[
+			i__], &c__1);
+
+/* Compute  w := x - 1/2 * tau * (x'*v) * v */
+
+		z__3.r = -.5, z__3.i = -0.;
+		z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r *
+			taui.i + z__3.i * taui.r;
+		i__2 = *n - i__;
+		zdotc_(&z__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ *
+			a_dim1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = *n - i__;
+		zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+			i__], &c__1);
+
+/*
+                Apply the transformation as a rank-2 update:
+                   A := A - v * w' - w * v'
+*/
+
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zher2_(uplo, &i__2, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1,
+			&tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda);
+
+	    } else {
+/*              tau == 0: H(i) is the identity; just keep A(i+1,i+1) real. */
+		i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+		i__3 = i__ + 1 + (i__ + 1) * a_dim1;
+		d__1 = a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+	    }
+	    i__2 = i__ + 1 + i__ * a_dim1;
+	    i__3 = i__;
+	    a[i__2].r = e[i__3], a[i__2].i = 0.;
+	    i__2 = i__;
+	    i__3 = i__ + i__ * a_dim1;
+	    d__[i__2] = a[i__3].r;
+	    i__2 = i__;
+	    tau[i__2].r = taui.r, tau[i__2].i = taui.i;
+/* L20: */
+	}
+	i__1 = *n;
+	i__2 = *n + *n * a_dim1;
+	d__[i__1] = a[i__2].r;
+    }
+
+    return 0;
+
+/* End of ZHETD2 */
+
+} /* zhetd2_ */
+
+/*
+   NOTE(review): f2c-translated LAPACK routine (ZHETRD, blocked driver
+   that delegates to ZLATRD/ZHER2K and finishes with the unblocked
+   ZHETD2).  The `static` local variables below are f2c output and make
+   this routine non-reentrant / not thread-safe.  c__1, c__2, c__3, c_n1
+   and c_b1015 are file-level f2c constants defined elsewhere.
+*/
+/* Subroutine */ int zhetrd_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tau,
+	doublecomplex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__, j, nb, kk, nx, iws;
+    extern logical lsame_(char *, char *);
+    static integer nbmin, iinfo;
+    static logical upper;
+    extern /* Subroutine */ int zhetd2_(char *, integer *, doublecomplex *,
+	    integer *, doublereal *, doublereal *, doublecomplex *, integer *), zher2k_(char *, char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublereal *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zlatrd_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublecomplex *,
+	    doublecomplex *, integer *);
+    static integer ldwork, lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZHETRD reduces a complex Hermitian matrix A to real symmetric
+    tridiagonal form T by a unitary similarity transformation:
+    Q**H * A * Q = T.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            = 'U':  Upper triangle of A is stored;
+            = 'L':  Lower triangle of A is stored.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the Hermitian matrix A.  If UPLO = 'U', the leading
+            N-by-N upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading N-by-N lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+            On exit, if UPLO = 'U', the diagonal and first superdiagonal
+            of A are overwritten by the corresponding elements of the
+            tridiagonal matrix T, and the elements above the first
+            superdiagonal, with the array TAU, represent the unitary
+            matrix Q as a product of elementary reflectors; if UPLO
+            = 'L', the diagonal and first subdiagonal of A are over-
+            written by the corresponding elements of the tridiagonal
+            matrix T, and the elements below the first subdiagonal, with
+            the array TAU, represent the unitary matrix Q as a product
+            of elementary reflectors. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    D       (output) DOUBLE PRECISION array, dimension (N)
+            The diagonal elements of the tridiagonal matrix T:
+            D(i) = A(i,i).
+
+    E       (output) DOUBLE PRECISION array, dimension (N-1)
+            The off-diagonal elements of the tridiagonal matrix T:
+            E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+    TAU     (output) COMPLEX*16 array, dimension (N-1)
+            The scalar factors of the elementary reflectors (see Further
+            Details).
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.  LWORK >= 1.
+            For optimum performance LWORK >= N*NB, where NB is the
+            optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    Further Details
+    ===============
+
+    If UPLO = 'U', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(n-1) . . . H(2) H(1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+    A(1:i-1,i+1), and tau in TAU(i).
+
+    If UPLO = 'L', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(1) H(2) . . . H(n-1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+    and tau in TAU(i).
+
+    The contents of A on exit are illustrated by the following examples
+    with n = 5:
+
+    if UPLO = 'U':                       if UPLO = 'L':
+
+      (  d   e   v2  v3  v4 )              (  d                  )
+      (      d   e   v3  v4 )              (  e   d              )
+      (          d   e   v4 )              (  v1  e   d          )
+      (              d   e  )              (  v1  v2  e   d      )
+      (                  d  )              (  v1  v2  v3  e   d  )
+
+    where d and e denote diagonal and off-diagonal elements of T, and vi
+    denotes an element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Test the input parameters
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1;
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if ((*lwork < 1 && ! lquery)) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+
+/* Determine the block size. */
+
+	nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6,
+		 (ftnlen)1);
+	lwkopt = *n * nb;
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+/*  nx is the crossover order below which the unblocked code is used;
+    iws is the workspace size the blocked path needs. */
+    nx = *n;
+    iws = 1;
+    if ((nb > 1 && nb < *n)) {
+
+/*
+          Determine when to cross over from blocked to unblocked code
+          (last block is always handled by unblocked code).
+
+   Computing MAX
+*/
+	i__1 = nb, i__2 = ilaenv_(&c__3, "ZHETRD", uplo, n, &c_n1, &c_n1, &
+		c_n1, (ftnlen)6, (ftnlen)1);
+	nx = max(i__1,i__2);
+	if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/*
+                Not enough workspace to use optimal NB:  determine the
+                minimum value of NB, and reduce NB or force use of
+                unblocked code by setting NX = N.
+
+   Computing MAX
+*/
+		i__1 = *lwork / ldwork;
+		nb = max(i__1,1);
+		nbmin = ilaenv_(&c__2, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1,
+			 (ftnlen)6, (ftnlen)1);
+		if (nb < nbmin) {
+		    nx = *n;
+		}
+	    }
+	} else {
+	    nx = *n;
+	}
+    } else {
+	nb = 1;
+    }
+
+    if (upper) {
+
+/*
+          Reduce the upper triangle of A.
+          Columns 1:kk are handled by the unblocked method.
+*/
+
+	kk = *n - (*n - nx + nb - 1) / nb * nb;
+	i__1 = kk + 1;
+	i__2 = -nb;
+	for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+		i__2) {
+
+/*
+             Reduce columns i:i+nb-1 to tridiagonal form and form the
+             matrix W which is needed to update the unreduced part of
+             the matrix
+*/
+
+	    i__3 = i__ + nb - 1;
+	    zlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+		    work[1], &ldwork);
+
+/*
+             Update the unreduced submatrix A(1:i-1,1:i-1), using an
+             update of the form:  A := A - V*W' - W*V'
+*/
+
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zher2k_(uplo, "No transpose", &i__3, &nb, &z__1, &a[i__ * a_dim1
+		    + 1], lda, &work[1], &ldwork, &c_b1015, &a[a_offset], lda);
+
+/*
+             Copy superdiagonal elements back into A, and diagonal
+             elements into D
+*/
+
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j - 1 + j * a_dim1;
+		i__5 = j - 1;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+		i__4 = j;
+		i__5 = j + j * a_dim1;
+		d__[i__4] = a[i__5].r;
+/* L10: */
+	    }
+/* L20: */
+	}
+
+/* Use unblocked code to reduce the last or only block */
+
+	zhetd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+    } else {
+
+/* Reduce the lower triangle of A */
+
+	i__2 = *n - nx;
+	i__1 = nb;
+	for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/*
+             Reduce columns i:i+nb-1 to tridiagonal form and form the
+             matrix W which is needed to update the unreduced part of
+             the matrix
+*/
+
+	    i__3 = *n - i__ + 1;
+	    zlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+		    tau[i__], &work[1], &ldwork);
+
+/*
+             Update the unreduced submatrix A(i+nb:n,i+nb:n), using
+             an update of the form:  A := A - V*W' - W*V'
+*/
+
+	    i__3 = *n - i__ - nb + 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zher2k_(uplo, "No transpose", &i__3, &nb, &z__1, &a[i__ + nb +
+		    i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b1015, &a[
+		    i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/*
+             Copy subdiagonal elements back into A, and diagonal
+             elements into D
+*/
+
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j + 1 + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+		i__4 = j;
+		i__5 = j + j * a_dim1;
+		d__[i__4] = a[i__5].r;
+/* L30: */
+	    }
+/* L40: */
+	}
+
+/* Use unblocked code to reduce the last or only block */
+
+/*      i__ holds the first column of the trailing block after the loop. */
+	i__1 = *n - i__ + 1;
+	zhetd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+		&tau[i__], &iinfo);
+    }
+
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZHETRD */
+
+} /* zhetrd_ */
+
+/*
+ * zhseqr_ -- f2c translation of LAPACK ZHSEQR (version 3.0).
+ * Computes the eigenvalues of a complex upper Hessenberg matrix H and,
+ * optionally, the Schur form T and the Schur vectors Z, via the
+ * multi-shift QR algorithm; the full argument contract is in the
+ * preserved Fortran documentation block below.
+ * NOTE(review): machine-generated code -- do not hand-edit the numeric
+ * logic. Relies on file-level static constants (c_b59, c_b60, c__1,
+ * c__2, c__4, c__8, c__15, c_n1, c_false) and the SAFEMINIMUM /
+ * PRECISION macros defined elsewhere in this file -- confirm before
+ * extracting this routine.
+ */
+/* Subroutine */ int zhseqr_(char *job, char *compz, integer *n, integer *ilo,
+	 integer *ihi, doublecomplex *h__, integer *ldh, doublecomplex *w,
+	doublecomplex *z__, integer *ldz, doublecomplex *work, integer *lwork,
+	 integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4[2],
+	    i__5, i__6;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1;
+    char ch__1[2];
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    static integer i__, j, k, l;
+    /* s is a 15x15 scratch matrix (flattened) for trailing-submatrix
+       shifts; v/vv hold the shift-polynomial column (order <= 16). */
+    static doublecomplex s[225]	/* was [15][15] */, v[16];
+    static integer i1, i2, ii, nh, nr, ns, nv;
+    static doublecomplex vv[16];
+    static integer itn;
+    static doublecomplex tau;
+    static integer its;
+    static doublereal ulp, tst1;
+    static integer maxb, ierr;
+    static doublereal unfl;
+    static doublecomplex temp;
+    static doublereal ovfl;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *);
+    static integer itemp;
+    static doublereal rtemp;
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *);
+    static logical initz, wantt, wantz;
+    static doublereal rwork[1];
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+	    doublecomplex *, integer *), zlarfg_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *);
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *,
+	    doublereal *);
+    extern /* Subroutine */ int zlahqr_(logical *, logical *, integer *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	     integer *, integer *, doublecomplex *, integer *, integer *),
+	    zlacpy_(char *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlaset_(char *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+	    integer *), zlarfx_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *);
+    static doublereal smlnum;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZHSEQR computes the eigenvalues of a complex upper Hessenberg
+    matrix H, and, optionally, the matrices T and Z from the Schur
+    decomposition H = Z T Z**H, where T is an upper triangular matrix
+    (the Schur form), and Z is the unitary matrix of Schur vectors.
+
+    Optionally Z may be postmultiplied into an input unitary matrix Q,
+    so that this routine can give the Schur factorization of a matrix A
+    which has been reduced to the Hessenberg form H by the unitary
+    matrix Q:  A = Q*H*Q**H = (QZ)*T*(QZ)**H.
+
+    Arguments
+    =========
+
+    JOB     (input) CHARACTER*1
+            = 'E':  compute eigenvalues only;
+            = 'S':  compute eigenvalues and the Schur form T.
+
+    COMPZ   (input) CHARACTER*1
+            = 'N':  no Schur vectors are computed;
+            = 'I':  Z is initialized to the unit matrix and the matrix Z
+                    of Schur vectors of H is returned;
+            = 'V':  Z must contain an unitary matrix Q on entry, and
+                    the product Q*Z is returned.
+
+    N       (input) INTEGER
+            The order of the matrix H.  N >= 0.
+
+    ILO     (input) INTEGER
+    IHI     (input) INTEGER
+            It is assumed that H is already upper triangular in rows
+            and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+            set by a previous call to ZGEBAL, and then passed to CGEHRD
+            when the matrix output by ZGEBAL is reduced to Hessenberg
+            form. Otherwise ILO and IHI should be set to 1 and N
+            respectively.
+            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+    H       (input/output) COMPLEX*16 array, dimension (LDH,N)
+            On entry, the upper Hessenberg matrix H.
+            On exit, if JOB = 'S', H contains the upper triangular matrix
+            T from the Schur decomposition (the Schur form). If
+            JOB = 'E', the contents of H are unspecified on exit.
+
+    LDH     (input) INTEGER
+            The leading dimension of the array H. LDH >= max(1,N).
+
+    W       (output) COMPLEX*16 array, dimension (N)
+            The computed eigenvalues. If JOB = 'S', the eigenvalues are
+            stored in the same order as on the diagonal of the Schur form
+            returned in H, with W(i) = H(i,i).
+
+    Z       (input/output) COMPLEX*16 array, dimension (LDZ,N)
+            If COMPZ = 'N': Z is not referenced.
+            If COMPZ = 'I': on entry, Z need not be set, and on exit, Z
+            contains the unitary matrix Z of the Schur vectors of H.
+            If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,
+            which is assumed to be equal to the unit matrix except for
+            the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.
+            Normally Q is the unitary matrix generated by ZUNGHR after
+            the call to ZGEHRD which formed the Hessenberg matrix H.
+
+    LDZ     (input) INTEGER
+            The leading dimension of the array Z.
+            LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.  LWORK >= max(1,N).
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, ZHSEQR failed to compute all the
+                  eigenvalues in a total of 30*(IHI-ILO+1) iterations;
+                  elements 1:ilo-1 and i+1:n of W contain those
+                  eigenvalues which have been successfully computed.
+
+    =====================================================================
+
+
+       Decode and test the input parameters
+*/
+
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1 * 1;
+    h__ -= h_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantt = lsame_(job, "S");
+    initz = lsame_(compz, "I");
+    wantz = initz || lsame_(compz, "V");
+
+    *info = 0;
+    i__1 = max(1,*n);
+    work[1].r = (doublereal) i__1, work[1].i = 0.;
+    lquery = *lwork == -1;
+    if ((! lsame_(job, "E") && ! wantt)) {
+	*info = -1;
+    } else if ((! lsame_(compz, "N") && ! wantz)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -4;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -5;
+    } else if (*ldh < max(1,*n)) {
+	*info = -7;
+    } else if (*ldz < 1 || (wantz && *ldz < max(1,*n))) {
+	*info = -10;
+    } else if ((*lwork < max(1,*n) && ! lquery)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHSEQR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Initialize Z, if necessary */
+
+    if (initz) {
+	zlaset_("Full", n, n, &c_b59, &c_b60, &z__[z_offset], ldz);
+    }
+
+/*     Store the eigenvalues isolated by ZGEBAL. */
+
+    i__1 = *ilo - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__ + i__ * h_dim1;
+	w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i;
+/* L10: */
+    }
+    i__1 = *n;
+    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__ + i__ * h_dim1;
+	w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i;
+/* L20: */
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*ilo == *ihi) {
+	i__1 = *ilo;
+	i__2 = *ilo + *ilo * h_dim1;
+	w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+	return 0;
+    }
+
+/*
+       Set rows and columns ILO to IHI to zero below the first
+       subdiagonal.
+*/
+
+    i__1 = *ihi - 2;
+    for (j = *ilo; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = j + 2; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * h_dim1;
+	    h__[i__3].r = 0., h__[i__3].i = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+    nh = *ihi - *ilo + 1;
+
+/*
+       I1 and I2 are the indices of the first row and last column of H
+       to which transformations must be applied. If eigenvalues only are
+       being computed, I1 and I2 are re-set inside the main loop.
+*/
+
+    if (wantt) {
+	i1 = 1;
+	i2 = *n;
+    } else {
+	i1 = *ilo;
+	i2 = *ihi;
+    }
+
+/*     Ensure that the subdiagonal elements are real. */
+
+    i__1 = *ihi;
+    for (i__ = *ilo + 1; i__ <= i__1; ++i__) {
+	i__2 = i__ + (i__ - 1) * h_dim1;
+	temp.r = h__[i__2].r, temp.i = h__[i__2].i;
+	if (d_imag(&temp) != 0.) {
+	    d__1 = temp.r;
+	    d__2 = d_imag(&temp);
+	    rtemp = dlapy2_(&d__1, &d__2);
+	    i__2 = i__ + (i__ - 1) * h_dim1;
+	    h__[i__2].r = rtemp, h__[i__2].i = 0.;
+	    z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp;
+	    temp.r = z__1.r, temp.i = z__1.i;
+	    if (i2 > i__) {
+		i__2 = i2 - i__;
+		d_cnjg(&z__1, &temp);
+		zscal_(&i__2, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+	    }
+	    i__2 = i__ - i1;
+	    zscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+	    if (i__ < *ihi) {
+		i__2 = i__ + 1 + i__ * h_dim1;
+		i__3 = i__ + 1 + i__ * h_dim1;
+		z__1.r = temp.r * h__[i__3].r - temp.i * h__[i__3].i, z__1.i =
+			 temp.r * h__[i__3].i + temp.i * h__[i__3].r;
+		h__[i__2].r = z__1.r, h__[i__2].i = z__1.i;
+	    }
+	    if (wantz) {
+		zscal_(&nh, &temp, &z__[*ilo + i__ * z_dim1], &c__1);
+	    }
+	}
+/* L50: */
+    }
+
+/*
+       Determine the order of the multi-shift QR algorithm to be used.
+
+       Writing concatenation
+*/
+    i__4[0] = 1, a__1[0] = job;
+    i__4[1] = 1, a__1[1] = compz;
+    s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
+    ns = ilaenv_(&c__4, "ZHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+	    ftnlen)2);
+/* Writing concatenation */
+    i__4[0] = 1, a__1[0] = job;
+    i__4[1] = 1, a__1[1] = compz;
+    s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
+    maxb = ilaenv_(&c__8, "ZHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+	    ftnlen)2);
+    if (ns <= 1 || ns > nh || maxb >= nh) {
+
+/*        Use the standard double-shift algorithm */
+
+	zlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], ilo,
+		ihi, &z__[z_offset], ldz, info);
+	return 0;
+    }
+    maxb = max(2,maxb);
+/* Computing MIN */
+    i__1 = min(ns,maxb);
+    ns = min(i__1,15);
+
+/*
+       Now 1 < NS <= MAXB < NH.
+
+       Set machine-dependent constants for the stopping criterion.
+       If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+    unfl = SAFEMINIMUM;
+    ovfl = 1. / unfl;
+    dlabad_(&unfl, &ovfl);
+    ulp = PRECISION;
+    smlnum = unfl * (nh / ulp);
+
+/*     ITN is the total number of multiple-shift QR iterations allowed. */
+
+    itn = nh * 30;
+
+/*
+       The main loop begins here. I is the loop index and decreases from
+       IHI to ILO in steps of at most MAXB. Each iteration of the loop
+       works with the active submatrix in rows and columns L to I.
+       Eigenvalues I+1 to IHI have already converged. Either L = ILO, or
+       H(L,L-1) is negligible so that the matrix splits.
+*/
+
+    i__ = *ihi;
+L60:
+    if (i__ < *ilo) {
+	goto L180;
+    }
+
+/*
+       Perform multiple-shift QR iterations on rows and columns ILO to I
+       until a submatrix of order at most MAXB splits off at the bottom
+       because a subdiagonal element has become negligible.
+*/
+
+    l = *ilo;
+    i__1 = itn;
+    for (its = 0; its <= i__1; ++its) {
+
+/*        Look for a single small subdiagonal element. */
+
+	i__2 = l + 1;
+	for (k = i__; k >= i__2; --k) {
+	    i__3 = k - 1 + (k - 1) * h_dim1;
+	    i__5 = k + k * h_dim1;
+	    tst1 = (d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[k -
+		    1 + (k - 1) * h_dim1]), abs(d__2)) + ((d__3 = h__[i__5].r,
+		     abs(d__3)) + (d__4 = d_imag(&h__[k + k * h_dim1]), abs(
+		    d__4)));
+	    if (tst1 == 0.) {
+		i__3 = i__ - l + 1;
+		tst1 = zlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, rwork);
+	    }
+	    i__3 = k + (k - 1) * h_dim1;
+/* Computing MAX */
+	    d__2 = ulp * tst1;
+	    if ((d__1 = h__[i__3].r, abs(d__1)) <= max(d__2,smlnum)) {
+		goto L80;
+	    }
+/* L70: */
+	}
+L80:
+	l = k;
+	if (l > *ilo) {
+
+/*           H(L,L-1) is negligible. */
+
+	    i__2 = l + (l - 1) * h_dim1;
+	    h__[i__2].r = 0., h__[i__2].i = 0.;
+	}
+
+/*        Exit from loop if a submatrix of order <= MAXB has split off. */
+
+	if (l >= i__ - maxb + 1) {
+	    goto L170;
+	}
+
+/*
+          Now the active submatrix is in rows and columns L to I. If
+          eigenvalues only are being computed, only the active submatrix
+          need be transformed.
+*/
+
+	if (! wantt) {
+	    i1 = l;
+	    i2 = i__;
+	}
+
+	if (its == 20 || its == 30) {
+
+/*           Exceptional shifts. */
+
+	    i__2 = i__;
+	    for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
+		i__3 = ii;
+		i__5 = ii + (ii - 1) * h_dim1;
+		i__6 = ii + ii * h_dim1;
+		d__3 = ((d__1 = h__[i__5].r, abs(d__1)) + (d__2 = h__[i__6].r,
+			 abs(d__2))) * 1.5;
+		w[i__3].r = d__3, w[i__3].i = 0.;
+/* L90: */
+	    }
+	} else {
+
+/*           Use eigenvalues of trailing submatrix of order NS as shifts. */
+
+	    zlacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) *
+		    h_dim1], ldh, s, &c__15);
+	    zlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &w[i__ -
+		    ns + 1], &c__1, &ns, &z__[z_offset], ldz, &ierr);
+	    if (ierr > 0) {
+
+/*
+                If ZLAHQR failed to compute all NS eigenvalues, use the
+                unconverged diagonal elements as the remaining shifts.
+*/
+
+		i__2 = ierr;
+		for (ii = 1; ii <= i__2; ++ii) {
+		    i__3 = i__ - ns + ii;
+		    i__5 = ii + ii * 15 - 16;
+		    w[i__3].r = s[i__5].r, w[i__3].i = s[i__5].i;
+/* L100: */
+		}
+	    }
+	}
+
+/*
+          Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))
+          where G is the Hessenberg submatrix H(L:I,L:I) and w is
+          the vector of shifts (stored in W). The result is
+          stored in the local array V.
+*/
+
+	v[0].r = 1., v[0].i = 0.;
+	i__2 = ns + 1;
+	for (ii = 2; ii <= i__2; ++ii) {
+	    i__3 = ii - 1;
+	    v[i__3].r = 0., v[i__3].i = 0.;
+/* L110: */
+	}
+	nv = 1;
+	i__2 = i__;
+	for (j = i__ - ns + 1; j <= i__2; ++j) {
+	    i__3 = nv + 1;
+	    zcopy_(&i__3, v, &c__1, vv, &c__1);
+	    i__3 = nv + 1;
+	    i__5 = j;
+	    z__1.r = -w[i__5].r, z__1.i = -w[i__5].i;
+	    zgemv_("No transpose", &i__3, &nv, &c_b60, &h__[l + l * h_dim1],
+		    ldh, vv, &c__1, &z__1, v, &c__1);
+	    ++nv;
+
+/*
+             Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,
+             reset it to the unit vector.
+*/
+
+	    itemp = izamax_(&nv, v, &c__1);
+	    i__3 = itemp - 1;
+	    rtemp = (d__1 = v[i__3].r, abs(d__1)) + (d__2 = d_imag(&v[itemp -
+		    1]), abs(d__2));
+	    if (rtemp == 0.) {
+		v[0].r = 1., v[0].i = 0.;
+		i__3 = nv;
+		for (ii = 2; ii <= i__3; ++ii) {
+		    i__5 = ii - 1;
+		    v[i__5].r = 0., v[i__5].i = 0.;
+/* L120: */
+		}
+	    } else {
+		rtemp = max(rtemp,smlnum);
+		d__1 = 1. / rtemp;
+		zdscal_(&nv, &d__1, v, &c__1);
+	    }
+/* L130: */
+	}
+
+/*        Multiple-shift QR step */
+
+	i__2 = i__ - 1;
+	for (k = l; k <= i__2; ++k) {
+
+/*
+             The first iteration of this loop determines a reflection G
+             from the vector V and applies it from left and right to H,
+             thus creating a nonzero bulge below the subdiagonal.
+
+             Each subsequent iteration determines a reflection G to
+             restore the Hessenberg form in the (K-1)th column, and thus
+             chases the bulge one step toward the bottom of the active
+             submatrix. NR is the order of G.
+
+   Computing MIN
+*/
+	    i__3 = ns + 1, i__5 = i__ - k + 1;
+	    nr = min(i__3,i__5);
+	    if (k > l) {
+		zcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+	    }
+	    zlarfg_(&nr, v, &v[1], &c__1, &tau);
+	    if (k > l) {
+		i__3 = k + (k - 1) * h_dim1;
+		h__[i__3].r = v[0].r, h__[i__3].i = v[0].i;
+		i__3 = i__;
+		for (ii = k + 1; ii <= i__3; ++ii) {
+		    i__5 = ii + (k - 1) * h_dim1;
+		    h__[i__5].r = 0., h__[i__5].i = 0.;
+/* L140: */
+		}
+	    }
+	    v[0].r = 1., v[0].i = 0.;
+
+/*
+             Apply G' from the left to transform the rows of the matrix
+             in columns K to I2.
+*/
+
+	    i__3 = i2 - k + 1;
+	    d_cnjg(&z__1, &tau);
+	    zlarfx_("Left", &nr, &i__3, v, &z__1, &h__[k + k * h_dim1], ldh, &
+		    work[1]);
+
+/*
+             Apply G from the right to transform the columns of the
+             matrix in rows I1 to min(K+NR,I).
+
+   Computing MIN
+*/
+	    i__5 = k + nr;
+	    i__3 = min(i__5,i__) - i1 + 1;
+	    zlarfx_("Right", &i__3, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh,
+		    &work[1]);
+
+	    if (wantz) {
+
+/*              Accumulate transformations in the matrix Z */
+
+		zlarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1],
+			ldz, &work[1]);
+	    }
+/* L150: */
+	}
+
+/*        Ensure that H(I,I-1) is real. */
+
+	i__2 = i__ + (i__ - 1) * h_dim1;
+	temp.r = h__[i__2].r, temp.i = h__[i__2].i;
+	if (d_imag(&temp) != 0.) {
+	    d__1 = temp.r;
+	    d__2 = d_imag(&temp);
+	    rtemp = dlapy2_(&d__1, &d__2);
+	    i__2 = i__ + (i__ - 1) * h_dim1;
+	    h__[i__2].r = rtemp, h__[i__2].i = 0.;
+	    z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp;
+	    temp.r = z__1.r, temp.i = z__1.i;
+	    if (i2 > i__) {
+		i__2 = i2 - i__;
+		d_cnjg(&z__1, &temp);
+		zscal_(&i__2, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+	    }
+	    i__2 = i__ - i1;
+	    zscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+	    if (wantz) {
+		zscal_(&nh, &temp, &z__[*ilo + i__ * z_dim1], &c__1);
+	    }
+	}
+
+/* L160: */
+    }
+
+/*     Failure to converge in remaining number of iterations */
+
+    *info = i__;
+    return 0;
+
+L170:
+
+/*
+       A submatrix of order <= MAXB in rows and columns L to I has split
+       off. Use the double-shift QR algorithm to handle it.
+*/
+
+    zlahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &w[1], ilo, ihi,
+	     &z__[z_offset], ldz, info);
+    if (*info > 0) {
+	return 0;
+    }
+
+/*
+       Decrement number of remaining iterations, and return to start of
+       the main loop with a new value of I.
+*/
+
+    itn -= its;
+    i__ = l - 1;
+    goto L60;
+
+L180:
+    i__1 = max(1,*n);
+    work[1].r = (doublereal) i__1, work[1].i = 0.;
+    return 0;
+
+/*     End of ZHSEQR */
+
+} /* zhseqr_ */
+
+/*
+ * zlabrd_ -- f2c translation of LAPACK auxiliary routine ZLABRD (v3.0).
+ * Reduces the first NB rows and columns of a complex m-by-n matrix A to
+ * real bidiagonal form (upper if m >= n, lower otherwise) and returns
+ * the block-update matrices X and Y; see the preserved Fortran
+ * documentation below for the full argument contract.
+ * NOTE(review): machine-generated code -- the zgemv_/zlacgv_/zlarfg_
+ * call order is significant; do not reorder. Depends on file-level
+ * static constants c_b59, c_b60 and c__1 defined elsewhere in this
+ * file -- confirm before extracting this routine.
+ */
+/* Subroutine */ int zlabrd_(integer *m, integer *n, integer *nb,
+	doublecomplex *a, integer *lda, doublereal *d__, doublereal *e,
+	doublecomplex *tauq, doublecomplex *taup, doublecomplex *x, integer *
+	ldx, doublecomplex *y, integer *ldy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+	    i__3;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__;
+    static doublecomplex alpha;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    zlarfg_(integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZLABRD reduces the first NB rows and columns of a complex general
+    m by n matrix A to upper or lower real bidiagonal form by a unitary
+    transformation Q' * A * P, and returns the matrices X and Y which
+    are needed to apply the transformation to the unreduced part of A.
+
+    If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower
+    bidiagonal form.
+
+    This is an auxiliary routine called by ZGEBRD
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows in the matrix A.
+
+    N       (input) INTEGER
+            The number of columns in the matrix A.
+
+    NB      (input) INTEGER
+            The number of leading rows and columns of A to be reduced.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the m by n general matrix to be reduced.
+            On exit, the first NB rows and columns of the matrix are
+            overwritten; the rest of the array is unchanged.
+            If m >= n, elements on and below the diagonal in the first NB
+            columns, with the array TAUQ, represent the unitary
+            matrix Q as a product of elementary reflectors; and
+            elements above the diagonal in the first NB rows, with the
+            array TAUP, represent the unitary matrix P as a product
+            of elementary reflectors.
+            If m < n, elements below the diagonal in the first NB
+            columns, with the array TAUQ, represent the unitary
+            matrix Q as a product of elementary reflectors, and
+            elements on and above the diagonal in the first NB rows,
+            with the array TAUP, represent the unitary matrix P as
+            a product of elementary reflectors.
+            See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    D       (output) DOUBLE PRECISION array, dimension (NB)
+            The diagonal elements of the first NB rows and columns of
+            the reduced matrix.  D(i) = A(i,i).
+
+    E       (output) DOUBLE PRECISION array, dimension (NB)
+            The off-diagonal elements of the first NB rows and columns of
+            the reduced matrix.
+
+    TAUQ    (output) COMPLEX*16 array dimension (NB)
+            The scalar factors of the elementary reflectors which
+            represent the unitary matrix Q. See Further Details.
+
+    TAUP    (output) COMPLEX*16 array, dimension (NB)
+            The scalar factors of the elementary reflectors which
+            represent the unitary matrix P. See Further Details.
+
+    X       (output) COMPLEX*16 array, dimension (LDX,NB)
+            The m-by-nb matrix X required to update the unreduced part
+            of A.
+
+    LDX     (input) INTEGER
+            The leading dimension of the array X. LDX >= max(1,M).
+
+    Y       (output) COMPLEX*16 array, dimension (LDY,NB)
+            The n-by-nb matrix Y required to update the unreduced part
+            of A.
+
+    LDY     (output) INTEGER
+            The leading dimension of the array Y. LDY >= max(1,N).
+
+    Further Details
+    ===============
+
+    The matrices Q and P are represented as products of elementary
+    reflectors:
+
+       Q = H(1) H(2) . . . H(nb)  and  P = G(1) G(2) . . . G(nb)
+
+    Each H(i) and G(i) has the form:
+
+       H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u'
+
+    where tauq and taup are complex scalars, and v and u are complex
+    vectors.
+
+    If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in
+    A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in
+    A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+    If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in
+    A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in
+    A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+    The elements of the vectors v and u together form the m-by-nb matrix
+    V and the nb-by-n matrix U' which are needed, with X and Y, to apply
+    the transformation to the unreduced part of the matrix, using a block
+    update of the form:  A := A - V*Y' - X*U'.
+
+    The contents of A on exit are illustrated by the following examples
+    with nb = 2:
+
+    m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n):
+
+      (  1   1   u1  u1  u1 )           (  1   u1  u1  u1  u1  u1 )
+      (  v1  1   1   u2  u2 )           (  1   1   u2  u2  u2  u2 )
+      (  v1  v2  a   a   a  )           (  v1  1   a   a   a   a  )
+      (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  )
+      (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  )
+      (  v1  v2  a   a   a  )
+
+    where a denotes an element of the original matrix which is unchanged,
+    vi denotes an element of the vector defining H(i), and ui an element
+    of the vector defining G(i).
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1 * 1;
+    x -= x_offset;
+    y_dim1 = *ldy;
+    y_offset = 1 + y_dim1 * 1;
+    y -= y_offset;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	return 0;
+    }
+
+    if (*m >= *n) {
+
+/*        Reduce to upper bidiagonal form */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Update A(i:m,i) */
+
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+	    i__2 = *m - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda,
+		     &y[i__ + y_dim1], ldy, &c_b60, &a[i__ + i__ * a_dim1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+	    i__2 = *m - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + x_dim1], ldx,
+		     &a[i__ * a_dim1 + 1], &c__1, &c_b60, &a[i__ + i__ *
+		    a_dim1], &c__1);
+
+/*           Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *m - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, &
+		    tauq[i__]);
+	    i__2 = i__;
+	    d__[i__2] = alpha.r;
+	    if (i__ < *n) {
+		i__2 = i__ + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/*              Compute Y(i+1:n,i) */
+
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ + (
+			i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &
+			c__1, &c_b59, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__ + 1;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ +
+			a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b59, &
+			y[i__ * y_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 +
+			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b60, &y[
+			i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__ + 1;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &x[i__ +
+			x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b59, &
+			y[i__ * y_dim1 + 1], &c__1);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ +
+			1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+			c_b60, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *n - i__;
+		zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/*              Update A(i,i+1:n) */
+
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		zlacgv_(&i__, &a[i__ + a_dim1], lda);
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__, &z__1, &y[i__ + 1 +
+			y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b60, &a[i__ +
+			(i__ + 1) * a_dim1], lda);
+		zlacgv_(&i__, &a[i__ + a_dim1], lda);
+		i__2 = i__ - 1;
+		zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ +
+			1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b60,
+			&a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ - 1;
+		zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/*              Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+			taup[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/*              Compute X(i+1:m,i) */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[i__ + 1 + (
+			i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+			 lda, &c_b59, &x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__;
+		zgemv_("Conjugate transpose", &i__2, &i__, &c_b60, &y[i__ + 1
+			+ y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b59, &x[i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__, &z__1, &a[i__ + 1 +
+			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[(i__ + 1) *
+			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b59, &x[i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 +
+			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *m - i__;
+		zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Reduce to lower bidiagonal form */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Update A(i,i:n) */
+
+	    i__2 = *n - i__ + 1;
+	    zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + y_dim1], ldy,
+		     &a[i__ + a_dim1], lda, &c_b60, &a[i__ + i__ * a_dim1],
+		    lda);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+	    i__2 = i__ - 1;
+	    i__3 = *n - i__ + 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[i__ *
+		    a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b60, &a[i__ +
+		    i__ * a_dim1], lda);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/*           Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *n - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+		    taup[i__]);
+	    i__2 = i__;
+	    d__[i__2] = alpha.r;
+	    if (i__ < *m) {
+		i__2 = i__ + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/*              Compute X(i+1:m,i) */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__ + 1;
+		zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[i__ + 1 + i__
+			* a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b59, &
+			x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__ + 1;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &y[i__ +
+			y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b59, &x[
+			i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 +
+			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = i__ - 1;
+		i__3 = *n - i__ + 1;
+		zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[i__ * a_dim1
+			+ 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b59, &x[
+			i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 +
+			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *m - i__;
+		zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__ + 1;
+		zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+
+/*              Update A(i+1:m,i) */
+
+		i__2 = i__ - 1;
+		zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 +
+			a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b60, &a[i__ +
+			1 + i__ * a_dim1], &c__1);
+		i__2 = i__ - 1;
+		zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+		i__2 = *m - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__, &z__1, &x[i__ + 1 +
+			x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b60, &a[
+			i__ + 1 + i__ * a_dim1], &c__1);
+
+/*              Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+		i__2 = i__ + 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *m - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1,
+			 &tauq[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/*              Compute Y(i+1:n,i) */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ +
+			1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ *
+			a_dim1], &c__1, &c_b59, &y[i__ + 1 + i__ * y_dim1], &
+			c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ +
+			1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b59, &y[i__ * y_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 +
+			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b60, &y[
+			i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__;
+		zgemv_("Conjugate transpose", &i__2, &i__, &c_b60, &x[i__ + 1
+			+ x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b59, &y[i__ * y_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__, &i__2, &z__1, &a[(i__ + 1)
+			 * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+			c_b60, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *n - i__;
+		zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+	    } else {
+		i__2 = *n - i__ + 1;
+		zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/*     End of ZLABRD */
+
+} /* zlabrd_ */
+
+/*
+ * zlacgv_ -- f2c translation of LAPACK auxiliary routine ZLACGV.
+ * Conjugates a complex vector of length *n in place, with stride
+ * *incx; a negative increment walks the vector from its far end,
+ * matching BLAS stride conventions.
+ */
+/* Subroutine */ int zlacgv_(integer *n, doublecomplex *x, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    static integer i__, ioff;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    ZLACGV conjugates a complex vector of length N.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The length of the vector X.  N >= 0.
+
+    X       (input/output) COMPLEX*16 array, dimension
+                           (1+(N-1)*abs(INCX))
+            On entry, the vector of length N to be conjugated.
+            On exit, X is overwritten with conjg(X).
+
+    INCX    (input) INTEGER
+            The spacing between successive elements of X.
+
+   =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    if (*incx == 1) {
+	/* Unit stride: conjugate each element in order. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    d_cnjg(&z__1, &x[i__]);
+	    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L10: */
+	}
+    } else {
+	/* General stride: for incx < 0 start at the last element
+	   (offset 1-(n-1)*incx) so the same loop covers both signs. */
+	ioff = 1;
+	if (*incx < 0) {
+	    ioff = 1 - (*n - 1) * *incx;
+	}
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = ioff;
+	    d_cnjg(&z__1, &x[ioff]);
+	    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+	    ioff += *incx;
+/* L20: */
+	}
+    }
+    return 0;
+
+/*     End of ZLACGV */
+
+} /* zlacgv_ */
+
+/* Subroutine */ int zlacp2_(char *uplo, integer *m, integer *n, doublereal *
+ a, integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZLACP2 copies all or part of a real two-dimensional matrix A to a
+ complex matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper trapezium
+ is accessed; if UPLO = 'L', only the lower trapezium is
+ accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) COMPLEX*16 array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLACP2 */
+
+} /* zlacp2_ */
+
+/* Subroutine */ int zlacpy_(char *uplo, integer *m, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ ZLACPY copies all or part of a two-dimensional matrix A to another
+ matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) COMPLEX*16 array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper trapezium
+ is accessed; if UPLO = 'L', only the lower trapezium is
+ accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) COMPLEX*16 array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLACPY */
+
+} /* zlacpy_ */
+
+/* Subroutine */ int zlacrm_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublereal *b, integer *ldb, doublecomplex *c__,
+ integer *ldc, doublereal *rwork)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, a_dim1, a_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLACRM performs a very simple matrix-matrix multiplication:
+ C := A * B,
+ where A is M by N and complex; B is N by N and real;
+ C is M by N and complex.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A and of the matrix C.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns and rows of the matrix B and
+ the number of columns of the matrix C.
+ N >= 0.
+
+ A (input) COMPLEX*16 array, dimension (LDA, N)
+ A contains the M by N matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >=max(1,M).
+
+ B (input) DOUBLE PRECISION array, dimension (LDB, N)
+ B contains the N by N matrix B.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >=max(1,N).
+
+ C (input) COMPLEX*16 array, dimension (LDC, N)
+ C contains the M by N matrix C.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >=max(1,N).
+
+ RWORK (workspace) DOUBLE PRECISION array, dimension (2*M*N)
+
+ =====================================================================
+
+
+ Quick return if possible.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ rwork[(j - 1) * *m + i__] = a[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ dgemm_("N", "N", m, n, n, &c_b1015, &rwork[1], m, &b[b_offset], ldb, &
+ c_b324, &rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = d_imag(&a[i__ + j * a_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ dgemm_("N", "N", m, n, n, &c_b1015, &rwork[1], m, &b[b_offset], ldb, &
+ c_b324, &rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ d__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ z__1.r = d__1, z__1.i = rwork[i__5];
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of ZLACRM */
+
+} /* zlacrm_ */
+
+/* Double Complex */ VOID zladiv_(doublecomplex * ret_val, doublecomplex *x,
+ doublecomplex *y)
+{
+ /* System generated locals */
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ static doublereal zi, zr;
+ extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ ZLADIV := X / Y, where X and Y are complex. The computation of X / Y
+ will not overflow on an intermediary step unless the results
+ overflows.
+
+ Arguments
+ =========
+
+ X (input) COMPLEX*16
+ Y (input) COMPLEX*16
+ The complex scalars X and Y.
+
+ =====================================================================
+*/
+
+
+ d__1 = x->r;
+ d__2 = d_imag(x);
+ d__3 = y->r;
+ d__4 = d_imag(y);
+ dladiv_(&d__1, &d__2, &d__3, &d__4, &zr, &zi);
+ z__1.r = zr, z__1.i = zi;
+ ret_val->r = z__1.r, ret_val->i = z__1.i;
+
+ return ;
+
+/* End of ZLADIV */
+
+} /* zladiv_ */
+
/* Subroutine */ int zlaed0_(integer *qsiz, integer *n, doublereal *d__,
	doublereal *e, doublecomplex *q, integer *ldq, doublecomplex *qstore,
	integer *ldqs, doublereal *rwork, integer *iwork, integer *info)
{
    /* System generated locals */
    integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
    doublereal d__1;

    /* Builtin functions */
    double log(doublereal);
    integer pow_ii(integer *, integer *);

    /* Local variables */
    /* NOTE(review): f2c-style static locals make this routine
       non-reentrant and not thread-safe. */
    static integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2;
    static doublereal temp;
    static integer curr, iperm;
    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
	    doublereal *, integer *);
    static integer indxq, iwrem, iqptr, tlvls;
    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
	    doublecomplex *, integer *), zlaed7_(integer *, integer *,
	    integer *, integer *, integer *, integer *, doublereal *,
	    doublecomplex *, integer *, doublereal *, integer *, doublereal *,
	     integer *, integer *, integer *, integer *, integer *,
	    doublereal *, doublecomplex *, doublereal *, integer *, integer *)
	    ;
    static integer igivcl;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
	    integer *, integer *, ftnlen, ftnlen);
    extern /* Subroutine */ int zlacrm_(integer *, integer *, doublecomplex *,
	     integer *, doublereal *, integer *, doublecomplex *, integer *,
	    doublereal *);
    static integer igivnm, submat, curprb, subpbs, igivpt;
    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
	    doublereal *, doublereal *, integer *, doublereal *, integer *);
    static integer curlvl, matsiz, iprmpt, smlsiz;


/*
    -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       September 30, 1994


    Purpose
    =======

    Using the divide and conquer method, ZLAED0 computes all eigenvalues
    of a symmetric tridiagonal matrix which is one diagonal block of
    those from reducing a dense or band Hermitian matrix and
    corresponding eigenvectors of the dense or band matrix.

    Arguments
    =========

    QSIZ   (input) INTEGER
           The dimension of the unitary matrix used to reduce
           the full matrix to tridiagonal form.  QSIZ >= N if ICOMPQ = 1.

    N      (input) INTEGER
           The dimension of the symmetric tridiagonal matrix.  N >= 0.

    D      (input/output) DOUBLE PRECISION array, dimension (N)
           On entry, the diagonal elements of the tridiagonal matrix.
           On exit, the eigenvalues in ascending order.

    E      (input/output) DOUBLE PRECISION array, dimension (N-1)
           On entry, the off-diagonal elements of the tridiagonal matrix.
           On exit, E has been destroyed.

    Q      (input/output) COMPLEX*16 array, dimension (LDQ,N)
           On entry, Q must contain an QSIZ x N matrix whose columns are
           unitarily orthonormal. It is a part of the unitary matrix
           that reduces the full dense Hermitian matrix to a
           (reducible) symmetric tridiagonal matrix.

    LDQ    (input) INTEGER
           The leading dimension of the array Q.  LDQ >= max(1,N).

    IWORK  (workspace) INTEGER array,
           the dimension of IWORK must be at least
                        6 + 6*N + 5*N*lg N
                        ( lg( N ) = smallest integer k
                                    such that 2^k >= N )

    RWORK  (workspace) DOUBLE PRECISION array,
                                 dimension (1 + 3*N + 2*N*lg N + 3*N**2)
                          ( lg( N ) = smallest integer k
                                      such that 2^k >= N )

    QSTORE (workspace) COMPLEX*16 array, dimension (LDQS, N)
           Used to store parts of
           the eigenvector matrix when the updating matrix multiplies
           take place.

    LDQS   (input) INTEGER
           The leading dimension of the array QSTORE.
           LDQS >= max(1,N).

    INFO   (output) INTEGER
           = 0:  successful exit.
           < 0:  if INFO = -i, the i-th argument had an illegal value.
           > 0:  The algorithm failed to compute an eigenvalue while
                 working on the submatrix lying in rows and columns
                 INFO/(N+1) through mod(INFO,N+1).

    =====================================================================

    Warning:      N could be as big as QSIZ!


       Test the input parameters.
*/

    /* Parameter adjustments */
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    qstore_dim1 = *ldqs;
    qstore_offset = 1 + qstore_dim1 * 1;
    qstore -= qstore_offset;
    --rwork;
    --iwork;

    /* Function Body */
    *info = 0;

/*
       IF( ICOMPQ .LT. 0 .OR. ICOMPQ .GT. 2 ) THEN
          INFO = -1
       ELSE IF( ( ICOMPQ .EQ. 1 ) .AND. ( QSIZ .LT. MAX( 0, N ) ) )
      $        THEN
*/
    if (*qsiz < max(0,*n)) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ldq < max(1,*n)) {
	*info = -6;
    } else if (*ldqs < max(1,*n)) {
	*info = -8;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZLAED0", &i__1);
	return 0;
    }

/* Quick return if possible */

    if (*n == 0) {
	return 0;
    }

    /* SMLSIZ: threshold size below which a subproblem is solved
       directly by DSTEQR (see the leaf loop below). */
    smlsiz = ilaenv_(&c__9, "ZLAED0", " ", &c__0, &c__0, &c__0, &c__0, (
	    ftnlen)6, (ftnlen)1);

/*
       Determine the size and placement of the submatrices, and save in
       the leading elements of IWORK.
*/

    iwork[1] = *n;
    subpbs = 1;
    tlvls = 0;
    /* Repeatedly halve every current subproblem until each piece has
       at most SMLSIZ rows; IWORK(1:SUBPBS) then holds the sizes. */
L10:
    if (iwork[subpbs] > smlsiz) {
	for (j = subpbs; j >= 1; --j) {
	    /* Split problem j into a ceil half and a floor half. */
	    iwork[j * 2] = (iwork[j] + 1) / 2;
	    iwork[((j) << (1)) - 1] = iwork[j] / 2;
/* L20: */
	}
	++tlvls;
	subpbs <<= 1;
	goto L10;
    }
    /* Convert sizes to cumulative end indices: IWORK(j) becomes the
       index of the last row of subproblem j. */
    i__1 = subpbs;
    for (j = 2; j <= i__1; ++j) {
	iwork[j] += iwork[j - 1];
/* L30: */
    }

/*
       Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1
       using rank-1 modifications (cuts).
*/

    spm1 = subpbs - 1;
    i__1 = spm1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	submat = iwork[i__] + 1;
	smm1 = submat - 1;
	d__[smm1] -= (d__1 = e[smm1], abs(d__1));
	d__[submat] -= (d__1 = e[smm1], abs(d__1));
/* L40: */
    }

    /* Offset of the INDXQ permutation inside IWORK (past the 4*N+2
       entries used for the size/placement bookkeeping). */
    indxq = ((*n) << (2)) + 3;

/*
       Set up workspaces for eigenvalues only/accumulate new vectors
       routine
*/

    /* LGN = smallest integer such that 2^LGN >= N. */
    temp = log((doublereal) (*n)) / log(2.);
    lgn = (integer) temp;
    if (pow_ii(&c__2, &lgn) < *n) {
	++lgn;
    }
    if (pow_ii(&c__2, &lgn) < *n) {
	++lgn;
    }
    /* Integer-workspace pointers into IWORK ... */
    iprmpt = indxq + *n + 1;
    iperm = iprmpt + *n * lgn;
    iqptr = iperm + *n * lgn;
    igivpt = iqptr + *n + 2;
    igivcl = igivpt + *n * lgn;

    /* ... and real-workspace pointers into RWORK. */
    igivnm = 1;
    iq = igivnm + ((*n) << (1)) * lgn;
/* Computing 2nd power */
    i__1 = *n;
    iwrem = iq + i__1 * i__1 + 1;
/* Initialize pointers */
    i__1 = subpbs;
    for (i__ = 0; i__ <= i__1; ++i__) {
	iwork[iprmpt + i__] = 1;
	iwork[igivpt + i__] = 1;
/* L50: */
    }
    iwork[iqptr] = 1;

/*
       Solve each submatrix eigenproblem at the bottom of the divide and
       conquer tree.
*/

    /* CURR counts leaf subproblems solved so far; it indexes the
       packed eigenvector storage via IWORK(IQPTR+CURR). */
    curr = 0;
    i__1 = spm1;
    for (i__ = 0; i__ <= i__1; ++i__) {
	if (i__ == 0) {
	    submat = 1;
	    matsiz = iwork[1];
	} else {
	    submat = iwork[i__] + 1;
	    matsiz = iwork[i__ + 1] - iwork[i__];
	}
	ll = iq - 1 + iwork[iqptr + curr];
	/* Solve the leaf tridiagonal problem; DSTEQR returns its
	   eigenvectors in RWORK(ll). */
	dsteqr_("I", &matsiz, &d__[submat], &e[submat], &rwork[ll], &matsiz, &
		rwork[1], info);
	/* Multiply the relevant columns of Q by those eigenvectors,
	   accumulating the result into QSTORE. */
	zlacrm_(qsiz, &matsiz, &q[submat * q_dim1 + 1], ldq, &rwork[ll], &
		matsiz, &qstore[submat * qstore_dim1 + 1], ldqs, &rwork[iwrem]
		);
/* Computing 2nd power */
	i__2 = matsiz;
	iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
	++curr;
	if (*info > 0) {
	    /* Encode the failing submatrix location into INFO (see the
	       INFO documentation above). */
	    *info = submat * (*n + 1) + submat + matsiz - 1;
	    return 0;
	}
	/* Identity permutation for this leaf's INDXQ segment. */
	k = 1;
	i__2 = iwork[i__ + 1];
	for (j = submat; j <= i__2; ++j) {
	    iwork[indxq + j] = k;
	    ++k;
/* L60: */
	}
/* L70: */
    }

/*
       Successively merge eigensystems of adjacent submatrices
       into eigensystem for the corresponding larger matrix.

       while ( SUBPBS > 1 )
*/

    curlvl = 1;
L80:
    if (subpbs > 1) {
	spm2 = subpbs - 2;
	/* Merge neighbouring pairs (i, i+1) of subproblems. */
	i__1 = spm2;
	for (i__ = 0; i__ <= i__1; i__ += 2) {
	    if (i__ == 0) {
		submat = 1;
		matsiz = iwork[2];
		msd2 = iwork[1];
		curprb = 0;
	    } else {
		submat = iwork[i__] + 1;
		matsiz = iwork[i__ + 2] - iwork[i__];
		msd2 = matsiz / 2;
		++curprb;
	    }

/*
       Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2)
       into an eigensystem of size MATSIZ.  ZLAED7 handles the case
       when the eigenvectors of a full or band Hermitian matrix (which
       was reduced to tridiagonal form) are desired.

       I am free to use Q as a valuable working space until Loop 150.
*/

	    zlaed7_(&matsiz, &msd2, qsiz, &tlvls, &curlvl, &curprb, &d__[
		    submat], &qstore[submat * qstore_dim1 + 1], ldqs, &e[
		    submat + msd2 - 1], &iwork[indxq + submat], &rwork[iq], &
		    iwork[iqptr], &iwork[iprmpt], &iwork[iperm], &iwork[
		    igivpt], &iwork[igivcl], &rwork[igivnm], &q[submat *
		    q_dim1 + 1], &rwork[iwrem], &iwork[subpbs + 1], info);
	    if (*info > 0) {
		*info = submat * (*n + 1) + submat + matsiz - 1;
		return 0;
	    }
	    /* Collapse the pair's bookkeeping into one entry. */
	    iwork[i__ / 2 + 1] = iwork[i__ + 2];
/* L90: */
	}
	subpbs /= 2;
	++curlvl;
	goto L80;
    }

/*
       end while

       Re-merge the eigenvalues/vectors which were deflated at the final
       merge step.
*/

    /* Apply the final INDXQ permutation: gather eigenvalues into
       RWORK and eigenvectors into Q in sorted order. */
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	j = iwork[indxq + i__];
	rwork[i__] = d__[j];
	zcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1]
		, &c__1);
/* L100: */
    }
    dcopy_(n, &rwork[1], &c__1, &d__[1], &c__1);

    return 0;

/* End of ZLAED0 */

} /* zlaed0_ */
+
/* Subroutine */ int zlaed7_(integer *n, integer *cutpnt, integer *qsiz,
	integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__,
	doublecomplex *q, integer *ldq, doublereal *rho, integer *indxq,
	doublereal *qstore, integer *qptr, integer *prmptr, integer *perm,
	integer *givptr, integer *givcol, doublereal *givnum, doublecomplex *
	work, doublereal *rwork, integer *iwork, integer *info)
{
    /* System generated locals */
    integer q_dim1, q_offset, i__1, i__2;

    /* Builtin functions */
    integer pow_ii(integer *, integer *);

    /* Local variables */
    /* NOTE(review): f2c-style static locals make this routine
       non-reentrant and not thread-safe. */
    static integer i__, k, n1, n2, iq, iw, iz, ptr, ind1, ind2, indx, curr,
	    indxc, indxp;
    extern /* Subroutine */ int dlaed9_(integer *, integer *, integer *,
	    integer *, doublereal *, doublereal *, integer *, doublereal *,
	    doublereal *, doublereal *, doublereal *, integer *, integer *),
	    zlaed8_(integer *, integer *, integer *, doublecomplex *, integer
	    *, doublereal *, doublereal *, integer *, doublereal *,
	    doublereal *, doublecomplex *, integer *, doublereal *, integer *,
	     integer *, integer *, integer *, integer *, integer *,
	    doublereal *, integer *), dlaeda_(integer *, integer *, integer *,
	     integer *, integer *, integer *, integer *, integer *,
	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
	     integer *);
    static integer idlmda;
    extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
	    integer *, integer *, integer *), xerbla_(char *, integer *), zlacrm_(integer *, integer *, doublecomplex *, integer *,
	    doublereal *, integer *, doublecomplex *, integer *, doublereal *
	    );
    static integer coltyp;


/*
    -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       September 30, 1994


    Purpose
    =======

    ZLAED7 computes the updated eigensystem of a diagonal
    matrix after modification by a rank-one symmetric matrix. This
    routine is used only for the eigenproblem which requires all
    eigenvalues and optionally eigenvectors of a dense or banded
    Hermitian matrix that has been reduced to tridiagonal form.

      T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)

      where Z = Q'u, u is a vector of length N with ones in the
      CUTPNT and CUTPNT + 1 th elements and zeros elsewhere.

       The eigenvectors of the original matrix are stored in Q, and the
       eigenvalues are in D.  The algorithm consists of three stages:

          The first stage consists of deflating the size of the problem
          when there are multiple eigenvalues or if there is a zero in
          the Z vector.  For each such occurrence the dimension of the
          secular equation problem is reduced by one.  This stage is
          performed by the routine DLAED2.

          The second stage consists of calculating the updated
          eigenvalues. This is done by finding the roots of the secular
          equation via the routine DLAED4 (as called by SLAED3).
          This routine also calculates the eigenvectors of the current
          problem.

          The final stage consists of computing the updated eigenvectors
          directly using the updated eigenvalues.  The eigenvectors for
          the current problem are multiplied with the eigenvectors from
          the overall problem.

    Arguments
    =========

    N      (input) INTEGER
           The dimension of the symmetric tridiagonal matrix.  N >= 0.

    CUTPNT (input) INTEGER
           Contains the location of the last eigenvalue in the leading
           sub-matrix.  min(1,N) <= CUTPNT <= N.

    QSIZ   (input) INTEGER
           The dimension of the unitary matrix used to reduce
           the full matrix to tridiagonal form.  QSIZ >= N.

    TLVLS  (input) INTEGER
           The total number of merging levels in the overall divide and
           conquer tree.

    CURLVL (input) INTEGER
           The current level in the overall merge routine,
           0 <= curlvl <= tlvls.

    CURPBM (input) INTEGER
           The current problem in the current level in the overall
           merge routine (counting from upper left to lower right).

    D      (input/output) DOUBLE PRECISION array, dimension (N)
           On entry, the eigenvalues of the rank-1-perturbed matrix.
           On exit, the eigenvalues of the repaired matrix.

    Q      (input/output) COMPLEX*16 array, dimension (LDQ,N)
           On entry, the eigenvectors of the rank-1-perturbed matrix.
           On exit, the eigenvectors of the repaired tridiagonal matrix.

    LDQ    (input) INTEGER
           The leading dimension of the array Q.  LDQ >= max(1,N).

    RHO    (input) DOUBLE PRECISION
           Contains the subdiagonal element used to create the rank-1
           modification.

    INDXQ  (output) INTEGER array, dimension (N)
           This contains the permutation which will reintegrate the
           subproblem just solved back into sorted order,
           ie. D( INDXQ( I = 1, N ) ) will be in ascending order.

    IWORK  (workspace) INTEGER array, dimension (4*N)

    RWORK  (workspace) DOUBLE PRECISION array,
                                   dimension (3*N+2*QSIZ*N)

    WORK   (workspace) COMPLEX*16 array, dimension (QSIZ*N)

    QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1)
           Stores eigenvectors of submatrices encountered during
           divide and conquer, packed together.  QPTR points to
           beginning of the submatrices.

    QPTR   (input/output) INTEGER array, dimension (N+2)
           List of indices pointing to beginning of submatrices stored
           in QSTORE.  The submatrices are numbered starting at the
           bottom left of the divide and conquer tree, from left to
           right and bottom to top.

    PRMPTR (input) INTEGER array, dimension (N lg N)
           Contains a list of pointers which indicate where in PERM a
           level's permutation is stored.  PRMPTR(i+1) - PRMPTR(i)
           indicates the size of the permutation and also the size of
           the full, non-deflated problem.

    PERM   (input) INTEGER array, dimension (N lg N)
           Contains the permutations (from deflation and sorting) to be
           applied to each eigenblock.

    GIVPTR (input) INTEGER array, dimension (N lg N)
           Contains a list of pointers which indicate where in GIVCOL a
           level's Givens rotations are stored.  GIVPTR(i+1) - GIVPTR(i)
           indicates the number of Givens rotations.

    GIVCOL (input) INTEGER array, dimension (2, N lg N)
           Each pair of numbers indicates a pair of columns to take place
           in a Givens rotation.

    GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
           Each number indicates the S value to be used in the
           corresponding Givens rotation.

    INFO   (output) INTEGER
            = 0:  successful exit.
            < 0:  if INFO = -i, the i-th argument had an illegal value.
            > 0:  if INFO = 1, an eigenvalue did not converge

    =====================================================================


       Test the input parameters.
*/

    /* Parameter adjustments */
    --d__;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    --indxq;
    --qstore;
    --qptr;
    --prmptr;
    --perm;
    --givptr;
    givcol -= 3;
    givnum -= 3;
    --work;
    --rwork;
    --iwork;

    /* Function Body */
    *info = 0;

/*
       IF( ICOMPQ.LT.0 .OR. ICOMPQ.GT.1 ) THEN
          INFO = -1
       ELSE IF( N.LT.0 ) THEN
*/
    if (*n < 0) {
	*info = -1;
    } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
	*info = -2;
    } else if (*qsiz < *n) {
	*info = -3;
    } else if (*ldq < max(1,*n)) {
	*info = -9;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZLAED7", &i__1);
	return 0;
    }

/* Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*
       The following values are for bookkeeping purposes only.  They are
       integer pointers which indicate the portion of the workspace
       used by a particular array in DLAED2 and SLAED3.
*/

    /* RWORK layout: Z vector, DLAMDA, W, then the K-by-K secular
       eigenvector matrix. */
    iz = 1;
    idlmda = iz + *n;
    iw = idlmda + *n;
    iq = iw + *n;

    /* IWORK layout: INDX, INDXC, COLTYP, INDXP (N entries each). */
    indx = 1;
    indxc = indx + *n;
    coltyp = indxc + *n;
    indxp = coltyp + *n;

/*
       Form the z-vector which consists of the last row of Q_1 and the
       first row of Q_2.
*/

    /* Locate this subproblem's slot CURR in the bookkeeping arrays by
       walking CURLVL-1 levels down the merge tree. */
    ptr = pow_ii(&c__2, tlvls) + 1;
    i__1 = *curlvl - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = *tlvls - i__;
	ptr += pow_ii(&c__2, &i__2);
/* L10: */
    }
    curr = ptr + *curpbm;
    dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
	    givcol[3], &givnum[3], &qstore[1], &qptr[1], &rwork[iz], &rwork[
	    iz + *n], info);

/*
       When solving the final problem, we no longer need the stored data,
       so we will overwrite the data from this level onto the previously
       used storage space.
*/

    if (*curlvl == *tlvls) {
	qptr[curr] = 1;
	prmptr[curr] = 1;
	givptr[curr] = 1;
    }

/* Sort and Deflate eigenvalues. */

    zlaed8_(&k, n, qsiz, &q[q_offset], ldq, &d__[1], rho, cutpnt, &rwork[iz],
	    &rwork[idlmda], &work[1], qsiz, &rwork[iw], &iwork[indxp], &iwork[
	    indx], &indxq[1], &perm[prmptr[curr]], &givptr[curr + 1], &givcol[
	    ((givptr[curr]) << (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1]
	    , info);
    /* Advance the permutation/rotation pointers for the next level. */
    prmptr[curr + 1] = prmptr[curr] + *n;
    givptr[curr + 1] += givptr[curr];

/* Solve Secular Equation. */

    if (k != 0) {
	/* K non-deflated eigenvalues: solve the secular equation
	   (DLAED9), then update the eigenvectors (ZLACRM). */
	dlaed9_(&k, &c__1, &k, n, &d__[1], &rwork[iq], &k, rho, &rwork[idlmda]
		, &rwork[iw], &qstore[qptr[curr]], &k, info);
	zlacrm_(qsiz, &k, &work[1], qsiz, &qstore[qptr[curr]], &k, &q[
		q_offset], ldq, &rwork[iq]);
/* Computing 2nd power */
	i__1 = k;
	qptr[curr + 1] = qptr[curr] + i__1 * i__1;
	if (*info != 0) {
	    return 0;
	}

/* Prepare the INDXQ sorting permutation. */

	n1 = k;
	n2 = *n - k;
	/* NOTE(review): ind1/ind2 are assigned but not used below. */
	ind1 = 1;
	ind2 = *n;
	dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
    } else {
	/* Everything deflated: the eigenvalues are already sorted, so
	   INDXQ is the identity. */
	qptr[curr + 1] = qptr[curr];
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    indxq[i__] = i__;
/* L20: */
	}
    }

    return 0;

/* End of ZLAED7 */

} /* zlaed7_ */
+
+/* Subroutine */ int zlaed8_(integer *k, integer *n, integer *qsiz,
+ doublecomplex *q, integer *ldq, doublereal *d__, doublereal *rho,
+ integer *cutpnt, doublereal *z__, doublereal *dlamda, doublecomplex *
+ q2, integer *ldq2, doublereal *w, integer *indxp, integer *indx,
+ integer *indxq, integer *perm, integer *givptr, integer *givcol,
+ doublereal *givnum, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal c__;
+ static integer i__, j;
+ static doublereal s, t;
+ static integer k2, n1, n2, jp, n1p1;
+ static doublereal eps, tau, tol;
+ static integer jlam, imax, jmax;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dcopy_(integer *, doublereal *, integer *, doublereal
+ *, integer *), zdrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *), zcopy_(
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+ ;
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLAED8 merges the two sets of eigenvalues together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ eigenvalues are close together or if there is a tiny element in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ Arguments
+ =========
+
+ K (output) INTEGER
+ Contains the number of non-deflated eigenvalues.
+ This is the order of the related secular equation.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ QSIZ (input) INTEGER
+ The dimension of the unitary matrix used to reduce
+ the dense or band matrix to tridiagonal form.
+ QSIZ >= N if ICOMPQ = 1.
+
+ Q (input/output) COMPLEX*16 array, dimension (LDQ,N)
+ On entry, Q contains the eigenvectors of the partially solved
+ system which has been previously updated in matrix
+ multiplies with other partially solved eigensystems.
+ On exit, Q contains the trailing (N-K) updated eigenvectors
+ (those which were deflated) in its last N-K columns.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max( 1, N ).
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, D contains the eigenvalues of the two submatrices to
+ be combined. On exit, D contains the trailing (N-K) updated
+ eigenvalues (those which were deflated) sorted into increasing
+ order.
+
+ RHO (input/output) DOUBLE PRECISION
+ Contains the off diagonal element associated with the rank-1
+ cut which originally split the two submatrices which are now
+ being recombined. RHO is modified during the computation to
+ the value required by DLAED3.
+
+ CUTPNT (input) INTEGER
+ Contains the location of the last eigenvalue in the leading
+ sub-matrix. MIN(1,N) <= CUTPNT <= N.
+
+ Z (input) DOUBLE PRECISION array, dimension (N)
+ On input this vector contains the updating vector (the last
+ row of the first sub-eigenvector matrix and the first row of
+ the second sub-eigenvector matrix). The contents of Z are
+ destroyed during the updating process.
+
+ DLAMDA (output) DOUBLE PRECISION array, dimension (N)
+ Contains a copy of the first K eigenvalues which will be used
+ by DLAED3 to form the secular equation.
+
+ Q2 (output) COMPLEX*16 array, dimension (LDQ2,N)
+ If ICOMPQ = 0, Q2 is not referenced. Otherwise,
+ Contains a copy of the first K eigenvectors which will be used
+ by DLAED7 in a matrix multiply (DGEMM) to update the new
+ eigenvectors.
+
+ LDQ2 (input) INTEGER
+ The leading dimension of the array Q2. LDQ2 >= max( 1, N ).
+
+ W (output) DOUBLE PRECISION array, dimension (N)
+ This will hold the first k values of the final
+ deflation-altered z-vector and will be passed to DLAED3.
+
+ INDXP (workspace) INTEGER array, dimension (N)
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output INDXP(1:K)
+ points to the nondeflated D-values and INDXP(K+1:N)
+ points to the deflated eigenvalues.
+
+ INDX (workspace) INTEGER array, dimension (N)
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ INDXQ (input) INTEGER array, dimension (N)
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that elements in
+ the second half of this permutation must first have CUTPNT
+ added to their values in order to be accurate.
+
+ PERM (output) INTEGER array, dimension (N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (output) INTEGER
+ Contains the number of Givens rotations which took place in
+ this subproblem.
+
+ GIVCOL (output) INTEGER array, dimension (2, N)
+ Each pair of numbers indicates a pair of columns to take place
+ in a Givens rotation.
+
+ GIVNUM (output) DOUBLE PRECISION array, dimension (2, N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1 * 1;
+ q -= q_offset;
+ --d__;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1 * 1;
+ q2 -= q2_offset;
+ --w;
+ --indxp;
+ --indx;
+ --indxq;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -5;
+ } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+ *info = -8;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+ if (*rho < 0.) {
+ dscal_(&n2, &c_b1294, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1 */
+
+ t = 1. / sqrt(2.);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ dscal_(n, &t, &z__[1], &c__1);
+ *rho = (d__1 = *rho * 2., abs(d__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = idamax_(n, &z__[1], &c__1);
+ jmax = idamax_(n, &d__[1], &c__1);
+ eps = EPSILON;
+ tol = eps * 8. * (d__1 = d__[jmax], abs(d__1));
+
+/*
+ If the rank-1 modifier is small enough, no more needs to be done
+ -- except to reorganize Q so that its columns correspond with the
+ elements in D.
+*/
+
+ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+ *k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ zcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+ , &c__1);
+/* L50: */
+ }
+ zlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ return 0;
+ }
+
+/*
+ If there are multiple eigenvalues then the problem deflates. Here
+ the number of equal eigenvalues are found. As each equal
+ eigenvalue is found, an elementary reflector is computed to rotate
+ the corresponding eigensubspace so that the corresponding
+ components of Z are zero in this new basis.
+*/
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ jlam = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ ++j;
+ if (j > *n) {
+ goto L90;
+ }
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = dlapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]];
+ givcol[((*givptr) << (1)) + 2] = indxq[indx[j]];
+ givnum[((*givptr) << (1)) + 1] = c__;
+ givnum[((*givptr) << (1)) + 2] = s;
+ zdrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[
+ indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+L80:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L80;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L70;
+L90:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L100:
+
+/*
+ Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+ and Q2 respectively. The eigenvalues/vectors which were not
+ deflated go into the first K slots of DLAMDA and Q2 respectively,
+ while those which were deflated go into the last N - K slots.
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ zcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], &
+ c__1);
+/* L110: */
+ }
+
+/*
+ The deflated eigenvalues and their corresponding vectors go back
+ into the last N - K slots of D and Q respectively.
+*/
+
+ if (*k < *n) {
+ i__1 = *n - *k;
+ dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ zlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k +
+ 1) * q_dim1 + 1], ldq);
+ }
+
+ return 0;
+
+/* End of ZLAED8 */
+
+} /* zlaed8_ */
+
+/* Subroutine */ int zlahqr_(logical *wantt, logical *wantz, integer *n,
+	integer *ilo, integer *ihi, doublecomplex *h__, integer *ldh,
+	doublecomplex *w, integer *iloz, integer *ihiz, doublecomplex *z__,
+	integer *ldz, integer *info)
+{
+    /* System generated locals */
+    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+    void z_sqrt(doublecomplex *, doublecomplex *), d_cnjg(doublecomplex *,
+	    doublecomplex *);
+    double z_abs(doublecomplex *);
+
+    /* Local variables */
+    static integer i__, j, k, l, m;
+    static doublereal s;
+    static doublecomplex t, u, v[2], x, y;
+    static integer i1, i2;
+    static doublecomplex t1;
+    static doublereal t2;
+    static doublecomplex v2;
+    static doublereal h10;
+    static doublecomplex h11;
+    static doublereal h21;
+    static doublecomplex h22;
+    static integer nh, nz;
+    static doublecomplex h11s;
+    static integer itn, its;
+    static doublereal ulp;
+    static doublecomplex sum;
+    static doublereal tst1;
+    static doublecomplex temp;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *);
+    static doublereal rtemp, rwork[1];
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+
+    extern /* Subroutine */ int zlarfg_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *);
+    extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+	    doublecomplex *);
+    extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *,
+	    doublereal *);
+    static doublereal smlnum;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZLAHQR is an auxiliary routine called by ZHSEQR to update the
+    eigenvalues and Schur decomposition already computed by ZHSEQR, by
+    dealing with the Hessenberg submatrix in rows and columns ILO to IHI.
+
+    Arguments
+    =========
+
+    WANTT   (input) LOGICAL
+            = .TRUE. : the full Schur form T is required;
+            = .FALSE.: only eigenvalues are required.
+
+    WANTZ   (input) LOGICAL
+            = .TRUE. : the matrix of Schur vectors Z is required;
+            = .FALSE.: Schur vectors are not required.
+
+    N       (input) INTEGER
+            The order of the matrix H.  N >= 0.
+
+    ILO     (input) INTEGER
+    IHI     (input) INTEGER
+            It is assumed that H is already upper triangular in rows and
+            columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1).
+            ZLAHQR works primarily with the Hessenberg submatrix in rows
+            and columns ILO to IHI, but applies transformations to all of
+            H if WANTT is .TRUE..
+            1 <= ILO <= max(1,IHI); IHI <= N.
+
+    H       (input/output) COMPLEX*16 array, dimension (LDH,N)
+            On entry, the upper Hessenberg matrix H.
+            On exit, if WANTT is .TRUE., H is upper triangular in rows
+            and columns ILO:IHI, with any 2-by-2 diagonal blocks in
+            standard form. If WANTT is .FALSE., the contents of H are
+            unspecified on exit.
+
+    LDH     (input) INTEGER
+            The leading dimension of the array H. LDH >= max(1,N).
+
+    W       (output) COMPLEX*16 array, dimension (N)
+            The computed eigenvalues ILO to IHI are stored in the
+            corresponding elements of W. If WANTT is .TRUE., the
+            eigenvalues are stored in the same order as on the diagonal
+            of the Schur form returned in H, with W(i) = H(i,i).
+
+    ILOZ    (input) INTEGER
+    IHIZ    (input) INTEGER
+            Specify the rows of Z to which transformations must be
+            applied if WANTZ is .TRUE..
+            1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
+
+    Z       (input/output) COMPLEX*16 array, dimension (LDZ,N)
+            If WANTZ is .TRUE., on entry Z must contain the current
+            matrix Z of transformations accumulated by ZHSEQR, and on
+            exit Z has been updated; transformations are applied only to
+            the submatrix Z(ILOZ:IHIZ,ILO:IHI).
+            If WANTZ is .FALSE., Z is not referenced.
+
+    LDZ     (input) INTEGER
+            The leading dimension of the array Z. LDZ >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            > 0: if INFO = i, ZLAHQR failed to compute all the
+                 eigenvalues ILO to IHI in a total of 30*(IHI-ILO+1)
+                 iterations; elements i+1:ihi of W contain those
+                 eigenvalues which have been successfully computed.
+
+    =====================================================================
+*/
+
+
+/* NOTE(review): f2c-generated translation of LAPACK ZLAHQR.  The static
+   local variables declared above are an f2c artifact and make this
+   routine non-reentrant (not thread-safe). */
+
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1 * 1;
+    h__ -= h_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+
+    /* Function Body */
+    *info = 0;
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*ilo == *ihi) {
+	i__1 = *ilo;
+	i__2 = *ilo + *ilo * h_dim1;
+	w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+	return 0;
+    }
+
+    nh = *ihi - *ilo + 1;
+    nz = *ihiz - *iloz + 1;
+
+/*
+    Set machine-dependent constants for the stopping criterion.
+    If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+    ulp = PRECISION;
+    smlnum = SAFEMINIMUM / ulp;
+
+/*
+    I1 and I2 are the indices of the first row and last column of H
+    to which transformations must be applied. If eigenvalues only are
+    being computed, I1 and I2 are set inside the main loop.
+*/
+
+    if (*wantt) {
+	i1 = 1;
+	i2 = *n;
+    }
+
+/* ITN is the total number of QR iterations allowed. */
+
+    itn = nh * 30;
+
+/*
+    The main loop begins here. I is the loop index and decreases from
+    IHI to ILO in steps of 1. Each iteration of the loop works
+    with the active submatrix in rows and columns L to I.
+    Eigenvalues I+1 to IHI have already converged. Either L = ILO, or
+    H(L,L-1) is negligible so that the matrix splits.
+*/
+
+    i__ = *ihi;
+L10:
+    if (i__ < *ilo) {
+	goto L130;
+    }
+
+/*
+    Perform QR iterations on rows and columns ILO to I until a
+    submatrix of order 1 splits off at the bottom because a
+    subdiagonal element has become negligible.
+*/
+
+    l = *ilo;
+    i__1 = itn;
+    for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+	i__2 = l + 1;
+	for (k = i__; k >= i__2; --k) {
+	    i__3 = k - 1 + (k - 1) * h_dim1;
+	    i__4 = k + k * h_dim1;
+	    tst1 = (d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[k -
+		    1 + (k - 1) * h_dim1]), abs(d__2)) + ((d__3 = h__[i__4].r,
+		    abs(d__3)) + (d__4 = d_imag(&h__[k + k * h_dim1]), abs(
+		    d__4)));
+/* If the local 1-norm estimate vanished, fall back to zlanhs_ on the
+   whole active block so the deflation threshold stays meaningful. */
+	    if (tst1 == 0.) {
+		i__3 = i__ - l + 1;
+		tst1 = zlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, rwork);
+	    }
+	    i__3 = k + (k - 1) * h_dim1;
+/* Computing MAX */
+	    d__2 = ulp * tst1;
+	    if ((d__1 = h__[i__3].r, abs(d__1)) <= max(d__2,smlnum)) {
+		goto L30;
+	    }
+/* L20: */
+	}
+L30:
+	l = k;
+	if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+	    i__2 = l + (l - 1) * h_dim1;
+	    h__[i__2].r = 0., h__[i__2].i = 0.;
+	}
+
+/* Exit from loop if a submatrix of order 1 has split off. */
+
+	if (l >= i__) {
+	    goto L120;
+	}
+
+/*
+    Now the active submatrix is in rows and columns L to I. If
+    eigenvalues only are being computed, only the active submatrix
+    need be transformed.
+*/
+
+	if (! (*wantt)) {
+	    i1 = l;
+	    i2 = i__;
+	}
+
+	if (its == 10 || its == 20) {
+
+/* Exceptional shift. */
+
+	    i__2 = i__ + (i__ - 1) * h_dim1;
+	    s = (d__1 = h__[i__2].r, abs(d__1)) * .75;
+	    i__2 = i__ + i__ * h_dim1;
+	    z__1.r = s + h__[i__2].r, z__1.i = h__[i__2].i;
+	    t.r = z__1.r, t.i = z__1.i;
+	} else {
+
+/* Wilkinson's shift. */
+
+	    i__2 = i__ + i__ * h_dim1;
+	    t.r = h__[i__2].r, t.i = h__[i__2].i;
+	    i__2 = i__ - 1 + i__ * h_dim1;
+	    i__3 = i__ + (i__ - 1) * h_dim1;
+	    d__1 = h__[i__3].r;
+	    z__1.r = d__1 * h__[i__2].r, z__1.i = d__1 * h__[i__2].i;
+	    u.r = z__1.r, u.i = z__1.i;
+	    if (u.r != 0. || u.i != 0.) {
+		i__2 = i__ - 1 + (i__ - 1) * h_dim1;
+		z__2.r = h__[i__2].r - t.r, z__2.i = h__[i__2].i - t.i;
+		z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
+		x.r = z__1.r, x.i = z__1.i;
+		z__3.r = x.r * x.r - x.i * x.i, z__3.i = x.r * x.i + x.i *
+			x.r;
+		z__2.r = z__3.r + u.r, z__2.i = z__3.i + u.i;
+		z_sqrt(&z__1, &z__2);
+		y.r = z__1.r, y.i = z__1.i;
+/* Pick the sign of the square root that avoids cancellation in X+Y. */
+		if (x.r * y.r + d_imag(&x) * d_imag(&y) < 0.) {
+		    z__1.r = -y.r, z__1.i = -y.i;
+		    y.r = z__1.r, y.i = z__1.i;
+		}
+		z__3.r = x.r + y.r, z__3.i = x.i + y.i;
+		zladiv_(&z__2, &u, &z__3);
+		z__1.r = t.r - z__2.r, z__1.i = t.i - z__2.i;
+		t.r = z__1.r, t.i = z__1.i;
+	    }
+	}
+
+/* Look for two consecutive small subdiagonal elements. */
+
+	i__2 = l + 1;
+	for (m = i__ - 1; m >= i__2; --m) {
+
+/*
+    Determine the effect of starting the single-shift QR
+    iteration at row M, and see if this would make H(M,M-1)
+    negligible.
+*/
+
+	    i__3 = m + m * h_dim1;
+	    h11.r = h__[i__3].r, h11.i = h__[i__3].i;
+	    i__3 = m + 1 + (m + 1) * h_dim1;
+	    h22.r = h__[i__3].r, h22.i = h__[i__3].i;
+	    z__1.r = h11.r - t.r, z__1.i = h11.i - t.i;
+	    h11s.r = z__1.r, h11s.i = z__1.i;
+	    i__3 = m + 1 + m * h_dim1;
+	    h21 = h__[i__3].r;
+	    s = (d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(d__2))
+		     + abs(h21);
+	    z__1.r = h11s.r / s, z__1.i = h11s.i / s;
+	    h11s.r = z__1.r, h11s.i = z__1.i;
+	    h21 /= s;
+	    v[0].r = h11s.r, v[0].i = h11s.i;
+	    v[1].r = h21, v[1].i = 0.;
+	    i__3 = m + (m - 1) * h_dim1;
+	    h10 = h__[i__3].r;
+	    tst1 = ((d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(
+		    d__2))) * ((d__3 = h11.r, abs(d__3)) + (d__4 = d_imag(&
+		    h11), abs(d__4)) + ((d__5 = h22.r, abs(d__5)) + (d__6 =
+		    d_imag(&h22), abs(d__6))));
+	    if ((d__1 = h10 * h21, abs(d__1)) <= ulp * tst1) {
+		goto L50;
+	    }
+/* L40: */
+	}
+/* No such pair found: start the QR step at row L. */
+	i__2 = l + l * h_dim1;
+	h11.r = h__[i__2].r, h11.i = h__[i__2].i;
+	i__2 = l + 1 + (l + 1) * h_dim1;
+	h22.r = h__[i__2].r, h22.i = h__[i__2].i;
+	z__1.r = h11.r - t.r, z__1.i = h11.i - t.i;
+	h11s.r = z__1.r, h11s.i = z__1.i;
+	i__2 = l + 1 + l * h_dim1;
+	h21 = h__[i__2].r;
+	s = (d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(d__2)) +
+		abs(h21);
+	z__1.r = h11s.r / s, z__1.i = h11s.i / s;
+	h11s.r = z__1.r, h11s.i = z__1.i;
+	h21 /= s;
+	v[0].r = h11s.r, v[0].i = h11s.i;
+	v[1].r = h21, v[1].i = 0.;
+L50:
+
+/* Single-shift QR step */
+
+	i__2 = i__ - 1;
+	for (k = m; k <= i__2; ++k) {
+
+/*
+    The first iteration of this loop determines a reflection G
+    from the vector V and applies it from left and right to H,
+    thus creating a nonzero bulge below the subdiagonal.
+
+    Each subsequent iteration determines a reflection G to
+    restore the Hessenberg form in the (K-1)th column, and thus
+    chases the bulge one step toward the bottom of the active
+    submatrix.
+
+    V(2) is always real before the call to ZLARFG, and hence
+    after the call T2 ( = T1*V(2) ) is also real.
+*/
+
+	    if (k > m) {
+		zcopy_(&c__2, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+	    }
+	    zlarfg_(&c__2, v, &v[1], &c__1, &t1);
+	    if (k > m) {
+		i__3 = k + (k - 1) * h_dim1;
+		h__[i__3].r = v[0].r, h__[i__3].i = v[0].i;
+		i__3 = k + 1 + (k - 1) * h_dim1;
+		h__[i__3].r = 0., h__[i__3].i = 0.;
+	    }
+	    v2.r = v[1].r, v2.i = v[1].i;
+	    z__1.r = t1.r * v2.r - t1.i * v2.i, z__1.i = t1.r * v2.i + t1.i *
+		    v2.r;
+	    t2 = z__1.r;
+
+/*
+    Apply G from the left to transform the rows of the matrix
+    in columns K to I2.
+*/
+
+	    i__3 = i2;
+	    for (j = k; j <= i__3; ++j) {
+		d_cnjg(&z__3, &t1);
+		i__4 = k + j * h_dim1;
+		z__2.r = z__3.r * h__[i__4].r - z__3.i * h__[i__4].i, z__2.i =
+			 z__3.r * h__[i__4].i + z__3.i * h__[i__4].r;
+		i__5 = k + 1 + j * h_dim1;
+		z__4.r = t2 * h__[i__5].r, z__4.i = t2 * h__[i__5].i;
+		z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+		sum.r = z__1.r, sum.i = z__1.i;
+		i__4 = k + j * h_dim1;
+		i__5 = k + j * h_dim1;
+		z__1.r = h__[i__5].r - sum.r, z__1.i = h__[i__5].i - sum.i;
+		h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+		i__4 = k + 1 + j * h_dim1;
+		i__5 = k + 1 + j * h_dim1;
+		z__2.r = sum.r * v2.r - sum.i * v2.i, z__2.i = sum.r * v2.i +
+			sum.i * v2.r;
+		z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i - z__2.i;
+		h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+/* L60: */
+	    }
+
+/*
+    Apply G from the right to transform the columns of the
+    matrix in rows I1 to min(K+2,I).
+
+    Computing MIN
+*/
+	    i__4 = k + 2;
+	    i__3 = min(i__4,i__);
+	    for (j = i1; j <= i__3; ++j) {
+		i__4 = j + k * h_dim1;
+		z__2.r = t1.r * h__[i__4].r - t1.i * h__[i__4].i, z__2.i =
+			t1.r * h__[i__4].i + t1.i * h__[i__4].r;
+		i__5 = j + (k + 1) * h_dim1;
+		z__3.r = t2 * h__[i__5].r, z__3.i = t2 * h__[i__5].i;
+		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+		sum.r = z__1.r, sum.i = z__1.i;
+		i__4 = j + k * h_dim1;
+		i__5 = j + k * h_dim1;
+		z__1.r = h__[i__5].r - sum.r, z__1.i = h__[i__5].i - sum.i;
+		h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+		i__4 = j + (k + 1) * h_dim1;
+		i__5 = j + (k + 1) * h_dim1;
+		d_cnjg(&z__3, &v2);
+		z__2.r = sum.r * z__3.r - sum.i * z__3.i, z__2.i = sum.r *
+			z__3.i + sum.i * z__3.r;
+		z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i - z__2.i;
+		h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+/* L70: */
+	    }
+
+	    if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+		i__3 = *ihiz;
+		for (j = *iloz; j <= i__3; ++j) {
+		    i__4 = j + k * z_dim1;
+		    z__2.r = t1.r * z__[i__4].r - t1.i * z__[i__4].i, z__2.i =
+			     t1.r * z__[i__4].i + t1.i * z__[i__4].r;
+		    i__5 = j + (k + 1) * z_dim1;
+		    z__3.r = t2 * z__[i__5].r, z__3.i = t2 * z__[i__5].i;
+		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+		    sum.r = z__1.r, sum.i = z__1.i;
+		    i__4 = j + k * z_dim1;
+		    i__5 = j + k * z_dim1;
+		    z__1.r = z__[i__5].r - sum.r, z__1.i = z__[i__5].i -
+			    sum.i;
+		    z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+		    i__4 = j + (k + 1) * z_dim1;
+		    i__5 = j + (k + 1) * z_dim1;
+		    d_cnjg(&z__3, &v2);
+		    z__2.r = sum.r * z__3.r - sum.i * z__3.i, z__2.i = sum.r *
+			     z__3.i + sum.i * z__3.r;
+		    z__1.r = z__[i__5].r - z__2.r, z__1.i = z__[i__5].i -
+			    z__2.i;
+		    z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+/* L80: */
+		}
+	    }
+
+	    if ((k == m && m > l)) {
+
+/*
+    If the QR step was started at row M > L because two
+    consecutive small subdiagonals were found, then extra
+    scaling must be performed to ensure that H(M,M-1) remains
+    real.
+*/
+
+		z__1.r = 1. - t1.r, z__1.i = 0. - t1.i;
+		temp.r = z__1.r, temp.i = z__1.i;
+		d__1 = z_abs(&temp);
+		z__1.r = temp.r / d__1, z__1.i = temp.i / d__1;
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__3 = m + 1 + m * h_dim1;
+		i__4 = m + 1 + m * h_dim1;
+		d_cnjg(&z__2, &temp);
+		z__1.r = h__[i__4].r * z__2.r - h__[i__4].i * z__2.i, z__1.i =
+			 h__[i__4].r * z__2.i + h__[i__4].i * z__2.r;
+		h__[i__3].r = z__1.r, h__[i__3].i = z__1.i;
+		if (m + 2 <= i__) {
+		    i__3 = m + 2 + (m + 1) * h_dim1;
+		    i__4 = m + 2 + (m + 1) * h_dim1;
+		    z__1.r = h__[i__4].r * temp.r - h__[i__4].i * temp.i,
+			    z__1.i = h__[i__4].r * temp.i + h__[i__4].i *
+			    temp.r;
+		    h__[i__3].r = z__1.r, h__[i__3].i = z__1.i;
+		}
+		i__3 = i__;
+		for (j = m; j <= i__3; ++j) {
+		    if (j != m + 1) {
+			if (i2 > j) {
+			    i__4 = i2 - j;
+			    zscal_(&i__4, &temp, &h__[j + (j + 1) * h_dim1],
+				    ldh);
+			}
+			i__4 = j - i1;
+			d_cnjg(&z__1, &temp);
+			zscal_(&i__4, &z__1, &h__[i1 + j * h_dim1], &c__1);
+			if (*wantz) {
+			    d_cnjg(&z__1, &temp);
+			    zscal_(&nz, &z__1, &z__[*iloz + j * z_dim1], &
+				    c__1);
+			}
+		    }
+/* L90: */
+		}
+	    }
+/* L100: */
+	}
+
+/* Ensure that H(I,I-1) is real. */
+
+	i__2 = i__ + (i__ - 1) * h_dim1;
+	temp.r = h__[i__2].r, temp.i = h__[i__2].i;
+	if (d_imag(&temp) != 0.) {
+	    rtemp = z_abs(&temp);
+	    i__2 = i__ + (i__ - 1) * h_dim1;
+	    h__[i__2].r = rtemp, h__[i__2].i = 0.;
+	    z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp;
+	    temp.r = z__1.r, temp.i = z__1.i;
+	    if (i2 > i__) {
+		i__2 = i2 - i__;
+		d_cnjg(&z__1, &temp);
+		zscal_(&i__2, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+	    }
+	    i__2 = i__ - i1;
+	    zscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+	    if (*wantz) {
+		zscal_(&nz, &temp, &z__[*iloz + i__ * z_dim1], &c__1);
+	    }
+	}
+
+/* L110: */
+    }
+
+/* Failure to converge in remaining number of iterations */
+
+    *info = i__;
+    return 0;
+
+L120:
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+    i__1 = i__;
+    i__2 = i__ + i__ * h_dim1;
+    w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+
+/*
+    Decrement number of remaining iterations, and return to start of
+    the main loop with new value of I.
+*/
+
+    itn -= its;
+    i__ = l - 1;
+    goto L10;
+
+L130:
+    return 0;
+
+/* End of ZLAHQR */
+
+} /* zlahqr_ */
+
+/* Subroutine */ int zlahrd_(integer *n, integer *k, integer *nb,
+	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *t,
+	integer *ldt, doublecomplex *y, integer *ldy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+	    i__3;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__;
+    static doublecomplex ei;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+	    integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), ztrmv_(char *, char *,
+	    char *, integer *, doublecomplex *, integer *, doublecomplex *,
+	    integer *), zlarfg_(integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *),
+	    zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZLAHRD reduces the first NB columns of a complex general n-by-(n-k+1)
+    matrix A so that elements below the k-th subdiagonal are zero. The
+    reduction is performed by a unitary similarity transformation
+    Q' * A * Q. The routine returns the matrices V and T which determine
+    Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.
+
+    This is an auxiliary routine called by ZGEHRD.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The order of the matrix A.
+
+    K       (input) INTEGER
+            The offset for the reduction. Elements below the k-th
+            subdiagonal in the first NB columns are reduced to zero.
+
+    NB      (input) INTEGER
+            The number of columns to be reduced.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N-K+1)
+            On entry, the n-by-(n-k+1) general matrix A.
+            On exit, the elements on and above the k-th subdiagonal in
+            the first NB columns are overwritten with the corresponding
+            elements of the reduced matrix; the elements below the k-th
+            subdiagonal, with the array TAU, represent the matrix Q as a
+            product of elementary reflectors. The other columns of A are
+            unchanged. See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    TAU     (output) COMPLEX*16 array, dimension (NB)
+            The scalar factors of the elementary reflectors. See Further
+            Details.
+
+    T       (output) COMPLEX*16 array, dimension (LDT,NB)
+            The upper triangular matrix T.
+
+    LDT     (input) INTEGER
+            The leading dimension of the array T.  LDT >= NB.
+
+    Y       (output) COMPLEX*16 array, dimension (LDY,NB)
+            The n-by-nb matrix Y.
+
+    LDY     (input) INTEGER
+            The leading dimension of the array Y. LDY >= max(1,N).
+
+    Further Details
+    ===============
+
+    The matrix Q is represented as a product of nb elementary reflectors
+
+       Q = H(1) H(2) . . . H(nb).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in
+    A(i+k+1:n,i), and tau in TAU(i).
+
+    The elements of the vectors v together form the (n-k+1)-by-nb matrix
+    V which is needed, with T and Y, to apply the transformation to the
+    unreduced part of the matrix, using an update of the form:
+    A := (I - V*T*V') * (A - Y*V').
+
+    The contents of A on exit are illustrated by the following example
+    with n = 7, k = 3 and nb = 2:
+
+       ( a   h   a   a   a )
+       ( a   h   a   a   a )
+       ( a   h   a   a   a )
+       ( h   h   a   a   a )
+       ( v1  h   a   a   a )
+       ( v1  v2  a   a   a )
+       ( v1  v2  a   a   a )
+
+    where a denotes an element of the original matrix A, h denotes a
+    modified element of the upper Hessenberg matrix H, and vi denotes an
+    element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+/* NOTE(review): f2c-generated translation; the static locals I__ and EI
+   make this routine non-reentrant.  EI deliberately carries the saved
+   subdiagonal element A(K+I,I) from one loop iteration to the next --
+   it is written at the bottom of each iteration before being read at the
+   top of the following one (the i__ > 1 guard protects the first pass).
+   NOTE(review): later LAPACK releases supersede ZLAHRD with ZLAHR2 --
+   worth confirming if this file is ever resynced upstream. */
+
+    /* Parameter adjustments */
+    --tau;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1 * 1;
+    t -= t_offset;
+    y_dim1 = *ldy;
+    y_offset = 1 + y_dim1 * 1;
+    y -= y_offset;
+
+    /* Function Body */
+    if (*n <= 1) {
+	return 0;
+    }
+
+    i__1 = *nb;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (i__ > 1) {
+
+/*
+    Update A(1:n,i)
+
+    Compute i-th column of A - Y * V'
+*/
+
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+	    i__2 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", n, &i__2, &z__1, &y[y_offset], ldy, &a[*k
+		    + i__ - 1 + a_dim1], lda, &c_b60, &a[i__ * a_dim1 + 1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+
+/*
+    Apply I - V * T' * V' to this column (call it b) from the
+    left, using the last column of T as workspace
+
+    Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows)
+             ( V2 )             ( b2 )
+
+    where V1 is unit lower triangular
+
+    w := V1' * b1
+*/
+
+	    i__2 = i__ - 1;
+	    zcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+		    1], &c__1);
+	    i__2 = i__ - 1;
+	    ztrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a[*k + 1 +
+		    a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+	    i__2 = *n - *k - i__ + 1;
+	    i__3 = i__ - 1;
+	    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[*k + i__ +
+		    a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b60,
+		    &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+	    i__2 = i__ - 1;
+	    ztrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[
+		    t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+	    i__2 = *n - *k - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[*k + i__ + a_dim1],
+		     lda, &t[*nb * t_dim1 + 1], &c__1, &c_b60, &a[*k + i__ +
+		    i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+	    i__2 = i__ - 1;
+	    ztrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+		    , lda, &t[*nb * t_dim1 + 1], &c__1);
+	    i__2 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zaxpy_(&i__2, &z__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+		    * a_dim1], &c__1);
+
+/* Restore A(K+I-1,I-1), which was overwritten with 1 when the previous
+   reflector was generated (EI holds the saved value). */
+	    i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
+	    a[i__2].r = ei.r, a[i__2].i = ei.i;
+	}
+
+/*
+    Generate the elementary reflector H(i) to annihilate
+    A(k+i+1:n,i)
+*/
+
+	i__2 = *k + i__ + i__ * a_dim1;
+	ei.r = a[i__2].r, ei.i = a[i__2].i;
+	i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+	i__3 = *k + i__ + 1;
+	zlarfg_(&i__2, &ei, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__])
+		;
+	i__2 = *k + i__ + i__ * a_dim1;
+	a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute  Y(1:n,i) */
+
+	i__2 = *n - *k - i__ + 1;
+	zgemv_("No transpose", n, &i__2, &c_b60, &a[(i__ + 1) * a_dim1 + 1],
+		lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b59, &y[i__ *
+		y_dim1 + 1], &c__1);
+	i__2 = *n - *k - i__ + 1;
+	i__3 = i__ - 1;
+	zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[*k + i__ +
+		a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b59, &t[
+		i__ * t_dim1 + 1], &c__1);
+	i__2 = i__ - 1;
+	z__1.r = -1., z__1.i = -0.;
+	zgemv_("No transpose", n, &i__2, &z__1, &y[y_offset], ldy, &t[i__ *
+		t_dim1 + 1], &c__1, &c_b60, &y[i__ * y_dim1 + 1], &c__1);
+	zscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+	i__2 = i__ - 1;
+	i__3 = i__;
+	z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+	zscal_(&i__2, &z__1, &t[i__ * t_dim1 + 1], &c__1);
+	i__2 = i__ - 1;
+	ztrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+		&t[i__ * t_dim1 + 1], &c__1)
+		;
+	i__2 = i__ + i__ * t_dim1;
+	i__3 = i__;
+	t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+
+/* L10: */
+    }
+/* Restore A(K+NB,NB), overwritten with 1 when the last reflector was
+   generated. */
+    i__1 = *k + *nb + *nb * a_dim1;
+    a[i__1].r = ei.r, a[i__1].i = ei.i;
+
+    return 0;
+
+/* End of ZLAHRD */
+
+} /* zlahrd_ */
+
+/*
+   NOTE(review): f2c-generated translation of LAPACK ZLALS0 (complex*16).
+   Do not hand-edit the numerics — regenerate from the Fortran source.
+   Throughout, `((x) << (1))` is f2c's rendering of `2*x`, i.e. an access
+   to column 2 of a two-column Fortran array such as POLES/GIVNUM/GIVCOL.
+   External constants c_b1015, c_b324, c_b1294, c__0, c__1 are defined
+   elsewhere in this file; presumably 1.0, 0.0, -1.0, 0, 1 respectively
+   (the usual f2c constant pool) — confirm against the declarations.
+*/
+/* Subroutine */ int zlals0_(integer *icompq, integer *nl, integer *nr,
+	integer *sqre, integer *nrhs, doublecomplex *b, integer *ldb,
+	doublecomplex *bx, integer *ldbx, integer *perm, integer *givptr,
+	integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum,
+	doublereal *poles, doublereal *difl, doublereal *difr, doublereal *
+	z__, integer *k, doublereal *c__, doublereal *s, doublereal *rwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1,
+	    givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset,
+	    bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    static integer i__, j, m, n;
+    static doublereal dj;
+    static integer nlp1, jcol;
+    static doublereal temp;
+    static integer jrow;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    static doublereal diflj, difrj, dsigj;
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), zdrot_(integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *);
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), xerbla_(char *, integer *);
+    static doublereal dsigjp;
+    extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+	    doublecomplex *, integer *), zlascl_(char *, integer *, integer *,
+	     doublereal *, doublereal *, integer *, integer *, doublecomplex *
+	    , integer *, integer *), zlacpy_(char *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       December 1, 1999
+
+
+    Purpose
+    =======
+
+    ZLALS0 applies back the multiplying factors of either the left or the
+    right singular vector matrix of a diagonal matrix appended by a row
+    to the right hand side matrix B in solving the least squares problem
+    using the divide-and-conquer SVD approach.
+
+    For the left singular vector matrix, three types of orthogonal
+    matrices are involved:
+
+    (1L) Givens rotations: the number of such rotations is GIVPTR; the
+         pairs of columns/rows they were applied to are stored in GIVCOL;
+         and the C- and S-values of these rotations are stored in GIVNUM.
+
+    (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+         row, and for J=2:N, PERM(J)-th row of B is to be moved to the
+         J-th row.
+
+    (3L) The left singular vector matrix of the remaining matrix.
+
+    For the right singular vector matrix, four types of orthogonal
+    matrices are involved:
+
+    (1R) The right singular vector matrix of the remaining matrix.
+
+    (2R) If SQRE = 1, one extra Givens rotation to generate the right
+         null space.
+
+    (3R) The inverse transformation of (2L).
+
+    (4R) The inverse transformation of (1L).
+
+    Arguments
+    =========
+
+    ICOMPQ (input) INTEGER
+           Specifies whether singular vectors are to be computed in
+           factored form:
+           = 0: Left singular vector matrix.
+           = 1: Right singular vector matrix.
+
+    NL     (input) INTEGER
+           The row dimension of the upper block. NL >= 1.
+
+    NR     (input) INTEGER
+           The row dimension of the lower block. NR >= 1.
+
+    SQRE   (input) INTEGER
+           = 0: the lower block is an NR-by-NR square matrix.
+           = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+           The bidiagonal matrix has row dimension N = NL + NR + 1,
+           and column dimension M = N + SQRE.
+
+    NRHS   (input) INTEGER
+           The number of columns of B and BX. NRHS must be at least 1.
+
+    B      (input/output) COMPLEX*16 array, dimension ( LDB, NRHS )
+           On input, B contains the right hand sides of the least
+           squares problem in rows 1 through M. On output, B contains
+           the solution X in rows 1 through N.
+
+    LDB    (input) INTEGER
+           The leading dimension of B. LDB must be at least
+           max(1,MAX( M, N ) ).
+
+    BX     (workspace) COMPLEX*16 array, dimension ( LDBX, NRHS )
+
+    LDBX   (input) INTEGER
+           The leading dimension of BX.
+
+    PERM   (input) INTEGER array, dimension ( N )
+           The permutations (from deflation and sorting) applied
+           to the two blocks.
+
+    GIVPTR (input) INTEGER
+           The number of Givens rotations which took place in this
+           subproblem.
+
+    GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+           Each pair of numbers indicates a pair of rows/columns
+           involved in a Givens rotation.
+
+    LDGCOL (input) INTEGER
+           The leading dimension of GIVCOL, must be at least N.
+
+    GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+           Each number indicates the C or S value used in the
+           corresponding Givens rotation.
+
+    LDGNUM (input) INTEGER
+           The leading dimension of arrays DIFR, POLES and
+           GIVNUM, must be at least K.
+
+    POLES  (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+           On entry, POLES(1:K, 1) contains the new singular
+           values obtained from solving the secular equation, and
+           POLES(1:K, 2) is an array containing the poles in the secular
+           equation.
+
+    DIFL   (input) DOUBLE PRECISION array, dimension ( K ).
+           On entry, DIFL(I) is the distance between I-th updated
+           (undeflated) singular value and the I-th (undeflated) old
+           singular value.
+
+    DIFR   (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ).
+           On entry, DIFR(I, 1) contains the distances between I-th
+           updated (undeflated) singular value and the I+1-th
+           (undeflated) old singular value. And DIFR(I, 2) is the
+           normalizing factor for the I-th right singular vector.
+
+    Z      (input) DOUBLE PRECISION array, dimension ( K )
+           Contain the components of the deflation-adjusted updating row
+           vector.
+
+    K      (input) INTEGER
+           Contains the dimension of the non-deflated matrix,
+           This is the order of the related secular equation. 1 <= K <=N.
+
+    C      (input) DOUBLE PRECISION
+           C contains garbage if SQRE =0 and the C-value of a Givens
+           rotation related to the right null space if SQRE = 1.
+
+    S      (input) DOUBLE PRECISION
+           S contains garbage if SQRE =0 and the S-value of a Givens
+           rotation related to the right null space if SQRE = 1.
+
+    RWORK  (workspace) DOUBLE PRECISION array, dimension
+           ( K*(1+NRHS) + 2*NRHS )
+
+    INFO   (output) INTEGER
+           = 0:  successful exit.
+           < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Ren-Cang Li, Computer Science Division, University of
+         California at Berkeley, USA
+       Osni Marques, LBNL/NERSC, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+/*
+   f2c emulates Fortran 1-based, column-major indexing: each array
+   pointer is shifted back by its (1,1) offset so that e.g.
+   b[i + j*b_dim1] addresses Fortran B(i,j).
+*/
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    bx_dim1 = *ldbx;
+    bx_offset = 1 + bx_dim1 * 1;
+    bx -= bx_offset;
+    --perm;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1 * 1;
+    givcol -= givcol_offset;
+    difr_dim1 = *ldgnum;
+    difr_offset = 1 + difr_dim1 * 1;
+    difr -= difr_offset;
+    poles_dim1 = *ldgnum;
+    poles_offset = 1 + poles_dim1 * 1;
+    poles -= poles_offset;
+    givnum_dim1 = *ldgnum;
+    givnum_offset = 1 + givnum_dim1 * 1;
+    givnum -= givnum_offset;
+    --difl;
+    --z__;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*nl < 1) {
+	*info = -2;
+    } else if (*nr < 1) {
+	*info = -3;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -4;
+    }
+
+    n = *nl + *nr + 1;
+
+    if (*nrhs < 1) {
+	*info = -5;
+    } else if (*ldb < n) {
+	*info = -7;
+    } else if (*ldbx < n) {
+	*info = -9;
+    } else if (*givptr < 0) {
+	*info = -11;
+    } else if (*ldgcol < n) {
+	*info = -13;
+    } else if (*ldgnum < n) {
+	*info = -15;
+    } else if (*k < 1) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLALS0", &i__1);
+	return 0;
+    }
+
+    m = n + *sqre;
+    nlp1 = *nl + 1;
+
+    if (*icompq == 0) {
+
+/*
+          Apply back orthogonal transformations from the left.
+
+          Step (1L): apply back the Givens rotations performed.
+*/
+
+	i__1 = *givptr;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/*         Rotate rows GIVCOL(i,2) and GIVCOL(i,1) of B by (c,s) =
+           (GIVNUM(i,2), GIVNUM(i,1)). */
+	    zdrot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+		    ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+		    i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1])
+		    ;
+/* L10: */
+	}
+
+/*        Step (2L): permute rows of B. */
+
+	zcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+	i__1 = n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    zcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+		    ldbx);
+/* L20: */
+	}
+
+/*
+          Step (3L): apply the inverse of the left singular vector
+          matrix to BX.
+*/
+
+	if (*k == 1) {
+/*         1x1 secular system: the vector is +/-1; flip the sign of
+           row 1 when Z(1) < 0 (c_b1294 is presumably -1.0 — confirm). */
+	    zcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+	    if (z__[1] < 0.) {
+		zdscal_(nrhs, &c_b1294, &b[b_offset], ldb);
+	    }
+	} else {
+	    i__1 = *k;
+	    for (j = 1; j <= i__1; ++j) {
+/*             Build in RWORK(1:K) the J-th row of the inverse left
+               singular vector matrix, using dlamc3_ to force rounding
+               of sums of nearly-cancelling quantities. */
+		diflj = difl[j];
+		dj = poles[j + poles_dim1];
+		dsigj = -poles[j + ((poles_dim1) << (1))];
+		if (j < *k) {
+		    difrj = -difr[j + difr_dim1];
+		    dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
+		}
+		if (z__[j] == 0. || poles[j + ((poles_dim1) << (1))] == 0.) {
+		    rwork[j] = 0.;
+		} else {
+		    rwork[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
+			    diflj / (poles[j + ((poles_dim1) << (1))] + dj);
+		}
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
+			    == 0.) {
+			rwork[i__] = 0.;
+		    } else {
+			rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+				i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
+				(1))], &dsigj) - diflj) / (poles[i__ + ((
+				poles_dim1) << (1))] + dj);
+		    }
+/* L30: */
+		}
+		i__2 = *k;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
+			    == 0.) {
+			rwork[i__] = 0.;
+		    } else {
+			rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+				i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
+				(1))], &dsigjp) + difrj) / (poles[i__ + ((
+				poles_dim1) << (1))] + dj);
+		    }
+/* L40: */
+		}
+		rwork[1] = -1.;
+		temp = dnrm2_(k, &rwork[1], &c__1);
+
+/*
+                Since B and BX are complex, the following call to DGEMV
+                is performed in two steps (real and imaginary parts).
+
+                CALL DGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO,
+      $                     B( J, 1 ), LDB )
+
+                RWORK layout: [1..K]      the row vector built above,
+                              [K+1..K+2*NRHS]     real/imag results,
+                              [K+2*NRHS+1..]      K-by-NRHS copy of
+                                                  Re(BX) or Im(BX).
+*/
+
+		i__ = *k + ((*nrhs) << (1));
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = *k;
+		    for (jrow = 1; jrow <= i__3; ++jrow) {
+			++i__;
+			i__4 = jrow + jcol * bx_dim1;
+			rwork[i__] = bx[i__4].r;
+/* L50: */
+		    }
+/* L60: */
+		}
+		dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1)
+			)], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1], &
+			c__1);
+		i__ = *k + ((*nrhs) << (1));
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = *k;
+		    for (jrow = 1; jrow <= i__3; ++jrow) {
+			++i__;
+			rwork[i__] = d_imag(&bx[jrow + jcol * bx_dim1]);
+/* L70: */
+		    }
+/* L80: */
+		}
+		dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1)
+			)], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1 + *
+			nrhs], &c__1);
+/*             Recombine the two real GEMV results into row J of B. */
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = j + jcol * b_dim1;
+		    i__4 = jcol + *k;
+		    i__5 = jcol + *k + *nrhs;
+		    z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+		    b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L90: */
+		}
+/*             Normalize row J by the 2-norm computed above. */
+		zlascl_("G", &c__0, &c__0, &temp, &c_b1015, &c__1, nrhs, &b[j
+			+ b_dim1], ldb, info);
+/* L100: */
+	    }
+	}
+
+/*        Move the deflated rows of BX to B also. */
+
+	if (*k < max(m,n)) {
+	    i__1 = n - *k;
+	    zlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+		    + b_dim1], ldb);
+	}
+    } else {
+
+/*
+          Apply back the right orthogonal transformations.
+
+          Step (1R): apply back the new right singular vector matrix
+          to B.
+*/
+
+	if (*k == 1) {
+	    zcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+	} else {
+	    i__1 = *k;
+	    for (j = 1; j <= i__1; ++j) {
+/*             Build in RWORK(1:K) the J-th column of the right
+               singular vector matrix (normalized via DIFR(:,2)). */
+		dsigj = poles[j + ((poles_dim1) << (1))];
+		if (z__[j] == 0.) {
+		    rwork[j] = 0.;
+		} else {
+		    rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+			    poles_dim1]) / difr[j + ((difr_dim1) << (1))];
+		}
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (z__[j] == 0.) {
+			rwork[i__] = 0.;
+		    } else {
+			d__1 = -poles[i__ + 1 + ((poles_dim1) << (1))];
+			rwork[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[
+				i__ + difr_dim1]) / (dsigj + poles[i__ +
+				poles_dim1]) / difr[i__ + ((difr_dim1) << (1))
+				];
+		    }
+/* L110: */
+		}
+		i__2 = *k;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    if (z__[j] == 0.) {
+			rwork[i__] = 0.;
+		    } else {
+			d__1 = -poles[i__ + ((poles_dim1) << (1))];
+			rwork[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[
+				i__]) / (dsigj + poles[i__ + poles_dim1]) /
+				difr[i__ + ((difr_dim1) << (1))];
+		    }
+/* L120: */
+		}
+
+/*
+                Since B and BX are complex, the following call to DGEMV
+                is performed in two steps (real and imaginary parts).
+
+                CALL DGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO,
+      $                     BX( J, 1 ), LDBX )
+
+                Same RWORK packing as in the ICOMPQ = 0 branch above.
+*/
+
+		i__ = *k + ((*nrhs) << (1));
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = *k;
+		    for (jrow = 1; jrow <= i__3; ++jrow) {
+			++i__;
+			i__4 = jrow + jcol * b_dim1;
+			rwork[i__] = b[i__4].r;
+/* L130: */
+		    }
+/* L140: */
+		}
+		dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1)
+			)], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1], &
+			c__1);
+		i__ = *k + ((*nrhs) << (1));
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = *k;
+		    for (jrow = 1; jrow <= i__3; ++jrow) {
+			++i__;
+			rwork[i__] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L150: */
+		    }
+/* L160: */
+		}
+		dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1)
+			)], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1 + *
+			nrhs], &c__1);
+/*             Recombine the real and imaginary parts into row J of BX. */
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = j + jcol * bx_dim1;
+		    i__4 = jcol + *k;
+		    i__5 = jcol + *k + *nrhs;
+		    z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+		    bx[i__3].r = z__1.r, bx[i__3].i = z__1.i;
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+
+/*
+          Step (2R): if SQRE = 1, apply back the rotation that is
+          related to the right null space of the subproblem.
+*/
+
+	if (*sqre == 1) {
+	    zcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+	    zdrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+		    s);
+	}
+	if (*k < max(m,n)) {
+	    i__1 = n - *k;
+	    zlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+		    bx_dim1], ldbx);
+	}
+
+/*        Step (3R): permute rows of B. */
+
+	zcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+	if (*sqre == 1) {
+	    zcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+	}
+	i__1 = n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    zcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+		    ldb);
+/* L190: */
+	}
+
+/*        Step (4R): apply back the Givens rotations performed. */
+
+/*     Inverse of (1L): same rotations in reverse order with S negated. */
+	for (i__ = *givptr; i__ >= 1; --i__) {
+	    d__1 = -givnum[i__ + givnum_dim1];
+	    zdrot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+		    ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+		    i__ + ((givnum_dim1) << (1))], &d__1);
+/* L200: */
+	}
+    }
+
+    return 0;
+
+/*     End of ZLALS0 */
+
+} /* zlals0_ */
+
+/*
+   NOTE(review): f2c-generated translation of LAPACK ZLALSA (complex*16).
+   Do not hand-edit the numerics — regenerate from the Fortran source.
+   As elsewhere in this file, `((x) << (1))` is f2c's rendering of `2*x`
+   (column-pair indexing into the per-level 2-column arrays), and the
+   external constants c_b1015, c_b324, c__2 are from the f2c constant
+   pool — presumably 1.0, 0.0 and 2; confirm against their declarations.
+*/
+/* Subroutine */ int zlalsa_(integer *icompq, integer *smlsiz, integer *n,
+	integer *nrhs, doublecomplex *b, integer *ldb, doublecomplex *bx,
+	integer *ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *
+	k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal *
+	poles, integer *givptr, integer *givcol, integer *ldgcol, integer *
+	perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *
+	rwork, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+	    difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+	    poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+	    z_dim1, z_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1,
+	    i__2, i__3, i__4, i__5, i__6;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+	    ndb1, nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    static integer jreal, inode, ndiml, ndimr;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlals0_(integer *, integer *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	     doublereal *, integer *), dlasdt_(integer *, integer *, integer *
+	    , integer *, integer *, integer *, integer *), xerbla_(char *,
+	    integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZLALSA is an itermediate step in solving the least squares problem
+    by computing the SVD of the coefficient matrix in compact form (The
+    singular vectors are computed as products of simple orthorgonal
+    matrices.).
+
+    If ICOMPQ = 0, ZLALSA applies the inverse of the left singular vector
+    matrix of an upper bidiagonal matrix to the right hand side; and if
+    ICOMPQ = 1, ZLALSA applies the right singular vector matrix to the
+    right hand side. The singular vector matrices were generated in
+    compact form by ZLALSA.
+
+    Arguments
+    =========
+
+    ICOMPQ (input) INTEGER
+           Specifies whether the left or the right singular vector
+           matrix is involved.
+           = 0: Left singular vector matrix
+           = 1: Right singular vector matrix
+
+    SMLSIZ (input) INTEGER
+           The maximum size of the subproblems at the bottom of the
+           computation tree.
+
+    N      (input) INTEGER
+           The row and column dimensions of the upper bidiagonal matrix.
+
+    NRHS   (input) INTEGER
+           The number of columns of B and BX. NRHS must be at least 1.
+
+    B      (input) COMPLEX*16 array, dimension ( LDB, NRHS )
+           On input, B contains the right hand sides of the least
+           squares problem in rows 1 through M. On output, B contains
+           the solution X in rows 1 through N.
+
+    LDB    (input) INTEGER
+           The leading dimension of B in the calling subprogram.
+           LDB must be at least max(1,MAX( M, N ) ).
+
+    BX     (output) COMPLEX*16 array, dimension ( LDBX, NRHS )
+           On exit, the result of applying the left or right singular
+           vector matrix to B.
+
+    LDBX   (input) INTEGER
+           The leading dimension of BX.
+
+    U      (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ).
+           On entry, U contains the left singular vector matrices of all
+           subproblems at the bottom level.
+
+    LDU    (input) INTEGER, LDU = > N.
+           The leading dimension of arrays U, VT, DIFL, DIFR,
+           POLES, GIVNUM, and Z.
+
+    VT     (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ).
+           On entry, VT' contains the right singular vector matrices of
+           all subproblems at the bottom level.
+
+    K      (input) INTEGER array, dimension ( N ).
+
+    DIFL   (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
+           where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+    DIFR   (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+           On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record
+           distances between singular values on the I-th level and
+           singular values on the (I -1)-th level, and DIFR(*, 2 * I)
+           record the normalizing factors of the right singular vectors
+           matrices of subproblems on I-th level.
+
+    Z      (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
+           On entry, Z(1, I) contains the components of the deflation-
+           adjusted updating row vector for subproblems on the I-th
+           level.
+
+    POLES  (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+           On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+           singular values involved in the secular equations on the I-th
+           level.
+
+    GIVPTR (input) INTEGER array, dimension ( N ).
+           On entry, GIVPTR( I ) records the number of Givens
+           rotations performed on the I-th problem on the computation
+           tree.
+
+    GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+           On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+           locations of Givens rotations performed on the I-th level on
+           the computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL = > N.
+           The leading dimension of arrays GIVCOL and PERM.
+
+    PERM   (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+           On entry, PERM(*, I) records permutations done on the I-th
+           level of the computation tree.
+
+    GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+           On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+           values of Givens rotations performed on the I-th level on the
+           computation tree.
+
+    C      (input) DOUBLE PRECISION array, dimension ( N ).
+           On entry, if the I-th subproblem is not square,
+           C( I ) contains the C-value of a Givens rotation related to
+           the right null space of the I-th subproblem.
+
+    S      (input) DOUBLE PRECISION array, dimension ( N ).
+           On entry, if the I-th subproblem is not square,
+           S( I ) contains the S-value of a Givens rotation related to
+           the right null space of the I-th subproblem.
+
+    RWORK  (workspace) DOUBLE PRECISION array, dimension at least
+           max ( N, (SMLSZ+1)*NRHS*3 ).
+
+    IWORK  (workspace) INTEGER array.
+           The dimension must be at least 3 * N
+
+    INFO   (output) INTEGER
+            = 0:  successful exit.
+            < 0:  if INFO = -i, the i-th argument had an illegal value.
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Ren-Cang Li, Computer Science Division, University of
+         California at Berkeley, USA
+       Osni Marques, LBNL/NERSC, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+/*
+   f2c 1-based, column-major emulation: each pointer is shifted by its
+   Fortran (1,1) offset so a[i + j*a_dim1] addresses A(i,j).
+*/
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    bx_dim1 = *ldbx;
+    bx_offset = 1 + bx_dim1 * 1;
+    bx -= bx_offset;
+    givnum_dim1 = *ldu;
+    givnum_offset = 1 + givnum_dim1 * 1;
+    givnum -= givnum_offset;
+    poles_dim1 = *ldu;
+    poles_offset = 1 + poles_dim1 * 1;
+    poles -= poles_offset;
+    z_dim1 = *ldu;
+    z_offset = 1 + z_dim1 * 1;
+    z__ -= z_offset;
+    difr_dim1 = *ldu;
+    difr_offset = 1 + difr_dim1 * 1;
+    difr -= difr_offset;
+    difl_dim1 = *ldu;
+    difl_offset = 1 + difl_dim1 * 1;
+    difl -= difl_offset;
+    vt_dim1 = *ldu;
+    vt_offset = 1 + vt_dim1 * 1;
+    vt -= vt_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1 * 1;
+    u -= u_offset;
+    --k;
+    --givptr;
+    perm_dim1 = *ldgcol;
+    perm_offset = 1 + perm_dim1 * 1;
+    perm -= perm_offset;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1 * 1;
+    givcol -= givcol_offset;
+    --c__;
+    --s;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*smlsiz < 3) {
+	*info = -2;
+    } else if (*n < *smlsiz) {
+	*info = -3;
+    } else if (*nrhs < 1) {
+	*info = -4;
+    } else if (*ldb < *n) {
+	*info = -6;
+    } else if (*ldbx < *n) {
+	*info = -8;
+    } else if (*ldu < *n) {
+	*info = -10;
+    } else if (*ldgcol < *n) {
+	*info = -19;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLALSA", &i__1);
+	return 0;
+    }
+
+/*     Book-keeping and  setting up the computation tree. */
+
+/*
+   IWORK is carved into three length-N slices: node centers (INODE),
+   left-subproblem sizes (NDIML) and right-subproblem sizes (NDIMR),
+   filled by DLASDT along with the tree depth NLVL and node count ND.
+*/
+    inode = 1;
+    ndiml = inode + *n;
+    ndimr = ndiml + *n;
+
+    dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+	    smlsiz);
+
+/*
+       The following code applies back the left singular vector factors.
+       For applying back the right singular vector factors, go to 170.
+*/
+
+    if (*icompq == 1) {
+	goto L170;
+    }
+
+/*
+       The nodes on the bottom level of the tree were solved
+       by DLASDQ. The corresponding left and right singular vector
+       matrices are in explicit form. First apply back the left
+       singular vector matrices.
+*/
+
+    ndb1 = (nd + 1) / 2;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+          IC : center row of each node
+          NL : number of rows of left subproblem
+          NR : number of rows of right subproblem
+          NLF: starting row of the left subproblem
+          NRF: starting row of the right subproblem
+*/
+
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nr = iwork[ndimr + i1];
+	nlf = ic - nl;
+	nrf = ic + 1;
+
+/*
+          Since B and BX are complex, the following call to DGEMM
+          is performed in two steps (real and imaginary parts).
+
+          CALL DGEMM( 'T', 'N', NL, NRHS, NL, ONE, U( NLF, 1 ), LDU,
+      $               B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX )
+
+          RWORK layout for each two-step GEMM below:
+          [1 .. nl*NRHS]             real-part product, then imag-part,
+          [nl*NRHS+1 .. 2*nl*NRHS]   imag-part product,
+          [2*nl*NRHS+1 .. ]          packed copy of Re(B) or Im(B).
+*/
+
+	j = (nl * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nlf + nl - 1;
+	    for (jrow = nlf; jrow <= i__3; ++jrow) {
+		++j;
+		i__4 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__4].r;
+/* L10: */
+	    }
+/* L20: */
+	}
+	dgemm_("T", "N", &nl, nrhs, &nl, &c_b1015, &u[nlf + u_dim1], ldu, &
+		rwork[((nl * *nrhs) << (1)) + 1], &nl, &c_b324, &rwork[1], &
+		nl);
+	j = (nl * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nlf + nl - 1;
+	    for (jrow = nlf; jrow <= i__3; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L30: */
+	    }
+/* L40: */
+	}
+	dgemm_("T", "N", &nl, nrhs, &nl, &c_b1015, &u[nlf + u_dim1], ldu, &
+		rwork[((nl * *nrhs) << (1)) + 1], &nl, &c_b324, &rwork[nl * *
+		nrhs + 1], &nl);
+/*     Recombine the two real products into the complex block of BX. */
+	jreal = 0;
+	jimag = nl * *nrhs;
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nlf + nl - 1;
+	    for (jrow = nlf; jrow <= i__3; ++jrow) {
+		++jreal;
+		++jimag;
+		i__4 = jrow + jcol * bx_dim1;
+		i__5 = jreal;
+		i__6 = jimag;
+		z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+		bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L50: */
+	    }
+/* L60: */
+	}
+
+/*
+          Since B and BX are complex, the following call to DGEMM
+          is performed in two steps (real and imaginary parts).
+
+          CALL DGEMM( 'T', 'N', NR, NRHS, NR, ONE, U( NRF, 1 ), LDU,
+      $               B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX )
+*/
+
+	j = (nr * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nrf + nr - 1;
+	    for (jrow = nrf; jrow <= i__3; ++jrow) {
+		++j;
+		i__4 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__4].r;
+/* L70: */
+	    }
+/* L80: */
+	}
+	dgemm_("T", "N", &nr, nrhs, &nr, &c_b1015, &u[nrf + u_dim1], ldu, &
+		rwork[((nr * *nrhs) << (1)) + 1], &nr, &c_b324, &rwork[1], &
+		nr);
+	j = (nr * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nrf + nr - 1;
+	    for (jrow = nrf; jrow <= i__3; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L90: */
+	    }
+/* L100: */
+	}
+	dgemm_("T", "N", &nr, nrhs, &nr, &c_b1015, &u[nrf + u_dim1], ldu, &
+		rwork[((nr * *nrhs) << (1)) + 1], &nr, &c_b324, &rwork[nr * *
+		nrhs + 1], &nr);
+	jreal = 0;
+	jimag = nr * *nrhs;
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nrf + nr - 1;
+	    for (jrow = nrf; jrow <= i__3; ++jrow) {
+		++jreal;
+		++jimag;
+		i__4 = jrow + jcol * bx_dim1;
+		i__5 = jreal;
+		i__6 = jimag;
+		z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+		bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L110: */
+	    }
+/* L120: */
+	}
+
+/* L130: */
+    }
+
+/*
+       Next copy the rows of B that correspond to unchanged rows
+       in the bidiagonal matrix to BX.
+*/
+
+    i__1 = nd;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ic = iwork[inode + i__ - 1];
+	zcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L140: */
+    }
+
+/*
+       Finally go through the left singular vector matrices of all
+       the other subproblems bottom-up on the tree.
+*/
+
+    j = pow_ii(&c__2, &nlvl);
+    sqre = 0;
+
+    for (lvl = nlvl; lvl >= 1; --lvl) {
+	lvl2 = ((lvl) << (1)) - 1;
+
+/*
+          find the first node LF and last node LL on
+          the current level LVL
+*/
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__1 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__1);
+	    ll = ((lf) << (1)) - 1;
+	}
+	i__1 = ll;
+	for (i__ = lf; i__ <= i__1; ++i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    nrf = ic + 1;
+/*         J counts nodes down from 2^NLVL; indexes the per-node data. */
+	    --j;
+	    zlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+		    b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+		    givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+		    givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+		     poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+		    lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+		    j], &s[j], &rwork[1], info);
+/* L150: */
+	}
+/* L160: */
+    }
+    goto L330;
+
+/*     ICOMPQ = 1: applying back the right singular vector factors. */
+
+L170:
+
+/*
+       First now go through the right singular vector matrices of all
+       the tree nodes top-down.
+*/
+
+    j = 0;
+    i__1 = nlvl;
+    for (lvl = 1; lvl <= i__1; ++lvl) {
+	lvl2 = ((lvl) << (1)) - 1;
+
+/*
+          Find the first node LF and last node LL on
+          the current level LVL.
+*/
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__2 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__2);
+	    ll = ((lf) << (1)) - 1;
+	}
+	i__2 = lf;
+	for (i__ = ll; i__ >= i__2; --i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    nrf = ic + 1;
+/*         Only the last node on each level is square (SQRE = 0). */
+	    if (i__ == ll) {
+		sqre = 0;
+	    } else {
+		sqre = 1;
+	    }
+	    ++j;
+	    zlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+		    nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+		    givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+		    givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+		     poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+		    lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+		    j], &s[j], &rwork[1], info);
+/* L180: */
+	}
+/* L190: */
+    }
+
+/*
+       The nodes on the bottom level of the tree were solved
+       by DLASDQ. The corresponding right singular vector
+       matrices are in explicit form. Apply them back.
+*/
+
+    ndb1 = (nd + 1) / 2;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nr = iwork[ndimr + i1];
+	nlp1 = nl + 1;
+/*     The last node's right block has no extra column (NRP1 = NR). */
+	if (i__ == nd) {
+	    nrp1 = nr;
+	} else {
+	    nrp1 = nr + 1;
+	}
+	nlf = ic - nl;
+	nrf = ic + 1;
+
+/*
+          Since B and BX are complex, the following call to DGEMM is
+          performed in two steps (real and imaginary parts).
+
+          CALL DGEMM( 'T', 'N', NLP1, NRHS, NLP1, ONE, VT( NLF, 1 ), LDU,
+      $               B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX )
+
+          Same RWORK packing as in the left-factor loop above, with
+          NLP1/NRP1 in place of NL/NR.
+*/
+
+	j = (nlp1 * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nlf + nlp1 - 1;
+	    for (jrow = nlf; jrow <= i__3; ++jrow) {
+		++j;
+		i__4 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__4].r;
+/* L200: */
+	    }
+/* L210: */
+	}
+	dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1015, &vt[nlf + vt_dim1],
+		ldu, &rwork[((nlp1 * *nrhs) << (1)) + 1], &nlp1, &c_b324, &
+		rwork[1], &nlp1);
+	j = (nlp1 * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nlf + nlp1 - 1;
+	    for (jrow = nlf; jrow <= i__3; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L220: */
+	    }
+/* L230: */
+	}
+	dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1015, &vt[nlf + vt_dim1],
+		ldu, &rwork[((nlp1 * *nrhs) << (1)) + 1], &nlp1, &c_b324, &
+		rwork[nlp1 * *nrhs + 1], &nlp1);
+	jreal = 0;
+	jimag = nlp1 * *nrhs;
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nlf + nlp1 - 1;
+	    for (jrow = nlf; jrow <= i__3; ++jrow) {
+		++jreal;
+		++jimag;
+		i__4 = jrow + jcol * bx_dim1;
+		i__5 = jreal;
+		i__6 = jimag;
+		z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+		bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L240: */
+	    }
+/* L250: */
+	}
+
+/*
+          Since B and BX are complex, the following call to DGEMM is
+          performed in two steps (real and imaginary parts).
+
+          CALL DGEMM( 'T', 'N', NRP1, NRHS, NRP1, ONE, VT( NRF, 1 ), LDU,
+      $               B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX )
+*/
+
+	j = (nrp1 * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nrf + nrp1 - 1;
+	    for (jrow = nrf; jrow <= i__3; ++jrow) {
+		++j;
+		i__4 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__4].r;
+/* L260: */
+	    }
+/* L270: */
+	}
+	dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1015, &vt[nrf + vt_dim1],
+		ldu, &rwork[((nrp1 * *nrhs) << (1)) + 1], &nrp1, &c_b324, &
+		rwork[1], &nrp1);
+	j = (nrp1 * *nrhs) << (1);
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nrf + nrp1 - 1;
+	    for (jrow = nrf; jrow <= i__3; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L280: */
+	    }
+/* L290: */
+	}
+	dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1015, &vt[nrf + vt_dim1],
+		ldu, &rwork[((nrp1 * *nrhs) << (1)) + 1], &nrp1, &c_b324, &
+		rwork[nrp1 * *nrhs + 1], &nrp1);
+	jreal = 0;
+	jimag = nrp1 * *nrhs;
+	i__2 = *nrhs;
+	for (jcol = 1; jcol <= i__2; ++jcol) {
+	    i__3 = nrf + nrp1 - 1;
+	    for (jrow = nrf; jrow <= i__3; ++jrow) {
+		++jreal;
+		++jimag;
+		i__4 = jrow + jcol * bx_dim1;
+		i__5 = jreal;
+		i__6 = jimag;
+		z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+		bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L300: */
+	    }
+/* L310: */
+	}
+
+/* L320: */
+    }
+
+L330:
+
+    return 0;
+
+/*     End of ZLALSA */
+
+} /* zlalsa_ */
+
+/* Subroutine */ int zlalsd_(char *uplo, integer *smlsiz, integer *n, integer
+	*nrhs, doublereal *d__, doublereal *e, doublecomplex *b, integer *ldb,
+	doublereal *rcond, integer *rank, doublecomplex *work, doublereal *
+	rwork, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *), log(doublereal), d_sign(doublereal *,
+	    doublereal *);
+
+    /* Local variables */
+    static integer c__, i__, j, k;
+    static doublereal r__;
+    static integer s, u, z__;
+    static doublereal cs;
+    static integer bx;
+    static doublereal sn;
+    static integer st, vt, nm1, st1;
+    static doublereal eps;
+    static integer iwk;
+    static doublereal tol;
+    static integer difl, difr, jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow,
+	     irwu, jimag;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    static integer jreal, irwib, poles, sizei, irwrb, nsize;
+    extern /* Subroutine */ int zdrot_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublereal *), zcopy_(
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+	    ;
+    static integer irwvt, icmpq1, icmpq2;
+
+    extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	     doublereal *, integer *, integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	     integer *), dlascl_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlaset_(char *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *), xerbla_(char *, integer *);
+    static integer givcol;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int zlalsa_(integer *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+	     doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *,
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *), zlascl_(char *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, integer *,
+	    doublecomplex *, integer *, integer *), dlasrt_(char *,
+	    integer *, doublereal *, integer *), zlacpy_(char *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	     integer *), zlaset_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+    static doublereal orgnrm;
+    static integer givnum, givptr, nrwork, irwwrk, smlszp;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1999
+
+
+    Purpose
+    =======
+
+    ZLALSD uses the singular value decomposition of A to solve the least
+    squares problem of finding X to minimize the Euclidean norm of each
+    column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+    are N-by-NRHS. The solution X overwrites B.
+
+    The singular values of A smaller than RCOND times the largest
+    singular value are treated as zero in solving the least squares
+    problem; in this case a minimum norm solution is returned.
+    The actual singular values are returned in D in ascending order.
+
+    This code makes very mild assumptions about floating point
+    arithmetic. It will work on machines with a guard digit in
+    add/subtract, or on those binary machines without guard digits
+    which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+    It could conceivably fail on hexadecimal or decimal machines
+    without guard digits, but we know of none.
+
+    Arguments
+    =========
+
+    UPLO   (input) CHARACTER*1
+           = 'U': D and E define an upper bidiagonal matrix.
+           = 'L': D and E define a  lower bidiagonal matrix.
+
+    SMLSIZ (input) INTEGER
+           The maximum size of the subproblems at the bottom of the
+           computation tree.
+
+    N      (input) INTEGER
+           The dimension of the  bidiagonal matrix.  N >= 0.
+
+    NRHS   (input) INTEGER
+           The number of columns of B. NRHS must be at least 1.
+
+    D      (input/output) DOUBLE PRECISION array, dimension (N)
+           On entry D contains the main diagonal of the bidiagonal
+           matrix. On exit, if INFO = 0, D contains its singular values.
+
+    E      (input) DOUBLE PRECISION array, dimension (N-1)
+           Contains the super-diagonal entries of the bidiagonal matrix.
+           On exit, E has been destroyed.
+
+    B      (input/output) COMPLEX*16 array, dimension (LDB,NRHS)
+           On input, B contains the right hand sides of the least
+           squares problem. On output, B contains the solution X.
+
+    LDB    (input) INTEGER
+           The leading dimension of B in the calling subprogram.
+           LDB must be at least max(1,N).
+
+    RCOND  (input) DOUBLE PRECISION
+           The singular values of A less than or equal to RCOND times
+           the largest singular value are treated as zero in solving
+           the least squares problem. If RCOND is negative,
+           machine precision is used instead.
+           For example, if diag(S)*X=B were the least squares problem,
+           where diag(S) is a diagonal matrix of singular values, the
+           solution would be X(i) = B(i) / S(i) if S(i) is greater than
+           RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+           RCOND*max(S).
+
+    RANK   (output) INTEGER
+           The number of singular values of A greater than RCOND times
+           the largest singular value.
+
+    WORK   (workspace) COMPLEX*16 array, dimension at least
+           (N * NRHS).
+
+    RWORK  (workspace) DOUBLE PRECISION array, dimension at least
+           (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + (SMLSIZ+1)**2),
+           where
+           NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+
+    IWORK  (workspace) INTEGER array, dimension at least
+           (3*N*NLVL + 11*N).
+
+    INFO   (output) INTEGER
+           = 0:  successful exit.
+           < 0:  if INFO = -i, the i-th argument had an illegal value.
+           > 0:  The algorithm failed to compute a singular value while
+                 working on the submatrix lying in rows and columns
+                 INFO/(N+1) through MOD(INFO,N+1).
+
+    Further Details
+    ===============
+
+    Based on contributions by
+       Ming Gu and Ren-Cang Li, Computer Science Division, University of
+         California at Berkeley, USA
+       Osni Marques, LBNL/NERSC, USA
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1 * 1;
+    b -= b_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+/*  NOTE(review): UPLO is not range-checked here; this mirrors the
+    reference Fortran ZLALSD, which also validates only N, NRHS and
+    LDB -- confirm against the LAPACK source if tightening checks. */
+    if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 1) {
+	*info = -4;
+    } else if (*ldb < 1 || *ldb < *n) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLALSD", &i__1);
+	return 0;
+    }
+
+/*  EPSILON is the double-precision machine epsilon supplied by the
+    f2c/CLAPACK environment. */
+    eps = EPSILON;
+
+/* Set up the tolerance. */
+
+    if (*rcond <= 0. || *rcond >= 1.) {
+	*rcond = eps;
+    }
+
+    *rank = 0;
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    } else if (*n == 1) {
+	if (d__[1] == 0.) {
+	    zlaset_("A", &c__1, nrhs, &c_b59, &c_b59, &b[b_offset], ldb);
+	} else {
+	    *rank = 1;
+	    zlascl_("G", &c__0, &c__0, &d__[1], &c_b1015, &c__1, nrhs, &b[
+		    b_offset], ldb, info);
+	    d__[1] = abs(d__[1]);
+	}
+	return 0;
+    }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+    if (*(unsigned char *)uplo == 'L') {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (*nrhs == 1) {
+		zdrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+			c__1, &cs, &sn);
+	    } else {
+/*              Save the rotation (cs, sn) pairs in RWORK so they can be
+                applied to all right-hand-side columns below. */
+		rwork[((i__) << (1)) - 1] = cs;
+		rwork[i__ * 2] = sn;
+	    }
+/* L10: */
+	}
+	if (*nrhs > 1) {
+	    i__1 = *nrhs;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = *n - 1;
+		for (j = 1; j <= i__2; ++j) {
+		    cs = rwork[((j) << (1)) - 1];
+		    sn = rwork[j * 2];
+		    zdrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__
+			    * b_dim1], &c__1, &cs, &sn);
+/* L20: */
+		}
+/* L30: */
+	    }
+	}
+    }
+
+/* Scale. */
+
+    nm1 = *n - 1;
+    orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (orgnrm == 0.) {
+	zlaset_("A", n, nrhs, &c_b59, &c_b59, &b[b_offset], ldb);
+	return 0;
+    }
+
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, n, &c__1, &d__[1], n, info);
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, &nm1, &c__1, &e[1], &nm1,
+	    info);
+
+/*
+       If N is smaller than the minimum divide size SMLSIZ, then solve
+       the problem with another solver.
+*/
+
+    if (*n <= *smlsiz) {
+/*      Partition RWORK: U (n*n), VT (n*n), then scratch used both as
+        DLASDQ workspace and as real/imaginary staging buffers. */
+	irwu = 1;
+	irwvt = irwu + *n * *n;
+	irwwrk = irwvt + *n * *n;
+	irwrb = irwwrk;
+	irwib = irwrb + *n * *nrhs;
+	irwb = irwib + *n * *nrhs;
+	dlaset_("A", n, n, &c_b324, &c_b1015, &rwork[irwu], n);
+	dlaset_("A", n, n, &c_b324, &c_b1015, &rwork[irwvt], n);
+	dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n,
+		&rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info);
+	if (*info != 0) {
+	    return 0;
+	}
+
+/*
+          In the real version, B is passed to DLASDQ and multiplied
+          internally by Q'. Here B is complex and that product is
+          computed below in two steps (real and imaginary parts).
+*/
+
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		i__3 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__3].r;
+/* L40: */
+	    }
+/* L50: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwu], n, &rwork[irwb],
+		n, &c_b324, &rwork[irwrb], n);
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwu], n, &rwork[irwb],
+		n, &c_b324, &rwork[irwib], n);
+	jreal = irwrb - 1;
+	jimag = irwib - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++jreal;
+		++jimag;
+		i__3 = jrow + jcol * b_dim1;
+		i__4 = jreal;
+		i__5 = jimag;
+		z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+		b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L80: */
+	    }
+/* L90: */
+	}
+
+/*      Apply the (thresholded) inverse singular values to Q'*B. */
+	tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (d__[i__] <= tol) {
+		zlaset_("A", &c__1, nrhs, &c_b59, &c_b59, &b[i__ + b_dim1],
+			ldb);
+	    } else {
+		zlascl_("G", &c__0, &c__0, &d__[i__], &c_b1015, &c__1, nrhs, &
+			b[i__ + b_dim1], ldb, info);
+		++(*rank);
+	    }
+/* L100: */
+	}
+
+/*
+          Since B is complex, the following call to DGEMM is performed
+          in two steps (real and imaginary parts). That is for V * B
+          (in the real version of the code V' is stored in WORK).
+
+          CALL DGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO,
+      $               WORK( NWORK ), N )
+*/
+
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		i__3 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__3].r;
+/* L110: */
+	    }
+/* L120: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwvt], n, &rwork[irwb],
+		n, &c_b324, &rwork[irwrb], n);
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L130: */
+	    }
+/* L140: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwvt], n, &rwork[irwb],
+		n, &c_b324, &rwork[irwib], n);
+	jreal = irwrb - 1;
+	jimag = irwib - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++jreal;
+		++jimag;
+		i__3 = jrow + jcol * b_dim1;
+		i__4 = jreal;
+		i__5 = jimag;
+		z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+		b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L150: */
+	    }
+/* L160: */
+	}
+
+/* Unscale. */
+
+	dlascl_("G", &c__0, &c__0, &c_b1015, &orgnrm, n, &c__1, &d__[1], n,
+		info);
+	dlasrt_("D", n, &d__[1], info);
+	zlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, n, nrhs, &b[b_offset],
+		ldb, info);
+
+	return 0;
+    }
+
+/* Book-keeping and setting up some constants. */
+
+/*  Depth of the divide-and-conquer tree, roughly
+    log2(n/(smlsiz+1)) + 1. */
+    nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) /
+	    log(2.)) + 1;
+
+    smlszp = *smlsiz + 1;
+
+/*  Partition RWORK and IWORK into the named sections consumed by
+    DLASDA/ZLALSA below. */
+    u = 1;
+    vt = *smlsiz * *n + 1;
+    difl = vt + smlszp * *n;
+    difr = difl + nlvl * *n;
+    z__ = difr + ((nlvl * *n) << (1));
+    c__ = z__ + nlvl * *n;
+    s = c__ + *n;
+    poles = s + *n;
+    givnum = poles + ((nlvl) << (1)) * *n;
+    nrwork = givnum + ((nlvl) << (1)) * *n;
+    bx = 1;
+
+    irwrb = nrwork;
+    irwib = irwrb + *smlsiz * *nrhs;
+    irwb = irwib + *smlsiz * *nrhs;
+
+    sizei = *n + 1;
+    k = sizei + *n;
+    givptr = k + *n;
+    perm = givptr + *n;
+    givcol = perm + nlvl * *n;
+    iwk = givcol + ((nlvl * *n) << (1));
+
+    st = 1;
+    sqre = 0;
+    icmpq1 = 1;
+    icmpq2 = 0;
+    nsub = 0;
+
+/*  Replace diagonal entries smaller than EPS in magnitude by EPS,
+    keeping the original sign. */
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) < eps) {
+	    d__[i__] = d_sign(&eps, &d__[i__]);
+	}
+/* L170: */
+    }
+
+/*  Scan E for negligible entries; each run between them is treated as
+    an independent subproblem. */
+    i__1 = nm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+	    ++nsub;
+	    iwork[nsub] = st;
+
+/*
+             Subproblem found. First determine its size and then
+             apply divide and conquer on it.
+*/
+
+	    if (i__ < nm1) {
+
+/*             A subproblem with E(I) small for I < NM1. */
+
+		nsize = i__ - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+	    } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/*             A subproblem with E(NM1) not too small but I = NM1. */
+
+		nsize = *n - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+	    } else {
+
+/*
+                A subproblem with E(NM1) small. This implies an
+                1-by-1 subproblem at D(N), which is not solved
+                explicitly.
+*/
+
+		nsize = i__ - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+		++nsub;
+		iwork[nsub] = *n;
+		iwork[sizei + nsub - 1] = 1;
+		zcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+	    }
+	    st1 = st - 1;
+	    if (nsize == 1) {
+
+/*
+                This is a 1-by-1 subproblem and is not solved
+                explicitly.
+*/
+
+		zcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+	    } else if (nsize <= *smlsiz) {
+
+/*             This is a small subproblem and is solved by DLASDQ. */
+
+		dlaset_("A", &nsize, &nsize, &c_b324, &c_b1015, &rwork[vt +
+			st1], n);
+		dlaset_("A", &nsize, &nsize, &c_b324, &c_b1015, &rwork[u +
+			st1], n);
+		dlasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], &
+			e[st], &rwork[vt + st1], n, &rwork[u + st1], n, &
+			rwork[nrwork], &c__1, &rwork[nrwork], info)
+			;
+		if (*info != 0) {
+		    return 0;
+		}
+
+/*
+                In the real version, B is passed to DLASDQ and multiplied
+                internally by Q'. Here B is complex and that product is
+                computed below in two steps (real and imaginary parts).
+*/
+
+		j = irwb - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++j;
+			i__4 = jrow + jcol * b_dim1;
+			rwork[j] = b[i__4].r;
+/* L180: */
+		    }
+/* L190: */
+		}
+		dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[u +
+			st1], n, &rwork[irwb], &nsize, &c_b324, &rwork[irwrb],
+			&nsize);
+		j = irwb - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++j;
+			rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L200: */
+		    }
+/* L210: */
+		}
+		dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[u +
+			st1], n, &rwork[irwb], &nsize, &c_b324, &rwork[irwib],
+			&nsize);
+		jreal = irwrb - 1;
+		jimag = irwib - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++jreal;
+			++jimag;
+			i__4 = jrow + jcol * b_dim1;
+			i__5 = jreal;
+			i__6 = jimag;
+			z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+			b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L220: */
+		    }
+/* L230: */
+		}
+
+		zlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+			st1], n);
+	    } else {
+
+/*             A large problem. Solve it using divide and conquer. */
+
+		dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+			rwork[u + st1], n, &rwork[vt + st1], &iwork[k + st1],
+			&rwork[difl + st1], &rwork[difr + st1], &rwork[z__ +
+			st1], &rwork[poles + st1], &iwork[givptr + st1], &
+			iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+			givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &
+			rwork[nrwork], &iwork[iwk], info);
+		if (*info != 0) {
+		    return 0;
+		}
+		bxst = bx + st1;
+		zlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+			work[bxst], n, &rwork[u + st1], n, &rwork[vt + st1], &
+			iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1]
+			, &rwork[z__ + st1], &rwork[poles + st1], &iwork[
+			givptr + st1], &iwork[givcol + st1], n, &iwork[perm +
+			st1], &rwork[givnum + st1], &rwork[c__ + st1], &rwork[
+			s + st1], &rwork[nrwork], &iwork[iwk], info);
+		if (*info != 0) {
+		    return 0;
+		}
+	    }
+	    st = i__ + 1;
+	}
+/* L240: */
+    }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+    tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+          Some of the elements in D can be negative because 1-by-1
+          subproblems were not solved explicitly.
+*/
+
+	if ((d__1 = d__[i__], abs(d__1)) <= tol) {
+	    zlaset_("A", &c__1, nrhs, &c_b59, &c_b59, &work[bx + i__ - 1], n);
+	} else {
+	    ++(*rank);
+	    zlascl_("G", &c__0, &c__0, &d__[i__], &c_b1015, &c__1, nrhs, &
+		    work[bx + i__ - 1], n, info);
+	}
+	d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* L250: */
+    }
+
+/* Now apply back the right singular vectors. */
+
+    icmpq2 = 1;
+    i__1 = nsub;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	st = iwork[i__];
+	st1 = st - 1;
+	nsize = iwork[sizei + i__ - 1];
+	bxst = bx + st1;
+	if (nsize == 1) {
+	    zcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+	} else if (nsize <= *smlsiz) {
+
+/*
+             Since B and BX are complex, the following call to DGEMM
+             is performed in two steps (real and imaginary parts).
+
+             CALL DGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE,
+         $               RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO,
+         $               B( ST, 1 ), LDB )
+*/
+
+	    j = bxst - *n - 1;
+	    jreal = irwb - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		j += *n;
+		i__3 = nsize;
+		for (jrow = 1; jrow <= i__3; ++jrow) {
+		    ++jreal;
+		    i__4 = j + jrow;
+		    rwork[jreal] = work[i__4].r;
+/* L260: */
+		}
+/* L270: */
+	    }
+	    dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[vt + st1],
+		    n, &rwork[irwb], &nsize, &c_b324, &rwork[irwrb], &nsize);
+	    j = bxst - *n - 1;
+	    jimag = irwb - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		j += *n;
+		i__3 = nsize;
+		for (jrow = 1; jrow <= i__3; ++jrow) {
+		    ++jimag;
+		    rwork[jimag] = d_imag(&work[j + jrow]);
+/* L280: */
+		}
+/* L290: */
+	    }
+	    dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[vt + st1],
+		    n, &rwork[irwb], &nsize, &c_b324, &rwork[irwib], &nsize);
+	    jreal = irwrb - 1;
+	    jimag = irwib - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		i__3 = st + nsize - 1;
+		for (jrow = st; jrow <= i__3; ++jrow) {
+		    ++jreal;
+		    ++jimag;
+		    i__4 = jrow + jcol * b_dim1;
+		    i__5 = jreal;
+		    i__6 = jimag;
+		    z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+		    b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L300: */
+		}
+/* L310: */
+	    }
+	} else {
+	    zlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+		    b_dim1], ldb, &rwork[u + st1], n, &rwork[vt + st1], &
+		    iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1], &
+		    rwork[z__ + st1], &rwork[poles + st1], &iwork[givptr +
+		    st1], &iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+		    givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &rwork[
+		    nrwork], &iwork[iwk], info);
+	    if (*info != 0) {
+		return 0;
+	    }
+	}
+/* L320: */
+    }
+
+/* Unscale and sort the singular values. */
+
+    dlascl_("G", &c__0, &c__0, &c_b1015, &orgnrm, n, &c__1, &d__[1], n, info);
+    dlasrt_("D", n, &d__[1], info);
+    zlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, n, nrhs, &b[b_offset], ldb,
+	    info);
+
+    return 0;
+
+/* End of ZLALSD */
+
+} /* zlalsd_ */
+
+doublereal zlange_(char *norm, integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__, j;
+    static doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    static doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	     doublereal *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    ZLANGE  returns the value of the one norm,  or the Frobenius norm, or
+    the  infinity norm,  or the  element of  largest absolute value  of a
+    complex matrix A.
+
+    Description
+    ===========
+
+    ZLANGE returns the value
+
+       ZLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+                (
+                ( norm1(A),         NORM = '1', 'O' or 'o'
+                (
+                ( normI(A),         NORM = 'I' or 'i'
+                (
+                ( normF(A),         NORM = 'F', 'f', 'E' or 'e'
+
+    where  norm1  denotes the  one norm of a matrix (maximum column sum),
+    normI  denotes the  infinity norm  of a matrix  (maximum row sum) and
+    normF  denotes the  Frobenius norm of a matrix (square root of sum of
+    squares).  Note that  max(abs(A(i,j)))  is not a  matrix norm.
+
+    Arguments
+    =========
+
+    NORM    (input) CHARACTER*1
+            Specifies the value to be returned in ZLANGE as described
+            above.
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.  When M = 0,
+            ZLANGE is set to zero.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.  When N = 0,
+            ZLANGE is set to zero.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,N)
+            The m by n matrix A.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(M,1).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LWORK),
+            where LWORK >= M when NORM = 'I'; otherwise, WORK is not
+            referenced.
+
+   =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (min(*m,*n) == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+		value = max(d__1,d__2);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/*        Find norm1(A). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.;
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		sum += z_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+	    }
+	    value = max(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/*        Find normI(A). */
+
+/*        WORK(1:M) accumulates the row sums of absolute values. */
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A). */
+
+/*        ZLASSQ maintains (SCALE, SUM) so that the running sum of
+          squares equals SCALE**2 * SUM, avoiding overflow/underflow;
+          hence normF = SCALE * sqrt(SUM) below. */
+	scale = 0.;
+	sum = 1.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    zlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+/*  NOTE(review): an unrecognized NORM leaves VALUE (a static) holding
+    its previous contents; presumably callers always pass a valid NORM
+    -- confirm against the reference ZLANGE, which behaves the same. */
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANGE */
+
+} /* zlange_ */
+
+doublereal zlanhe_(char *norm, char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    static integer i__, j;
+    static doublereal sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    static doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	     doublereal *, doublereal *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    ZLANHE  returns the value of the one norm,  or the Frobenius norm, or
+    the  infinity norm,  or the  element of  largest absolute value  of a
+    complex hermitian matrix A.
+
+    Description
+    ===========
+
+    ZLANHE returns the value
+
+       ZLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+                (
+                ( norm1(A),         NORM = '1', 'O' or 'o'
+                (
+                ( normI(A),         NORM = 'I' or 'i'
+                (
+                ( normF(A),         NORM = 'F', 'f', 'E' or 'e'
+
+    where  norm1  denotes the  one norm of a matrix (maximum column sum),
+    normI  denotes the  infinity norm  of a matrix  (maximum row sum) and
+    normF  denotes the  Frobenius norm of a matrix (square root of sum of
+    squares).  Note that  max(abs(A(i,j)))  is not a  matrix norm.
+
+    Arguments
+    =========
+
+    NORM    (input) CHARACTER*1
+            Specifies the value to be returned in ZLANHE as described
+            above.
+
+    UPLO    (input) CHARACTER*1
+            Specifies whether the upper or lower triangular part of the
+            hermitian matrix A is to be referenced.
+            = 'U':  Upper triangular part of A is referenced
+            = 'L':  Lower triangular part of A is referenced
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.  When N = 0, ZLANHE is
+            set to zero.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,N)
+            The hermitian matrix A.  If UPLO = 'U', the leading n by n
+            upper triangular part of A contains the upper triangular part
+            of the matrix A, and the strictly lower triangular part of A
+            is not referenced.  If UPLO = 'L', the leading n by n lower
+            triangular part of A contains the lower triangular part of
+            the matrix A, and the strictly upper triangular part of A is
+            not referenced. Note that the imaginary parts of the diagonal
+            elements need not be set and are assumed to be zero.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(N,1).
+
+    WORK    (workspace) DOUBLE PRECISION array, dimension (LWORK),
+            where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise,
+            WORK is not referenced.
+
+   =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))). */
+
+/*        Only the real part of each diagonal entry is examined; the
+          imaginary part is assumed zero (A is Hermitian, see above). */
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+		    value = max(d__1,d__2);
+/* L10: */
+		}
+/* Computing MAX */
+		i__2 = j + j * a_dim1;
+		d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+		value = max(d__2,d__3);
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = j + j * a_dim1;
+		d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+		value = max(d__2,d__3);
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+		    value = max(d__1,d__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        Find normI(A) ( = norm1(A), since A is hermitian). */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+/*          Each strictly-upper entry A(i,j) contributes both to column
+            sum j (SUM) and, by symmetry, to row sum i (WORK(i)). */
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = z_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: */
+		}
+		i__2 = j + j * a_dim1;
+		work[j] = sum + (d__1 = a[i__2].r, abs(d__1));
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = work[i__];
+		value = max(d__1,d__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j + j * a_dim1;
+		sum = work[j] + (d__1 = a[i__2].r, abs(d__1));
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = z_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = max(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A). */
+
+/*        Accumulate the squares of one strict triangle with ZLASSQ
+          (which keeps sum-of-squares = SCALE**2 * SUM). */
+	scale = 0.;
+	sum = 1.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		i__2 = j - 1;
+		zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j;
+		zlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+	    }
+	}
+/*      Off-diagonal entries were accumulated from one triangle only;
+        double them (A is Hermitian), then fold in the (real) diagonal
+        with the same scaled-sum update ZLASSQ uses. */
+	sum *= 2;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    if (a[i__2].r != 0.) {
+		i__2 = i__ + i__ * a_dim1;
+		absa = (d__1 = a[i__2].r, abs(d__1));
+		if (scale < absa) {
+/* Computing 2nd power */
+		    d__1 = scale / absa;
+		    sum = sum * (d__1 * d__1) + 1.;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    d__1 = absa / scale;
+		    sum += d__1 * d__1;
+		}
+	    }
+/* L130: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANHE */
+
+} /* zlanhe_ */
+
+doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ static doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ ZLANHS returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ Hessenberg matrix A.
+
+ Description
+ ===========
+
+ ZLANHS returns the value
+
+ ZLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in ZLANHS as described
+ above.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, ZLANHS is
+ set to zero.
+
+ A (input) COMPLEX*16 array, dimension (LDA,N)
+ The n by n upper Hessenberg matrix A; the part of A below the
+ first sub-diagonal is not referenced.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+/* f2c note: the pointer offsets below emulate Fortran 1-based,
+ column-major indexing, so a[i__ + j * a_dim1] is Fortran A(i,j). */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ i__1 = *n;
+/* Only rows 1..min(n,j+1) of column j can be nonzero in an upper
+ Hessenberg matrix, so each inner loop stops at that row. */
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+ }
+ value = max(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L80: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+/* zlassq_ maintains (scale, sum) such that scale^2 * sum equals the
+ sum of squares processed so far, avoiding overflow/underflow. */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANHS */
+
+} /* zlanhs_ */
+
+/* Subroutine */ int zlarcm_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublecomplex *b, integer *ldb, doublecomplex *c__, integer *ldc,
+ doublereal *rwork)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZLARCM performs a very simple matrix-matrix multiplication:
+ C := A * B,
+ where A is M by M and real; B is M by N and complex;
+ C is M by N and complex.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A and of the matrix C.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns and rows of the matrix B and
+ the number of columns of the matrix C.
+ N >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA, M)
+ A contains the M by M matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >=max(1,M).
+
+ B (input) DOUBLE PRECISION array, dimension (LDB, N)
+ B contains the M by N matrix B.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >=max(1,M).
+
+ C (output) COMPLEX*16 array, dimension (LDC, N)
+ C is overwritten with the M by N matrix C. (The original
+ LAPACK 3.0 comment said "(input)"; C is written, not read.)
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >=max(1,M).
+
+ RWORK (workspace) DOUBLE PRECISION array, dimension (2*M*N)
+
+ =====================================================================
+
+
+ Quick return if possible.
+*/
+
+/* f2c note: the pointer offsets below emulate Fortran 1-based,
+ column-major indexing. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1 * 1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Pack Re(B) contiguously into rwork(1:m*n), column by column. */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[(j - 1) * *m + i__] = b[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* rwork(l:l+m*n-1) := A * Re(B) via real GEMM. c_b1015 and c_b324 are
+ file-level doublereal constants (presumably 1.0 and 0.0 -- they are
+ defined elsewhere in this file). */
+ l = *m * *n + 1;
+ dgemm_("N", "N", m, n, m, &c_b1015, &a[a_offset], lda, &rwork[1], m, &
+ c_b324, &rwork[l], m);
+/* Store the real part of the product into C (imaginary part zeroed). */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Repeat for the imaginary parts: rwork(1:m*n) := Im(B). */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = d_imag(&b[i__ + j * b_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ dgemm_("N", "N", m, n, m, &c_b1015, &a[a_offset], lda, &rwork[1], m, &
+ c_b324, &rwork[l], m);
+/* Combine: set Im(C) = A * Im(B), keeping the real part stored above. */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ d__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ z__1.r = d__1, z__1.i = rwork[i__5];
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of ZLARCM */
+
+} /* zlarcm_ */
+
+/* Subroutine */ int zlarf_(char *side, integer *m, integer *n, doublecomplex
+ *v, integer *incv, doublecomplex *tau, doublecomplex *c__, integer *
+ ldc, doublecomplex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ doublecomplex z__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLARF applies a complex elementary reflector H to a complex M-by-N
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a complex scalar and v is a complex vector.
+
+ If tau = 0, then H is taken to be the unit matrix.
+
+ To apply H' (the conjugate transpose of H), supply conjg(tau) instead
+ tau.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) COMPLEX*16 array, dimension
+ (1 + (M-1)*abs(INCV)) if SIDE = 'L'
+ or (1 + (N-1)*abs(INCV)) if SIDE = 'R'
+ The vector v in the representation of H. V is not used if
+ TAU = 0.
+
+ INCV (input) INTEGER
+ The increment between elements of v. INCV <> 0.
+
+ TAU (input) COMPLEX*16
+ The value tau in the representation of H.
+
+ C (input/output) COMPLEX*16 array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX*16 array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+
+ =====================================================================
+*/
+
+
+/* f2c note: the pointer offsets below emulate Fortran 1-based indexing. */
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+/* In both branches: if tau == 0 then H = I and C is left untouched.
+ c_b59 and c_b60 are file-level doublecomplex constants (presumably
+ 0+0i and 1+0i -- they are defined elsewhere in this file). */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if (tau->r != 0. || tau->i != 0.) {
+
+/* w := C' * v */
+
+ zgemv_("Conjugate transpose", m, n, &c_b60, &c__[c_offset], ldc, &
+ v[1], incv, &c_b59, &work[1], &c__1);
+
+/* C := C - v * w' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(m, n, &z__1, &v[1], incv, &work[1], &c__1, &c__[c_offset],
+ ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (tau->r != 0. || tau->i != 0.) {
+
+/* w := C * v */
+
+ zgemv_("No transpose", m, n, &c_b60, &c__[c_offset], ldc, &v[1],
+ incv, &c_b59, &work[1], &c__1);
+
+/* C := C - w * v' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(m, n, &z__1, &work[1], &c__1, &v[1], incv, &c__[c_offset],
+ ldc);
+ }
+ }
+ return 0;
+
+/* End of ZLARF */
+
+} /* zlarf_ */
+
+/* Subroutine */ int zlarfb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, doublecomplex *v, integer
+ *ldv, doublecomplex *t, integer *ldt, doublecomplex *c__, integer *
+ ldc, doublecomplex *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zcopy_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), ztrmm_(char *, char *,
+ char *, char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *,
+ integer *);
+ static char transt[1];
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLARFB applies a complex block reflector H or its transpose H' to a
+ complex M-by-N matrix C, from either the left or the right.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply H or H' from the Left
+ = 'R': apply H or H' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply H (No transpose)
+ = 'C': apply H' (Conjugate transpose)
+
+ DIRECT (input) CHARACTER*1
+ Indicates how H is formed from a product of elementary
+ reflectors
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Indicates how the vectors which define the elementary
+ reflectors are stored:
+ = 'C': Columnwise
+ = 'R': Rowwise
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ K (input) INTEGER
+ The order of the matrix T (= the number of elementary
+ reflectors whose product defines the block reflector).
+
+ V (input) COMPLEX*16 array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,M) if STOREV = 'R' and SIDE = 'L'
+ (LDV,N) if STOREV = 'R' and SIDE = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
+ if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
+ if STOREV = 'R', LDV >= K.
+
+ T (input) COMPLEX*16 array, dimension (LDT,K)
+ The triangular K-by-K matrix T in the representation of the
+ block reflector.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ C (input/output) COMPLEX*16 array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by H*C or H'*C or C*H or C*H'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX*16 array, dimension (LDWORK,K)
+
+ LDWORK (input) INTEGER
+ The leading dimension of the array WORK.
+ If SIDE = 'L', LDWORK >= max(1,N);
+ if SIDE = 'R', LDWORK >= max(1,M).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+/* f2c note: the pointer offsets below emulate Fortran 1-based,
+ column-major indexing for V, T, C and WORK. */
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1 * 1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1 * 1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1 * 1;
+ work -= work_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+/* transt holds the opposite transposition flag of TRANS; the W := W*T
+ steps below use whichever of trans/transt the chosen formulation of
+ the update requires. */
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(storev, "C")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 ) (first K rows)
+ ( V2 )
+ where V1 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+ zlacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b60,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2 */
+
+ i__1 = *m - *k;
+ zgemm_("Conjugate transpose", "No transpose", n, k, &i__1,
+ &c_b60, &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 +
+ v_dim1], ldv, &c_b60, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+ i__1 = *m - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &i__1, n, k,
+ &z__1, &v[*k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork, &c_b60, &c__[*k + 1 +
+ c_dim1], ldc);
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", n, k,
+ &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * c_dim1;
+ i__4 = j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L40: */
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b60,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2 */
+
+ i__1 = *n - *k;
+ zgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b60, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k +
+ 1 + v_dim1], ldv, &c_b60, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2' */
+
+ i__1 = *n - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", m, &i__1, k,
+ &z__1, &work[work_offset], ldwork, &v[*k + 1 +
+ v_dim1], ldv, &c_b60, &c__[(*k + 1) * c_dim1 + 1],
+ ldc);
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", m, k,
+ &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 )
+ ( V2 ) (last K rows)
+ where V2 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+ zlacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L70: */
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b60,
+ &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1 */
+
+ i__1 = *m - *k;
+ zgemm_("Conjugate transpose", "No transpose", n, k, &i__1,
+ &c_b60, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b60, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+ i__1 = *m - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &i__1, n, k,
+ &z__1, &v[v_offset], ldv, &work[work_offset],
+ ldwork, &c_b60, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", n, k,
+ &c_b60, &v[*m - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = *m - *k + j + i__ * c_dim1;
+ i__4 = *m - *k + j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b60,
+ &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1 */
+
+ i__1 = *n - *k;
+ zgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b60, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b60, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1' */
+
+ i__1 = *n - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", m, &i__1, k,
+ &z__1, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b60, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", m, k,
+ &c_b60, &v[*n - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (*n - *k + j) * c_dim1;
+ i__4 = i__ + (*n - *k + j) * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+ } else if (lsame_(storev, "R")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 V2 ) (V1: first K columns)
+ where V1 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+ zlacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L130: */
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", n, k,
+ &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2' */
+
+ i__1 = *m - *k;
+ zgemm_("Conjugate transpose", "Conjugate transpose", n, k,
+ &i__1, &c_b60, &c__[*k + 1 + c_dim1], ldc, &v[(*
+ k + 1) * v_dim1 + 1], ldv, &c_b60, &work[
+ work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+ i__1 = *m - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("Conjugate transpose", "Conjugate transpose", &
+ i__1, n, k, &z__1, &v[(*k + 1) * v_dim1 + 1], ldv,
+ &work[work_offset], ldwork, &c_b60, &c__[*k + 1
+ + c_dim1], ldc);
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b60,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * c_dim1;
+ i__4 = j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L160: */
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", m, k,
+ &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2' */
+
+ i__1 = *n - *k;
+ zgemm_("No transpose", "Conjugate transpose", m, k, &i__1,
+ &c_b60, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k
+ + 1) * v_dim1 + 1], ldv, &c_b60, &work[
+ work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2 */
+
+ i__1 = *n - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", m, &i__1, k, &z__1,
+ &work[work_offset], ldwork, &v[(*k + 1) * v_dim1
+ + 1], ldv, &c_b60, &c__[(*k + 1) * c_dim1 + 1],
+ ldc);
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b60,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 V2 ) (V2: last K columns)
+ where V2 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+ zlacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L190: */
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", n, k,
+ &c_b60, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1' */
+
+ i__1 = *m - *k;
+ zgemm_("Conjugate transpose", "Conjugate transpose", n, k,
+ &i__1, &c_b60, &c__[c_offset], ldc, &v[v_offset],
+ ldv, &c_b60, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+ i__1 = *m - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("Conjugate transpose", "Conjugate transpose", &
+ i__1, n, k, &z__1, &v[v_offset], ldv, &work[
+ work_offset], ldwork, &c_b60, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b60,
+ &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = *m - *k + j + i__ * c_dim1;
+ i__4 = *m - *k + j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L200: */
+ }
+/* L210: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L220: */
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", m, k,
+ &c_b60, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1' */
+
+ i__1 = *n - *k;
+ zgemm_("No transpose", "Conjugate transpose", m, k, &i__1,
+ &c_b60, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b60, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b60, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1 */
+
+ i__1 = *n - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", m, &i__1, k, &z__1,
+ &work[work_offset], ldwork, &v[v_offset], ldv, &
+ c_b60, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b60,
+ &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (*n - *k + j) * c_dim1;
+ i__4 = i__ + (*n - *k + j) * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L230: */
+ }
+/* L240: */
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of ZLARFB */
+
+} /* zlarfb_ */
+
+/* Subroutine */ int zlarfg_(integer *n, doublecomplex *alpha, doublecomplex *
+ x, integer *incx, doublecomplex *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer j, knt;
+ static doublereal beta, alphi, alphr;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ static doublereal xnorm;
+ extern doublereal dlapy3_(doublereal *, doublereal *, doublereal *),
+ dznrm2_(integer *, doublecomplex *, integer *), dlamch_(char *);
+ static doublereal safmin;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ static doublereal rsafmn;
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLARFG generates a complex elementary reflector H of order n, such
+ that
+
+ H' * ( alpha ) = ( beta ), H' * H = I.
+ ( x ) ( 0 )
+
+ where alpha and beta are scalars, with beta real, and x is an
+ (n-1)-element complex vector. H is represented in the form
+
+ H = I - tau * ( 1 ) * ( 1 v' ) ,
+ ( v )
+
+ where tau is a complex scalar and v is a complex (n-1)-element
+ vector. Note that H is not hermitian.
+
+ If the elements of x are all zero and alpha is real, then tau = 0
+ and H is taken to be the unit matrix.
+
+ Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the elementary reflector.
+
+ ALPHA (input/output) COMPLEX*16
+ On entry, the value alpha.
+ On exit, it is overwritten with the value beta.
+
+ X (input/output) COMPLEX*16 array, dimension
+ (1+(N-2)*abs(INCX))
+ On entry, the vector x.
+ On exit, it is overwritten with the vector v.
+
+ INCX (input) INTEGER
+ The increment between elements of X. INCX > 0.
+
+ TAU (output) COMPLEX*16
+ The value tau.
+
+ =====================================================================
+*/
+
+
+/* f2c note: --x shifts to Fortran 1-based indexing. */
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 0) {
+ tau->r = 0., tau->i = 0.;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = dznrm2_(&i__1, &x[1], incx);
+ alphr = alpha->r;
+ alphi = d_imag(alpha);
+
+ if ((xnorm == 0. && alphi == 0.)) {
+
+/* H = I */
+
+ tau->r = 0., tau->i = 0.;
+ } else {
+
+/* general case */
+
+/* beta = -sign(alphr) * sqrt(alphr^2 + alphi^2 + xnorm^2), computed
+ overflow-safely by dlapy3_. */
+ d__1 = dlapy3_(&alphr, &alphi, &xnorm);
+ beta = -d_sign(&d__1, &alphr);
+/* SAFEMINIMUM and EPSILON are dlamch-derived macros defined elsewhere
+ in this file; safmin is the smallest magnitude that can safely be
+ inverted and scaled with. */
+ safmin = SAFEMINIMUM / EPSILON;
+ rsafmn = 1. / safmin;
+
+ if (abs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+/* Repeatedly scale x (and alpha, beta) up by 1/safmin until beta is
+ at least safmin; knt counts the scalings so they can be undone. */
+ knt = 0;
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ zdscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ alphi *= rsafmn;
+ alphr *= rsafmn;
+ if (abs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = dznrm2_(&i__1, &x[1], incx);
+ z__1.r = alphr, z__1.i = alphi;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ d__1 = dlapy3_(&alphr, &alphi, &xnorm);
+ beta = -d_sign(&d__1, &alphr);
+ d__1 = (beta - alphr) / beta;
+ d__2 = -alphi / beta;
+ z__1.r = d__1, z__1.i = d__2;
+ tau->r = z__1.r, tau->i = z__1.i;
+/* v := x / (alpha - beta); zladiv_ performs the complex division
+ (c_b60 is a file-level doublecomplex constant, presumably 1+0i). */
+ z__2.r = alpha->r - beta, z__2.i = alpha->i;
+ zladiv_(&z__1, &c_b60, &z__2);
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ i__1 = *n - 1;
+ zscal_(&i__1, alpha, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+/* Undo the knt rescalings: multiply beta back down by safmin^knt. */
+ alpha->r = beta, alpha->i = 0.;
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ z__1.r = safmin * alpha->r, z__1.i = safmin * alpha->i;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+/* L20: */
+ }
+ } else {
+ d__1 = (beta - alphr) / beta;
+ d__2 = -alphi / beta;
+ z__1.r = d__1, z__1.i = d__2;
+ tau->r = z__1.r, tau->i = z__1.i;
+ z__2.r = alpha->r - beta, z__2.i = alpha->i;
+ zladiv_(&z__1, &c_b60, &z__2);
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ i__1 = *n - 1;
+ zscal_(&i__1, alpha, &x[1], incx);
+ alpha->r = beta, alpha->i = 0.;
+ }
+ }
+
+ return 0;
+
+/* End of ZLARFG */
+
+} /* zlarfg_ */
+
+/* Subroutine */ int zlarft_(char *direct, char *storev, integer *n, integer *
+	k, doublecomplex *v, integer *ldv, doublecomplex *tau, doublecomplex *
+	t, integer *ldt)
+{
+    /* System generated locals */
+    integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__, j;
+    static doublecomplex vii;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    ztrmv_(char *, char *, char *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *),
+	    zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZLARFT forms the triangular factor T of a complex block reflector H
+    of order n, which is defined as a product of k elementary reflectors.
+
+    If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
+
+    If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
+
+    If STOREV = 'C', the vector which defines the elementary reflector
+    H(i) is stored in the i-th column of the array V, and
+
+       H  =  I - V * T * V'
+
+    If STOREV = 'R', the vector which defines the elementary reflector
+    H(i) is stored in the i-th row of the array V, and
+
+       H  =  I - V' * T * V
+
+    Arguments
+    =========
+
+    DIRECT  (input) CHARACTER*1
+            Specifies the order in which the elementary reflectors are
+            multiplied to form the block reflector:
+            = 'F': H = H(1) H(2) . . . H(k) (Forward)
+            = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+    STOREV  (input) CHARACTER*1
+            Specifies how the vectors which define the elementary
+            reflectors are stored (see also Further Details):
+            = 'C': columnwise
+            = 'R': rowwise
+
+    N       (input) INTEGER
+            The order of the block reflector H. N >= 0.
+
+    K       (input) INTEGER
+            The order of the triangular factor T (= the number of
+            elementary reflectors). K >= 1.
+
+    V       (input/output) COMPLEX*16 array, dimension
+                                 (LDV,K) if STOREV = 'C'
+                                 (LDV,N) if STOREV = 'R'
+            The matrix V. See further details.
+
+    LDV     (input) INTEGER
+            The leading dimension of the array V.
+            If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
+
+    TAU     (input) COMPLEX*16 array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i).
+
+    T       (output) COMPLEX*16 array, dimension (LDT,K)
+            The k by k triangular factor T of the block reflector.
+            If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
+            lower triangular. The rest of the array is not used.
+
+    LDT     (input) INTEGER
+            The leading dimension of the array T. LDT >= K.
+
+    Further Details
+    ===============
+
+    The shape of the matrix V and the storage of the vectors which define
+    the H(i) is best illustrated by the following example with n = 5 and
+    k = 3. The elements equal to 1 are not stored; the corresponding
+    array elements are modified but restored on exit. The rest of the
+    array is not used.
+
+    DIRECT = 'F' and STOREV = 'C':         DIRECT = 'F' and STOREV = 'R':
+
+                 V = (  1       )                 V = (  1 v1 v1 v1 v1 )
+                     ( v1  1    )                     (     1 v2 v2 v2 )
+                     ( v1 v2  1 )                     (        1 v3 v3 )
+                     ( v1 v2 v3 )
+                     ( v1 v2 v3 )
+
+    DIRECT = 'B' and STOREV = 'C':         DIRECT = 'B' and STOREV = 'R':
+
+                 V = ( v1 v2 v3 )                 V = ( v1 v1  1       )
+                     ( v1 v2 v3 )                     ( v2 v2 v2  1    )
+                     (  1 v2 v3 )                     ( v3 v3 v3 v3  1 )
+                     (     1 v3 )
+                     (        1 )
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1 * 1;
+    v -= v_offset;
+    --tau;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1 * 1;
+    t -= t_offset;
+
+    /* Function Body */
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Forward direction: build T one column at a time, left to right, giving
+   an upper triangular factor.  NOTE(review): c_b59 and c__1 are external
+   f2c-generated constants defined elsewhere in this file; c_b59 is
+   presumably the complex zero used as the GEMV beta -- verify against
+   its definition. */
+
+    if (lsame_(direct, "F")) {
+	i__1 = *k;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    if ((tau[i__2].r == 0. && tau[i__2].i == 0.)) {
+
+/* H(i)  =  I : tau(i) = 0, so column i of T is simply zero. */
+
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    i__3 = j + i__ * t_dim1;
+		    t[i__3].r = 0., t[i__3].i = 0.;
+/* L10: */
+		}
+	    } else {
+
+/* general case */
+
+/* Temporarily overwrite V(i,i) with 1 so the stored reflector vector
+   carries its implicit unit diagonal during the GEMV; the saved value
+   vii is restored right after. */
+
+		i__2 = i__ + i__ * v_dim1;
+		vii.r = v[i__2].r, vii.i = v[i__2].i;
+		i__2 = i__ + i__ * v_dim1;
+		v[i__2].r = 1., v[i__2].i = 0.;
+		if (lsame_(storev, "C")) {
+
+/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
+
+		    i__2 = *n - i__ + 1;
+		    i__3 = i__ - 1;
+		    i__4 = i__;
+		    z__1.r = -tau[i__4].r, z__1.i = -tau[i__4].i;
+		    zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &v[i__
+			    + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &
+			    c_b59, &t[i__ * t_dim1 + 1], &c__1);
+		} else {
+
+/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
+
+/* Rowwise storage: conjugate row i of V in place so a plain
+   "No transpose" GEMV computes the conjugated product, then undo the
+   conjugation afterwards with a second zlacgv_ call. */
+
+		    if (i__ < *n) {
+			i__2 = *n - i__;
+			zlacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+		    }
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__ + 1;
+		    i__4 = i__;
+		    z__1.r = -tau[i__4].r, z__1.i = -tau[i__4].i;
+		    zgemv_("No transpose", &i__2, &i__3, &z__1, &v[i__ *
+			    v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+			    c_b59, &t[i__ * t_dim1 + 1], &c__1);
+		    if (i__ < *n) {
+			i__2 = *n - i__;
+			zlacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+		    }
+		}
+		i__2 = i__ + i__ * v_dim1;
+		v[i__2].r = vii.r, v[i__2].i = vii.i;
+
+/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		ztrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+			t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+		i__2 = i__ + i__ * t_dim1;
+		i__3 = i__;
+		t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Backward direction: build T from the last column to the first, giving
+   a lower triangular factor.  The unit element of reflector i lives at
+   V(n-k+i,i) (columnwise) or V(i,n-k+i) (rowwise); as above it is
+   temporarily set to 1 and restored after the GEMV. */
+
+	for (i__ = *k; i__ >= 1; --i__) {
+	    i__1 = i__;
+	    if ((tau[i__1].r == 0. && tau[i__1].i == 0.)) {
+
+/* H(i)  =  I : tau(i) = 0, so column i of T is simply zero. */
+
+		i__1 = *k;
+		for (j = i__; j <= i__1; ++j) {
+		    i__2 = j + i__ * t_dim1;
+		    t[i__2].r = 0., t[i__2].i = 0.;
+/* L30: */
+		}
+	    } else {
+
+/* general case */
+
+		if (i__ < *k) {
+		    if (lsame_(storev, "C")) {
+			i__1 = *n - *k + i__ + i__ * v_dim1;
+			vii.r = v[i__1].r, vii.i = v[i__1].i;
+			i__1 = *n - *k + i__ + i__ * v_dim1;
+			v[i__1].r = 1., v[i__1].i = 0.;
+
+/*
+                008                T(i+1:k,i) :=
+                                        - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i)
+*/
+
+			i__1 = *n - *k + i__;
+			i__2 = *k - i__;
+			i__3 = i__;
+			z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+			zgemv_("Conjugate transpose", &i__1, &i__2, &z__1, &v[
+				(i__ + 1) * v_dim1 + 1], ldv, &v[i__ * v_dim1
+				+ 1], &c__1, &c_b59, &t[i__ + 1 + i__ *
+				t_dim1], &c__1);
+			i__1 = *n - *k + i__ + i__ * v_dim1;
+			v[i__1].r = vii.r, v[i__1].i = vii.i;
+		    } else {
+			i__1 = i__ + (*n - *k + i__) * v_dim1;
+			vii.r = v[i__1].r, vii.i = v[i__1].i;
+			i__1 = i__ + (*n - *k + i__) * v_dim1;
+			v[i__1].r = 1., v[i__1].i = 0.;
+
+/*
+                   T(i+1:k,i) :=
+                           - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)'
+*/
+
+/* Conjugate row i of V in place for the GEMV, then undo it (same trick
+   as in the forward/rowwise branch). */
+
+			i__1 = *n - *k + i__ - 1;
+			zlacgv_(&i__1, &v[i__ + v_dim1], ldv);
+			i__1 = *k - i__;
+			i__2 = *n - *k + i__;
+			i__3 = i__;
+			z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+			zgemv_("No transpose", &i__1, &i__2, &z__1, &v[i__ +
+				1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, &
+				c_b59, &t[i__ + 1 + i__ * t_dim1], &c__1);
+			i__1 = *n - *k + i__ - 1;
+			zlacgv_(&i__1, &v[i__ + v_dim1], ldv);
+			i__1 = i__ + (*n - *k + i__) * v_dim1;
+			v[i__1].r = vii.r, v[i__1].i = vii.i;
+		    }
+
+/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+		    i__1 = *k - i__;
+		    ztrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+			    + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+			     t_dim1], &c__1)
+			    ;
+		}
+		i__1 = i__ + i__ * t_dim1;
+		i__2 = i__;
+		t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i;
+	    }
+/* L40: */
+	}
+    }
+    return 0;
+
+/*     End of ZLARFT */
+
+} /* zlarft_ */
+
+/* Subroutine */ int zlarfx_(char *side, integer *m, integer *n,
+ doublecomplex *v, doublecomplex *tau, doublecomplex *c__, integer *
+ ldc, doublecomplex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8,
+ i__9, i__10, i__11;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8, z__9, z__10,
+ z__11, z__12, z__13, z__14, z__15, z__16, z__17, z__18, z__19;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer j;
+ static doublecomplex t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, t10, v10, sum;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLARFX applies a complex elementary reflector H to a complex m by n
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a complex scalar and v is a complex vector.
+
+ If tau = 0, then H is taken to be the unit matrix
+
+ This version uses inline code if H has order < 11.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) COMPLEX*16 array, dimension (M) if SIDE = 'L'
+ or (N) if SIDE = 'R'
+ The vector v in the representation of H.
+
+ TAU (input) COMPLEX*16
+ The value tau in the representation of H.
+
+ C (input/output) COMPLEX*16 array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDA >= max(1,M).
+
+ WORK (workspace) COMPLEX*16 array, dimension (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+ WORK is not referenced if H has order < 11.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if ((tau->r == 0. && tau->i == 0.)) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/*
+ Code for general M
+
+ w := C'*v
+*/
+
+ zgemv_("Conjugate transpose", m, n, &c_b60, &c__[c_offset], ldc, &v[1]
+ , &c__1, &c_b59, &work[1], &c__1);
+
+/* C := C - tau * v * w' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(m, n, &z__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset],
+ ldc);
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ z__3.r = tau->r * v[1].r - tau->i * v[1].i, z__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ d_cnjg(&z__4, &v[1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = z__3.r * z__4.i
+ + z__3.i * z__4.r;
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ t1.r = z__1.r, t1.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, z__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ i__4 = j * c_dim1 + 3;
+ z__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
+ i__4 = j * c_dim1 + 3;
+ z__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i;
+ i__5 = j * c_dim1 + 4;
+ z__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__1.r = z__2.r + z__7.r, z__1.i = z__2.i + z__7.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__4.r = z__5.r + z__6.r, z__4.i = z__5.i + z__6.i;
+ i__4 = j * c_dim1 + 3;
+ z__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
+ i__5 = j * c_dim1 + 4;
+ z__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__2.r = z__3.r + z__8.r, z__2.i = z__3.i + z__8.i;
+ i__6 = j * c_dim1 + 5;
+ z__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ z__1.r = z__2.r + z__9.r, z__1.i = z__2.i + z__9.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__5.r = z__6.r + z__7.r, z__5.i = z__6.i + z__7.i;
+ i__4 = j * c_dim1 + 3;
+ z__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__4.r = z__5.r + z__8.r, z__4.i = z__5.i + z__8.i;
+ i__5 = j * c_dim1 + 4;
+ z__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__3.r = z__4.r + z__9.r, z__3.i = z__4.i + z__9.i;
+ i__6 = j * c_dim1 + 5;
+ z__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__2.r = z__3.r + z__10.r, z__2.i = z__3.i + z__10.i;
+ i__7 = j * c_dim1 + 6;
+ z__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__1.r = z__2.r + z__11.r, z__1.i = z__2.i + z__11.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__6.r = z__7.r + z__8.r, z__6.i = z__7.i + z__8.i;
+ i__4 = j * c_dim1 + 3;
+ z__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__5.r = z__6.r + z__9.r, z__5.i = z__6.i + z__9.i;
+ i__5 = j * c_dim1 + 4;
+ z__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__4.r = z__5.r + z__10.r, z__4.i = z__5.i + z__10.i;
+ i__6 = j * c_dim1 + 5;
+ z__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__3.r = z__4.r + z__11.r, z__3.i = z__4.i + z__11.i;
+ i__7 = j * c_dim1 + 6;
+ z__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__2.r = z__3.r + z__12.r, z__2.i = z__3.i + z__12.i;
+ i__8 = j * c_dim1 + 7;
+ z__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__1.r = z__2.r + z__13.r, z__1.i = z__2.i + z__13.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ d_cnjg(&z__1, &v[8]);
+ v8.r = z__1.r, v8.i = z__1.i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__7.r = z__8.r + z__9.r, z__7.i = z__8.i + z__9.i;
+ i__4 = j * c_dim1 + 3;
+ z__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__6.r = z__7.r + z__10.r, z__6.i = z__7.i + z__10.i;
+ i__5 = j * c_dim1 + 4;
+ z__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__5.r = z__6.r + z__11.r, z__5.i = z__6.i + z__11.i;
+ i__6 = j * c_dim1 + 5;
+ z__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__4.r = z__5.r + z__12.r, z__4.i = z__5.i + z__12.i;
+ i__7 = j * c_dim1 + 6;
+ z__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__3.r = z__4.r + z__13.r, z__3.i = z__4.i + z__13.i;
+ i__8 = j * c_dim1 + 7;
+ z__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__2.r = z__3.r + z__14.r, z__2.i = z__3.i + z__14.i;
+ i__9 = j * c_dim1 + 8;
+ z__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__1.r = z__2.r + z__15.r, z__1.i = z__2.i + z__15.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ d_cnjg(&z__1, &v[8]);
+ v8.r = z__1.r, v8.i = z__1.i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ d_cnjg(&z__1, &v[9]);
+ v9.r = z__1.r, v9.i = z__1.i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__8.r = z__9.r + z__10.r, z__8.i = z__9.i + z__10.i;
+ i__4 = j * c_dim1 + 3;
+ z__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__7.r = z__8.r + z__11.r, z__7.i = z__8.i + z__11.i;
+ i__5 = j * c_dim1 + 4;
+ z__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__6.r = z__7.r + z__12.r, z__6.i = z__7.i + z__12.i;
+ i__6 = j * c_dim1 + 5;
+ z__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__5.r = z__6.r + z__13.r, z__5.i = z__6.i + z__13.i;
+ i__7 = j * c_dim1 + 6;
+ z__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__4.r = z__5.r + z__14.r, z__4.i = z__5.i + z__14.i;
+ i__8 = j * c_dim1 + 7;
+ z__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__3.r = z__4.r + z__15.r, z__3.i = z__4.i + z__15.i;
+ i__9 = j * c_dim1 + 8;
+ z__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__2.r = z__3.r + z__16.r, z__2.i = z__3.i + z__16.i;
+ i__10 = j * c_dim1 + 9;
+ z__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__1.r = z__2.r + z__17.r, z__1.i = z__2.i + z__17.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ d_cnjg(&z__1, &v[8]);
+ v8.r = z__1.r, v8.i = z__1.i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ d_cnjg(&z__1, &v[9]);
+ v9.r = z__1.r, v9.i = z__1.i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ d_cnjg(&z__1, &v[10]);
+ v10.r = z__1.r, v10.i = z__1.i;
+ d_cnjg(&z__2, &v10);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t10.r = z__1.r, t10.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__9.r = z__10.r + z__11.r, z__9.i = z__10.i + z__11.i;
+ i__4 = j * c_dim1 + 3;
+ z__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__8.r = z__9.r + z__12.r, z__8.i = z__9.i + z__12.i;
+ i__5 = j * c_dim1 + 4;
+ z__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__7.r = z__8.r + z__13.r, z__7.i = z__8.i + z__13.i;
+ i__6 = j * c_dim1 + 5;
+ z__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__6.r = z__7.r + z__14.r, z__6.i = z__7.i + z__14.i;
+ i__7 = j * c_dim1 + 6;
+ z__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__5.r = z__6.r + z__15.r, z__5.i = z__6.i + z__15.i;
+ i__8 = j * c_dim1 + 7;
+ z__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__4.r = z__5.r + z__16.r, z__4.i = z__5.i + z__16.i;
+ i__9 = j * c_dim1 + 8;
+ z__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__3.r = z__4.r + z__17.r, z__3.i = z__4.i + z__17.i;
+ i__10 = j * c_dim1 + 9;
+ z__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__2.r = z__3.r + z__18.r, z__2.i = z__3.i + z__18.i;
+ i__11 = j * c_dim1 + 10;
+ z__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, z__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ z__1.r = z__2.r + z__19.r, z__1.i = z__2.i + z__19.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 10;
+ i__3 = j * c_dim1 + 10;
+ z__2.r = sum.r * t10.r - sum.i * t10.i, z__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/*
+ Code for general N
+
+ w := C * v
+*/
+
+ zgemv_("No transpose", m, n, &c_b60, &c__[c_offset], ldc, &v[1], &
+ c__1, &c_b59, &work[1], &c__1);
+
+/* C := C - tau * w * v' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(m, n, &z__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset],
+ ldc);
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ z__3.r = tau->r * v[1].r - tau->i * v[1].i, z__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ d_cnjg(&z__4, &v[1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = z__3.r * z__4.i
+ + z__3.i * z__4.r;
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ t1.r = z__1.r, t1.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, z__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ i__4 = j + c_dim1 * 3;
+ z__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
+ i__4 = j + c_dim1 * 3;
+ z__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__1.r = z__2.r + z__7.r, z__1.i = z__2.i + z__7.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__4.r = z__5.r + z__6.r, z__4.i = z__5.i + z__6.i;
+ i__4 = j + c_dim1 * 3;
+ z__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__2.r = z__3.r + z__8.r, z__2.i = z__3.i + z__8.i;
+ i__6 = j + c_dim1 * 5;
+ z__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ z__1.r = z__2.r + z__9.r, z__1.i = z__2.i + z__9.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__5.r = z__6.r + z__7.r, z__5.i = z__6.i + z__7.i;
+ i__4 = j + c_dim1 * 3;
+ z__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__4.r = z__5.r + z__8.r, z__4.i = z__5.i + z__8.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__3.r = z__4.r + z__9.r, z__3.i = z__4.i + z__9.i;
+ i__6 = j + c_dim1 * 5;
+ z__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__2.r = z__3.r + z__10.r, z__2.i = z__3.i + z__10.i;
+ i__7 = j + c_dim1 * 6;
+ z__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__1.r = z__2.r + z__11.r, z__1.i = z__2.i + z__11.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__6.r = z__7.r + z__8.r, z__6.i = z__7.i + z__8.i;
+ i__4 = j + c_dim1 * 3;
+ z__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__5.r = z__6.r + z__9.r, z__5.i = z__6.i + z__9.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__4.r = z__5.r + z__10.r, z__4.i = z__5.i + z__10.i;
+ i__6 = j + c_dim1 * 5;
+ z__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__3.r = z__4.r + z__11.r, z__3.i = z__4.i + z__11.i;
+ i__7 = j + c_dim1 * 6;
+ z__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__2.r = z__3.r + z__12.r, z__2.i = z__3.i + z__12.i;
+ i__8 = j + c_dim1 * 7;
+ z__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__1.r = z__2.r + z__13.r, z__1.i = z__2.i + z__13.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__7.r = z__8.r + z__9.r, z__7.i = z__8.i + z__9.i;
+ i__4 = j + c_dim1 * 3;
+ z__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__6.r = z__7.r + z__10.r, z__6.i = z__7.i + z__10.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__5.r = z__6.r + z__11.r, z__5.i = z__6.i + z__11.i;
+ i__6 = j + c_dim1 * 5;
+ z__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__4.r = z__5.r + z__12.r, z__4.i = z__5.i + z__12.i;
+ i__7 = j + c_dim1 * 6;
+ z__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__3.r = z__4.r + z__13.r, z__3.i = z__4.i + z__13.i;
+ i__8 = j + c_dim1 * 7;
+ z__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__2.r = z__3.r + z__14.r, z__2.i = z__3.i + z__14.i;
+ i__9 = j + ((c_dim1) << (3));
+ z__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__1.r = z__2.r + z__15.r, z__1.i = z__2.i + z__15.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (3));
+ i__3 = j + ((c_dim1) << (3));
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__8.r = z__9.r + z__10.r, z__8.i = z__9.i + z__10.i;
+ i__4 = j + c_dim1 * 3;
+ z__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__7.r = z__8.r + z__11.r, z__7.i = z__8.i + z__11.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__6.r = z__7.r + z__12.r, z__6.i = z__7.i + z__12.i;
+ i__6 = j + c_dim1 * 5;
+ z__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__5.r = z__6.r + z__13.r, z__5.i = z__6.i + z__13.i;
+ i__7 = j + c_dim1 * 6;
+ z__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__4.r = z__5.r + z__14.r, z__4.i = z__5.i + z__14.i;
+ i__8 = j + c_dim1 * 7;
+ z__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__3.r = z__4.r + z__15.r, z__3.i = z__4.i + z__15.i;
+ i__9 = j + ((c_dim1) << (3));
+ z__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__2.r = z__3.r + z__16.r, z__2.i = z__3.i + z__16.i;
+ i__10 = j + c_dim1 * 9;
+ z__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__1.r = z__2.r + z__17.r, z__1.i = z__2.i + z__17.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (3));
+ i__3 = j + ((c_dim1) << (3));
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ v10.r = v[10].r, v10.i = v[10].i;
+ d_cnjg(&z__2, &v10);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t10.r = z__1.r, t10.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ z__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__9.r = z__10.r + z__11.r, z__9.i = z__10.i + z__11.i;
+ i__4 = j + c_dim1 * 3;
+ z__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__8.r = z__9.r + z__12.r, z__8.i = z__9.i + z__12.i;
+ i__5 = j + ((c_dim1) << (2));
+ z__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__7.r = z__8.r + z__13.r, z__7.i = z__8.i + z__13.i;
+ i__6 = j + c_dim1 * 5;
+ z__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__6.r = z__7.r + z__14.r, z__6.i = z__7.i + z__14.i;
+ i__7 = j + c_dim1 * 6;
+ z__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__5.r = z__6.r + z__15.r, z__5.i = z__6.i + z__15.i;
+ i__8 = j + c_dim1 * 7;
+ z__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__4.r = z__5.r + z__16.r, z__4.i = z__5.i + z__16.i;
+ i__9 = j + ((c_dim1) << (3));
+ z__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__3.r = z__4.r + z__17.r, z__3.i = z__4.i + z__17.i;
+ i__10 = j + c_dim1 * 9;
+ z__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__2.r = z__3.r + z__18.r, z__2.i = z__3.i + z__18.i;
+ i__11 = j + c_dim1 * 10;
+ z__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, z__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ z__1.r = z__2.r + z__19.r, z__1.i = z__2.i + z__19.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + ((c_dim1) << (3));
+ i__3 = j + ((c_dim1) << (3));
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 10;
+ i__3 = j + c_dim1 * 10;
+ z__2.r = sum.r * t10.r - sum.i * t10.i, z__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of ZLARFX */
+
+} /* zlarfx_ */
+
+/* Subroutine */ int zlascl_(char *type__, integer *kl, integer *ku,
+	doublereal *cfrom, doublereal *cto, integer *m, integer *n,
+	doublecomplex *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1;
+
+    /* Local variables */
+    static integer i__, j, k1, k2, k3, k4;
+    static doublereal mul, cto1;
+    static logical done;
+    static doublereal ctoc;
+    extern logical lsame_(char *, char *);
+    static integer itype;
+    static doublereal cfrom1;
+
+    static doublereal cfromc;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    static doublereal bignum, smlnum;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       February 29, 1992
+
+
+    Purpose
+    =======
+
+    ZLASCL multiplies the M by N complex matrix A by the real scalar
+    CTO/CFROM.  This is done without over/underflow as long as the final
+    result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
+    A may be full, upper triangular, lower triangular, upper Hessenberg,
+    or banded.
+
+    Arguments
+    =========
+
+    TYPE    (input) CHARACTER*1
+            TYPE indicates the storage type of the input matrix.
+            = 'G':  A is a full matrix.
+            = 'L':  A is a lower triangular matrix.
+            = 'U':  A is an upper triangular matrix.
+            = 'H':  A is an upper Hessenberg matrix.
+            = 'B':  A is a symmetric band matrix with lower bandwidth KL
+                    and upper bandwidth KU and with only the lower
+                    half stored.
+            = 'Q':  A is a symmetric band matrix with lower bandwidth KL
+                    and upper bandwidth KU and with only the upper
+                    half stored.
+            = 'Z':  A is a band matrix with lower bandwidth KL and upper
+                    bandwidth KU.
+
+    KL      (input) INTEGER
+            The lower bandwidth of A.  Referenced only if TYPE = 'B',
+            'Q' or 'Z'.
+
+    KU      (input) INTEGER
+            The upper bandwidth of A.  Referenced only if TYPE = 'B',
+            'Q' or 'Z'.
+
+    CFROM   (input) DOUBLE PRECISION
+    CTO     (input) DOUBLE PRECISION
+            The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
+            without over/underflow if the final result CTO*A(I,J)/CFROM
+            can be represented without over/underflow.  CFROM must be
+            nonzero.
+
+    M       (input) INTEGER
+            The number of rows of the matrix A.  M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            The matrix to be multiplied by CTO/CFROM.  See TYPE for the
+            storage type.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    INFO    (output) INTEGER
+            0  - successful exit
+            <0 - if INFO = -i, the i-th argument had an illegal value.
+
+    =====================================================================
+
+
+    Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+
+/* Map TYPE to the integer dispatch code itype (-1 if unrecognized) */
+    if (lsame_(type__, "G")) {
+	itype = 0;
+    } else if (lsame_(type__, "L")) {
+	itype = 1;
+    } else if (lsame_(type__, "U")) {
+	itype = 2;
+    } else if (lsame_(type__, "H")) {
+	itype = 3;
+    } else if (lsame_(type__, "B")) {
+	itype = 4;
+    } else if (lsame_(type__, "Q")) {
+	itype = 5;
+    } else if (lsame_(type__, "Z")) {
+	itype = 6;
+    } else {
+	itype = -1;
+    }
+
+    if (itype == -1) {
+	*info = -1;
+    } else if (*cfrom == 0.) {
+	*info = -4;
+    } else if (*m < 0) {
+	*info = -6;
+    } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m))
+	     {
+	*info = -7;
+    } else if ((itype <= 3 && *lda < max(1,*m))) {
+	*info = -9;
+    } else if (itype >= 4) {
+/* Computing MAX */
+	i__1 = *m - 1;
+	if (*kl < 0 || *kl > max(i__1,0)) {
+	    *info = -2;
+	} else /* if(complicated condition) */ {
+/* Computing MAX */
+	    i__1 = *n - 1;
+	    if (*ku < 0 || *ku > max(i__1,0) || ((itype == 4 || itype == 5) &&
+		    *kl != *ku)) {
+		*info = -3;
+	    } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda <
+		    *ku + 1) || (itype == 6 && *lda < ((*kl) << (1)) + *ku +
+		    1)) {
+		*info = -9;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLASCL", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+	return 0;
+    }
+
+/* Get machine parameters */
+
+/* SAFEMINIMUM: machine safe minimum (dlamch "S") -- presumably a macro
+   defined earlier in this file; confirm against the file header. */
+    smlnum = SAFEMINIMUM;
+    bignum = 1. / smlnum;
+
+    cfromc = *cfrom;
+    ctoc = *cto;
+
+/*
+   Apply the factor CTO/CFROM in steps: while the remaining ratio would
+   over/underflow, multiply A by SMLNUM or BIGNUM instead and fold that
+   step into CFROMC/CTOC, so no intermediate result over/underflows.
+*/
+L10:
+    cfrom1 = cfromc * smlnum;
+    cto1 = ctoc / bignum;
+    if ((abs(cfrom1) > abs(ctoc) && ctoc != 0.)) {
+	mul = smlnum;
+	done = FALSE_;
+	cfromc = cfrom1;
+    } else if (abs(cto1) > abs(cfromc)) {
+	mul = bignum;
+	done = FALSE_;
+	ctoc = cto1;
+    } else {
+/* Remaining ratio is representable: apply it and finish. */
+	mul = ctoc / cfromc;
+	done = TRUE_;
+    }
+
+    if (itype == 0) {
+
+/* Full matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L20: */
+	    }
+/* L30: */
+	}
+
+    } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L40: */
+	    }
+/* L50: */
+	}
+
+    } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = min(j,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L60: */
+	    }
+/* L70: */
+	}
+
+    } else if (itype == 3) {
+
+/* Upper Hessenberg matrix (one subdiagonal: rows run to min(j+1,m)) */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = j + 1;
+	    i__2 = min(i__3,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L80: */
+	    }
+/* L90: */
+	}
+
+    } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+	k3 = *kl + 1;
+	k4 = *n + 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = k3, i__4 = k4 - j;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L100: */
+	    }
+/* L110: */
+	}
+
+    } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+	k1 = *ku + 2;
+	k3 = *ku + 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = k1 - j;
+	    i__3 = k3;
+	    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+		i__2 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L120: */
+	    }
+/* L130: */
+	}
+
+    } else if (itype == 6) {
+
+/* Band matrix (general, LAPACK band storage) */
+
+	k1 = *kl + *ku + 2;
+	k2 = *kl + 1;
+	k3 = ((*kl) << (1)) + *ku + 1;
+	k4 = *kl + *ku + 1 + *m;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__3 = k1 - j;
+/* Computing MIN */
+	    i__4 = k3, i__5 = k4 - j;
+	    i__2 = min(i__4,i__5);
+	    for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L140: */
+	    }
+/* L150: */
+	}
+
+    }
+
+/* Keep looping until the full CTO/CFROM factor has been applied */
+    if (! done) {
+	goto L10;
+    }
+
+    return 0;
+
+/* End of ZLASCL */
+
+} /* zlascl_ */
+
+/* Subroutine */ int zlaset_(char *uplo, integer *m, integer *n,
+	doublecomplex *alpha, doublecomplex *beta, doublecomplex *a, integer *
+	lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    static integer i__, j;
+    extern logical lsame_(char *, char *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       October 31, 1992
+
+
+    Purpose
+    =======
+
+    ZLASET initializes a 2-D array A to BETA on the diagonal and
+    ALPHA on the offdiagonals.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            Specifies the part of the matrix A to be set.
+            = 'U':      Upper triangular part is set. The lower triangle
+                        is unchanged.
+            = 'L':      Lower triangular part is set. The upper triangle
+                        is unchanged.
+            Otherwise:  All of the matrix A is set.
+
+    M       (input) INTEGER
+            On entry, M specifies the number of rows of A.
+
+    N       (input) INTEGER
+            On entry, N specifies the number of columns of A.
+
+    ALPHA   (input) COMPLEX*16
+            All the offdiagonal array elements are set to ALPHA.
+
+    BETA    (input) COMPLEX*16
+            All the diagonal array elements are set to BETA.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the m by n matrix A.
+            On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n, i.ne.j;
+                     A(i,i) = BETA , 1 <= i <= min(m,n)
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,M).
+
+    =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (lsame_(uplo, "U")) {
+
+/*
+       Set the diagonal to BETA and the strictly upper triangular
+       part of the array to ALPHA.
+*/
+
+/* Column 1 has no strictly-upper entries, so start at j = 2. */
+	i__1 = *n;
+	for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = j - 1;
+	    i__2 = min(i__3,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L10: */
+	    }
+/* L20: */
+	}
+	i__1 = min(*n,*m);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L30: */
+	}
+
+    } else if (lsame_(uplo, "L")) {
+
+/*
+       Set the diagonal to BETA and the strictly lower triangular
+       part of the array to ALPHA.
+*/
+
+	i__1 = min(*m,*n);
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L40: */
+	    }
+/* L50: */
+	}
+	i__1 = min(*n,*m);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L60: */
+	}
+
+    } else {
+
+/*
+       Set the array to BETA on the diagonal and ALPHA on the
+       offdiagonal.
+*/
+
+/* Set every entry to ALPHA, then overwrite the diagonal with BETA. */
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L70: */
+	    }
+/* L80: */
+	}
+	i__1 = min(*m,*n);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L90: */
+	}
+    }
+
+    return 0;
+
+/* End of ZLASET */
+
+} /* zlaset_ */
+
+/* Subroutine */ int zlasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, doublereal *c__, doublereal *s, doublecomplex *a,
+ integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ static integer i__, j, info;
+ static doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ static doublereal ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ ZLASR performs the transformation
+
+ A := P*A, when SIDE = 'L' or 'l' ( Left-hand side )
+
+ A := A*P', when SIDE = 'R' or 'r' ( Right-hand side )
+
+ where A is an m by n complex matrix and P is an orthogonal matrix,
+ consisting of a sequence of plane rotations determined by the
+ parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l'
+ and z = n when SIDE = 'R' or 'r' ):
+
+ When DIRECT = 'F' or 'f' ( Forward sequence ) then
+
+ P = P( z - 1 )*...*P( 2 )*P( 1 ),
+
+ and when DIRECT = 'B' or 'b' ( Backward sequence ) then
+
+ P = P( 1 )*P( 2 )*...*P( z - 1 ),
+
+ where P( k ) is a plane rotation matrix for the following planes:
+
+ when PIVOT = 'V' or 'v' ( Variable pivot ),
+ the plane ( k, k + 1 )
+
+ when PIVOT = 'T' or 't' ( Top pivot ),
+ the plane ( 1, k + 1 )
+
+ when PIVOT = 'B' or 'b' ( Bottom pivot ),
+ the plane ( k, z )
+
+ c( k ) and s( k ) must contain the cosine and sine that define the
+ matrix P( k ). The two by two plane rotation part of the matrix
+ P( k ), R( k ), is assumed to be of the form
+
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ Specifies whether the plane rotation matrix P is applied to
+ A on the left or the right.
+ = 'L': Left, compute A := P*A
+ = 'R': Right, compute A:= A*P'
+
+ DIRECT (input) CHARACTER*1
+ Specifies whether P is a forward or backward sequence of
+ plane rotations.
+ = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 )
+ = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 )
+
+ PIVOT (input) CHARACTER*1
+ Specifies the plane for which P(k) is a plane rotation
+ matrix.
+ = 'V': Variable pivot, the plane (k,k+1)
+ = 'T': Top pivot, the plane (1,k+1)
+ = 'B': Bottom pivot, the plane (k,z)
+
+ M (input) INTEGER
+ The number of rows of the matrix A. If m <= 1, an immediate
+ return is effected.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. If n <= 1, an
+ immediate return is effected.
+
+ C, S (input) DOUBLE PRECISION arrays, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ c(k) and s(k) contain the cosine and sine that define the
+ matrix P(k). The two by two plane rotation part of the
+ matrix P(k), R(k), is assumed to be of the form
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+            The m by n matrix A.  On exit, A is overwritten by P*A if
+            SIDE = 'L' or by A*P' if SIDE = 'R'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+ info = 1;
+ } else if (! (lsame_(pivot, "V") || lsame_(pivot,
+ "T") || lsame_(pivot, "B"))) {
+ info = 2;
+ } else if (! (lsame_(direct, "F") || lsame_(direct,
+ "B"))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + 1 + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = j + i__ * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + 1 + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + i__ * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ * a_dim1 + 1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ * a_dim1 + 1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ z__2.r = stemp * a[i__4].r, z__2.i = stemp * a[
+ i__4].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = *m + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ z__2.r = ctemp * a[i__4].r, z__2.i = ctemp * a[
+ i__4].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ z__2.r = stemp * a[i__3].r, z__2.i = stemp * a[
+ i__3].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = *m + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ z__2.r = ctemp * a[i__3].r, z__2.i = ctemp * a[
+ i__3].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + (j + 1) * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + (j + 1) * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ + j * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = i__ + a_dim1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = i__ + a_dim1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = i__ + a_dim1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ + a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = i__ + a_dim1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ z__2.r = stemp * a[i__4].r, z__2.i = stemp * a[
+ i__4].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + *n * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ z__2.r = ctemp * a[i__4].r, z__2.i = ctemp * a[
+ i__4].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ z__2.r = stemp * a[i__3].r, z__2.i = stemp * a[
+ i__3].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ + *n * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ z__2.r = ctemp * a[i__3].r, z__2.i = ctemp * a[
+ i__3].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZLASR */
+
+} /* zlasr_ */
+
+/* Subroutine */ int zlassq_(integer *n, doublecomplex *x, integer *incx,
+	doublereal *scale, doublereal *sumsq)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    static integer ix;
+    static doublereal temp1;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZLASSQ returns the values scl and ssq such that
+
+       ( scl**2 )*ssq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
+
+    where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is
+    assumed to be at least unity and the value of ssq will then satisfy
+
+       1.0 .le. ssq .le. ( sumsq + 2*n ).
+
+    scale is assumed to be non-negative and scl returns the value
+
+       scl = max( scale, abs( real( x( i ) ) ), abs( aimag( x( i ) ) ) ),
+              i
+
+    scale and sumsq must be supplied in SCALE and SUMSQ respectively.
+    SCALE and SUMSQ are overwritten by scl and ssq respectively.
+
+    The routine makes only one pass through the vector X.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The number of elements to be used from the vector X.
+
+    X       (input) COMPLEX*16 array, dimension (N)
+            The vector x as described above.
+               x( i )  = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
+
+    INCX    (input) INTEGER
+            The increment between successive values of the vector X.
+            INCX > 0.
+
+    SCALE   (input/output) DOUBLE PRECISION
+            On entry, the value  scale  in the equation above.
+            On exit, SCALE is overwritten with the value  scl .
+
+    SUMSQ   (input/output) DOUBLE PRECISION
+            On entry, the value  sumsq  in the equation above.
+            On exit, SUMSQ is overwritten with the value  ssq .
+
+   =====================================================================
+*/
+
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+
+/* The real and imaginary components of each element are folded into
+   (scale, sumsq) independently.  By rescaling sumsq whenever a new
+   component exceeds the current scale, every squared term computed is
+   <= 1, so the running sum of squares cannot overflow or underflow. */
+    if (*n > 0) {
+	i__1 = (*n - 1) * *incx + 1;
+	i__2 = *incx;
+/* f2c-generated DO loop: the "i__2 < 0 ?" test also covers a negative
+   stride, although the interface above requires INCX > 0. */
+	for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+	    i__3 = ix;
+	    if (x[i__3].r != 0.) {
+		i__3 = ix;
+		temp1 = (d__1 = x[i__3].r, abs(d__1));
+		if (*scale < temp1) {
+/* Computing 2nd power */
+		    d__1 = *scale / temp1;
+		    *sumsq = *sumsq * (d__1 * d__1) + 1;
+		    *scale = temp1;
+		} else {
+/* Computing 2nd power */
+		    d__1 = temp1 / *scale;
+		    *sumsq += d__1 * d__1;
+		}
+	    }
+	    if (d_imag(&x[ix]) != 0.) {
+		temp1 = (d__1 = d_imag(&x[ix]), abs(d__1));
+		if (*scale < temp1) {
+/* Computing 2nd power */
+		    d__1 = *scale / temp1;
+		    *sumsq = *sumsq * (d__1 * d__1) + 1;
+		    *scale = temp1;
+		} else {
+/* Computing 2nd power */
+		    d__1 = temp1 / *scale;
+		    *sumsq += d__1 * d__1;
+		}
+	    }
+/* L10: */
+	}
+    }
+
+    return 0;
+
+/* End of ZLASSQ */
+
+} /* zlassq_ */
+
+/* Subroutine */ int zlaswp_(integer *n, doublecomplex *a, integer *lda,
+	integer *k1, integer *k2, integer *ipiv, integer *incx)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+    /* Local variables */
+    static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+    static doublecomplex temp;
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZLASWP performs a series of row interchanges on the matrix A.
+    One row interchange is initiated for each of rows K1 through K2 of A.
+
+    Arguments
+    =========
+
+    N       (input) INTEGER
+            The number of columns of the matrix A.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the matrix of column dimension N to which the row
+            interchanges will be applied.
+            On exit, the permuted matrix.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+
+    K1      (input) INTEGER
+            The first element of IPIV for which a row interchange will
+            be done.
+
+    K2      (input) INTEGER
+            The last element of IPIV for which a row interchange will
+            be done.
+
+    IPIV    (input) INTEGER array, dimension (M*abs(INCX))
+            The vector of pivot indices.  Only the elements in positions
+            K1 through K2 of IPIV are accessed.
+            IPIV(K) = L implies rows K and L are to be interchanged.
+
+    INCX    (input) INTEGER
+            The increment between successive values of IPIV.  If IPIV
+            is negative, the pivots are applied in reverse order.
+
+    Further Details
+    ===============
+
+    Modified by
+     R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA
+
+   =====================================================================
+
+
+       Interchange row I with row IPIV(I) for each of rows K1 through K2.
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --ipiv;
+
+    /* Function Body */
+/* The sign of INCX selects the traversal direction through IPIV:
+   forward (K1..K2) for INCX > 0, reverse (K2..K1) for INCX < 0;
+   INCX == 0 performs no interchanges at all. */
+    if (*incx > 0) {
+	ix0 = *k1;
+	i1 = *k1;
+	i2 = *k2;
+	inc = 1;
+    } else if (*incx < 0) {
+	ix0 = (1 - *k2) * *incx + 1;
+	i1 = *k2;
+	i2 = *k1;
+	inc = -1;
+    } else {
+	return 0;
+    }
+
+/* Columns are processed in panels of 32 for better cache reuse
+   (R. C. Whaley's modification); n32 = 32 * (n / 32) is the number of
+   columns covered by full panels. */
+    n32 = (*n / 32) << (5);
+    if (n32 != 0) {
+	i__1 = n32;
+	for (j = 1; j <= i__1; j += 32) {
+	    ix = ix0;
+	    i__2 = i2;
+	    i__3 = inc;
+	    for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+		    {
+		ip = ipiv[ix];
+		if (ip != i__) {
+/* Swap rows i and ip within the 32 columns j..j+31. */
+		    i__4 = j + 31;
+		    for (k = j; k <= i__4; ++k) {
+			i__5 = i__ + k * a_dim1;
+			temp.r = a[i__5].r, temp.i = a[i__5].i;
+			i__5 = i__ + k * a_dim1;
+			i__6 = ip + k * a_dim1;
+			a[i__5].r = a[i__6].r, a[i__5].i = a[i__6].i;
+			i__5 = ip + k * a_dim1;
+			a[i__5].r = temp.r, a[i__5].i = temp.i;
+/* L10: */
+		    }
+		}
+		ix += *incx;
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+/* Apply the same interchanges to the remaining n - n32 trailing columns
+   (this branch also handles the whole matrix when n < 32). */
+    if (n32 != *n) {
+	++n32;
+	ix = ix0;
+	i__1 = i2;
+	i__3 = inc;
+	for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+	    ip = ipiv[ix];
+	    if (ip != i__) {
+		i__2 = *n;
+		for (k = n32; k <= i__2; ++k) {
+		    i__4 = i__ + k * a_dim1;
+		    temp.r = a[i__4].r, temp.i = a[i__4].i;
+		    i__4 = i__ + k * a_dim1;
+		    i__5 = ip + k * a_dim1;
+		    a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+		    i__4 = ip + k * a_dim1;
+		    a[i__4].r = temp.r, a[i__4].i = temp.i;
+/* L40: */
+		}
+	    }
+	    ix += *incx;
+/* L50: */
+	}
+    }
+
+    return 0;
+
+/* End of ZLASWP */
+
+} /* zlaswp_ */
+
+/* Subroutine */ int zlatrd_(char *uplo, integer *n, integer *nb,
+	doublecomplex *a, integer *lda, doublereal *e, doublecomplex *tau,
+	doublecomplex *w, integer *ldw)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Local variables */
+    static integer i__, iw;
+    static doublecomplex alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    zhemv_(char *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *), zaxpy_(integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *,
+	    integer *);
+
+
+/*
+    -- LAPACK auxiliary routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to
+    Hermitian tridiagonal form by a unitary similarity
+    transformation Q' * A * Q, and returns the matrices V and W which are
+    needed to apply the transformation to the unreduced part of A.
+
+    If UPLO = 'U', ZLATRD reduces the last NB rows and columns of a
+    matrix, of which the upper triangle is supplied;
+    if UPLO = 'L', ZLATRD reduces the first NB rows and columns of a
+    matrix, of which the lower triangle is supplied.
+
+    This is an auxiliary routine called by ZHETRD.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER
+            Specifies whether the upper or lower triangular part of the
+            Hermitian matrix A is stored:
+            = 'U': Upper triangular
+            = 'L': Lower triangular
+
+    N       (input) INTEGER
+            The order of the matrix A.
+
+    NB      (input) INTEGER
+            The number of rows and columns to be reduced.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the Hermitian matrix A.  If UPLO = 'U', the leading
+            n-by-n upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading n-by-n lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+            On exit:
+            if UPLO = 'U', the last NB columns have been reduced to
+              tridiagonal form, with the diagonal elements overwriting
+              the diagonal elements of A; the elements above the diagonal
+              with the array TAU, represent the unitary matrix Q as a
+              product of elementary reflectors;
+            if UPLO = 'L', the first NB columns have been reduced to
+              tridiagonal form, with the diagonal elements overwriting
+              the diagonal elements of A; the elements below the diagonal
+              with the array TAU, represent the  unitary matrix Q as a
+              product of elementary reflectors.
+            See Further Details.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    E       (output) DOUBLE PRECISION array, dimension (N-1)
+            If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal
+            elements of the last NB columns of the reduced matrix;
+            if UPLO = 'L', E(1:nb) contains the subdiagonal elements of
+            the first NB columns of the reduced matrix.
+
+    TAU     (output) COMPLEX*16 array, dimension (N-1)
+            The scalar factors of the elementary reflectors, stored in
+            TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'.
+            See Further Details.
+
+    W       (output) COMPLEX*16 array, dimension (LDW,NB)
+            The n-by-nb matrix W required to update the unreduced part
+            of A.
+
+    LDW     (input) INTEGER
+            The leading dimension of the array W. LDW >= max(1,N).
+
+    Further Details
+    ===============
+
+    If UPLO = 'U', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(n) H(n-1) . . . H(n-nb+1).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
+    and tau in TAU(i-1).
+
+    If UPLO = 'L', the matrix Q is represented as a product of elementary
+    reflectors
+
+       Q = H(1) H(2) . . . H(nb).
+
+    Each H(i) has the form
+
+       H(i) = I - tau * v * v'
+
+    where tau is a complex scalar, and v is a complex vector with
+    v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
+    and tau in TAU(i).
+
+    The elements of the vectors v together form the n-by-nb matrix V
+    which is needed, with W, to apply the transformation to the unreduced
+    part of the matrix, using a Hermitian rank-2k update of the form:
+    A := A - V*W' - W*V'.
+
+    The contents of A on exit are illustrated by the following examples
+    with n = 5 and nb = 2:
+
+    if UPLO = 'U':                       if UPLO = 'L':
+
+      (  a   a   a   v4  v5 )              (  d                  )
+      (      a   a   v4  v5 )              (  1   d              )
+      (          a   1   v5 )              (  v1  1   a          )
+      (              d   1  )              (  v1  v2  a   a      )
+      (                  d  )              (  v1  v2  a   a   a  )
+
+    where d denotes a diagonal element of the reduced matrix, a denotes
+    an element of the original matrix that is unchanged, and vi denotes
+    an element of the vector defining H(i).
+
+    =====================================================================
+
+
+       Quick return if possible
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --e;
+    --tau;
+    w_dim1 = *ldw;
+    w_offset = 1 + w_dim1 * 1;
+    w -= w_offset;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+
+/* NOTE: c_b59, c_b60 and c__1 are file-scope constants declared outside
+   this view -- presumably the complex constants 0, 1 and the integer
+   stride 1 used by the BLAS calls; confirm against the file header. */
+    if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+	i__1 = *n - *nb + 1;
+	for (i__ = *n; i__ >= i__1; --i__) {
+/* iw maps column i of A (counted down from n) onto column iw of the
+   nb-column workspace W. */
+	    iw = i__ - *n + *nb;
+	    if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+/* Force the diagonal entry to be exactly real before and after the
+   update (A is Hermitian, so its imaginary part is rounding noise). */
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = i__ + i__ * a_dim1;
+		d__1 = a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__, &i__2, &z__1, &a[(i__ + 1) *
+			a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+			c_b60, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__, &i__2, &z__1, &w[(iw + 1) *
+			w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b60, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = i__ + i__ * a_dim1;
+		d__1 = a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+	    }
+	    if (i__ > 1) {
+
+/*
+                Generate elementary reflector H(i) to annihilate
+                A(1:i-2,i)
+*/
+
+		i__2 = i__ - 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = i__ - 1;
+		zlarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__
+			- 1]);
+		i__2 = i__ - 1;
+		e[i__2] = alpha.r;
+/* Temporarily set v(i-1) = 1 so column i of A can serve as the
+   reflector vector v in the BLAS calls below. */
+		i__2 = i__ - 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute W(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		zhemv_("Upper", &i__2, &c_b60, &a[a_offset], lda, &a[i__ *
+			a_dim1 + 1], &c__1, &c_b59, &w[iw * w_dim1 + 1], &
+			c__1);
+		if (i__ < *n) {
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &w[(
+			    iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1],
+			    &c__1, &c_b59, &w[i__ + 1 + iw * w_dim1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[(i__ + 1) *
+			     a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+			    c__1, &c_b60, &w[iw * w_dim1 + 1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[(
+			    i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1],
+			    &c__1, &c_b59, &w[i__ + 1 + iw * w_dim1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemv_("No transpose", &i__2, &i__3, &z__1, &w[(iw + 1) *
+			    w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+			    c__1, &c_b60, &w[iw * w_dim1 + 1], &c__1);
+		}
+		i__2 = i__ - 1;
+		zscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+/* alpha := -0.5 * tau(i-1) * (w' * v); the zaxpy below then applies
+   the correction w := w + alpha * v. */
+		z__3.r = -.5, z__3.i = -0.;
+		i__2 = i__ - 1;
+		z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i =
+			 z__3.r * tau[i__2].i + z__3.i * tau[i__2].r;
+		i__3 = i__ - 1;
+		zdotc_(&z__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ *
+			a_dim1 + 1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = i__ - 1;
+		zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+			w_dim1 + 1], &c__1);
+	    }
+
+/* L10: */
+	}
+    } else {
+
+/* Reduce first NB columns of lower triangle */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+/* As in the upper case, keep the Hermitian diagonal entry exactly real
+   around the rank-2 update of column i. */
+	    i__2 = i__ + i__ * a_dim1;
+	    i__3 = i__ + i__ * a_dim1;
+	    d__1 = a[i__3].r;
+	    a[i__2].r = d__1, a[i__2].i = 0.;
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &w[i__ + w_dim1], ldw);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda,
+		     &w[i__ + w_dim1], ldw, &c_b60, &a[i__ + i__ * a_dim1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &w[i__ + w_dim1], ldw);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + w_dim1], ldw,
+		     &a[i__ + a_dim1], lda, &c_b60, &a[i__ + i__ * a_dim1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = i__ + i__ * a_dim1;
+	    i__3 = i__ + i__ * a_dim1;
+	    d__1 = a[i__3].r;
+	    a[i__2].r = d__1, a[i__2].i = 0.;
+	    if (i__ < *n) {
+
+/*
+                Generate elementary reflector H(i) to annihilate
+                A(i+2:n,i)
+*/
+
+		i__2 = i__ + 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1,
+			 &tau[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+/* Temporarily set v(i+1) = 1 so column i of A can serve as the
+   reflector vector v in the BLAS calls below. */
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute W(i+1:n,i) */
+
+		i__2 = *n - i__;
+		zhemv_("Lower", &i__2, &c_b60, &a[i__ + 1 + (i__ + 1) *
+			a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b59, &w[i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &w[i__ +
+			1 + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b59, &w[i__ * w_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 +
+			a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b60, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ +
+			1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b59, &w[i__ * w_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + 1 +
+			w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b60, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		zscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+/* alpha := -0.5 * tau(i) * (w' * v); the zaxpy below then applies
+   the correction w := w + alpha * v. */
+		z__3.r = -.5, z__3.i = -0.;
+		i__2 = i__;
+		z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i =
+			 z__3.r * tau[i__2].i + z__3.i * tau[i__2].r;
+		i__3 = *n - i__;
+		zdotc_(&z__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[
+			i__ + 1 + i__ * a_dim1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = *n - i__;
+		zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+	    }
+
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of ZLATRD */
+
+} /* zlatrd_ */
+
+/* Subroutine */ int zlatrs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, doublecomplex *a, integer *lda, doublecomplex *x,
+ doublereal *scale, doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal xj, rec, tjj;
+ static integer jinc;
+ static doublereal xbnd;
+ static integer imax;
+ static doublereal tmax;
+ static doublecomplex tjjs;
+ static doublereal xmax, grow;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ static doublereal tscal;
+ static doublecomplex uscal;
+ static integer jlast;
+ static doublecomplex csumj;
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ static logical upper;
+ extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), ztrsv_(
+ char *, char *, char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(
+ doublereal *, doublereal *);
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ static doublereal bignum;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ static logical notran;
+ static integer jfirst;
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ static doublereal smlnum;
+ static logical nounit;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1992
+
+
+ Purpose
+ =======
+
+ ZLATRS solves one of the triangular systems
+
+ A * x = s*b, A**T * x = s*b, or A**H * x = s*b,
+
+ with scaling to prevent overflow. Here A is an upper or lower
+ triangular matrix, A**T denotes the transpose of A, A**H denotes the
+ conjugate transpose of A, x and b are n-element vectors, and s is a
+ scaling factor, usually less than or equal to 1, chosen so that the
+ components of x will be less than the overflow threshold. If the
+ unscaled problem will not cause overflow, the Level 2 BLAS routine
+ ZTRSV is called. If the matrix A is singular (A(j,j) = 0 for some j),
+ then s is set to 0 and a non-trivial solution to A*x = 0 is returned.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the matrix A is upper or lower triangular.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ TRANS (input) CHARACTER*1
+ Specifies the operation applied to A.
+ = 'N': Solve A * x = s*b (No transpose)
+ = 'T': Solve A**T * x = s*b (Transpose)
+ = 'C': Solve A**H * x = s*b (Conjugate transpose)
+
+ DIAG (input) CHARACTER*1
+ Specifies whether or not the matrix A is unit triangular.
+ = 'N': Non-unit triangular
+ = 'U': Unit triangular
+
+ NORMIN (input) CHARACTER*1
+ Specifies whether CNORM has been set or not.
+ = 'Y': CNORM contains the column norms on entry
+ = 'N': CNORM is not set on entry. On exit, the norms will
+ be computed and stored in CNORM.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input) COMPLEX*16 array, dimension (LDA,N)
+ The triangular matrix A. If UPLO = 'U', the leading n by n
+ upper triangular part of the array A contains the upper
+ triangular matrix, and the strictly lower triangular part of
+ A is not referenced. If UPLO = 'L', the leading n by n lower
+ triangular part of the array A contains the lower triangular
+ matrix, and the strictly upper triangular part of A is not
+ referenced. If DIAG = 'U', the diagonal elements of A are
+ also not referenced and are assumed to be 1.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max (1,N).
+
+ X (input/output) COMPLEX*16 array, dimension (N)
+ On entry, the right hand side b of the triangular system.
+ On exit, X is overwritten by the solution vector x.
+
+ SCALE (output) DOUBLE PRECISION
+ The scaling factor s for the triangular system
+ A * x = s*b, A**T * x = s*b, or A**H * x = s*b.
+ If SCALE = 0, the matrix A is singular or badly scaled, and
+ the vector x is an exact or approximate solution to A*x = 0.
+
+ CNORM (input or output) DOUBLE PRECISION array, dimension (N)
+
+ If NORMIN = 'Y', CNORM is an input argument and CNORM(j)
+ contains the norm of the off-diagonal part of the j-th column
+ of A. If TRANS = 'N', CNORM(j) must be greater than or equal
+ to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j)
+ must be greater than or equal to the 1-norm.
+
+ If NORMIN = 'N', CNORM is an output argument and CNORM(j)
+ returns the 1-norm of the offdiagonal part of the j-th column
+ of A.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ Further Details
+ ======= =======
+
+ A rough bound on x is computed; if that is less than overflow, ZTRSV
+ is called, otherwise, specific code is used which checks for possible
+ overflow or divide-by-zero at every operation.
+
+ A columnwise scheme is used for solving A*x = b. The basic algorithm
+ if A is lower triangular is
+
+ x[1:n] := b[1:n]
+ for j = 1, ..., n
+ x(j) := x(j) / A(j,j)
+ x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j]
+ end
+
+ Define bounds on the components of x after j iterations of the loop:
+ M(j) = bound on x[1:j]
+ G(j) = bound on x[j+1:n]
+ Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}.
+
+ Then for iteration j+1 we have
+ M(j+1) <= G(j) / | A(j+1,j+1) |
+ G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] |
+ <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | )
+
+ where CNORM(j+1) is greater than or equal to the infinity-norm of
+ column j+1 of A, not counting the diagonal. Hence
+
+ G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | )
+ 1<=i<=j
+ and
+
+ |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| )
+ 1<=i< j
+
+ Since |x(j)| <= M(j), we use the Level 2 BLAS routine ZTRSV if the
+ reciprocal of the largest M(j), j=1,..,n, is larger than
+ max(underflow, 1/overflow).
+
+ The bound on x(j) is also used to determine when a step in the
+ columnwise method can be performed without fear of overflow. If
+ the computed bound is greater than a large constant, x is scaled to
+ prevent overflow, but if the bound overflows, x is set to 0, x(j) to
+ 1, and scale to 0, and a non-trivial solution to A*x = 0 is found.
+
+ Similarly, a row-wise scheme is used to solve A**T *x = b or
+ A**H *x = b. The basic algorithm for A upper triangular is
+
+ for j = 1, ..., n
+ x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j)
+ end
+
+ We simultaneously compute two bounds
+ G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j
+ M(j) = bound on x(i), 1<=i<=j
+
+ The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we
+ add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1.
+ Then the bound on x(j) is
+
+ M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) |
+
+ <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| )
+ 1<=i<=j
+
+ and we can safely call ZTRSV if 1/M(n) and 1/G(n) are both greater
+ than max(underflow, 1/overflow).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if ((! upper && ! lsame_(uplo, "L"))) {
+ *info = -1;
+ } else if (((! notran && ! lsame_(trans, "T")) && !
+ lsame_(trans, "C"))) {
+ *info = -2;
+ } else if ((! nounit && ! lsame_(diag, "U"))) {
+ *info = -3;
+ } else if ((! lsame_(normin, "Y") && ! lsame_(
+ normin, "N"))) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLATRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = SAFEMINIMUM;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum /= PRECISION;
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = dzasum_(&i__2, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = dzasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1);
+/* L20: */
+ }
+ cnorm[*n] = 0.;
+ }
+ }
+
+/*
+ Scale the column norms by TSCAL if the maximum element in CNORM is
+ greater than BIGNUM/2.
+*/
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5) {
+ tscal = 1.;
+ } else {
+ tscal = .5 / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/*
+ Compute a bound on the computed solution vector to see if the
+ Level 2 BLAS routine ZTRSV can be used.
+*/
+
+ xmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__2].r / 2., abs(d__1)) + (d__2 =
+ d_imag(&x[j]) / 2., abs(d__2));
+ xmax = max(d__3,d__4);
+/* L30: */
+ }
+ xbnd = xmax;
+
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/*
+ A is non-unit triangular.
+
+ Compute GROW = 1/G(j) and XBND = 1/M(j).
+ Initially, G(0) = max{x(i), i=1,...,n}.
+*/
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/*
+ M(j) = G(j-1) / abs(A(j,j))
+
+ Computing MIN
+*/
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/*
+ A is unit triangular.
+
+ Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}.
+
+ Computing MIN
+*/
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/*
+ A is non-unit triangular.
+
+ Compute GROW = 1/G(j) and XBND = 1/M(j).
+ Initially, M(0) = max{x(i), i=1,...,n}.
+*/
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+/* L70: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/*
+ A is unit triangular.
+
+ Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}.
+
+ Computing MIN
+*/
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/*
+ Use the Level 2 BLAS solve if the reciprocal of the bound on
+ elements of X is not too small.
+*/
+
+ ztrsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5) {
+
+/*
+ Scale X so that its components are less than or equal to
+ BIGNUM in absolute value.
+*/
+
+ *scale = bignum * .5 / xmax;
+ zdscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3].i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L110;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/*
+ Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM
+ to avoid overflow when dividing by A(j,j).
+*/
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/*
+ Scale by 1/CNORM(j) to avoid overflow when
+ multiplying x(j) times column j.
+*/
+
+ rec /= cnorm[j];
+ }
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else {
+
+/*
+ A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and
+ scale = 0, and compute a solution to A*x = 0.
+*/
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L110:
+
+/*
+ Scale x if necessary to avoid overflow when adding a
+ multiple of column j of A.
+*/
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ zdscal_(n, &c_b2210, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/*
+ Compute the update
+ x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j)
+*/
+
+ i__3 = j - 1;
+ i__4 = j;
+ z__2.r = -x[i__4].r, z__2.i = -x[i__4].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&i__3, &z__1, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ i__3 = j - 1;
+ i__ = izamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ } else {
+ if (j < *n) {
+
+/*
+ Compute the update
+ x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j)
+*/
+
+ i__3 = *n - j;
+ i__4 = j;
+ z__2.r = -x[i__4].r, z__2.i = -x[i__4].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ i__3 = *n - j;
+ i__ = j + izamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ }
+/* L120: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+ Compute x(j) = b(j) - sum A(k,j)*x(k).
+ k<>j
+*/
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/*
+ Divide by A(j,j) when scaling x if A(j,j) > 1.
+
+ Computing MIN
+*/
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if ((uscal.r == 1. && uscal.i == 0.)) {
+
+/*
+ If the scaling needed for A in the dot product is 1,
+ call ZDOTU to perform the dot product.
+*/
+
+ if (upper) {
+ i__3 = j - 1;
+ zdotu_(&z__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ zdotu_(&z__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ z__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, z__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L130: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ z__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, z__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L140: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if ((uscal.r == z__1.r && uscal.i == z__1.i)) {
+
+/*
+ Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j)
+ was not used to scale the dotproduct.
+*/
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L160;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/*
+ A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and
+ scale = 0 and compute a solution to A**T *x = 0.
+*/
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L150: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L160:
+ ;
+ } else {
+
+/*
+ Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot
+ product has already been divided by 1/A(j,j).
+*/
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L170: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+ Compute x(j) = b(j) - sum A(k,j)*x(k).
+ k<>j
+*/
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/*
+ Divide by A(j,j) when scaling x if A(j,j) > 1.
+
+ Computing MIN
+*/
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if ((uscal.r == 1. && uscal.i == 0.)) {
+
+/*
+ If the scaling needed for A in the dot product is 1,
+ call ZDOTC to perform the dot product.
+*/
+
+ if (upper) {
+ i__3 = j - 1;
+ zdotc_(&z__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ zdotc_(&z__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &a[i__ + j * a_dim1]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L180: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &a[i__ + j * a_dim1]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L190: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if ((uscal.r == z__1.r && uscal.i == z__1.i)) {
+
+/*
+ Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j)
+ was not used to scale the dotproduct.
+*/
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L210;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/*
+ A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and
+ scale = 0 and compute a solution to A**H *x = 0.
+*/
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L200: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L210:
+ ;
+ } else {
+
+/*
+ Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot
+ product has already been divided by 1/A(j,j).
+*/
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L220: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of ZLATRS */
+
+} /* zlatrs_ */
+
+/* Subroutine */ int zpotf2_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    /* NOTE(review): the 'static' storage class on these locals is an f2c
+       translation artifact (Fortran SAVE semantics); it makes this routine
+       non-reentrant and not thread-safe. */
+    static integer j;
+    static doublereal ajj;
+    extern logical lsame_(char *, char *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *);
+    static logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+	    integer *, doublecomplex *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZPOTF2 computes the Cholesky factorization of a complex Hermitian
+    positive definite matrix A.
+
+    The factorization has the form
+       A = U' * U ,  if UPLO = 'U', or
+       A = L  * L',  if UPLO = 'L',
+    where U is an upper triangular matrix and L is lower triangular.
+
+    This is the unblocked version of the algorithm, calling Level 2 BLAS.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            Specifies whether the upper or lower triangular part of the
+            Hermitian matrix A is stored.
+            = 'U':  Upper triangular
+            = 'L':  Lower triangular
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the Hermitian matrix A.  If UPLO = 'U', the leading
+            n by n upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading n by n lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+
+            On exit, if INFO = 0, the factor U or L from the Cholesky
+            factorization A = U'*U  or A = L*L'.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -k, the k-th argument had an illegal value
+            > 0: if INFO = k, the leading minor of order k is not
+                 positive definite, and the factorization could not be
+                 completed.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* NOTE(review): standard f2c idiom — the array pointer is shifted so
+       that the 1-based Fortran reference A(i,j) maps to a[i + j*a_dim1]
+       in the code below.  No element a[0] is ever touched. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOTF2", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Compute the Cholesky factorization A = U'*U. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/*           Compute U(J,J) and test for non-positive-definiteness. */
+
+	    /* ajj = Re(A(j,j)) - conj(A(1:j-1,j)) . A(1:j-1,j); for an
+	       exactly Hermitian A this quantity is real, so only the real
+	       part of the dot-product result is kept. */
+	    i__2 = j + j * a_dim1;
+	    d__1 = a[i__2].r;
+	    i__3 = j - 1;
+	    zdotc_(&z__2, &i__3, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1]
+		    , &c__1);
+	    z__1.r = d__1 - z__2.r, z__1.i = -z__2.i;
+	    ajj = z__1.r;
+	    if (ajj <= 0.) {
+		/* Not positive definite: store the offending value and
+		   report the failing minor via INFO (label L30). */
+		i__2 = j + j * a_dim1;
+		a[i__2].r = ajj, a[i__2].i = 0.;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j + j * a_dim1;
+	    a[i__2].r = ajj, a[i__2].i = 0.;
+
+/*           Compute elements J+1:N of row J. */
+
+	    /* The column A(1:j-1,j) is conjugated in place by zlacgv_
+	       before the zgemv_ update and restored afterwards by a second
+	       zlacgv_ call, so A is unchanged outside the factored part.
+	       c__1 and c_b60 are file-level constants defined elsewhere in
+	       this file (presumably INCX = 1 and the complex scalar one —
+	       confirm against their definitions). */
+	    if (j < *n) {
+		i__2 = j - 1;
+		zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+		i__2 = j - 1;
+		i__3 = *n - j;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__2, &i__3, &z__1, &a[(j + 1) * a_dim1
+			+ 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b60, &a[j + (
+			j + 1) * a_dim1], lda);
+		i__2 = j - 1;
+		zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+		i__2 = *n - j;
+		d__1 = 1. / ajj;
+		zdscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Compute the Cholesky factorization A = L*L'. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/*           Compute L(J,J) and test for non-positive-definiteness. */
+
+	    /* Same scheme as the upper case, but the dot product runs
+	       along row j (stride LDA) instead of down column j. */
+	    i__2 = j + j * a_dim1;
+	    d__1 = a[i__2].r;
+	    i__3 = j - 1;
+	    zdotc_(&z__2, &i__3, &a[j + a_dim1], lda, &a[j + a_dim1], lda);
+	    z__1.r = d__1 - z__2.r, z__1.i = -z__2.i;
+	    ajj = z__1.r;
+	    if (ajj <= 0.) {
+		i__2 = j + j * a_dim1;
+		a[i__2].r = ajj, a[i__2].i = 0.;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j + j * a_dim1;
+	    a[i__2].r = ajj, a[i__2].i = 0.;
+
+/*           Compute elements J+1:N of column J. */
+
+	    if (j < *n) {
+		i__2 = j - 1;
+		zlacgv_(&i__2, &a[j + a_dim1], lda);
+		i__2 = *n - j;
+		i__3 = j - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[j + 1 + a_dim1]
+			, lda, &a[j + a_dim1], lda, &c_b60, &a[j + 1 + j *
+			a_dim1], &c__1);
+		i__2 = j - 1;
+		zlacgv_(&i__2, &a[j + a_dim1], lda);
+		i__2 = *n - j;
+		d__1 = 1. / ajj;
+		zdscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+	    }
+/* L20: */
+	}
+    }
+    goto L40;
+
+/* L30: failure exit — j is the order of the first non-positive-definite
+   leading minor. */
+L30:
+    *info = j;
+
+L40:
+    return 0;
+
+/* End of ZPOTF2 */
+
+} /* zpotf2_ */
+
+/* Subroutine */ int zpotrf_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Local variables */
+    /* NOTE(review): 'static' locals are an f2c artifact (Fortran SAVE
+       semantics); the routine is therefore non-reentrant. */
+    static integer j, jb, nb;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zherk_(char *, char *, integer *,
+	    integer *, doublereal *, doublecomplex *, integer *, doublereal *,
+	     doublecomplex *, integer *);
+    static logical upper;
+    extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+	     doublecomplex *, integer *),
+	    zpotf2_(char *, integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZPOTRF computes the Cholesky factorization of a complex Hermitian
+    positive definite matrix A.
+
+    The factorization has the form
+       A = U**H * U,  if UPLO = 'U', or
+       A = L  * L**H,  if UPLO = 'L',
+    where U is an upper triangular matrix and L is lower triangular.
+
+    This is the block version of the algorithm, calling Level 3 BLAS.
+
+    Arguments
+    =========
+
+    UPLO    (input) CHARACTER*1
+            = 'U':  Upper triangle of A is stored;
+            = 'L':  Lower triangle of A is stored.
+
+    N       (input) INTEGER
+            The order of the matrix A.  N >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the Hermitian matrix A.  If UPLO = 'U', the leading
+            N-by-N upper triangular part of A contains the upper
+            triangular part of the matrix A, and the strictly lower
+            triangular part of A is not referenced.  If UPLO = 'L', the
+            leading N-by-N lower triangular part of A contains the lower
+            triangular part of the matrix A, and the strictly upper
+            triangular part of A is not referenced.
+
+            On exit, if INFO = 0, the factor U or L from the Cholesky
+            factorization A = U**H*U or A = L*L**H.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.  LDA >= max(1,N).
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+            > 0:  if INFO = i, the leading minor of order i is not
+                  positive definite, and the factorization could not be
+                  completed.
+
+    =====================================================================
+
+
+       Test the input parameters.
+*/
+
+    /* Parameter adjustments */
+    /* Standard f2c idiom: shift the array pointer so the 1-based Fortran
+       reference A(i,j) maps to a[i + j*a_dim1] below. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if ((! upper && ! lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "ZPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+	    ftnlen)1);
+    if (nb <= 1 || nb >= *n) {
+
+/*        Use unblocked code. */
+
+	zpotf2_(uplo, n, &a[a_offset], lda, info);
+    } else {
+
+/*        Use blocked code. */
+
+	if (upper) {
+
+/*           Compute the Cholesky factorization A = U'*U. */
+
+	    /* Process the matrix in jb-wide column panels; jb = min(nb,
+	       remaining columns).  c__1, c_n1, c_b60, c_b1015 and c_b1294
+	       are file-level constants defined elsewhere in this file
+	       (presumably 1, -1, complex one, 1.0 and -1.0 respectively —
+	       confirm against their definitions). */
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+                Update and factorize the current diagonal block and test
+                for non-positive-definiteness.
+
+   Computing MIN
+*/
+		i__3 = nb, i__4 = *n - j + 1;
+		jb = min(i__3,i__4);
+		i__3 = j - 1;
+		zherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b1294, &
+			a[j * a_dim1 + 1], lda, &c_b1015, &a[j + j * a_dim1],
+			lda);
+		zpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+		if (*info != 0) {
+		    goto L30;
+		}
+		if (j + jb <= *n) {
+
+/*                 Compute the current block row. */
+
+		    i__3 = *n - j - jb + 1;
+		    i__4 = j - 1;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemm_("Conjugate transpose", "No transpose", &jb, &i__3,
+			    &i__4, &z__1, &a[j * a_dim1 + 1], lda, &a[(j + jb)
+			    * a_dim1 + 1], lda, &c_b60, &a[j + (j + jb) *
+			    a_dim1], lda);
+		    i__3 = *n - j - jb + 1;
+		    ztrsm_("Left", "Upper", "Conjugate transpose", "Non-unit",
+			     &jb, &i__3, &c_b60, &a[j + j * a_dim1], lda, &a[
+			    j + (j + jb) * a_dim1], lda);
+		}
+/* L10: */
+	    }
+
+	} else {
+
+/*           Compute the Cholesky factorization A = L*L'. */
+
+	    /* Mirror image of the upper case: row panels updated with
+	       zherk_/zgemm_ and solved with ztrsm_ from the right. */
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+                Update and factorize the current diagonal block and test
+                for non-positive-definiteness.
+
+   Computing MIN
+*/
+		i__3 = nb, i__4 = *n - j + 1;
+		jb = min(i__3,i__4);
+		i__3 = j - 1;
+		zherk_("Lower", "No transpose", &jb, &i__3, &c_b1294, &a[j +
+			a_dim1], lda, &c_b1015, &a[j + j * a_dim1], lda);
+		zpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+		if (*info != 0) {
+		    goto L30;
+		}
+		if (j + jb <= *n) {
+
+/*                 Compute the current block column. */
+
+		    i__3 = *n - j - jb + 1;
+		    i__4 = j - 1;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemm_("No transpose", "Conjugate transpose", &i__3, &jb,
+			    &i__4, &z__1, &a[j + jb + a_dim1], lda, &a[j +
+			    a_dim1], lda, &c_b60, &a[j + jb + j * a_dim1],
+			    lda);
+		    i__3 = *n - j - jb + 1;
+		    ztrsm_("Right", "Lower", "Conjugate transpose", "Non-unit"
+			    , &i__3, &jb, &c_b60, &a[j + j * a_dim1], lda, &a[
+			    j + jb + j * a_dim1], lda);
+		}
+/* L20: */
+	    }
+	}
+    }
+    goto L40;
+
+/* L30: zpotf2_ reported that minor *info of the current jb-by-jb diagonal
+   block is not positive definite; translate that block-local index into a
+   global one by adding the panel offset j - 1. */
+L30:
+    *info = *info + j - 1;
+
+L40:
+    return 0;
+
+/* End of ZPOTRF */
+
+} /* zpotrf_ */
+
+/* Subroutine */ int zstedc_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublecomplex *z__, integer *ldz, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork,
+ integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k, m;
+ static doublereal p;
+ static integer ii, ll, end, lgn;
+ static doublereal eps, tiny;
+ extern logical lsame_(char *, char *);
+ static integer lwmin, start;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlaed0_(integer *, integer *,
+ doublereal *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, integer *, integer *);
+
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dstedc_(char *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, integer *), dlaset_(
+ char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), zlacrm_(integer *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, doublecomplex *, integer *,
+ doublereal *);
+ static integer liwmin, icompz;
+ extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+ static doublereal orgnrm;
+ static integer lrwmin;
+ static logical lquery;
+ static integer smlsiz;
+ extern /* Subroutine */ int zsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZSTEDC computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the divide and conquer method.
+ The eigenvectors of a full or band complex Hermitian matrix can also
+ be found if ZHETRD or ZHPTRD or ZHBTRD has been used to reduce this
+ matrix to tridiagonal form.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none. See DLAED3 for details.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'I': Compute eigenvectors of tridiagonal matrix also.
+ = 'V': Compute eigenvectors of original Hermitian matrix
+ also. On entry, Z contains the unitary matrix used
+ to reduce the original matrix to tridiagonal form.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N-1)
+ On entry, the subdiagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) COMPLEX*16 array, dimension (LDZ,N)
+ On entry, if COMPZ = 'V', then Z contains the unitary
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original Hermitian matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1.
+ If eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If COMPZ = 'N' or 'I', or N <= 1, LWORK must be at least 1.
+ If COMPZ = 'V' and N > 1, LWORK must be at least N*N.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ RWORK (workspace/output) DOUBLE PRECISION array,
+ dimension (LRWORK)
+ On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK.
+
+ LRWORK (input) INTEGER
+ The dimension of the array RWORK.
+ If COMPZ = 'N' or N <= 1, LRWORK must be at least 1.
+ If COMPZ = 'V' and N > 1, LRWORK must be at least
+ 1 + 3*N + 2*N*lg N + 3*N**2 ,
+ where lg( N ) = smallest integer k such
+ that 2**k >= N.
+ If COMPZ = 'I' and N > 1, LRWORK must be at least
+ 1 + 4*N + 2*N**2 .
+
+ If LRWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the RWORK array,
+ returns this value as the first entry of the RWORK array, and
+ no error message related to LRWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If COMPZ = 'N' or N <= 1, LIWORK must be at least 1.
+ If COMPZ = 'V' or N > 1, LIWORK must be at least
+ 6 + 6*N + 5*N*lg N.
+ If COMPZ = 'I' or N > 1, LIWORK must be at least
+ 3 + 5*N .
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1 * 1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (*n <= 1 || icompz <= 0) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = 1;
+ } else {
+ lgn = (integer) (log((doublereal) (*n)) / log(2.));
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (icompz == 1) {
+ lwmin = *n * *n;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3;
+ liwmin = *n * 6 + 6 + *n * 5 * lgn;
+ } else if (icompz == 2) {
+ lwmin = 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = ((*n) << (2)) + 1 + ((i__1 * i__1) << (1));
+ liwmin = *n * 5 + 3;
+ }
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ } else if ((*lwork < lwmin && ! lquery)) {
+ *info = -8;
+ } else if ((*lrwork < lrwmin && ! lquery)) {
+ *info = -10;
+ } else if ((*liwork < liwmin && ! lquery)) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSTEDC", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ if (icompz != 0) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "ZSTEDC", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ If the following conditional clause is removed, then the routine
+ will use the Divide and Conquer routine to compute only the
+ eigenvalues, which requires (3N + 3N**2) real workspace and
+ (2 + 5N + 2N lg(N)) integer workspace.
+ Since on many architectures DSTERF is much faster than any other
+ algorithm for finding eigenvalues only, it is used here
+ as the default.
+
+ If COMPZ = 'N', use DSTERF to compute the eigenvalues.
+*/
+
+ if (icompz == 0) {
+ dsterf_(n, &d__[1], &e[1], info);
+ return 0;
+ }
+
+/*
+ If N is smaller than the minimum divide size (SMLSIZ+1), then
+ solve the problem with another solver.
+*/
+
+ if (*n <= smlsiz) {
+ if (icompz == 0) {
+ dsterf_(n, &d__[1], &e[1], info);
+ return 0;
+ } else if (icompz == 2) {
+ zsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1],
+ info);
+ return 0;
+ } else {
+ zsteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1],
+ info);
+ return 0;
+ }
+ }
+
+/* If COMPZ = 'I', we simply call DSTEDC instead. */
+
+ if (icompz == 2) {
+ dlaset_("Full", n, n, &c_b324, &c_b1015, &rwork[1], n);
+ ll = *n * *n + 1;
+ i__1 = *lrwork - ll + 1;
+ dstedc_("I", n, &d__[1], &e[1], &rwork[1], n, &rwork[ll], &i__1, &
+ iwork[1], liwork, info);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * z_dim1;
+ i__4 = (j - 1) * *n + i__;
+ z__[i__3].r = rwork[i__4], z__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/*
+ From now on, only option left to be handled is COMPZ = 'V',
+ i.e. ICOMPZ = 1.
+
+ Scale.
+*/
+
+ orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.) {
+ return 0;
+ }
+
+ eps = EPSILON;
+
+ start = 1;
+
+/* while ( START <= N ) */
+
+L30:
+ if (start <= *n) {
+
+/*
+ Let END be the position of the next subdiagonal entry such that
+ E( END ) <= TINY or END = N if no such subdiagonal exists. The
+ matrix identified by the elements between START and END
+ constitutes an independent sub-problem.
+*/
+
+ end = start;
+L40:
+ if (end < *n) {
+ tiny = eps * sqrt((d__1 = d__[end], abs(d__1))) * sqrt((d__2 =
+ d__[end + 1], abs(d__2)));
+ if ((d__1 = e[end], abs(d__1)) > tiny) {
+ ++end;
+ goto L40;
+ }
+ }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+ m = end - start + 1;
+ if (m > smlsiz) {
+ *info = smlsiz;
+
+/* Scale. */
+
+ orgnrm = dlanst_("M", &m, &d__[start], &e[start]);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, &m, &c__1, &d__[
+ start], &m, info);
+ i__1 = m - 1;
+ i__2 = m - 1;
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, &i__1, &c__1, &e[
+ start], &i__2, info);
+
+ zlaed0_(n, &m, &d__[start], &e[start], &z__[start * z_dim1 + 1],
+ ldz, &work[1], n, &rwork[1], &iwork[1], info);
+ if (*info > 0) {
+ *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m
+ + 1) + start - 1;
+ return 0;
+ }
+
+/* Scale back. */
+
+ dlascl_("G", &c__0, &c__0, &c_b1015, &orgnrm, &m, &c__1, &d__[
+ start], &m, info);
+
+ } else {
+ dsteqr_("I", &m, &d__[start], &e[start], &rwork[1], &m, &rwork[m *
+ m + 1], info);
+ zlacrm_(n, &m, &z__[start * z_dim1 + 1], ldz, &rwork[1], &m, &
+ work[1], n, &rwork[m * m + 1]);
+ zlacpy_("A", n, &m, &work[1], n, &z__[start * z_dim1 + 1], ldz);
+ if (*info > 0) {
+ *info = start * (*n + 1) + end;
+ return 0;
+ }
+ }
+
+ start = end + 1;
+ goto L30;
+ }
+
+/*
+ endwhile
+
+ If the problem split any number of times, then the eigenvalues
+ will not be properly ordered. Here we permute the eigenvalues
+ (and the associated eigenvectors) into ascending order.
+*/
+
+ if (m != *n) {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L50: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L60: */
+ }
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of ZSTEDC */
+
+} /* zstedc_ */
+
+/* Subroutine */ int zsteqr_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublecomplex *z__, integer *ldz, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static doublereal b, c__, f, g;
+ static integer i__, j, k, l, m;
+ static doublereal p, r__, s;
+ static integer l1, ii, mm, lm1, mm1, nm1;
+ static doublereal rt1, rt2, eps;
+ static integer lsv;
+ static doublereal tst, eps2;
+ static integer lend, jtot;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ extern logical lsame_(char *, char *);
+ static doublereal anorm;
+ extern /* Subroutine */ int zlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), dlaev2_(doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *);
+ static integer lendm1, lendp1;
+
+ static integer iscale;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ static doublereal safmin;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ static doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ static integer lendsv;
+ static doublereal ssfmin;
+ static integer nmaxit, icompz;
+ static doublereal ssfmax;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZSTEQR computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the implicit QL or QR method.
+ The eigenvectors of a full or band complex Hermitian matrix can also
+ be found if ZHETRD or ZHPTRD or ZHBTRD has been used to reduce this
+ matrix to tridiagonal form.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'V': Compute eigenvalues and eigenvectors of the original
+ Hermitian matrix. On entry, Z must contain the
+ unitary matrix used to reduce the original matrix
+ to tridiagonal form.
+ = 'I': Compute eigenvalues and eigenvectors of the
+ tridiagonal matrix. Z is initialized to the identity
+ matrix.
+
+ N (input) INTEGER
+ The order of the matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N-1)
+ On entry, the (n-1) subdiagonal elements of the tridiagonal
+ matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) COMPLEX*16 array, dimension (LDZ, N)
+ On entry, if COMPZ = 'V', then Z contains the unitary
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original Hermitian matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1, and if
+ eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2))
+ If COMPZ = 'N', then WORK is not referenced.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm has failed to find all the eigenvalues in
+ a total of 30*N iterations; if INFO = i, then i
+ elements of E have not converged to zero; on exit, D
+ and E contain the elements of a symmetric tridiagonal
+ matrix which is unitarily similar to the original
+ matrix.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1 * 1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = EPSILON;
+/* Computing 2nd power */
+ d__1 = eps;
+ eps2 = d__1 * d__1;
+ safmin = SAFEMINIMUM;
+ safmax = 1. / safmin;
+ ssfmax = sqrt(safmax) / 3.;
+ ssfmin = sqrt(safmin) / eps2;
+
+/*
+ Compute the eigenvalues and eigenvectors of the tridiagonal
+ matrix.
+*/
+
+ if (icompz == 2) {
+ zlaset_("Full", n, n, &c_b59, &c_b60, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/*
+ Determine where the matrix splits and choose QL or QR iteration
+ for each block, according to whether top or bottom diagonal
+ element is smaller.
+*/
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (d__1 = e[m], abs(d__1));
+ if (tst == 0.) {
+ goto L30;
+ }
+ if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m
+ + 1], abs(d__2))) * eps) {
+ e[m] = 0.;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/*
+ QL Iteration
+
+ Look for small subdiagonal element.
+*/
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ + 1], abs(d__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/*
+ If remaining matrix is 2-by-2, use DLAE2 or SLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ zlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.);
+ r__ = dlapy2_(&g, &c_b1015);
+ g = d__[m] - p + e[l] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ zlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/*
+ QR Iteration
+
+ Look for small superdiagonal element.
+*/
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m - 1], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ - 1], abs(d__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/*
+ If remaining matrix is 2-by-2, use DLAE2 or SLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ zlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.);
+ r__ = dlapy2_(&g, &c_b1015);
+ g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ zlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/*
+ Check for no convergence to an eigenvalue after a total
+ of N*MAXIT iterations.
+*/
+
+ if (jtot == nmaxit) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ return 0;
+ }
+ goto L10;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ dlasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+ return 0;
+
+/* End of ZSTEQR */
+
+} /* zsteqr_ */
+
+/* Subroutine */ int ztrevc_(char *side, char *howmny, logical *select,
+ integer *n, doublecomplex *t, integer *ldt, doublecomplex *vl,
+ integer *ldvl, doublecomplex *vr, integer *ldvr, integer *mm, integer
+ *m, doublecomplex *work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, k, ii, ki, is;
+ static doublereal ulp;
+ static logical allv;
+ static doublereal unfl, ovfl, smin;
+ static logical over;
+ static doublereal scale;
+ extern logical lsame_(char *, char *);
+ static doublereal remax;
+ static logical leftv, bothv;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ static logical somev;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ static logical rightv;
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ static doublereal smlnum;
+ extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZTREVC computes some or all of the right and/or left eigenvectors of
+ a complex upper triangular matrix T.
+
+ The right eigenvector x and the left eigenvector y of T corresponding
+ to an eigenvalue w are defined by:
+
+ T*x = w*x, y'*T = w*y'
+
+ where y' denotes the conjugate transpose of the vector y.
+
+ If all eigenvectors are requested, the routine may either return the
+ matrices X and/or Y of right or left eigenvectors of T, or the
+ products Q*X and/or Q*Y, where Q is an input unitary
+ matrix. If T was obtained from the Schur factorization of an
+ original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of
+ right or left eigenvectors of A.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'R': compute right eigenvectors only;
+ = 'L': compute left eigenvectors only;
+ = 'B': compute both right and left eigenvectors.
+
+ HOWMNY (input) CHARACTER*1
+ = 'A': compute all right and/or left eigenvectors;
+ = 'B': compute all right and/or left eigenvectors,
+ and backtransform them using the input matrices
+ supplied in VR and/or VL;
+ = 'S': compute selected right and/or left eigenvectors,
+ specified by the logical array SELECT.
+
+ SELECT (input) LOGICAL array, dimension (N)
+ If HOWMNY = 'S', SELECT specifies the eigenvectors to be
+ computed.
+ If HOWMNY = 'A' or 'B', SELECT is not referenced.
+ To select the eigenvector corresponding to the j-th
+ eigenvalue, SELECT(j) must be set to .TRUE..
+
+ N (input) INTEGER
+ The order of the matrix T. N >= 0.
+
+ T (input/output) COMPLEX*16 array, dimension (LDT,N)
+ The upper triangular matrix T. T is modified, but restored
+ on exit.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= max(1,N).
+
+ VL (input/output) COMPLEX*16 array, dimension (LDVL,MM)
+ On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must
+ contain an N-by-N matrix Q (usually the unitary matrix Q of
+ Schur vectors returned by ZHSEQR).
+ On exit, if SIDE = 'L' or 'B', VL contains:
+ if HOWMNY = 'A', the matrix Y of left eigenvectors of T;
+ VL is lower triangular. The i-th column
+ VL(i) of VL is the eigenvector corresponding
+ to T(i,i).
+ if HOWMNY = 'B', the matrix Q*Y;
+ if HOWMNY = 'S', the left eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VL, in the same order as their
+ eigenvalues.
+ If SIDE = 'R', VL is not referenced.
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= max(1,N) if
+ SIDE = 'L' or 'B'; LDVL >= 1 otherwise.
+
+ VR (input/output) COMPLEX*16 array, dimension (LDVR,MM)
+ On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must
+ contain an N-by-N matrix Q (usually the unitary matrix Q of
+ Schur vectors returned by ZHSEQR).
+ On exit, if SIDE = 'R' or 'B', VR contains:
+ if HOWMNY = 'A', the matrix X of right eigenvectors of T;
+ VR is upper triangular. The i-th column
+ VR(i) of VR is the eigenvector corresponding
+ to T(i,i).
+ if HOWMNY = 'B', the matrix Q*X;
+ if HOWMNY = 'S', the right eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VR, in the same order as their
+ eigenvalues.
+ If SIDE = 'L', VR is not referenced.
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= max(1,N) if
+ SIDE = 'R' or 'B'; LDVR >= 1 otherwise.
+
+ MM (input) INTEGER
+ The number of columns in the arrays VL and/or VR. MM >= M.
+
+ M (output) INTEGER
+ The number of columns in the arrays VL and/or VR actually
+ used to store the eigenvectors. If HOWMNY = 'A' or 'B', M
+ is set to N. Each selected eigenvector occupies one
+ column.
+
+ WORK (workspace) COMPLEX*16 array, dimension (2*N)
+
+ RWORK (workspace) DOUBLE PRECISION array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The algorithm used in this program is basically backward (forward)
+ substitution, with scaling to make the code robust against
+ possible overflow.
+
+ Each eigenvector is normalized so that the element of largest
+ magnitude has magnitude 1; here the magnitude of a complex number
+ (x,y) is taken to be |x| + |y|.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1 * 1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1 * 1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1 * 1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+/*
+ Set M to the number of columns required to store the selected
+ eigenvectors.
+*/
+
+ if (somev) {
+ *m = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ *info = 0;
+ if ((! rightv && ! leftv)) {
+ *info = -1;
+ } else if (((! allv && ! over) && ! somev)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || (leftv && *ldvl < *n)) {
+ *info = -8;
+ } else if (*ldvr < 1 || (rightv && *ldvr < *n)) {
+ *info = -10;
+ } else if (*mm < *m) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (*n / ulp);
+
+/* Store the diagonal elements of T in working array WORK. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n;
+ i__3 = i__ + i__ * t_dim1;
+ work[i__2].r = t[i__3].r, work[i__2].i = t[i__3].i;
+/* L20: */
+ }
+
+/*
+ Compute 1-norm of each column of strictly upper triangular
+ part of T to control overflow in triangular solver.
+*/
+
+ rwork[1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ rwork[j] = dzasum_(&i__2, &t[j * t_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L80;
+ }
+ }
+/* Computing MAX */
+ i__1 = ki + ki * t_dim1;
+ d__3 = ulp * ((d__1 = t[i__1].r, abs(d__1)) + (d__2 = d_imag(&t[
+ ki + ki * t_dim1]), abs(d__2)));
+ smin = max(d__3,smlnum);
+
+ work[1].r = 1., work[1].i = 0.;
+
+/* Form right-hand side. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + ki * t_dim1;
+ z__1.r = -t[i__3].r, z__1.i = -t[i__3].i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L40: */
+ }
+
+/*
+ Solve the triangular system:
+ (T(1:KI-1,1:KI-1) - T(KI,KI))*X = SCALE*WORK.
+*/
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + k * t_dim1;
+ i__4 = ki + ki * t_dim1;
+ z__1.r = t[i__3].r - t[i__4].r, z__1.i = t[i__3].i - t[i__4]
+ .i;
+ t[i__2].r = z__1.r, t[i__2].i = z__1.i;
+ i__2 = k + k * t_dim1;
+ if ((d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[k + k *
+ t_dim1]), abs(d__2)) < smin) {
+ i__3 = k + k * t_dim1;
+ t[i__3].r = smin, t[i__3].i = 0.;
+ }
+/* L50: */
+ }
+
+ if (ki > 1) {
+ i__1 = ki - 1;
+ zlatrs_("Upper", "No transpose", "Non-unit", "Y", &i__1, &t[
+ t_offset], ldt, &work[1], &scale, &rwork[1], info);
+ i__1 = ki;
+ work[i__1].r = scale, work[i__1].i = 0.;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ zcopy_(&ki, &work[1], &c__1, &vr[is * vr_dim1 + 1], &c__1);
+
+ ii = izamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ i__1 = ii + is * vr_dim1;
+ remax = 1. / ((d__1 = vr[i__1].r, abs(d__1)) + (d__2 = d_imag(
+ &vr[ii + is * vr_dim1]), abs(d__2)));
+ zdscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ i__2 = k + is * vr_dim1;
+ vr[i__2].r = 0., vr[i__2].i = 0.;
+/* L60: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ z__1.r = scale, z__1.i = 0.;
+ zgemv_("N", n, &i__1, &c_b60, &vr[vr_offset], ldvr, &work[
+ 1], &c__1, &z__1, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ ii = izamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ i__1 = ii + ki * vr_dim1;
+ remax = 1. / ((d__1 = vr[i__1].r, abs(d__1)) + (d__2 = d_imag(
+ &vr[ii + ki * vr_dim1]), abs(d__2)));
+ zdscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + *n;
+ t[i__2].r = work[i__3].r, t[i__2].i = work[i__3].i;
+/* L70: */
+ }
+
+ --is;
+L80:
+ ;
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ }
+/* Computing MAX */
+ i__2 = ki + ki * t_dim1;
+ d__3 = ulp * ((d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[
+ ki + ki * t_dim1]), abs(d__2)));
+ smin = max(d__3,smlnum);
+
+ i__2 = *n;
+ work[i__2].r = 1., work[i__2].i = 0.;
+
+/* Form right-hand side. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k;
+ d_cnjg(&z__2, &t[ki + k * t_dim1]);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L90: */
+ }
+
+/*
+ Solve the triangular system:
+ (T(KI+1:N,KI+1:N) - T(KI,KI))'*X = SCALE*WORK.
+*/
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + k * t_dim1;
+ i__5 = ki + ki * t_dim1;
+ z__1.r = t[i__4].r - t[i__5].r, z__1.i = t[i__4].i - t[i__5]
+ .i;
+ t[i__3].r = z__1.r, t[i__3].i = z__1.i;
+ i__3 = k + k * t_dim1;
+ if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[k + k *
+ t_dim1]), abs(d__2)) < smin) {
+ i__4 = k + k * t_dim1;
+ t[i__4].r = smin, t[i__4].i = 0.;
+ }
+/* L100: */
+ }
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ zlatrs_("Upper", "Conjugate transpose", "Non-unit", "Y", &
+ i__2, &t[ki + 1 + (ki + 1) * t_dim1], ldt, &work[ki +
+ 1], &scale, &rwork[1], info);
+ i__2 = ki;
+ work[i__2].r = scale, work[i__2].i = 0.;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ zcopy_(&i__2, &work[ki], &c__1, &vl[ki + is * vl_dim1], &c__1)
+ ;
+
+ i__2 = *n - ki + 1;
+ ii = izamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1;
+ i__2 = ii + is * vl_dim1;
+ remax = 1. / ((d__1 = vl[i__2].r, abs(d__1)) + (d__2 = d_imag(
+ &vl[ii + is * vl_dim1]), abs(d__2)));
+ i__2 = *n - ki + 1;
+ zdscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + is * vl_dim1;
+ vl[i__3].r = 0., vl[i__3].i = 0.;
+/* L110: */
+ }
+ } else {
+ if (ki < *n) {
+ i__2 = *n - ki;
+ z__1.r = scale, z__1.i = 0.;
+ zgemv_("N", n, &i__2, &c_b60, &vl[(ki + 1) * vl_dim1 + 1],
+ ldvl, &work[ki + 1], &c__1, &z__1, &vl[ki *
+ vl_dim1 + 1], &c__1);
+ }
+
+ ii = izamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = ii + ki * vl_dim1;
+ remax = 1. / ((d__1 = vl[i__2].r, abs(d__1)) + (d__2 = d_imag(
+ &vl[ii + ki * vl_dim1]), abs(d__2)));
+ zdscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + *n;
+ t[i__3].r = work[i__4].r, t[i__3].i = work[i__4].i;
+/* L120: */
+ }
+
+ ++is;
+L130:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of ZTREVC */
+
+} /* ztrevc_ */
+
+/* Subroutine */ int zung2r_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZUNG2R generates an m by n complex matrix Q with orthonormal columns,
+ which is defined as the first n columns of a product of k elementary
+ reflectors of order m
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by ZGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by ZGEQRF in the first k columns of its array
+ argument A.
+ On exit, the m by n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX*16 array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by ZGEQRF.
+
+ WORK (workspace) COMPLEX*16 array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift the base pointers so Fortran-style 1-based
+ indexing a[i + j*a_dim1], tau[i], work[i] works in C) */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNG2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L20: */
+ }
+
+/* Apply the reflectors in reverse order: H(k), ..., H(1). */
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+/*
+ Temporarily set A(i,i) = 1: the reflector vector stored in
+ column i has an implicit unit i-th entry (ZGEQRF convention).
+*/
+ i__1 = i__ + i__ * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ zlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ i__2 = i__;
+ z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i;
+ zscal_(&i__1, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ }
+/* Diagonal entry of Q's i-th column: A(i,i) = 1 - tau(i). */
+ i__1 = i__ + i__ * a_dim1;
+ i__2 = i__;
+ z__1.r = 1. - tau[i__2].r, z__1.i = 0. - tau[i__2].i;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = l + i__ * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of ZUNG2R */
+
+} /* zung2r_ */
+
+/* Subroutine */ int zungbr_(char *vect, integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer lwkopt;
+ static logical lquery;
+ extern /* Subroutine */ int zunglq_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZUNGBR generates one of the complex unitary matrices Q or P**H
+ determined by ZGEBRD when reducing a complex matrix A to bidiagonal
+ form: A = Q * B * P**H. Q and P**H are defined as products of
+ elementary reflectors H(i) or G(i) respectively.
+
+ If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q
+ is of order M:
+ if m >= k, Q = H(1) H(2) . . . H(k) and ZUNGBR returns the first n
+ columns of Q, where m >= n >= k;
+ if m < k, Q = H(1) H(2) . . . H(m-1) and ZUNGBR returns Q as an
+ M-by-M matrix.
+
+ If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**H
+ is of order N:
+ if k < n, P**H = G(k) . . . G(2) G(1) and ZUNGBR returns the first m
+ rows of P**H, where n >= m >= k;
+ if k >= n, P**H = G(n-1) . . . G(2) G(1) and ZUNGBR returns P**H as
+ an N-by-N matrix.
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ Specifies whether the matrix Q or the matrix P**H is
+ required, as defined in the transformation applied by ZGEBRD:
+ = 'Q': generate Q;
+ = 'P': generate P**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix Q or P**H to be returned.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q or P**H to be returned.
+ N >= 0.
+ If VECT = 'Q', M >= N >= min(M,K);
+ if VECT = 'P', N >= M >= min(N,K).
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original M-by-K
+ matrix reduced by ZGEBRD.
+ If VECT = 'P', the number of rows in the original K-by-N
+ matrix reduced by ZGEBRD.
+ K >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by ZGEBRD.
+ On exit, the M-by-N matrix Q or P**H.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= M.
+
+ TAU (input) COMPLEX*16 array, dimension
+ (min(M,K)) if VECT = 'Q'
+ (min(N,K)) if VECT = 'P'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i), which determines Q or P**H, as
+ returned by ZGEBRD in its array argument TAUQ or TAUP.
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,min(M,N)).
+ For optimum performance LWORK >= min(M,N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift base pointers for 1-based Fortran indexing) */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q");
+ mn = min(*m,*n);
+/* LWORK == -1 requests a workspace-size query only (no computation). */
+ lquery = *lwork == -1;
+ if ((! wantq && ! lsame_(vect, "P"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || (wantq && (*n > *m || *n < min(*m,*k))) || (! wantq
+ && (*m > *n || *m < min(*n,*k)))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if ((*lwork < max(1,mn) && ! lquery)) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+/* Query the optimal block size for the underlying QR/LQ generator. */
+ if (wantq) {
+ nb = ilaenv_(&c__1, "ZUNGQR", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ } else {
+ nb = ilaenv_(&c__1, "ZUNGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ }
+ lwkopt = max(1,mn) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (wantq) {
+
+/*
+ Form Q, determined by a call to ZGEBRD to reduce an m-by-k
+ matrix
+*/
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ zungqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If m < k, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first row and column of Q
+ to those of the unit matrix
+*/
+
+ for (j = *m; j >= 2; --j) {
+ i__1 = j * a_dim1 + 1;
+ a[i__1].r = 0., a[i__1].i = 0.;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + (j - 1) * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L10: */
+ }
+/* L20: */
+ }
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+/* &a[(a_dim1 << 1) + 2] addresses A(2,2) in 1-based f2c indexing. */
+ zungqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/*
+ Form P', determined by a call to ZGEBRD to reduce a k-by-n
+ matrix
+*/
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ zunglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If k >= n, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ row downward, and set the first row and column of P' to
+ those of the unit matrix
+*/
+
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ - 1 + j * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L50: */
+ }
+ i__2 = j * a_dim1 + 1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+/* &a[(a_dim1 << 1) + 2] addresses A(2,2) in 1-based f2c indexing. */
+ zunglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGBR */
+
+} /* zungbr_ */
+
+/* Subroutine */ int zunghr_(integer *n, integer *ilo, integer *ihi,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer lwkopt;
+ static logical lquery;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZUNGHR generates a complex unitary matrix Q which is defined as the
+ product of IHI-ILO elementary reflectors of order N, as returned by
+ ZGEHRD:
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix Q. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ ILO and IHI must have the same values as in the previous call
+ of ZGEHRD. Q is equal to the unit matrix except in the
+ submatrix Q(ilo+1:ihi,ilo+1:ihi).
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by ZGEHRD.
+ On exit, the N-by-N unitary matrix Q.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (input) COMPLEX*16 array, dimension (N-1)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by ZGEHRD.
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= IHI-ILO.
+ For optimum performance LWORK >= (IHI-ILO)*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift base pointers for 1-based Fortran indexing) */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* nh = order of the non-identity block Q(ilo+1:ihi,ilo+1:ihi). */
+ nh = *ihi - *ilo;
+/* LWORK == -1 requests a workspace-size query only (no computation). */
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -2;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if ((*lwork < max(1,nh) && ! lquery)) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "ZUNGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lwkopt = max(1,nh) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGHR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+/*
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first ilo and the last n-ihi
+ rows and columns to those of the unit matrix
+*/
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + (j - 1) * a_dim1;
+ a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* Columns 1:ilo become unit-matrix columns. */
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L50: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L60: */
+ }
+/* Columns ihi+1:n become unit-matrix columns. */
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L70: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ zungqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGHR */
+
+} /* zunghr_ */
+
+/* Subroutine */ int zungl2_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZUNGL2 generates an m-by-n complex matrix Q with orthonormal rows,
+ which is defined as the first m rows of a product of k elementary
+ reflectors of order n
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by ZGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by ZGELQF in the first k rows of its array argument A.
+ On exit, the m by n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX*16 array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by ZGELQF.
+
+ WORK (workspace) COMPLEX*16 array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift base pointers for 1-based Fortran indexing) */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGL2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ if ((j > *k && j <= *m)) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ }
+
+/* Apply the reflectors in reverse order: H(k)', ..., H(1)'. */
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i)' to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+/*
+ Conjugate row i of A (the stored reflector vector), apply the
+ reflector with conjugated tau from the right, scale the row by
+ -tau, then conjugate it back (the second zlacgv_ below).
+*/
+ i__1 = *n - i__;
+ zlacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ if (i__ < *m) {
+/* Temporary A(i,i) = 1: the reflector has an implicit unit i-th entry. */
+ i__1 = i__ + i__ * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ d_cnjg(&z__1, &tau[i__]);
+ zlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ z__1, &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ i__2 = i__;
+ z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i;
+ zscal_(&i__1, &z__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__1 = *n - i__;
+ zlacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+/* Diagonal entry: A(i,i) = 1 - conj(tau(i)). */
+ i__1 = i__ + i__ * a_dim1;
+ d_cnjg(&z__2, &tau[i__]);
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = i__ + l * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of ZUNGL2 */
+
+} /* zungl2_ */
+
+/* Subroutine */ int zunglq_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zungl2_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ static logical lquery;
+ static integer lwkopt;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZUNGLQ generates an M-by-N complex matrix Q with orthonormal rows,
+ which is defined as the first M rows of a product of K elementary
+ reflectors of order N
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by ZGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by ZGELQF in the first k rows of its array argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX*16 array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by ZGELQF.
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit;
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "ZUNGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*m) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if ((*lwork < max(1,*m) && ! lquery)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if ((nb > 1 && nb < *k)) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGLQ", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGLQ", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (((nb >= nbmin && nb < *k) && nx < *k)) {
+
+/*
+ Use blocked code after the last block.
+ The first kk rows are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ zungl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *n - i__ + 1;
+ zlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ zlarfb_("Right", "Conjugate transpose", "Forward", "Rowwise",
+ &i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[
+ ib + 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ zungl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ i__4 = l + j * a_dim1;
+ a[i__4].r = 0., a[i__4].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGLQ */
+
+} /* zunglq_ */
+
+/* Subroutine */ int zungqr_(integer *m, integer *n, integer *k,
+	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+	work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    /* NOTE(review): f2c emits locals as "static", so this translated
+       routine is non-reentrant and not thread-safe. */
+    static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int zung2r_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    static integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *);
+    static integer lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZUNGQR generates an M-by-N complex matrix Q with orthonormal columns,
+    which is defined as the first N columns of a product of K elementary
+    reflectors of order M
+
+          Q  =  H(1) H(2) . . . H(k)
+
+    as returned by ZGEQRF.
+
+    Arguments
+    =========
+
+    M       (input) INTEGER
+            The number of rows of the matrix Q. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix Q. M >= N >= 0.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines the
+            matrix Q. N >= K >= 0.
+
+    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
+            On entry, the i-th column must contain the vector which
+            defines the elementary reflector H(i), for i = 1,2,...,k, as
+            returned by ZGEQRF in the first k columns of its array
+            argument A.
+            On exit, the M-by-N matrix Q.
+
+    LDA     (input) INTEGER
+            The first dimension of the array A. LDA >= max(1,M).
+
+    TAU     (input) COMPLEX*16 array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by ZGEQRF.
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK. LWORK >= max(1,N).
+            For optimum performance LWORK >= N*NB, where NB is the
+            optimal blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument has an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments: shift the base pointers so the 1-based
+       Fortran references A(i,j), TAU(i), WORK(i) map directly onto
+       a[i + j * a_dim1], tau[i], work[i]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    /* Query the tuned block size NB and report the optimal workspace
+       size in WORK(1) before any argument checking. */
+    nb = ilaenv_(&c__1, "ZUNGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+    lwkopt = max(1,*n) * nb;
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if ((*lwork < max(1,*n) && ! lquery)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNGQR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if ((nb > 1 && nb < *k)) {
+
+/*
+          Determine when to cross over from blocked to unblocked code.
+
+   Computing MAX
+*/
+	i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGQR", " ", m, n, k, &c_n1, (
+		ftnlen)6, (ftnlen)1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/*           Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/*
+                Not enough workspace to use optimal NB:  reduce NB and
+                determine the minimum value of NB.
+*/
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGQR", " ", m, n, k, &c_n1,
+			 (ftnlen)6, (ftnlen)1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (((nb >= nbmin && nb < *k) && nx < *k)) {
+
+/*
+          Use blocked code after the last block.
+          The first kk columns are handled by the block method.
+*/
+
+	ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/*        Set A(1:kk,kk+1:n) to zero. */
+
+	i__1 = *n;
+	for (j = kk + 1; j <= i__1; ++j) {
+	    i__2 = kk;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the last or only block. */
+
+    if (kk < *n) {
+	i__1 = *m - kk;
+	i__2 = *n - kk;
+	i__3 = *k - kk;
+	zung2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+		tau[kk + 1], &work[1], &iinfo);
+    }
+
+    if (kk > 0) {
+
+/* Use blocked code */
+
+	/* f2c translation of DO i = ki+1, 1, -nb: the reflector blocks are
+	   processed backwards, i__ = ki+1, ki+1-nb, ..., down to 1. */
+	i__1 = -nb;
+	for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+	    i__2 = nb, i__3 = *k - i__ + 1;
+	    ib = min(i__2,i__3);
+	    if (i__ + ib <= *n) {
+
+/*
+                Form the triangular factor of the block reflector
+                H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+		i__2 = *m - i__ + 1;
+		zlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/*              Apply H to A(i:m,i+ib:n) from the left */
+
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__ - ib + 1;
+		zlarfb_("Left", "No transpose", "Forward", "Columnwise", &
+			i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+			1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+			work[ib + 1], &ldwork);
+	    }
+
+/*           Apply H to rows i:m of current block */
+
+	    i__2 = *m - i__ + 1;
+	    zung2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+
+/*           Set rows 1:i-1 of current block to zero */
+
+	    i__2 = i__ + ib - 1;
+	    for (j = i__; j <= i__2; ++j) {
+		i__3 = i__ - 1;
+		for (l = 1; l <= i__3; ++l) {
+		    i__4 = l + j * a_dim1;
+		    a[i__4].r = 0., a[i__4].i = 0.;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+    /* On successful exit WORK(1) reports the workspace actually used. */
+    work[1].r = (doublereal) iws, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNGQR */
+
+} /* zungqr_ */
+
+/* Subroutine */ int zunm2l_(char *side, char *trans, integer *m, integer *n,
+	integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+	doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    /* NOTE(review): f2c "static" locals make this routine non-reentrant
+       and not thread-safe. */
+    static integer i__, i1, i2, i3, mi, ni, nq;
+    static doublecomplex aii;
+    static logical left;
+    static doublecomplex taui;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *);
+    static logical notran;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZUNM2L overwrites the general complex m-by-n matrix C with
+
+          Q * C  if SIDE = 'L' and TRANS = 'N', or
+
+          Q'* C  if SIDE = 'L' and TRANS = 'C', or
+
+          C * Q  if SIDE = 'R' and TRANS = 'N', or
+
+          C * Q' if SIDE = 'R' and TRANS = 'C',
+
+    where Q is a complex unitary matrix defined as the product of k
+    elementary reflectors
+
+          Q = H(k) . . . H(2) H(1)
+
+    as returned by ZGEQLF. Q is of order m if SIDE = 'L' and of order n
+    if SIDE = 'R'.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply Q or Q' from the Left
+            = 'R': apply Q or Q' from the Right
+
+    TRANS   (input) CHARACTER*1
+            = 'N': apply Q  (No transpose)
+            = 'C': apply Q' (Conjugate transpose)
+
+    M       (input) INTEGER
+            The number of rows of the matrix C. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C. N >= 0.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines
+            the matrix Q.
+            If SIDE = 'L', M >= K >= 0;
+            if SIDE = 'R', N >= K >= 0.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,K)
+            The i-th column must contain the vector which defines the
+            elementary reflector H(i), for i = 1,2,...,k, as returned by
+            ZGEQLF in the last k columns of its array argument A.
+            A is modified by the routine but restored on exit.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+            If SIDE = 'L', LDA >= max(1,M);
+            if SIDE = 'R', LDA >= max(1,N).
+
+    TAU     (input) COMPLEX*16 array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by ZGEQLF.
+
+    C       (input/output) COMPLEX*16 array, dimension (LDC,N)
+            On entry, the m-by-n matrix C.
+            On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace) COMPLEX*16 array, dimension
+                                     (N) if SIDE = 'L',
+                                     (M) if SIDE = 'R'
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments: shift base pointers so 1-based Fortran
+       references A(i,j), C(i,j), TAU(i), WORK(i) map onto the C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+    if (left) {
+	nq = *m;
+    } else {
+	nq = *n;
+    }
+    if ((! left && ! lsame_(side, "R"))) {
+	*info = -1;
+    } else if ((! notran && ! lsame_(trans, "C"))) {
+	*info = -2;
+    } else if (*m < 0) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*k < 0 || *k > nq) {
+	*info = -5;
+    } else if (*lda < max(1,nq)) {
+	*info = -7;
+    } else if (*ldc < max(1,*m)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNM2L", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || *k == 0) {
+	return 0;
+    }
+
+    /* Select the order in which H(i) is applied: forward (i = 1..k)
+       for (left && notran) or (right && trans), backward (i = k..1)
+       otherwise; i1 is the start, i2 the end, i3 the step. */
+    if ((left && notran) || (! left && ! notran)) {
+	i1 = 1;
+	i2 = *k;
+	i3 = 1;
+    } else {
+	i1 = *k;
+	i2 = 1;
+	i3 = -1;
+    }
+
+    if (left) {
+	ni = *n;
+    } else {
+	mi = *m;
+    }
+
+    i__1 = i2;
+    i__2 = i3;
+    for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	if (left) {
+
+/*           H(i) or H(i)' is applied to C(1:m-k+i,1:n) */
+
+	    mi = *m - *k + i__;
+	} else {
+
+/*           H(i) or H(i)' is applied to C(1:m,1:n-k+i) */
+
+	    ni = *n - *k + i__;
+	}
+
+/*        Apply H(i) or H(i)' */
+
+	if (notran) {
+	    i__3 = i__;
+	    taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+	} else {
+	    d_cnjg(&z__1, &tau[i__]);
+	    taui.r = z__1.r, taui.i = z__1.i;
+	}
+	/* The reflector vector stored in column i has an implicit unit
+	   entry at row nq-k+i: save that matrix element, overwrite it
+	   with 1 for ZLARF, then restore it afterwards. */
+	i__3 = nq - *k + i__ + i__ * a_dim1;
+	aii.r = a[i__3].r, aii.i = a[i__3].i;
+	i__3 = nq - *k + i__ + i__ * a_dim1;
+	a[i__3].r = 1., a[i__3].i = 0.;
+	zlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &taui, &c__[
+		c_offset], ldc, &work[1]);
+	i__3 = nq - *k + i__ + i__ * a_dim1;
+	a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+    }
+    return 0;
+
+/* End of ZUNM2L */
+
+} /* zunm2l_ */
+
+/* Subroutine */ int zunm2r_(char *side, char *trans, integer *m, integer *n,
+	integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+	doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    /* NOTE(review): f2c "static" locals make this routine non-reentrant
+       and not thread-safe. */
+    static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+    static doublecomplex aii;
+    static logical left;
+    static doublecomplex taui;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *);
+    static logical notran;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       September 30, 1994
+
+
+    Purpose
+    =======
+
+    ZUNM2R overwrites the general complex m-by-n matrix C with
+
+          Q * C  if SIDE = 'L' and TRANS = 'N', or
+
+          Q'* C  if SIDE = 'L' and TRANS = 'C', or
+
+          C * Q  if SIDE = 'R' and TRANS = 'N', or
+
+          C * Q' if SIDE = 'R' and TRANS = 'C',
+
+    where Q is a complex unitary matrix defined as the product of k
+    elementary reflectors
+
+          Q = H(1) H(2) . . . H(k)
+
+    as returned by ZGEQRF. Q is of order m if SIDE = 'L' and of order n
+    if SIDE = 'R'.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply Q or Q' from the Left
+            = 'R': apply Q or Q' from the Right
+
+    TRANS   (input) CHARACTER*1
+            = 'N': apply Q  (No transpose)
+            = 'C': apply Q' (Conjugate transpose)
+
+    M       (input) INTEGER
+            The number of rows of the matrix C. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C. N >= 0.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines
+            the matrix Q.
+            If SIDE = 'L', M >= K >= 0;
+            if SIDE = 'R', N >= K >= 0.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,K)
+            The i-th column must contain the vector which defines the
+            elementary reflector H(i), for i = 1,2,...,k, as returned by
+            ZGEQRF in the first k columns of its array argument A.
+            A is modified by the routine but restored on exit.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+            If SIDE = 'L', LDA >= max(1,M);
+            if SIDE = 'R', LDA >= max(1,N).
+
+    TAU     (input) COMPLEX*16 array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by ZGEQRF.
+
+    C       (input/output) COMPLEX*16 array, dimension (LDC,N)
+            On entry, the m-by-n matrix C.
+            On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace) COMPLEX*16 array, dimension
+                                     (N) if SIDE = 'L',
+                                     (M) if SIDE = 'R'
+
+    INFO    (output) INTEGER
+            = 0: successful exit
+            < 0: if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments: shift base pointers so 1-based Fortran
+       references A(i,j), C(i,j), TAU(i), WORK(i) map onto the C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+    if (left) {
+	nq = *m;
+    } else {
+	nq = *n;
+    }
+    if ((! left && ! lsame_(side, "R"))) {
+	*info = -1;
+    } else if ((! notran && ! lsame_(trans, "C"))) {
+	*info = -2;
+    } else if (*m < 0) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*k < 0 || *k > nq) {
+	*info = -5;
+    } else if (*lda < max(1,nq)) {
+	*info = -7;
+    } else if (*ldc < max(1,*m)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNM2R", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || *k == 0) {
+	return 0;
+    }
+
+    /* Select the order in which H(i) is applied: forward (i = 1..k)
+       for (left && trans) or (right && notran), backward (i = k..1)
+       otherwise; i1 is the start, i2 the end, i3 the step. */
+    if ((left && ! notran) || (! left && notran)) {
+	i1 = 1;
+	i2 = *k;
+	i3 = 1;
+    } else {
+	i1 = *k;
+	i2 = 1;
+	i3 = -1;
+    }
+
+    if (left) {
+	ni = *n;
+	jc = 1;
+    } else {
+	mi = *m;
+	ic = 1;
+    }
+
+    i__1 = i2;
+    i__2 = i3;
+    for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	if (left) {
+
+/*           H(i) or H(i)' is applied to C(i:m,1:n) */
+
+	    mi = *m - i__ + 1;
+	    ic = i__;
+	} else {
+
+/*           H(i) or H(i)' is applied to C(1:m,i:n) */
+
+	    ni = *n - i__ + 1;
+	    jc = i__;
+	}
+
+/*        Apply H(i) or H(i)' */
+
+	if (notran) {
+	    i__3 = i__;
+	    taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+	} else {
+	    d_cnjg(&z__1, &tau[i__]);
+	    taui.r = z__1.r, taui.i = z__1.i;
+	}
+	/* The reflector vector stored in column i has an implicit unit
+	   entry on the diagonal A(i,i): save it, overwrite it with 1
+	   for ZLARF, then restore it afterwards. */
+	i__3 = i__ + i__ * a_dim1;
+	aii.r = a[i__3].r, aii.i = a[i__3].i;
+	i__3 = i__ + i__ * a_dim1;
+	a[i__3].r = 1., a[i__3].i = 0.;
+	zlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &taui, &c__[ic
+		+ jc * c_dim1], ldc, &work[1]);
+	i__3 = i__ + i__ * a_dim1;
+	a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+    }
+    return 0;
+
+/* End of ZUNM2R */
+
+} /* zunm2r_ */
+
+/* Subroutine */ int zunmbr_(char *vect, char *side, char *trans, integer *m,
+	integer *n, integer *k, doublecomplex *a, integer *lda, doublecomplex
+	*tau, doublecomplex *c__, integer *ldc, doublecomplex *work, integer *
+	lwork, integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    /* NOTE(review): f2c "static" locals make this routine non-reentrant
+       and not thread-safe. */
+    static integer i1, i2, nb, mi, ni, nq, nw;
+    static logical left;
+    extern logical lsame_(char *, char *);
+    static integer iinfo;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *, ftnlen, ftnlen);
+    static logical notran, applyq;
+    static char transt[1];
+    static integer lwkopt;
+    static logical lquery;
+    extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *),
+	    zunmqr_(char *, char *, integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    If VECT = 'Q', ZUNMBR overwrites the general complex M-by-N matrix C
+    with
+                    SIDE = 'L'     SIDE = 'R'
+    TRANS = 'N':      Q * C          C * Q
+    TRANS = 'C':      Q**H * C       C * Q**H
+
+    If VECT = 'P', ZUNMBR overwrites the general complex M-by-N matrix C
+    with
+                    SIDE = 'L'     SIDE = 'R'
+    TRANS = 'N':      P * C          C * P
+    TRANS = 'C':      P**H * C       C * P**H
+
+    Here Q and P**H are the unitary matrices determined by ZGEBRD when
+    reducing a complex matrix A to bidiagonal form: A = Q * B * P**H. Q
+    and P**H are defined as products of elementary reflectors H(i) and
+    G(i) respectively.
+
+    Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the
+    order of the unitary matrix Q or P**H that is applied.
+
+    If VECT = 'Q', A is assumed to have been an NQ-by-K matrix:
+    if nq >= k, Q = H(1) H(2) . . . H(k);
+    if nq < k, Q = H(1) H(2) . . . H(nq-1).
+
+    If VECT = 'P', A is assumed to have been a K-by-NQ matrix:
+    if k < nq, P = G(1) G(2) . . . G(k);
+    if k >= nq, P = G(1) G(2) . . . G(nq-1).
+
+    Arguments
+    =========
+
+    VECT    (input) CHARACTER*1
+            = 'Q': apply Q or Q**H;
+            = 'P': apply P or P**H.
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply Q, Q**H, P or P**H from the Left;
+            = 'R': apply Q, Q**H, P or P**H from the Right.
+
+    TRANS   (input) CHARACTER*1
+            = 'N': No transpose, apply Q or P;
+            = 'C': Conjugate transpose, apply Q**H or P**H.
+
+    M       (input) INTEGER
+            The number of rows of the matrix C. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C. N >= 0.
+
+    K       (input) INTEGER
+            If VECT = 'Q', the number of columns in the original
+            matrix reduced by ZGEBRD.
+            If VECT = 'P', the number of rows in the original
+            matrix reduced by ZGEBRD.
+            K >= 0.
+
+    A       (input) COMPLEX*16 array, dimension
+                                  (LDA,min(nq,K)) if VECT = 'Q'
+                                  (LDA,nq)        if VECT = 'P'
+            The vectors which define the elementary reflectors H(i) and
+            G(i), whose products determine the matrices Q and P, as
+            returned by ZGEBRD.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+            If VECT = 'Q', LDA >= max(1,nq);
+            if VECT = 'P', LDA >= max(1,min(nq,K)).
+
+    TAU     (input) COMPLEX*16 array, dimension (min(nq,K))
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i) or G(i) which determines Q or P, as returned
+            by ZGEBRD in the array argument TAUQ or TAUP.
+
+    C       (input/output) COMPLEX*16 array, dimension (LDC,N)
+            On entry, the M-by-N matrix C.
+            On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q
+            or P*C or P**H*C or C*P or C*P**H.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.
+            If SIDE = 'L', LWORK >= max(1,N);
+            if SIDE = 'R', LWORK >= max(1,M).
+            For optimum performance LWORK >= N*NB if SIDE = 'L', and
+            LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+            blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments: shift base pointers so 1-based Fortran
+       references A(i,j), C(i,j), TAU(i), WORK(i) map onto the C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    applyq = lsame_(vect, "Q");
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+    if (left) {
+	nq = *m;
+	nw = *n;
+    } else {
+	nq = *n;
+	nw = *m;
+    }
+    if ((! applyq && ! lsame_(vect, "P"))) {
+	*info = -1;
+    } else if ((! left && ! lsame_(side, "R"))) {
+	*info = -2;
+    } else if ((! notran && ! lsame_(trans, "C"))) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*k < 0) {
+	*info = -6;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = 1, i__2 = min(nq,*k);
+	if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2)
+		)) {
+	    *info = -8;
+	} else if (*ldc < max(1,*m)) {
+	    *info = -11;
+	} else if ((*lwork < max(1,nw) && ! lquery)) {
+	    *info = -13;
+	}
+    }
+
+    if (*info == 0) {
+	/* Look up the optimal block size for the underlying ZUNMQR or
+	   ZUNMLQ call; the ILAENV name argument is SIDE//TRANS. */
+	if (applyq) {
+	    if (left) {
+/* Writing concatenation */
+		i__3[0] = 1, a__1[0] = side;
+		i__3[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		i__1 = *m - 1;
+		i__2 = *m - 1;
+		nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &i__1, n, &i__2, &c_n1, (
+			ftnlen)6, (ftnlen)2);
+	    } else {
+/* Writing concatenation */
+		i__3[0] = 1, a__1[0] = side;
+		i__3[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &i__1, &i__2, &c_n1, (
+			ftnlen)6, (ftnlen)2);
+	    }
+	} else {
+	    if (left) {
+/* Writing concatenation */
+		i__3[0] = 1, a__1[0] = side;
+		i__3[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		i__1 = *m - 1;
+		i__2 = *m - 1;
+		nb = ilaenv_(&c__1, "ZUNMLQ", ch__1, &i__1, n, &i__2, &c_n1, (
+			ftnlen)6, (ftnlen)2);
+	    } else {
+/* Writing concatenation */
+		i__3[0] = 1, a__1[0] = side;
+		i__3[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		nb = ilaenv_(&c__1, "ZUNMLQ", ch__1, m, &i__1, &i__2, &c_n1, (
+			ftnlen)6, (ftnlen)2);
+	    }
+	}
+	lwkopt = max(1,nw) * nb;
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNMBR", &i__1);
+	return 0;
+    } else if (lquery) {
+	/* FIX: workspace query must return here with WORK(1) = lwkopt, as
+	   in reference LAPACK ZUNMBR.  The previous code had an empty
+	   branch, so a query fell through, clobbered WORK(1) with 1 and
+	   (for nonzero M,N) proceeded into ZUNMQR/ZUNMLQ. */
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    work[1].r = 1., work[1].i = 0.;
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+    if (applyq) {
+
+/* Apply Q */
+
+	if (nq >= *k) {
+
+/*           Q was determined by a call to ZGEBRD with nq >= k */
+
+	    zunmqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+		    c_offset], ldc, &work[1], lwork, &iinfo);
+	} else if (nq > 1) {
+
+/*           Q was determined by a call to ZGEBRD with nq < k */
+
+	    /* Only nq-1 reflectors exist; apply them to the trailing
+	       submatrix of C, skipping its first row or column. */
+	    if (left) {
+		mi = *m - 1;
+		ni = *n;
+		i1 = 2;
+		i2 = 1;
+	    } else {
+		mi = *m;
+		ni = *n - 1;
+		i1 = 1;
+		i2 = 2;
+	    }
+	    i__1 = nq - 1;
+	    zunmqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+		    , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+	}
+    } else {
+
+/* Apply P */
+
+	/* P is applied via ZUNMLQ with the opposite transpose flag,
+	   because ZGEBRD stores P**H's reflectors. */
+	if (notran) {
+	    *(unsigned char *)transt = 'C';
+	} else {
+	    *(unsigned char *)transt = 'N';
+	}
+	if (nq > *k) {
+
+/*           P was determined by a call to ZGEBRD with nq > k */
+
+	    zunmlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+		    c_offset], ldc, &work[1], lwork, &iinfo);
+	} else if (nq > 1) {
+
+/*           P was determined by a call to ZGEBRD with nq <= k */
+
+	    /* Only nq-1 reflectors exist; apply them to the trailing
+	       submatrix of C, skipping its first row or column. */
+	    if (left) {
+		mi = *m - 1;
+		ni = *n;
+		i1 = 2;
+		i2 = 1;
+	    } else {
+		mi = *m;
+		ni = *n - 1;
+		i1 = 1;
+		i2 = 2;
+	    }
+	    i__1 = nq - 1;
+	    zunmlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1],
+		    lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1],
+		    lwork, &iinfo);
+	}
+    }
+    /* Report the optimal workspace size on successful exit. */
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMBR */
+
+} /* zunmbr_ */
+
+/* Subroutine */ int zunml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static doublecomplex aii;
+ static logical left;
+ static doublecomplex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZUNML2 overwrites the general complex m-by-n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'C', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'C',
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by ZGELQF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'C': apply Q' (Conjugate transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX*16 array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ ZGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) COMPLEX*16 array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by ZGELQF.
+
+ C (input/output) COMPLEX*16 array, dimension (LDC,N)
+ On entry, the m-by-n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX*16 array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "C"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ } else {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ }
+ if (i__ < nq) {
+ i__3 = nq - i__;
+ zlacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ i__3 = i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = 1., a[i__3].i = 0.;
+ zlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &taui, &c__[ic +
+ jc * c_dim1], ldc, &work[1]);
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+ if (i__ < nq) {
+ i__3 = nq - i__;
+ zlacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of ZUNML2 */
+
+} /* zunml2_ */
+
+/* Subroutine */ int zunmlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static doublecomplex t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int zunml2_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ static logical notran;
+ static integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ZUNMLQ overwrites the general complex M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'C': Q**H * C C * Q**H
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by ZGELQF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**H from the Left;
+ = 'R': apply Q or Q**H from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'C': Conjugate transpose, apply Q**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX*16 array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ ZGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) COMPLEX*16 array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by ZGELQF.
+
+ C (input/output) COMPLEX*16 array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) COMPLEX*16 array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1 * 1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1 * 1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if ((! left && ! lsame_(side, "R"))) {
+ *info = -1;
+ } else if ((! notran && ! lsame_(trans, "C"))) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if ((*lwork < max(1,nw) && ! lquery)) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if ((nb > 1 && nb < *k)) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ zunml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ zlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ zlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNMLQ */
+
+} /* zunmlq_ */
+
+/* NOTE(review): f2c-translated LAPACK 3.0 routine; comments only are
+   touched (including the TRANS='C' doc line, which the Fortran reference
+   spells "Conjugate transpose" like the sibling routines). */
+/* Subroutine */ int zunmql_(char *side, char *trans, integer *m, integer *n,
+        integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+        doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+        integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+            i__5;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    static integer i__;
+    /* T holds the triangular factor of a block reflector; leading
+       dimension 65 = NBMAX + 1, so NB is capped at 64 below. */
+    static doublecomplex t[4160] /* was [65][64] */;
+    static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+    static logical left;
+    extern logical lsame_(char *, char *);
+    static integer nbmin, iinfo;
+    extern /* Subroutine */ int zunm2l_(char *, char *, integer *, integer *,
+            integer *, doublecomplex *, integer *, doublecomplex *,
+            doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+            integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+            integer *, integer *, integer *, doublecomplex *, integer *,
+            doublecomplex *, integer *, doublecomplex *, integer *,
+            doublecomplex *, integer *);
+    static logical notran;
+    static integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+            doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+            integer *);
+    static integer lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZUNMQL overwrites the general complex M-by-N matrix C with
+
+                    SIDE = 'L'     SIDE = 'R'
+    TRANS = 'N':      Q * C          C * Q
+    TRANS = 'C':      Q**H * C       C * Q**H
+
+    where Q is a complex unitary matrix defined as the product of k
+    elementary reflectors
+
+          Q = H(k) . . . H(2) H(1)
+
+    as returned by ZGEQLF. Q is of order M if SIDE = 'L' and of order N
+    if SIDE = 'R'.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply Q or Q**H from the Left;
+            = 'R': apply Q or Q**H from the Right.
+
+    TRANS   (input) CHARACTER*1
+            = 'N':  No transpose, apply Q;
+            = 'C':  Conjugate transpose, apply Q**H.
+
+    M       (input) INTEGER
+            The number of rows of the matrix C. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C. N >= 0.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines
+            the matrix Q.
+            If SIDE = 'L', M >= K >= 0;
+            if SIDE = 'R', N >= K >= 0.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,K)
+            The i-th column must contain the vector which defines the
+            elementary reflector H(i), for i = 1,2,...,k, as returned by
+            ZGEQLF in the last k columns of its array argument A.
+            A is modified by the routine but restored on exit.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+            If SIDE = 'L', LDA >= max(1,M);
+            if SIDE = 'R', LDA >= max(1,N).
+
+    TAU     (input) COMPLEX*16 array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by ZGEQLF.
+
+    C       (input/output) COMPLEX*16 array, dimension (LDC,N)
+            On entry, the M-by-N matrix C.
+            On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.
+            If SIDE = 'L', LWORK >= max(1,N);
+            if SIDE = 'R', LWORK >= max(1,M).
+            For optimum performance LWORK >= N*NB if SIDE = 'L', and
+            LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+            blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+        nq = *m;
+        nw = *n;
+    } else {
+        nq = *n;
+        nw = *m;
+    }
+    if ((! left && ! lsame_(side, "R"))) {
+        *info = -1;
+    } else if ((! notran && ! lsame_(trans, "C"))) {
+        *info = -2;
+    } else if (*m < 0) {
+        *info = -3;
+    } else if (*n < 0) {
+        *info = -4;
+    } else if (*k < 0 || *k > nq) {
+        *info = -5;
+    } else if (*lda < max(1,nq)) {
+        *info = -7;
+    } else if (*ldc < max(1,*m)) {
+        *info = -10;
+    } else if ((*lwork < max(1,nw) && ! lquery)) {
+        *info = -12;
+    }
+
+    if (*info == 0) {
+
+/*
+       Determine the block size.  NB may be at most NBMAX, where NBMAX
+       is used to define the local array T.
+
+   Computing MIN
+   Writing concatenation
+*/
+        i__3[0] = 1, a__1[0] = side;
+        i__3[1] = 1, a__1[1] = trans;
+        s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+        i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMQL", ch__1, m, n, k, &c_n1, (
+                ftnlen)6, (ftnlen)2);
+        nb = min(i__1,i__2);
+        lwkopt = max(1,nw) * nb;
+        work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("ZUNMQL", &i__1);
+        return 0;
+    } else if (lquery) {
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || *k == 0) {
+        work[1].r = 1., work[1].i = 0.;
+        return 0;
+    }
+
+/* NOTE(review): blocked code is worthwhile only when NB >= NBMIN and the
+   workspace can hold NB columns; otherwise shrink NB to fit or fall back
+   to the unblocked routine ZUNM2L below. */
+    nbmin = 2;
+    ldwork = nw;
+    if ((nb > 1 && nb < *k)) {
+        iws = nw * nb;
+        if (*lwork < iws) {
+            nb = *lwork / ldwork;
+/*
+   Computing MAX
+   Writing concatenation
+*/
+            i__3[0] = 1, a__1[0] = side;
+            i__3[1] = 1, a__1[1] = trans;
+            s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+            i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMQL", ch__1, m, n, k, &c_n1, (
+                    ftnlen)6, (ftnlen)2);
+            nbmin = max(i__1,i__2);
+        }
+    } else {
+        iws = nw;
+    }
+
+    if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+        zunm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+                c_offset], ldc, &work[1], &iinfo);
+    } else {
+
+/* Use blocked code */
+
+        if ((left && notran) || (! left && ! notran)) {
+            i1 = 1;
+            i2 = *k;
+            i3 = nb;
+        } else {
+            i1 = (*k - 1) / nb * nb + 1;
+            i2 = 1;
+            i3 = -nb;
+        }
+
+        if (left) {
+            ni = *n;
+        } else {
+            mi = *m;
+        }
+
+        i__1 = i2;
+        i__2 = i3;
+        for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+            i__4 = nb, i__5 = *k - i__ + 1;
+            ib = min(i__4,i__5);
+
+/*
+             Form the triangular factor of the block reflector
+             H = H(i+ib-1) . . . H(i+1) H(i)
+*/
+
+            i__4 = nq - *k + i__ + ib - 1;
+            zlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+                    , lda, &tau[i__], t, &c__65);
+            if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+                mi = *m - *k + i__ + ib - 1;
+            } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+                ni = *n - *k + i__ + ib - 1;
+            }
+
+/* Apply H or H' */
+
+            zlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+                    i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+                    work[1], &ldwork);
+/* L10: */
+        }
+    }
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMQL */
+
+} /* zunmql_ */
+
+/* NOTE(review): f2c-translated LAPACK 3.0 routine; statement order and
+   static locals are generated code -- comments only are touched. */
+/* Subroutine */ int zunmqr_(char *side, char *trans, integer *m, integer *n,
+        integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+        doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+        integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+            i__5;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    static integer i__;
+    /* T holds the triangular factor of a block reflector; leading
+       dimension 65 = NBMAX + 1, so NB is capped at 64 below. */
+    static doublecomplex t[4160] /* was [65][64] */;
+    static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+    static logical left;
+    extern logical lsame_(char *, char *);
+    static integer nbmin, iinfo;
+    extern /* Subroutine */ int zunm2r_(char *, char *, integer *, integer *,
+            integer *, doublecomplex *, integer *, doublecomplex *,
+            doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+            integer *, integer *, ftnlen, ftnlen);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+            integer *, integer *, integer *, doublecomplex *, integer *,
+            doublecomplex *, integer *, doublecomplex *, integer *,
+            doublecomplex *, integer *);
+    static logical notran;
+    static integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+            doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+            integer *);
+    static integer lwkopt;
+    static logical lquery;
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZUNMQR overwrites the general complex M-by-N matrix C with
+
+                    SIDE = 'L'     SIDE = 'R'
+    TRANS = 'N':      Q * C          C * Q
+    TRANS = 'C':      Q**H * C       C * Q**H
+
+    where Q is a complex unitary matrix defined as the product of k
+    elementary reflectors
+
+          Q = H(1) H(2) . . . H(k)
+
+    as returned by ZGEQRF. Q is of order M if SIDE = 'L' and of order N
+    if SIDE = 'R'.
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply Q or Q**H from the Left;
+            = 'R': apply Q or Q**H from the Right.
+
+    TRANS   (input) CHARACTER*1
+            = 'N':  No transpose, apply Q;
+            = 'C':  Conjugate transpose, apply Q**H.
+
+    M       (input) INTEGER
+            The number of rows of the matrix C. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C. N >= 0.
+
+    K       (input) INTEGER
+            The number of elementary reflectors whose product defines
+            the matrix Q.
+            If SIDE = 'L', M >= K >= 0;
+            if SIDE = 'R', N >= K >= 0.
+
+    A       (input) COMPLEX*16 array, dimension (LDA,K)
+            The i-th column must contain the vector which defines the
+            elementary reflector H(i), for i = 1,2,...,k, as returned by
+            ZGEQRF in the first k columns of its array argument A.
+            A is modified by the routine but restored on exit.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+            If SIDE = 'L', LDA >= max(1,M);
+            if SIDE = 'R', LDA >= max(1,N).
+
+    TAU     (input) COMPLEX*16 array, dimension (K)
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by ZGEQRF.
+
+    C       (input/output) COMPLEX*16 array, dimension (LDC,N)
+            On entry, the M-by-N matrix C.
+            On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.
+            If SIDE = 'L', LWORK >= max(1,N);
+            if SIDE = 'R', LWORK >= max(1,M).
+            For optimum performance LWORK >= N*NB if SIDE = 'L', and
+            LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+            blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+        nq = *m;
+        nw = *n;
+    } else {
+        nq = *n;
+        nw = *m;
+    }
+    if ((! left && ! lsame_(side, "R"))) {
+        *info = -1;
+    } else if ((! notran && ! lsame_(trans, "C"))) {
+        *info = -2;
+    } else if (*m < 0) {
+        *info = -3;
+    } else if (*n < 0) {
+        *info = -4;
+    } else if (*k < 0 || *k > nq) {
+        *info = -5;
+    } else if (*lda < max(1,nq)) {
+        *info = -7;
+    } else if (*ldc < max(1,*m)) {
+        *info = -10;
+    } else if ((*lwork < max(1,nw) && ! lquery)) {
+        *info = -12;
+    }
+
+    if (*info == 0) {
+
+/*
+       Determine the block size.  NB may be at most NBMAX, where NBMAX
+       is used to define the local array T.
+
+   Computing MIN
+   Writing concatenation
+*/
+        i__3[0] = 1, a__1[0] = side;
+        i__3[1] = 1, a__1[1] = trans;
+        s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+        i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMQR", ch__1, m, n, k, &c_n1, (
+                ftnlen)6, (ftnlen)2);
+        nb = min(i__1,i__2);
+        lwkopt = max(1,nw) * nb;
+        work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("ZUNMQR", &i__1);
+        return 0;
+    } else if (lquery) {
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || *k == 0) {
+        work[1].r = 1., work[1].i = 0.;
+        return 0;
+    }
+
+/* NOTE(review): blocked code is worthwhile only when NB >= NBMIN and the
+   workspace can hold NB columns; otherwise shrink NB to fit or fall back
+   to the unblocked routine ZUNM2R below. */
+    nbmin = 2;
+    ldwork = nw;
+    if ((nb > 1 && nb < *k)) {
+        iws = nw * nb;
+        if (*lwork < iws) {
+            nb = *lwork / ldwork;
+/*
+   Computing MAX
+   Writing concatenation
+*/
+            i__3[0] = 1, a__1[0] = side;
+            i__3[1] = 1, a__1[1] = trans;
+            s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+            i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMQR", ch__1, m, n, k, &c_n1, (
+                    ftnlen)6, (ftnlen)2);
+            nbmin = max(i__1,i__2);
+        }
+    } else {
+        iws = nw;
+    }
+
+    if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+        zunm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+                c_offset], ldc, &work[1], &iinfo);
+    } else {
+
+/* Use blocked code */
+
+        if ((left && ! notran) || (! left && notran)) {
+            i1 = 1;
+            i2 = *k;
+            i3 = nb;
+        } else {
+            i1 = (*k - 1) / nb * nb + 1;
+            i2 = 1;
+            i3 = -nb;
+        }
+
+        if (left) {
+            ni = *n;
+            jc = 1;
+        } else {
+            mi = *m;
+            ic = 1;
+        }
+
+        i__1 = i2;
+        i__2 = i3;
+        for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+            i__4 = nb, i__5 = *k - i__ + 1;
+            ib = min(i__4,i__5);
+
+/*
+             Form the triangular factor of the block reflector
+             H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+            i__4 = nq - i__ + 1;
+            zlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+                    a_dim1], lda, &tau[i__], t, &c__65)
+                    ;
+            if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+                mi = *m - i__ + 1;
+                ic = i__;
+            } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+                ni = *n - i__ + 1;
+                jc = i__;
+            }
+
+/* Apply H or H' */
+
+            zlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+                    i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+                    c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+        }
+    }
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMQR */
+
+} /* zunmqr_ */
+
+/* NOTE(review): f2c-translated LAPACK 3.0 driver; delegates to ZUNMQL
+   (UPLO='U') or ZUNMQR (UPLO='L').  Comments only are touched. */
+/* Subroutine */ int zunmtr_(char *side, char *uplo, char *trans, integer *m,
+        integer *n, doublecomplex *a, integer *lda, doublecomplex *tau,
+        doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+        integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    static integer i1, i2, nb, mi, ni, nq, nw;
+    static logical left;
+    extern logical lsame_(char *, char *);
+    static integer iinfo;
+    static logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+            integer *, integer *, ftnlen, ftnlen);
+    static integer lwkopt;
+    static logical lquery;
+    extern /* Subroutine */ int zunmql_(char *, char *, integer *, integer *,
+            integer *, doublecomplex *, integer *, doublecomplex *,
+            doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+            integer *, doublecomplex *, integer *, doublecomplex *,
+            doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/*
+    -- LAPACK routine (version 3.0) --
+       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+       Courant Institute, Argonne National Lab, and Rice University
+       June 30, 1999
+
+
+    Purpose
+    =======
+
+    ZUNMTR overwrites the general complex M-by-N matrix C with
+
+                    SIDE = 'L'     SIDE = 'R'
+    TRANS = 'N':      Q * C          C * Q
+    TRANS = 'C':      Q**H * C       C * Q**H
+
+    where Q is a complex unitary matrix of order nq, with nq = m if
+    SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of
+    nq-1 elementary reflectors, as returned by ZHETRD:
+
+    if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1);
+
+    if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1).
+
+    Arguments
+    =========
+
+    SIDE    (input) CHARACTER*1
+            = 'L': apply Q or Q**H from the Left;
+            = 'R': apply Q or Q**H from the Right.
+
+    UPLO    (input) CHARACTER*1
+            = 'U': Upper triangle of A contains elementary reflectors
+                   from ZHETRD;
+            = 'L': Lower triangle of A contains elementary reflectors
+                   from ZHETRD.
+
+    TRANS   (input) CHARACTER*1
+            = 'N':  No transpose, apply Q;
+            = 'C':  Conjugate transpose, apply Q**H.
+
+    M       (input) INTEGER
+            The number of rows of the matrix C. M >= 0.
+
+    N       (input) INTEGER
+            The number of columns of the matrix C. N >= 0.
+
+    A       (input) COMPLEX*16 array, dimension
+                                 (LDA,M) if SIDE = 'L'
+                                 (LDA,N) if SIDE = 'R'
+            The vectors which define the elementary reflectors, as
+            returned by ZHETRD.
+
+    LDA     (input) INTEGER
+            The leading dimension of the array A.
+            LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'.
+
+    TAU     (input) COMPLEX*16 array, dimension
+                                 (M-1) if SIDE = 'L'
+                                 (N-1) if SIDE = 'R'
+            TAU(i) must contain the scalar factor of the elementary
+            reflector H(i), as returned by ZHETRD.
+
+    C       (input/output) COMPLEX*16 array, dimension (LDC,N)
+            On entry, the M-by-N matrix C.
+            On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)
+            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+    LWORK   (input) INTEGER
+            The dimension of the array WORK.
+            If SIDE = 'L', LWORK >= max(1,N);
+            if SIDE = 'R', LWORK >= max(1,M).
+            For optimum performance LWORK >= N*NB if SIDE = 'L', and
+            LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+            blocksize.
+
+            If LWORK = -1, then a workspace query is assumed; the routine
+            only calculates the optimal size of the WORK array, returns
+            this value as the first entry of the WORK array, and no error
+            message related to LWORK is issued by XERBLA.
+
+    INFO    (output) INTEGER
+            = 0:  successful exit
+            < 0:  if INFO = -i, the i-th argument had an illegal value
+
+    =====================================================================
+
+
+       Test the input arguments
+*/
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1 * 1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1 * 1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+        nq = *m;
+        nw = *n;
+    } else {
+        nq = *n;
+        nw = *m;
+    }
+    if ((! left && ! lsame_(side, "R"))) {
+        *info = -1;
+    } else if ((! upper && ! lsame_(uplo, "L"))) {
+        *info = -2;
+    } else if ((! lsame_(trans, "N") && ! lsame_(trans,
+            "C"))) {
+        *info = -3;
+    } else if (*m < 0) {
+        *info = -4;
+    } else if (*n < 0) {
+        *info = -5;
+    } else if (*lda < max(1,nq)) {
+        *info = -7;
+    } else if (*ldc < max(1,*m)) {
+        *info = -10;
+    } else if ((*lwork < max(1,nw) && ! lquery)) {
+        *info = -12;
+    }
+
+    if (*info == 0) {
+        if (upper) {
+            if (left) {
+/* Writing concatenation */
+                i__1[0] = 1, a__1[0] = side;
+                i__1[1] = 1, a__1[1] = trans;
+                s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+                i__2 = *m - 1;
+                i__3 = *m - 1;
+                nb = ilaenv_(&c__1, "ZUNMQL", ch__1, &i__2, n, &i__3, &c_n1, (
+                        ftnlen)6, (ftnlen)2);
+            } else {
+/* Writing concatenation */
+                i__1[0] = 1, a__1[0] = side;
+                i__1[1] = 1, a__1[1] = trans;
+                s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+                i__2 = *n - 1;
+                i__3 = *n - 1;
+                nb = ilaenv_(&c__1, "ZUNMQL", ch__1, m, &i__2, &i__3, &c_n1, (
+                        ftnlen)6, (ftnlen)2);
+            }
+        } else {
+            if (left) {
+/* Writing concatenation */
+                i__1[0] = 1, a__1[0] = side;
+                i__1[1] = 1, a__1[1] = trans;
+                s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+                i__2 = *m - 1;
+                i__3 = *m - 1;
+                nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &i__2, n, &i__3, &c_n1, (
+                        ftnlen)6, (ftnlen)2);
+            } else {
+/* Writing concatenation */
+                i__1[0] = 1, a__1[0] = side;
+                i__1[1] = 1, a__1[1] = trans;
+                s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+                i__2 = *n - 1;
+                i__3 = *n - 1;
+                nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &i__2, &i__3, &c_n1, (
+                        ftnlen)6, (ftnlen)2);
+            }
+        }
+        lwkopt = max(1,nw) * nb;
+        work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+        i__2 = -(*info);
+        xerbla_("ZUNMTR", &i__2);
+        return 0;
+    } else if (lquery) {
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || nq == 1) {
+        work[1].r = 1., work[1].i = 0.;
+        return 0;
+    }
+
+/* NOTE(review): Q has order nq-1, so it acts on an (m-1)- or (n-1)-sized
+   slab of C; the delegated calls below are made with nq-1 reflectors. */
+    if (left) {
+        mi = *m - 1;
+        ni = *n;
+    } else {
+        mi = *m;
+        ni = *n - 1;
+    }
+
+    if (upper) {
+
+/* Q was determined by a call to ZHETRD with UPLO = 'U' */
+
+        i__2 = nq - 1;
+        zunmql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda,
+                &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+    } else {
+
+/* Q was determined by a call to ZHETRD with UPLO = 'L' */
+
+        if (left) {
+            i1 = 2;
+            i2 = 1;
+        } else {
+            i1 = 1;
+            i2 = 2;
+        }
+        i__2 = nq - 1;
+        zunmqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+                c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+    }
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMTR */
+
+} /* zunmtr_ */
+
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
new file mode 100644
index 000000000..08cd57559
--- /dev/null
+++ b/numpy/distutils/__init__.py
@@ -0,0 +1,16 @@
+
+from __version__ import version as __version__
+# Must import local ccompiler ASAP in order to get
+# customized CCompiler.spawn effective.
+import ccompiler
+import unixccompiler
+
+try:
+ import __config__
+ _INSTALLED = True
+except ImportError:
+ _INSTALLED = False
+
+if _INSTALLED:
+ from scipy.testing import ScipyTest
+ test = ScipyTest().test
diff --git a/numpy/distutils/__version__.py b/numpy/distutils/__version__.py
new file mode 100644
index 000000000..06077f79c
--- /dev/null
+++ b/numpy/distutils/__version__.py
@@ -0,0 +1,4 @@
+major = 0
+minor = 4
+micro = 0
+version = '%(major)d.%(minor)d.%(micro)d' % (locals())
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
new file mode 100644
index 000000000..2e06e6993
--- /dev/null
+++ b/numpy/distutils/ccompiler.py
@@ -0,0 +1,359 @@
+
+import re
+import os
+import sys
+import new
+
+from distutils.ccompiler import *
+from distutils import ccompiler
+from distutils.sysconfig import customize_compiler
+from distutils.version import LooseVersion
+
+import log
+from exec_command import exec_command
+from misc_util import cyg2win32
+from distutils.spawn import _nt_quote_args
+
+# Using customized CCompiler.spawn.
+def CCompiler_spawn(self, cmd, display=None):
+ if display is None:
+ display = cmd
+ if type(display) is type([]): display = ' '.join(display)
+ log.info(display)
+ if type(cmd) is type([]) and os.name == 'nt':
+ cmd = _nt_quote_args(cmd)
+ s,o = exec_command(cmd)
+ if s:
+ if type(cmd) is type([]):
+ cmd = ' '.join(cmd)
+ print o
+ raise DistutilsExecError,\
+ 'Command "%s" failed with exit status %d' % (cmd, s)
+CCompiler.spawn = new.instancemethod(CCompiler_spawn,None,CCompiler)
+
+def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ base, ext = os.path.splitext(os.path.normpath(src_name))
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if base.startswith('..'):
+ # Resolve starting relative path components, middle ones
+ # (if any) have been handled by os.path.normpath above.
+ i = base.rfind('..')+2
+ d = base[:i]
+ d = os.path.basename(os.path.abspath(d))
+ base = d + base[i:]
+ if ext not in self.src_extensions:
+ raise UnknownFileError, \
+ "unknown file type '%s' (from '%s')" % (ext, src_name)
+ if strip_dir:
+ base = os.path.basename(base)
+ obj_name = os.path.join(output_dir,base + self.obj_extension)
+ obj_names.append(obj_name)
+ return obj_names
+
+CCompiler.object_filenames = new.instancemethod(CCompiler_object_filenames,
+ None,CCompiler)
+
+def CCompiler_compile(self, sources, output_dir=None, macros=None,
+ include_dirs=None, debug=0, extra_preargs=None,
+ extra_postargs=None, depends=None):
+ # This method is effective only with Python >=2.3 distutils.
+ # Any changes here should be applied also to fcompiler.compile
+ # method to support pre Python 2.3 distutils.
+ if not sources:
+ return []
+ from fcompiler import FCompiler
+ if isinstance(self, FCompiler):
+ display = []
+ for fc in ['f77','f90','fix']:
+ fcomp = getattr(self,'compiler_'+fc)
+ if fcomp is None:
+ continue
+ display.append("%s(%s) options: '%s'" % (os.path.basename(fcomp[0]),
+ fc,
+ ' '.join(fcomp[1:])))
+ display = '\n'.join(display)
+ else:
+ ccomp = self.compiler_so
+ display = "%s options: '%s'" % (os.path.basename(ccomp[0]),
+ ' '.join(ccomp[1:]))
+ log.info(display)
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+ display = "compile options: '%s'" % (' '.join(cc_args))
+ if extra_postargs:
+ display += "\nextra options: '%s'" % (' '.join(extra_postargs))
+ log.info(display)
+
+ # build any sources in same order as they were originally specified
+ # especially important for fortran .f90 files using modules
+ if isinstance(self, FCompiler):
+ objects_to_build = build.keys()
+ for obj in objects:
+ if obj in objects_to_build:
+ src, ext = build[obj]
+ if self.compiler_type=='absoft':
+ obj = cyg2win32(obj)
+ src = cyg2win32(src)
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+ else:
+ for obj, (src, ext) in build.items():
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
+ # Return *all* object filenames, not just the ones we just built.
+ return objects
+
+CCompiler.compile = new.instancemethod(CCompiler_compile,None,CCompiler)
+
+def CCompiler_customize_cmd(self, cmd):
+ """ Customize compiler using distutils command.
+ """
+ log.info('customize %s using %s' % (self.__class__.__name__,
+ cmd.__class__.__name__))
+ if getattr(cmd,'include_dirs',None) is not None:
+ self.set_include_dirs(cmd.include_dirs)
+ if getattr(cmd,'define',None) is not None:
+ for (name,value) in cmd.define:
+ self.define_macro(name, value)
+ if getattr(cmd,'undef',None) is not None:
+ for macro in cmd.undef:
+ self.undefine_macro(macro)
+ if getattr(cmd,'libraries',None) is not None:
+ self.set_libraries(self.libraries + cmd.libraries)
+ if getattr(cmd,'library_dirs',None) is not None:
+ self.set_library_dirs(self.library_dirs + cmd.library_dirs)
+ if getattr(cmd,'rpath',None) is not None:
+ self.set_runtime_library_dirs(cmd.rpath)
+ if getattr(cmd,'link_objects',None) is not None:
+ self.set_link_objects(cmd.link_objects)
+ return
+
+CCompiler.customize_cmd = new.instancemethod(\
+ CCompiler_customize_cmd,None,CCompiler)
+
+def _compiler_to_string(compiler):
+ props = []
+ mx = 0
+ keys = compiler.executables.keys()
+ for key in ['version','libraries','library_dirs',
+ 'object_switch','compile_switch',
+ 'include_dirs','define','undef','rpath','link_objects']:
+ if key not in keys:
+ keys.append(key)
+ for key in keys:
+ if hasattr(compiler,key):
+ v = getattr(compiler, key)
+ mx = max(mx,len(key))
+ props.append((key,`v`))
+ lines = []
+ format = '%-' +`mx+1`+ 's = %s'
+ for prop in props:
+ lines.append(format % prop)
+ return '\n'.join(lines)
+
+def CCompiler_show_customization(self):
+ if 0:
+ for attrname in ['include_dirs','define','undef',
+ 'libraries','library_dirs',
+ 'rpath','link_objects']:
+ attr = getattr(self,attrname,None)
+ if not attr:
+ continue
+ log.info("compiler '%s' is set to %s" % (attrname,attr))
+ try: self.get_version()
+ except: pass
+ if log._global_log.threshold<2:
+ print '*'*80
+ print self.__class__
+ print _compiler_to_string(self)
+ print '*'*80
+
+CCompiler.show_customization = new.instancemethod(\
+ CCompiler_show_customization,None,CCompiler)
+
+
+def CCompiler_customize(self, dist, need_cxx=0):
+ # See FCompiler.customize for suggested usage.
+ log.info('customize %s' % (self.__class__.__name__))
+ customize_compiler(self)
+ if need_cxx:
+ if hasattr(self,'compiler') and self.compiler[0].find('gcc')>=0:
+ if sys.version[:3]>='2.3':
+ if not self.compiler_cxx:
+ self.compiler_cxx = [self.compiler[0].replace('gcc','g++')]\
+ + self.compiler[1:]
+ else:
+ self.compiler_cxx = [self.compiler[0].replace('gcc','g++')]\
+ + self.compiler[1:]
+ else:
+ log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
+ return
+
+CCompiler.customize = new.instancemethod(\
+ CCompiler_customize,None,CCompiler)
+
+def CCompiler_get_version(self, force=0, ok_status=[0]):
+ """ Compiler version. Returns None if compiler is not available. """
+ if not force and hasattr(self,'version'):
+ return self.version
+ if not (hasattr(self,'version_cmd') and
+ hasattr(self,'version_pattern')):
+ #log.warn('%s does not provide version_cmd and version_pattern attributes' \
+ # % (self.__class__))
+ return
+
+ cmd = ' '.join(self.version_cmd)
+ status, output = exec_command(cmd,use_tee=0)
+ version = None
+ if status in ok_status:
+ m = re.match(self.version_pattern,output)
+ if m:
+ version = m.group('version')
+ assert version,`version`
+ version = LooseVersion(version)
+ self.version = version
+ return version
+
+CCompiler.get_version = new.instancemethod(\
+ CCompiler_get_version,None,CCompiler)
+
+compiler_class['intel'] = ('intelccompiler','IntelCCompiler',
+ "Intel C Compiler for 32-bit applications")
+compiler_class['intele'] = ('intelccompiler','IntelItaniumCCompiler',
+ "Intel C Itanium Compiler for Itanium-based applications")
+ccompiler._default_compilers = ccompiler._default_compilers \
+ + (('linux.*','intel'),('linux.*','intele'))
+
+if sys.platform == 'win32':
+ compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
+ "Mingw32 port of GNU C Compiler for Win32"\
+ "(for MSC built Python)")
+ if os.environ.get('OSTYPE','')=='msys' or \
+ os.environ.get('MSYSTEM','')=='MINGW32':
+ # On windows platforms, we want to default to mingw32 (gcc)
+ # because msvc can't build blitz stuff.
+ log.info('Setting mingw32 as default compiler for nt.')
+ ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ + ccompiler._default_compilers
+
+
+_distutils_new_compiler = new_compiler
+def new_compiler (plat=None,
+ compiler=None,
+ verbose=0,
+ dry_run=0,
+ force=0):
+ # Try first C compilers from scipy.distutils.
+ if plat is None:
+ plat = os.name
+ try:
+ if compiler is None:
+ compiler = get_default_compiler(plat)
+ (module_name, class_name, long_description) = compiler_class[compiler]
+ except KeyError:
+ msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler" % compiler
+ raise DistutilsPlatformError, msg
+ module_name = "scipy.distutils." + module_name
+ try:
+ __import__ (module_name)
+ except ImportError, msg:
+ print msg,'in scipy.distutils, trying from distutils..'
+ module_name = module_name[6:]
+ try:
+ __import__(module_name)
+ except ImportError, msg:
+ raise DistutilsModuleError, \
+ "can't compile C/C++ code: unable to load module '%s'" % \
+ module_name
+ try:
+ module = sys.modules[module_name]
+ klass = vars(module)[class_name]
+ except KeyError:
+ raise DistutilsModuleError, \
+ ("can't compile C/C++ code: unable to find class '%s' " +
+ "in module '%s'") % (class_name, module_name)
+ compiler = klass(None, dry_run, force)
+ log.debug('new_fcompiler returns %s' % (klass))
+ return compiler
+
+ccompiler.new_compiler = new_compiler
+
+
+_distutils_gen_lib_options = gen_lib_options
+def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
+ r = _distutils_gen_lib_options(compiler, library_dirs,
+ runtime_library_dirs, libraries)
+ lib_opts = []
+ for i in r:
+ if type(i) is type([]):
+ lib_opts.extend(i)
+ else:
+ lib_opts.append(i)
+ return lib_opts
+ccompiler.gen_lib_options = gen_lib_options
+
+
+##Fix distutils.util.split_quoted:
+import re,string
+_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
+_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
+_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
+_has_white_re = re.compile(r'\s')
+def split_quoted(s):
+ s = string.strip(s)
+ words = []
+ pos = 0
+
+ while s:
+ m = _wordchars_re.match(s, pos)
+ end = m.end()
+ if end == len(s):
+ words.append(s[:end])
+ break
+
+ if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
+ words.append(s[:end]) # we definitely have a word delimiter
+ s = string.lstrip(s[end:])
+ pos = 0
+
+ elif s[end] == '\\': # preserve whatever is being escaped;
+ # will become part of the current word
+ s = s[:end] + s[end+1:]
+ pos = end+1
+
+ else:
+ if s[end] == "'": # slurp singly-quoted string
+ m = _squote_re.match(s, end)
+ elif s[end] == '"': # slurp doubly-quoted string
+ m = _dquote_re.match(s, end)
+ else:
+ raise RuntimeError, \
+ "this can't happen (bad char '%c')" % s[end]
+
+ if m is None:
+ raise ValueError, \
+ "bad string (mismatched %s quotes?)" % s[end]
+
+ (beg, end) = m.span()
+ if _has_white_re.search(s[beg+1:end-1]):
+ s = s[:beg] + s[beg+1:end-1] + s[end:]
+ pos = m.end() - 2
+ else:
+ # Keeping quotes when a quoted word does not contain
+ # white-space. XXX: send a patch to distutils
+ pos = m.end()
+
+ if pos >= len(s):
+ words.append(s)
+ break
+
+ return words
+ccompiler.split_quoted = split_quoted
diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py
new file mode 100644
index 000000000..5ec3e370a
--- /dev/null
+++ b/numpy/distutils/command/__init__.py
@@ -0,0 +1,31 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands."""
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [ 'build_py',
+ 'clean',
+ 'install_lib',
+ 'install_scripts',
+ 'bdist',
+ 'bdist_dumb',
+ 'bdist_wininst',
+ ]
+
+__import__('distutils.command',globals(),locals(),distutils_all)
+
+__all__ = ['build',
+ 'config_compiler',
+ 'config',
+ 'build_src',
+ 'build_ext',
+ 'build_clib',
+ 'build_scripts',
+ 'install',
+ 'install_data',
+ 'install_headers',
+ 'bdist_rpm',
+ 'sdist',
+ ] + distutils_all
diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py
new file mode 100644
index 000000000..04aaea33b
--- /dev/null
+++ b/numpy/distutils/command/bdist_rpm.py
@@ -0,0 +1,18 @@
+
+import os
+import sys
+from distutils.command.bdist_rpm import *
+from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+
+class bdist_rpm(old_bdist_rpm):
+
+ def _make_spec_file(self):
+ spec_file = old_bdist_rpm._make_spec_file(self)
+ setup_py = os.path.basename(sys.argv[0])
+ if setup_py == 'setup.py':
+ return spec_file
+ new_spec_file = []
+ for line in spec_file:
+ line = line.replace('setup.py',setup_py)
+ new_spec_file.append(line)
+ return new_spec_file
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
new file mode 100644
index 000000000..c7872b6ff
--- /dev/null
+++ b/numpy/distutils/command/build.py
@@ -0,0 +1,8 @@
+
+from distutils.command.build import build as old_build
+
+class build(old_build):
+
+ sub_commands = [('config_fc', lambda *args: 1),
+ ('build_src', old_build.has_ext_modules),
+ ] + old_build.sub_commands
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
new file mode 100644
index 000000000..7f7c54d8d
--- /dev/null
+++ b/numpy/distutils/command/build_clib.py
@@ -0,0 +1,185 @@
+""" Modified version of build_clib that handles fortran source files.
+"""
+
+import os
+import string
+import sys
+import re
+from glob import glob
+from types import *
+from distutils.command.build_clib import build_clib as old_build_clib
+from distutils.command.build_clib import show_compilers
+
+from scipy.distutils import log
+from distutils.dep_util import newer_group
+from scipy.distutils.misc_util import filter_sources, has_f_sources,\
+ has_cxx_sources, all_strings, get_lib_source_files
+
+class build_clib(old_build_clib):
+
+ description = "build C/C++/F libraries used by Python extensions"
+
+ user_options = old_build_clib.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ]
+
+ def initialize_options(self):
+ old_build_clib.initialize_options(self)
+ self.fcompiler = None
+ return
+
+ def finalize_options(self):
+ old_build_clib.finalize_options(self)
+ self.set_undefined_options('build_ext',
+ ('fcompiler', 'fcompiler'))
+ return
+
+ def have_f_sources(self):
+ for (lib_name, build_info) in self.libraries:
+ if has_f_sources(build_info.get('sources',[])):
+ return True
+ return False
+
+ def have_cxx_sources(self):
+ for (lib_name, build_info) in self.libraries:
+ if has_cxx_sources(build_info.get('sources',[])):
+ return True
+ return False
+
+ def run(self):
+ if not self.libraries:
+ return
+
+ # Make sure that library sources are complete.
+ for (lib_name, build_info) in self.libraries:
+ if not all_strings(build_info.get('sources',[])):
+ self.run_command('build_src')
+
+ from distutils.ccompiler import new_compiler
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.compiler.customize(self.distribution,
+ need_cxx=self.have_cxx_sources())
+
+ libraries = self.libraries
+ self.libraries = None
+ self.compiler.customize_cmd(self)
+ self.libraries = libraries
+
+ self.compiler.show_customization()
+
+ if self.have_f_sources():
+ from scipy.distutils.fcompiler import new_fcompiler
+ self.fcompiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.fcompiler.customize(self.distribution)
+
+ libraries = self.libraries
+ self.libraries = None
+ self.fcompiler.customize_cmd(self)
+ self.libraries = libraries
+
+ self.fcompiler.show_customization()
+
+ self.build_libraries(self.libraries)
+ return
+
+ def get_source_files(self):
+ self.check_library_list(self.libraries)
+ filenames = []
+ for lib in self.libraries:
+ filenames.extend(get_lib_source_files(lib))
+ return filenames
+
+ def build_libraries(self, libraries):
+
+ compiler = self.compiler
+ fcompiler = self.fcompiler
+
+ for (lib_name, build_info) in libraries:
+ sources = build_info.get('sources')
+ if sources is None or type(sources) not in (ListType, TupleType):
+ raise DistutilsSetupError, \
+ ("in 'libraries' option (library '%s'), " +
+ "'sources' must be present and must be " +
+ "a list of source filenames") % lib_name
+ sources = list(sources)
+
+ lib_file = compiler.library_filename(lib_name,
+ output_dir=self.build_clib)
+
+ depends = sources + build_info.get('depends',[])
+ if not (self.force or newer_group(depends, lib_file, 'newer')):
+ log.debug("skipping '%s' library (up-to-date)", lib_name)
+ continue
+ else:
+ log.info("building '%s' library", lib_name)
+
+ macros = build_info.get('macros')
+ include_dirs = build_info.get('include_dirs')
+ extra_postargs = build_info.get('extra_compiler_args') or []
+
+ c_sources, cxx_sources, f_sources, fmodule_sources \
+ = filter_sources(sources)
+
+ if self.compiler.compiler_type=='msvc':
+ # this hack works around the msvc compiler attributes
+ # problem, msvc uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+
+ if fmodule_sources:
+ print 'XXX: Fortran 90 module support not implemented or tested'
+ f_sources.extend(fmodule_sources)
+
+ objects = []
+ if c_sources:
+ log.info("compiling C sources")
+ objects = compiler.compile(c_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+
+ if cxx_sources:
+ log.info("compiling C++ sources")
+ old_compiler = self.compiler.compiler_so[0]
+ self.compiler.compiler_so[0] = self.compiler.compiler_cxx[0]
+
+ cxx_objects = compiler.compile(cxx_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+ objects.extend(cxx_objects)
+
+ self.compiler.compiler_so[0] = old_compiler
+
+ if f_sources:
+ log.info("compiling Fortran sources")
+ f_objects = fcompiler.compile(f_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=[])
+ objects.extend(f_objects)
+
+ self.compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
+
+ clib_libraries = build_info.get('libraries',[])
+ for lname,binfo in libraries:
+ if lname in clib_libraries:
+ clib_libraries.extend(binfo[1].get('libraries',[]))
+ if clib_libraries:
+ build_info['libraries'] = clib_libraries
+
+ return
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
new file mode 100644
index 000000000..7a2318b70
--- /dev/null
+++ b/numpy/distutils/command/build_ext.py
@@ -0,0 +1,349 @@
+""" Modified version of build_ext that handles fortran source files.
+"""
+
+import os
+import string
+import sys
+from glob import glob
+from types import *
+
+from distutils.dep_util import newer_group, newer
+from distutils.command.build_ext import build_ext as old_build_ext
+
+from scipy.distutils import log
+from scipy.distutils.misc_util import filter_sources, has_f_sources, \
+ has_cxx_sources, get_ext_source_files, all_strings, \
+ get_scipy_include_dirs
+from distutils.errors import DistutilsFileError
+
+class build_ext (old_build_ext):
+
+ description = "build C/C++/F extensions (compile/link to build directory)"
+
+ user_options = old_build_ext.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ]
+
+ def initialize_options(self):
+ old_build_ext.initialize_options(self)
+ self.fcompiler = None
+ return
+
+ def finalize_options(self):
+ old_build_ext.finalize_options(self)
+ self.set_undefined_options('config_fc',
+ ('fcompiler', 'fcompiler'))
+ return
+
+ def run(self):
+ if not self.extensions:
+ return
+
+ # Make sure that extension sources are complete.
+ for ext in self.extensions:
+ if not all_strings(ext.sources):
+ self.run_command('build_src')
+
+ if self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ self.library_dirs.append(build_clib.build_clib)
+ else:
+ build_clib = None
+
+ # Not including C libraries to the list of
+ # extension libraries automatically to prevent
+ # bogus linking commands. Extensions must
+ # explicitly specify the C libraries that they use.
+
+ # Determine if Fortran compiler is needed.
+ if build_clib and build_clib.fcompiler is not None:
+ need_f_compiler = 1
+ else:
+ need_f_compiler = 0
+ for ext in self.extensions:
+ if has_f_sources(ext.sources):
+ need_f_compiler = 1
+ break
+ if getattr(ext,'language','c') in ['f77','f90']:
+ need_f_compiler = 1
+ break
+
+ # Determine if C++ compiler is needed.
+ need_cxx_compiler = 0
+ for ext in self.extensions:
+ if has_cxx_sources(ext.sources):
+ need_cxx_compiler = 1
+ break
+ if getattr(ext,'language','c')=='c++':
+ need_cxx_compiler = 1
+ break
+
+ from distutils.ccompiler import new_compiler
+ self.compiler = new_compiler(compiler=self.compiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.compiler.customize(self.distribution,need_cxx=need_cxx_compiler)
+ self.compiler.customize_cmd(self)
+ self.compiler.show_customization()
+
+ # Initialize Fortran/C++ compilers if needed.
+ if need_f_compiler:
+ from scipy.distutils.fcompiler import new_fcompiler
+ self.fcompiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ if self.fcompiler.get_version():
+ self.fcompiler.customize(self.distribution)
+ self.fcompiler.customize_cmd(self)
+ self.fcompiler.show_customization()
+ else:
+ self.warn('fcompiler=%s is not available.' % (self.fcompiler.compiler_type))
+ self.fcompiler = None
+
+ # Build extensions
+ self.build_extensions()
+ return
+
+ def swig_sources(self, sources):
+ # Do nothing. Swig sources have beed handled in build_src command.
+ return sources
+
+ def build_extension(self, ext):
+ sources = ext.sources
+ if sources is None or type(sources) not in (ListType, TupleType):
+ raise DistutilsSetupError, \
+ ("in 'ext_modules' option (extension '%s'), " +
+ "'sources' must be present and must be " +
+ "a list of source filenames") % ext.name
+ sources = list(sources)
+
+ if not sources:
+ return
+
+ fullname = self.get_ext_fullname(ext.name)
+ if self.inplace:
+ modpath = string.split(fullname, '.')
+ package = string.join(modpath[0:-1], '.')
+ base = modpath[-1]
+
+ build_py = self.get_finalized_command('build_py')
+ package_dir = build_py.get_package_dir(package)
+ ext_filename = os.path.join(package_dir,
+ self.get_ext_filename(base))
+ else:
+ ext_filename = os.path.join(self.build_lib,
+ self.get_ext_filename(fullname))
+ depends = sources + ext.depends
+
+ if not (self.force or newer_group(depends, ext_filename, 'newer')):
+ log.debug("skipping '%s' extension (up-to-date)", ext.name)
+ return
+ else:
+ log.info("building '%s' extension", ext.name)
+
+ extra_args = ext.extra_compile_args or []
+ macros = ext.define_macros[:]
+ for undef in ext.undef_macros:
+ macros.append((undef,))
+
+ clib_libraries = []
+ clib_library_dirs = []
+ if self.distribution.libraries:
+ for libname,build_info in self.distribution.libraries:
+ if libname in ext.libraries:
+ macros.extend(build_info.get('macros',[]))
+ clib_libraries.extend(build_info.get('libraries',[]))
+ clib_library_dirs.extend(build_info.get('library_dirs',[]))
+
+ c_sources, cxx_sources, f_sources, fmodule_sources = \
+ filter_sources(ext.sources)
+ if self.compiler.compiler_type=='msvc':
+ if cxx_sources:
+ # Needed to compile kiva.agg._agg extension.
+ extra_args.append('/Zm1000')
+ # this hack works around the msvc compiler attributes
+ # problem, msvc uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+
+
+ kws = {'depends':ext.depends}
+ output_dir = self.build_temp
+
+ include_dirs = ext.include_dirs + get_scipy_include_dirs()
+
+ c_objects = []
+ if c_sources:
+ log.info("compiling C sources")
+ c_objects = self.compiler.compile(c_sources,
+ output_dir=output_dir,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args,
+ **kws)
+ if cxx_sources:
+ log.info("compiling C++ sources")
+
+ old_compiler = self.compiler.compiler_so[0]
+ self.compiler.compiler_so[0] = self.compiler.compiler_cxx[0]
+
+ c_objects += self.compiler.compile(cxx_sources,
+ output_dir=output_dir,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args,
+ **kws)
+ self.compiler.compiler_so[0] = old_compiler
+
+ check_for_f90_modules = not not fmodule_sources
+
+ if f_sources or fmodule_sources:
+ extra_postargs = []
+ module_dirs = ext.module_dirs[:]
+
+ #if self.fcompiler.compiler_type=='ibm':
+ macros = []
+
+ if check_for_f90_modules:
+ module_build_dir = os.path.join(\
+ self.build_temp,os.path.dirname(\
+ self.get_ext_filename(fullname)))
+
+ self.mkpath(module_build_dir)
+ if self.fcompiler.module_dir_switch is None:
+ existing_modules = glob('*.mod')
+ extra_postargs += self.fcompiler.module_options(\
+ module_dirs,module_build_dir)
+
+ f_objects = []
+ if fmodule_sources:
+ log.info("compiling Fortran 90 module sources")
+ f_objects = self.fcompiler.compile(fmodule_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs,
+ depends=ext.depends)
+
+ if check_for_f90_modules \
+ and self.fcompiler.module_dir_switch is None:
+ for f in glob('*.mod'):
+ if f in existing_modules:
+ continue
+ try:
+ self.move_file(f, module_build_dir)
+ except DistutilsFileError: # already exists in destination
+ os.remove(f)
+
+ if f_sources:
+ log.info("compiling Fortran sources")
+ f_objects += self.fcompiler.compile(f_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs,
+ depends=ext.depends)
+ else:
+ f_objects = []
+
+ objects = c_objects + f_objects
+
+ if ext.extra_objects:
+ objects.extend(ext.extra_objects)
+ extra_args = ext.extra_link_args or []
+
+        try:
+            old_linker_so_0 = self.compiler.linker_so[0]
+        except Exception:
+            pass
+
+ use_fortran_linker = getattr(ext,'language','c') in ['f77','f90'] \
+ and self.fcompiler is not None
+ c_libraries = []
+ c_library_dirs = []
+ if use_fortran_linker or f_sources:
+ use_fortran_linker = 1
+ elif self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ f_libs = []
+ for (lib_name, build_info) in build_clib.libraries:
+ if has_f_sources(build_info.get('sources',[])):
+ f_libs.append(lib_name)
+ if lib_name in ext.libraries:
+ # XXX: how to determine if c_libraries contain
+ # fortran compiled sources?
+ c_libraries.extend(build_info.get('libraries',[]))
+ c_library_dirs.extend(build_info.get('library_dirs',[]))
+ for l in ext.libraries:
+ if l in f_libs:
+ use_fortran_linker = 1
+ break
+
+ # Always use system linker when using MSVC compiler.
+ if self.compiler.compiler_type=='msvc' and use_fortran_linker:
+ c_libraries.extend(self.fcompiler.libraries)
+ c_library_dirs.extend(self.fcompiler.library_dirs)
+ use_fortran_linker = 0
+
+ if use_fortran_linker:
+ if cxx_sources:
+ # XXX: Which linker should be used, Fortran or C++?
+ log.warn('mixing Fortran and C++ is untested')
+ link = self.fcompiler.link_shared_object
+ language = ext.language or self.fcompiler.detect_language(f_sources)
+ else:
+ link = self.compiler.link_shared_object
+ if sys.version[:3]>='2.3':
+ language = ext.language or self.compiler.detect_language(sources)
+ else:
+ language = ext.language
+ if cxx_sources:
+ self.compiler.linker_so[0] = self.compiler.compiler_cxx[0]
+
+ if sys.version[:3]>='2.3':
+ kws = {'target_lang':language}
+ else:
+ kws = {}
+
+ link(objects, ext_filename,
+ libraries=self.get_libraries(ext) + c_libraries + clib_libraries,
+ library_dirs=ext.library_dirs + c_library_dirs + clib_library_dirs,
+ runtime_library_dirs=ext.runtime_library_dirs,
+ extra_postargs=extra_args,
+ export_symbols=self.get_export_symbols(ext),
+ debug=self.debug,
+ build_temp=self.build_temp,**kws)
+
+        try:
+            self.compiler.linker_so[0] = old_linker_so_0
+        except Exception:
+            pass
+
+ return
+
+ def get_source_files (self):
+ self.check_extensions_list(self.extensions)
+ filenames = []
+ for ext in self.extensions:
+ filenames.extend(get_ext_source_files(ext))
+ return filenames
+
+ def get_outputs (self):
+ self.check_extensions_list(self.extensions)
+
+ outputs = []
+ for ext in self.extensions:
+ if not ext.sources:
+ continue
+ fullname = self.get_ext_fullname(ext.name)
+ outputs.append(os.path.join(self.build_lib,
+ self.get_ext_filename(fullname)))
+ return outputs
+
diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py
new file mode 100644
index 000000000..ab5bd8531
--- /dev/null
+++ b/numpy/distutils/command/build_py.py
@@ -0,0 +1,13 @@
+
+from distutils.command.build_py import build_py as old_build_py
+
+class build_py(old_build_py):
+
+ def find_package_modules(self, package, package_dir):
+ modules = old_build_py.find_package_modules(self, package, package_dir)
+
+ # Find build_src generated *.py files.
+ build_src = self.get_finalized_command('build_src')
+ modules += build_src.py_modules.get(package,[])
+
+ return modules
diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py
new file mode 100644
index 000000000..a2dabfa6a
--- /dev/null
+++ b/numpy/distutils/command/build_scripts.py
@@ -0,0 +1,44 @@
+""" Modified version of build_scripts that handles building scripts from functions.
+"""
+
+from distutils.command.build_scripts import build_scripts as old_build_scripts
+from scipy.distutils import log
+
+class build_scripts(old_build_scripts):
+
+ def generate_scripts(self, scripts):
+ new_scripts = []
+ func_scripts = []
+ for script in scripts:
+ if type(script) is type(''):
+ new_scripts.append(script)
+ else:
+ func_scripts.append(script)
+ if not func_scripts:
+ return new_scripts
+
+ build_dir = self.build_dir
+ self.mkpath(build_dir)
+ for func in func_scripts:
+ script = func(build_dir)
+ if not script:
+ continue
+ if type(script) is type([]):
+ [log.info(" adding '%s' to scripts" % (s)) for s in script]
+ new_scripts.extend(script)
+ else:
+ log.info(" adding '%s' to scripts" % (script))
+ new_scripts.append(script)
+ return new_scripts
+
+ def run (self):
+ if not self.scripts:
+ return
+
+ self.scripts = self.generate_scripts(self.scripts)
+
+ return old_build_scripts.run(self)
+
+ def get_source_files(self):
+ from scipy.distutils.misc_util import get_script_files
+ return get_script_files(self.scripts)
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
new file mode 100644
index 000000000..65b46d173
--- /dev/null
+++ b/numpy/distutils/command/build_src.py
@@ -0,0 +1,542 @@
+""" Build swig, f2py, weave, sources.
+"""
+
+import os
+import re
+import copy
+
+from distutils.cmd import Command
+from distutils.command import build_ext, build_py
+from distutils.util import convert_path
+from distutils.dep_util import newer_group, newer
+
+from scipy.distutils import log
+from scipy.distutils.misc_util import fortran_ext_match, all_strings, dot_join,\
+ appendpath
+from scipy.distutils.from_template import process_file as process_f_file
+from scipy.distutils.conv_template import process_file as process_c_file
+from scipy.distutils.extension import Extension
+from scipy.distutils.system_info import get_info, dict_append
+
+class build_src(build_ext.build_ext):
+
+ description = "build sources from SWIG, F2PY files or a function"
+
+ user_options = [
+ ('build-src=', 'd', "directory to \"build\" sources to"),
+ ('f2pyflags=', None, "additonal flags to f2py"),
+ ('swigflags=', None, "additional flags to swig"),
+ ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ('inplace', 'i',
+ "ignore build-lib and put compiled extensions into the source " +
+ "directory alongside your pure Python modules"),
+ ]
+
+ boolean_options = ['force','inplace']
+
+ help_options = []
+
+ def initialize_options(self):
+ self.extensions = None
+ self.package = None
+ self.py_modules = None
+ self.build_src = None
+ self.build_lib = None
+ self.build_base = None
+ self.force = None
+ self.inplace = None
+ self.package_dir = None
+ self.f2pyflags = None
+ self.swigflags = None
+ return
+
    def finalize_options(self):
        """Derive unset options from 'build'/'build_ext' and normalize flag strings."""
        # Inherit shared settings from the top-level build command.
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))
        if self.package is None:
            self.package = self.distribution.ext_package
        self.extensions = self.distribution.ext_modules
        self.libraries = self.distribution.libraries or []
        self.py_modules = self.distribution.py_modules
        if self.build_src is None:
            # Generated sources default to <build_base>/src.
            self.build_src = os.path.join(self.build_base, 'src')
        if self.inplace is None:
            # Mirror build_ext's --inplace setting.
            build_ext = self.get_finalized_command('build_ext')
            self.inplace = build_ext.inplace

        # py_modules is used in build_py.find_package_modules
        # NOTE(review): this clobbers the distribution.py_modules value read
        # above; from here on py_modules maps package name -> generated modules.
        self.py_modules = {}

        # Option strings arrive from the command line; split on whitespace.
        if self.f2pyflags is None:
            self.f2pyflags = []
        else:
            self.f2pyflags = self.f2pyflags.split() # XXX spaces??

        if self.swigflags is None:
            self.swigflags = []
        else:
            self.swigflags = self.swigflags.split() # XXX spaces??
        return
+
+ def run(self):
+ if not (self.extensions or self.libraries):
+ return
+ self.build_sources()
+
+ return
+
+ def build_sources(self):
+
+ for libname_info in self.libraries:
+ self.build_library_sources(*libname_info)
+
+ if self.extensions:
+ self.check_extensions_list(self.extensions)
+
+ for ext in self.extensions:
+ self.build_extension_sources(ext)
+
+ return
+
    def build_library_sources(self, lib_name, build_info):
        """Run source generators and template expansion for one library,
        updating build_info['sources'] in place."""
        sources = list(build_info.get('sources',[]))

        if not sources:
            return

        log.info('building library "%s" sources' % (lib_name))

        sources = self.generate_sources(sources, (lib_name, build_info))

        sources = self.template_sources(sources, (lib_name, build_info))

        sources, h_files = self.filter_h_files(sources)

        if h_files:
            # Header outputs are currently only reported, not installed.
            print self.package,'- nothing done with h_files=',h_files

        #for f in h_files:
        #    self.distribution.headers.append((lib_name,f))

        build_info['sources'] = sources
        return
+
    def build_extension_sources(self, ext):
        """Run all source generators for one extension, in order:
        callables -> templates -> SWIG -> f2py; record generated .py modules
        and report generated headers.  Updates ext.sources in place."""
        sources = list(ext.sources)

        log.info('building extension "%s" sources' % (ext.name))

        fullname = self.get_ext_fullname(ext.name)

        modpath = fullname.split('.')
        package = '.'.join(modpath[0:-1])

        if self.inplace:
            # Generated files go next to the package's pure-Python modules.
            build_py = self.get_finalized_command('build_py')
            self.ext_target_dir = build_py.get_package_dir(package)

        sources = self.generate_sources(sources, ext)

        sources = self.template_sources(sources, ext)

        sources = self.swig_sources(sources, ext)

        sources = self.f2py_sources(sources, ext)

        sources, py_files = self.filter_py_files(sources)

        # Register generated .py files so build_py.find_package_modules
        # picks them up (see the build_py override).
        if not self.py_modules.has_key(package):
            self.py_modules[package] = []
        modules = []
        for f in py_files:
            module = os.path.splitext(os.path.basename(f))[0]
            modules.append((package, module, f))
        self.py_modules[package] += modules

        sources, h_files = self.filter_h_files(sources)

        if h_files:
            # Header outputs are currently only reported, not installed.
            print package,'- nothing done with h_files=',h_files
        #for f in h_files:
        #    self.distribution.headers.append((package,f))

        ext.sources = sources

        return
+
+ def generate_sources(self, sources, extension):
+ new_sources = []
+ func_sources = []
+ for source in sources:
+ if type(source) is type(''):
+ new_sources.append(source)
+ else:
+ func_sources.append(source)
+ if not func_sources:
+ return new_sources
+ if self.inplace:
+ build_dir = self.ext_target_dir
+ else:
+ if type(extension) is type(()):
+ name = extension[0]
+ # if not extension[1].has_key('include_dirs'):
+ # extension[1]['include_dirs'] = []
+ # incl_dirs = extension[1]['include_dirs']
+ else:
+ name = extension.name
+ # incl_dirs = extension.include_dirs
+ #if self.build_src not in incl_dirs:
+ # incl_dirs.append(self.build_src)
+ build_dir = os.path.join(*([self.build_src]\
+ +name.split('.')[:-1]))
+ self.mkpath(build_dir)
+ for func in func_sources:
+ source = func(extension, build_dir)
+ if not source:
+ continue
+ if type(source) is type([]):
+ [log.info(" adding '%s' to sources." % (s)) for s in source]
+ new_sources.extend(source)
+ else:
+ log.info(" adding '%s' to sources." % (source))
+ new_sources.append(source)
+
+ return new_sources
+
+ def filter_py_files(self, sources):
+ return self.filter_files(sources,['.py'])
+
+ def filter_h_files(self, sources):
+ return self.filter_files(sources,['.h','.hpp','.inc'])
+
+ def filter_files(self, sources, exts = []):
+ new_sources = []
+ files = []
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext in exts:
+ files.append(source)
+ else:
+ new_sources.append(source)
+ return new_sources, files
+
    def template_sources(self, sources, extension):
        """Expand *.src template files into the build tree and return the
        updated source list.

        Fortran-ish templates go through from_template, everything else
        through conv_template.  Generated header directories are added to
        the extension's include_dirs.
        """
        new_sources = []
        if type(extension) is type(()):
            # (lib_name, build_info) form.
            depends = extension[1].get('depends')
            include_dirs = extension[1].get('include_dirs')
        else:
            depends = extension.depends
            include_dirs = extension.include_dirs
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.src': # Template file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                self.mkpath(target_dir)
                # Target name is the template name minus '.src'.
                target_file = os.path.join(target_dir,os.path.basename(base))
                if (self.force or newer_group([source] + depends, target_file)):
                    if _f_pyf_ext_match(base):
                        log.info("from_template:> %s" % (target_file))
                        outstr = process_f_file(source)
                    else:
                        log.info("conv_template:> %s" % (target_file))
                        outstr = process_c_file(source)
                    fid = open(target_file,'w')
                    fid.write(outstr)
                    fid.close()
                if _header_ext_match(target_file):
                    # Generated headers must be reachable by the compiler.
                    d = os.path.dirname(target_file)
                    if d not in include_dirs:
                        log.info(" adding '%s' to include_dirs." % (d))
                        include_dirs.append(d)
                new_sources.append(target_file)
            else:
                new_sources.append(source)
        return new_sources
+
    def f2py_sources(self, sources, extension):
        """Run f2py over .pyf interface files and loose Fortran sources.

        Returns the input list with .pyf entries replaced by the generated
        <name>module.c (plus fortranobject.c and any f2py wrapper files);
        plain Fortran sources are passed through and appended at the end.
        """
        new_sources = []
        f2py_sources = []
        f_sources = []
        f2py_targets = {}
        target_dirs = []
        ext_name = extension.name.split('.')[-1]
        skip_f2py = 0

        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.pyf': # F2PY interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                if os.path.isfile(source):
                    # The .pyf module name must match the extension name.
                    name = get_f2py_modulename(source)
                    assert name==ext_name,'mismatch of extension names: '\
                           +source+' provides'\
                           ' '+`name`+' but expected '+`ext_name`
                    target_file = os.path.join(target_dir,name+'module.c')
                else:
                    # Interface file missing: assume a pre-generated module.c
                    # (e.g. shipped in an sdist built with --inplace).
                    log.debug('  source %s does not exist: skipping f2py\'ing.' \
                              % (source))
                    name = ext_name
                    skip_f2py = 1
                    target_file = os.path.join(target_dir,name+'module.c')
                    if not os.path.isfile(target_file):
                        log.debug('  target %s does not exist:\n   '\
                                  'Assuming %smodule.c was generated with '\
                                  '"build_src --inplace" command.' \
                                  % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = os.path.join(target_dir,name+'module.c')
                        assert os.path.isfile(target_file),`target_file`+' missing'
                        log.debug('   Yes! Using %s as up-to-date target.' \
                                  % (target_file))
                target_dirs.append(target_dir)
                f2py_sources.append(source)
                f2py_targets[source] = target_file
                new_sources.append(target_file)
            elif fortran_ext_match(ext):
                f_sources.append(source)
            else:
                new_sources.append(source)

        if not (f2py_sources or f_sources):
            return new_sources

        # NOTE(review): relies on Python 2's eager map() for the mkpath
        # side effect; under Python 3 map() is lazy and this is a no-op.
        map(self.mkpath, target_dirs)

        f2py_options = extension.f2py_options + self.f2pyflags

        if self.distribution.libraries:
            # Libraries this extension links against may carry extra options.
            for name,build_info in self.distribution.libraries:
                if name in extension.libraries:
                    f2py_options.extend(build_info.get('f2py_options',[]))

        log.info("f2py options: %s" % (f2py_options))

        if f2py_sources:
            assert len(f2py_sources)==1,\
                   'only one .pyf file is allowed per extension module but got'\
                   ' more:'+`f2py_sources`
            source = f2py_sources[0]
            target_file = f2py_targets[source]
            target_dir = os.path.dirname(target_file) or '.'
            depends = [source] + extension.depends
            if (self.force or newer_group(depends, target_file,'newer')) \
                   and not skip_f2py:
                log.info("f2py: %s" % (source))
                import scipy.f2py as f2py2e
                f2py2e.run_main(f2py_options + ['--build-dir',target_dir,source])
            else:
                log.debug("  skipping '%s' f2py interface (up-to-date)" % (source))
        else:
            # No .pyf: wrap the loose Fortran sources into a module directly.
            #XXX TODO: --inplace support for sdist command
            if type(extension) is type(()): name = extension[0]
            else: name = extension.name
            target_dir = os.path.join(*([self.build_src]\
                                        +name.split('.')[:-1]))
            target_file = os.path.join(target_dir,ext_name + 'module.c')
            new_sources.append(target_file)
            depends = f_sources + extension.depends
            if (self.force or newer_group(depends, target_file, 'newer')) \
                   and not skip_f2py:
                import scipy.f2py as f2py2e
                log.info("f2py:> %s" % (target_file))
                self.mkpath(target_dir)
                f2py2e.run_main(f2py_options + ['--lower',
                                                '--build-dir',target_dir]+\
                                ['-m',ext_name]+f_sources)
            else:
                log.debug("  skipping f2py fortran files for '%s' (up-to-date)"\
                          % (target_file))

        assert os.path.isfile(target_file),`target_file`+' missing'

        # Every f2py-built extension needs the fortranobject runtime support.
        target_c = os.path.join(self.build_src,'fortranobject.c')
        target_h = os.path.join(self.build_src,'fortranobject.h')
        log.info("  adding '%s' to sources." % (target_c))
        new_sources.append(target_c)
        if self.build_src not in extension.include_dirs:
            log.info("  adding '%s' to include_dirs." \
                     % (self.build_src))
            extension.include_dirs.append(self.build_src)

        if not skip_f2py:
            # Copy f2py's runtime support files into the build tree when stale.
            import scipy.f2py as f2py2e
            d = os.path.dirname(f2py2e.__file__)
            source_c = os.path.join(d,'src','fortranobject.c')
            source_h = os.path.join(d,'src','fortranobject.h')
            if newer(source_c,target_c) or newer(source_h,target_h):
                self.mkpath(os.path.dirname(target_c))
                self.copy_file(source_c,target_c)
                self.copy_file(source_h,target_h)
        else:
            assert os.path.isfile(target_c),`target_c` + ' missing'
            assert os.path.isfile(target_h),`target_h` + ' missing'

        # f2py may emit extra Fortran wrapper files next to the module.
        for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']:
            filename = os.path.join(target_dir,ext_name + name_ext)
            if os.path.isfile(filename):
                log.info("  adding '%s' to sources." % (filename))
                f_sources.append(filename)

        return new_sources + f_sources
+
    def swig_sources(self, sources, extension):
        """Run SWIG over .i interface files.

        Replaces each .i entry with the generated <name>_wrap.c/.cpp and
        appends the generated .py proxy modules to the returned list.
        """
        # Assuming SWIG 1.3.14 or later. See compatibility note in
        # http://www.swig.org/Doc1.3/Python.html#Python_nn6

        new_sources = []
        swig_sources = []
        swig_targets = {}
        target_dirs = []
        py_files = [] # swig generated .py files
        target_ext = '.c'
        typ = None
        is_cpp = 0
        skip_swig = 0
        ext_name = extension.name.split('.')[-1]

        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.i': # SWIG interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                    py_target_dir = self.ext_target_dir
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                    py_target_dir = target_dir
                if os.path.isfile(source):
                    name = get_swig_modulename(source)
                    # SWIG extensions are conventionally named '_<module>'.
                    assert name==ext_name[1:],'mismatch of extension names: '\
                           +source+' provides'\
                           ' '+`name`+' but expected '+`ext_name[1:]`
                    if typ is None:
                        typ = get_swig_target(source)
                        is_cpp = typ=='c++'
                        if is_cpp:
                            target_ext = '.cpp'
                    else:
                        # All .i files of one extension must agree on C vs C++.
                        assert typ == get_swig_target(source),`typ`
                    target_file = os.path.join(target_dir,'%s_wrap%s' \
                                               % (name, target_ext))
                else:
                    # Interface file missing: assume pre-generated wrappers.
                    log.debug('  source %s does not exist: skipping swig\'ing.' \
                              % (source))
                    name = ext_name[1:]
                    skip_swig = 1
                    target_file = _find_swig_target(target_dir, name)
                    if not os.path.isfile(target_file):
                        log.debug('  target %s does not exist:\n   '\
                                  'Assuming %s_wrap.{c,cpp} was generated with '\
                                  '"build_src --inplace" command.' \
                                  % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = _find_swig_target(target_dir, name)
                        assert os.path.isfile(target_file),`target_file`+' missing'
                        log.debug('   Yes! Using %s as up-to-date target.' \
                                  % (target_file))
                target_dirs.append(target_dir)
                new_sources.append(target_file)
                py_files.append(os.path.join(py_target_dir, name+'.py'))
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)

        if not swig_sources:
            return new_sources

        if skip_swig:
            return new_sources + py_files

        # NOTE(review): relies on Python 2's eager map() for the mkpath
        # side effect; under Python 3 map() is lazy and this is a no-op.
        map(self.mkpath, target_dirs)
        swig = self.find_swig()
        swig_cmd = [swig, "-python"]
        if is_cpp:
            swig_cmd.append('-c++')
        for d in extension.include_dirs:
            swig_cmd.append('-I'+d)
        for source in swig_sources:
            target = swig_targets[source]
            depends = [source] + extension.depends
            if self.force or newer_group(depends, target, 'newer'):
                log.info("%s: %s" % (os.path.basename(swig) \
                                     + (is_cpp and '++' or ''), source))
                self.spawn(swig_cmd + self.swigflags \
                           + ["-o", target, '-outdir', py_target_dir, source])
            else:
                log.debug("  skipping '%s' swig interface (up-to-date)" \
                          % (source))

        return new_sources + py_files
+
+_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match
+_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z',re.I).match
+
+#### SWIG related auxiliary functions ####
+_swig_module_name_match = re.compile(r'\s*%module\s*(?P<name>[\w_]+)',
+ re.I).match
+_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search
+_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search
+
+def get_swig_target(source):
+ f = open(source,'r')
+ result = 'c'
+ line = f.readline()
+ if _has_cpp_header(line):
+ result = 'c++'
+ if _has_c_header(line):
+ result = 'c'
+ f.close()
+ return result
+
+def get_swig_modulename(source):
+ f = open(source,'r')
+ f_readlines = getattr(f,'xreadlines',f.readlines)
+ for line in f_readlines():
+ m = _swig_module_name_match(line)
+ if m:
+ name = m.group('name')
+ break
+ f.close()
+ return name
+
+def _find_swig_target(target_dir,name):
+ for ext in ['.cpp','.c']:
+ target = os.path.join(target_dir,'%s_wrap%s' % (name, ext))
+ if os.path.isfile(target):
+ break
+ return target
+
#### F2PY related auxiliary functions ####

# 'python module <name>' declaration inside a .pyf interface file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'\
                                          '__user__[\w_]*)',re.I).match

def get_f2py_modulename(source):
    """Return the first non-__user__ 'python module <name>' declared in the
    .pyf file *source*, or None if there is none."""
    modname = None
    fobj = open(source)
    reader = getattr(fobj, 'xreadlines', fobj.readlines)
    for text_line in reader():
        hit = _f2py_module_name_match(text_line)
        if hit is None:
            continue
        # *__user__* blocks declare callback signatures, not the module.
        if _f2py_user_module_name_match(text_line):
            continue
        modname = hit.group('name')
        break
    fobj.close()
    return modname
+
+##########################################
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
new file mode 100644
index 000000000..aff79067e
--- /dev/null
+++ b/numpy/distutils/command/config.py
@@ -0,0 +1,63 @@
+# Added Fortran compiler support to config. Currently useful only for
+# try_compile call. try_run works but is untested for most of Fortran
+# compilers (they must define linker_exe first).
+# Pearu Peterson
+
+from distutils.command.config import config as old_config
+from distutils.command.config import LANG_EXT
+LANG_EXT['f77'] = '.f'
+LANG_EXT['f90'] = '.f90'
+
class config(old_config):
    """config command extended with Fortran compiler support (f77/f90)."""

    # NOTE(review): this mutates distutils' own config.user_options list in
    # place at import time, not a per-class copy.
    old_config.user_options += [
        ('fcompiler=', None,
         "specify the Fortran compiler type"),
        ]

    def initialize_options(self):
        # The Fortran compiler is resolved lazily in _check_compiler().
        self.fcompiler = None
        old_config.initialize_options(self)
        return

    def finalize_options(self):
        old_config.finalize_options(self)
        # Force creation of the config_fc command so its option can be
        # inherited below.
        f = self.distribution.get_command_obj('config_fc')
        self.set_undefined_options('config_fc',
                                   ('fcompiler', 'fcompiler'))
        return

    def _check_compiler (self):
        """Set up the C compiler (base class) plus a customized FCompiler."""
        old_config._check_compiler(self)
        from scipy.distutils.fcompiler import FCompiler, new_fcompiler
        if not isinstance(self.fcompiler, FCompiler):
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run, force=1)
            self.fcompiler.customize(self.distribution)
            self.fcompiler.customize_cmd(self)
            self.fcompiler.show_customization()
        return

    def _wrap_method(self,mth,lang,args):
        """Call unbound method *mth* with the Fortran compiler swapped in for
        'f77'/'f90' *lang*, restoring the C compiler afterwards.

        NOTE(review): the caught exception *msg* is discarded and a bare
        CompileError is re-raised -- confirm before relying on messages.
        """
        from distutils.ccompiler import CompileError
        from distutils.errors import DistutilsExecError
        save_compiler = self.compiler
        if lang in ['f77','f90']:
            self.compiler = self.fcompiler
        try:
            ret = mth(*((self,)+args))
        except (DistutilsExecError,CompileError),msg:
            self.compiler = save_compiler
            raise CompileError
        self.compiler = save_compiler
        return ret

    def _compile (self, body, headers, include_dirs, lang):
        # Route Fortran compiles through the wrapped compiler.
        return self._wrap_method(old_config._compile,lang,
                                 (body, headers, include_dirs, lang))

    def _link (self, body,
               headers, include_dirs,
               libraries, library_dirs, lang):
        return self._wrap_method(old_config._link,lang,
                                 (body, headers, include_dirs,
                                  libraries, library_dirs, lang))
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py
new file mode 100644
index 000000000..0db601fc6
--- /dev/null
+++ b/numpy/distutils/command/config_compiler.py
@@ -0,0 +1,56 @@
+
+import sys
+from distutils.core import Command
+
+#XXX: Implement confic_cc for enhancing C/C++ compiler options.
+#XXX: Linker flags
+
class config_fc(Command):
    """Distutils command holding user-specified Fortran compiler options.

    It performs no work itself; FCompiler.customize() reads the recorded
    options.  --help-fcompiler lists the available compilers and exits.
    """

    user_options = [
        ('fcompiler=',None,"specify Fortran compiler type"),
        ('f77exec=', None, "specify F77 compiler command"),
        ('f90exec=', None, "specify F90 compiler command"),
        ('f77flags=',None,"specify F77 compiler flags"),
        ('f90flags=',None,"specify F90 compiler flags"),
        ('opt=',None,"specify optimization flags"),
        ('arch=',None,"specify architecture specific optimization flags"),
        ('debug','g',"compile with debugging information"),
        ('noopt',None,"compile without optimization"),
        ('noarch',None,"compile without arch-dependent optimization"),
        ('help-fcompiler',None,"list available Fortran compilers"),
        ]

    boolean_options = ['debug','noopt','noarch','help-fcompiler']

    # Attribute names backing the options above.
    _option_attrs = ('fcompiler', 'f77exec', 'f90exec', 'f77flags',
                     'f90flags', 'opt', 'arch', 'debug', 'noopt',
                     'noarch', 'help_fcompiler')

    def initialize_options(self):
        """Start with every option unset."""
        for attr in self._option_attrs:
            setattr(self, attr, None)

    def finalize_options(self):
        """--help-fcompiler reports the available compilers and exits."""
        if self.help_fcompiler:
            from scipy.distutils.fcompiler import show_fcompilers
            show_fcompilers(self.distribution)
            sys.exit()

    def run(self):
        """Nothing to do; this command only records options."""
        pass
+
+
diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py
new file mode 100644
index 000000000..64d613569
--- /dev/null
+++ b/numpy/distutils/command/install.py
@@ -0,0 +1,9 @@
+
+from distutils.command.install import *
+from distutils.command.install import install as old_install
+
class install(old_install):
    """install command that redirects install_lib to install_libbase.

    NOTE(review): this makes packages install directly under the base
    library directory -- confirm this is still the intended layout.
    """

    def finalize_options (self):
        old_install.finalize_options(self)
        self.install_lib = self.install_libbase
diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py
new file mode 100644
index 000000000..e170ba4d8
--- /dev/null
+++ b/numpy/distutils/command/install_data.py
@@ -0,0 +1,14 @@
+from distutils.command.install_data import *
+from distutils.command.install_data import install_data as old_install_data
+
+#data installer with improved intelligence over distutils
+#data files are copied into the project directory instead
+#of willy-nilly
class install_data (old_install_data):
    # Unlike the stock command, data files land under install_lib (the
    # package tree) rather than the generic data prefix.

    def finalize_options (self):
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   ('root', 'root'),
                                   ('force', 'force'),
                                   )
diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py
new file mode 100644
index 000000000..043f024f5
--- /dev/null
+++ b/numpy/distutils/command/install_headers.py
@@ -0,0 +1,26 @@
+import os
+from distutils.command.install import *
+from distutils.command.install_headers import install_headers as old_install_headers
+
class install_headers (old_install_headers):
    """install_headers that understands (package, filename) header tuples."""

    def run (self):
        """Copy every distribution header into place.

        Tuple entries (package, path) install under <prefix>/<package path>/;
        '.inc' files in tuples are skipped and the legacy 'scipy.base'
        package is remapped to 'scipy'.
        """
        headers = self.distribution.headers
        if not headers:
            return

        prefix = os.path.dirname(self.install_dir)
        for header in headers:
            if isinstance(header, tuple):
                pkg, path = header
                # Kind of a hack, but I don't know where else to change this...
                if pkg == 'scipy.base':
                    pkg = 'scipy'
                if os.path.splitext(path)[1] == '.inc':
                    continue
                dest = os.path.join(prefix, *pkg.split('.'))
                header = path
            else:
                dest = self.install_dir
            self.mkpath(dest)
            out = self.copy_file(header, dest)[0]
            self.outfiles.append(out)
diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py
new file mode 100644
index 000000000..289bd0357
--- /dev/null
+++ b/numpy/distutils/command/sdist.py
@@ -0,0 +1,26 @@
+
+from distutils.command.sdist import *
+from distutils.command.sdist import sdist as old_sdist
+from scipy.distutils.misc_util import get_data_files
+
class sdist(old_sdist):
    """sdist that also ships data_files contents and header files."""

    def add_defaults (self):
        # Standard defaults first (py_modules, scripts, README, ...).
        old_sdist.add_defaults(self)

        dist = self.distribution

        if dist.has_data_files():
            # Expand directory entries so the tarball contains the files.
            for data in dist.data_files:
                self.filelist.extend(get_data_files(data))

        if dist.has_headers():
            # Headers may be plain paths or (package, path) tuples.
            headers = []
            for h in dist.headers:
                if isinstance(h,str): headers.append(h)
                else: headers.append(h[1])
            self.filelist.extend(headers)

        return
+
+
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
new file mode 100644
index 000000000..d3e2de357
--- /dev/null
+++ b/numpy/distutils/conv_template.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+
+# takes templated file .xxx.src and produces .xxx file where .xxx is .i or .c or .h
+# using the following template rules
+
+# /**begin repeat on a line by itself marks the beginning of a segment of code to be repeated
+# /**end repeat**/ on a line by itself marks it's end
+
+# after the /**begin repeat and before the */
+# all the named templates are placed
+# these should all have the same number of replacements
+
+# in the main body, the names are used.
+# Each replace will use one entry from the list of named replacements
+
+# Note that all #..# forms in a block must have the same number of
+# comma-separated entries.
+
+__all__ = ['process_str', 'process_file']
+
+import string,os,sys
+if sys.version[:3]>='2.3':
+ import re
+else:
+ import pre as re
+ False = 0
+ True = 1
+
def parse_structure(astr):
    """Locate every /**begin repeat ... end repeat**/ section in *astr*.

    Returns a sorted list of 4-tuples of character offsets
    (header_start, body_start, body_end, section_end): the header spans the
    begin-repeat comment, the body is the text to be expanded.
    """
    spans = []
    pos = 0
    while True:
        head = astr.find("/**begin repeat", pos)
        if head == -1:
            break
        hdr_end = astr.find("*/", head)
        hdr_end = astr.find("\n", hdr_end)
        body_end = astr.find("/**end repeat**/", hdr_end)
        sec_end = astr.find("\n", body_end)
        spans.append((head, hdr_end + 1, body_end, sec_end + 1))
        pos = sec_end
    spans.sort()
    return spans
+
# return n copies of substr with template replacement
_special_names = {}

# @name@ template variables and #name=a,b,c# replacement lists.
template_re = re.compile(r"@([\w]+)@")
named_re = re.compile(r"#([\w]*)=([^#]*?)#")

parenrep = re.compile(r"[(]([^)]*?)[)]\*(\d+)")

def paren_repl(obj):
    """re.sub callback: expand '(body)*N' into N comma-joined copies of body."""
    body, count = obj.group(1), obj.group(2)
    return ','.join([body] * int(count))

plainrep = re.compile(r"([^*]+)\*(\d+)")

def conv(astr):
    """Expand repetition shorthand in a replacement list.

    '(a,b,c)*2' becomes 'a,b,c,a,b,c'; a bare 'xxx*3' becomes
    'xxx,xxx,xxx'.  Entries are re-joined with surrounding space stripped.
    """
    expanded = parenrep.sub(paren_repl, astr)
    parts = [plainrep.sub(paren_repl, item.strip())
             for item in expanded.split(',')]
    return ','.join(parts)
+
def unique_key(adict):
    """Return a key string that is not already present in *adict*.

    Joins the first n characters of every existing key, growing n until the
    result is unique -- not particularly quick, as the original noted.

    BUG FIX: the original looped forever once n exceeded the longest key
    while the joined string still collided (e.g. adict = {'x': 0}); now a
    '_' suffix is appended deterministically in that case.
    """
    allkeys = list(adict.keys())
    longest = max([len(k) for k in allkeys] + [0])
    n = 1
    newkey = "".join([x[:n] for x in allkeys])
    while newkey in allkeys and n < longest:
        n += 1
        newkey = "".join([x[:n] for x in allkeys])
    # Prefixes exhausted but still colliding: extend until unique.
    while newkey in allkeys:
        newkey += "_"
    return newkey
+
def namerepl(match):
    # re.sub callback used by expand_sub(): substitute the current
    # replacement (index _thissub) for a matched @name@ template variable.
    global _names, _thissub
    name = match.group(1)
    return _names[name][_thissub]
+
def expand_sub(substr,namestr):
    """Expand one repeat section.

    *namestr* holds the '#name=a,b,c#' replacement lists, *substr* the body;
    the body is emitted once per replacement with every @name@ substituted.
    All lists must have the same length or ValueError is raised.
    """
    global _names, _thissub
    # find all named replacements
    reps = named_re.findall(namestr)
    _names = {}
    _names.update(_special_names)
    numsubs = None
    for rep in reps:
        name = rep[0].strip()
        thelist = conv(rep[1])
        _names[name] = thelist

    # make lists out of string entries in name dictionary
    for name in _names.keys():
        entry = _names[name]
        entrylist = entry.split(',')
        _names[name] = entrylist
        num = len(entrylist)
        if numsubs is None:
            numsubs = num
        elif (numsubs != num):
            print namestr
            print substr
            raise ValueError, "Mismatch in number to replace"

    # now replace all keys for each of the lists
    mystr = ''
    for k in range(numsubs):
        # namerepl() reads the module-global _thissub to pick the k-th value.
        _thissub = k
        mystr += template_re.sub(namerepl, substr)
        mystr += "\n\n"
    return mystr
+
+
+_head = \
+"""/* This file was autogenerated from a template DO NOT EDIT!!!!
+ Changes should be made to the original source (.src) file
+*/
+
+"""
+
def get_line_header(str,beg):
    """Return the text on *str*'s line before offset *beg*: everything after
    the previous newline (exclusive) up to, but not including, *beg*.

    BUG FIX: the original character-by-character backward scan stopped on
    ``ind > 0`` and therefore dropped the character at index 0 whenever the
    line was the first line of the string; rfind handles that case cleanly.
    """
    nl = str.rfind('\n', 0, beg)
    # rfind returns -1 when there is no preceding newline, so this slice
    # correctly starts at index 0 in that case.
    return str[nl + 1:beg]
+
def process_str(allstr):
    """Expand every repeat section of *allstr* and return the result,
    prefixed with the do-not-edit header."""
    pieces = [_head]
    cursor = 0
    # parse_structure returns sorted (header_start, body_start, body_end,
    # section_end) offset tuples for each begin-repeat section.
    for head, body_start, body_end, sec_end in parse_structure(allstr):
        pieces.append(allstr[cursor:head])
        pieces.append(expand_sub(allstr[body_start:body_end],
                                 allstr[head:body_start]))
        cursor = sec_end
    pieces.append(allstr[cursor:])
    return ''.join(pieces)
+
+include_src_re = re.compile(r"(\n|\A)#include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]",re.I)
+
def resolve_includes(source):
    """Return *source*'s lines with '#include "<file>.src"' directives
    recursively replaced by the lines of the referenced template file.

    Relative include paths are resolved against *source*'s directory; an
    include whose target does not exist is kept verbatim.
    """
    d = os.path.dirname(source)
    fid = open(source)
    lines = []
    for line in fid.readlines():
        m = include_src_re.match(line)
        if m:
            fn = m.group('name')
            if not os.path.isabs(fn):
                fn = os.path.join(d,fn)
            if os.path.isfile(fn):
                print 'Including file',fn
                lines.extend(resolve_includes(fn))
            else:
                # Missing include target: leave the directive in place.
                lines.append(line)
        else:
            lines.append(line)
    fid.close()
    return lines
+
def process_file(source):
    """Run template expansion over *source*, honoring '#include "*.src"'."""
    return process_str(''.join(resolve_includes(source)))
+
if __name__ == "__main__":

    # Filter stdin through the expander, or -- given a file argument --
    # write the expansion to the same path minus its '.src' extension.
    # (Renamed the local 'file' which shadowed the builtin.)
    try:
        fname = sys.argv[1]
    except IndexError:
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(fname, 'r')
        (base, ext) = os.path.splitext(fname)
        outfile = open(base, 'w')

    allstr = fid.read()
    writestr = process_str(allstr)
    outfile.write(writestr)
diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py
new file mode 100644
index 000000000..842a94756
--- /dev/null
+++ b/numpy/distutils/core.py
@@ -0,0 +1,138 @@
+
+import types
+from distutils.core import *
+try:
+ from setuptools import setup as old_setup
+ # very old setuptools don't have this
+ from setuptools.command import bdist_egg
+ have_setuptools = 1
+except ImportError:
+ from distutils.core import setup as old_setup
+ have_setuptools = 0
+
+from scipy.distutils.extension import Extension
+from scipy.distutils.command import config
+from scipy.distutils.command import build
+from scipy.distutils.command import build_py
+from scipy.distutils.command import config_compiler
+from scipy.distutils.command import build_ext
+from scipy.distutils.command import build_clib
+from scipy.distutils.command import build_src
+from scipy.distutils.command import build_scripts
+from scipy.distutils.command import sdist
+from scipy.distutils.command import install_data
+from scipy.distutils.command import install_headers
+from scipy.distutils.command import install
+from scipy.distutils.command import bdist_rpm
+from scipy.distutils.misc_util import get_data_files
+
# Command-class overrides installed by the setup() wrapper below; user
# supplied cmdclass entries take precedence (setup() merges them on top).
scipy_cmdclass = {'build': build.build,
                  'build_src': build_src.build_src,
                  'build_scripts': build_scripts.build_scripts,
                  'config_fc': config_compiler.config_fc,
                  'config': config.config,
                  'build_ext': build_ext.build_ext,
                  'build_py': build_py.build_py,
                  'build_clib': build_clib.build_clib,
                  'sdist': sdist.sdist,
                  'install_data': install_data.install_data,
                  'install_headers': install_headers.install_headers,
                  'install': install.install,
                  'bdist_rpm': bdist_rpm.bdist_rpm,
                  }
if have_setuptools:
    # Keep the setuptools-specific commands available as well.
    from setuptools.command import bdist_egg, develop, easy_install, egg_info
    scipy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
    scipy_cmdclass['develop'] = develop.develop
    scipy_cmdclass['easy_install'] = easy_install.easy_install
    scipy_cmdclass['egg_info'] = egg_info.egg_info
+
def setup(**attr):
    """Enhanced distutils setup().

    Installs the scipy command overrides (user cmdclass entries win), moves
    per-extension (name, build_info) source libraries into the global
    'libraries' list, guarantees a 'headers' list whenever extensions or
    libraries are present, and expands directory entries in data_files.
    """
    cmdclass = scipy_cmdclass.copy()

    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass

    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules', []):
        plain_names = []
        for item in ext.libraries:
            if type(item) is type(()):
                _check_append_ext_library(libraries, item)
                plain_names.append(item[0])
            else:
                assert type(item) is type(''), repr(item)
                plain_names.append(item)
        ext.libraries = plain_names
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)

    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
           and 'headers' not in new_attr:
        new_attr['headers'] = []

    # Expand directories in data_files to files
    if 'data_files' in new_attr:
        expanded = []
        for data in new_attr['data_files']:
            if type(data) is type(''):
                expanded.append(get_data_files(data)[0])
            else:
                expanded.append((data[0], get_data_files(data)))
        new_attr['data_files'] = expanded

    return old_setup(**new_attr)
+
+def _check_append_library(libraries, item):
+ import warnings
+ for libitem in libraries:
+ if type(libitem) is type(()):
+ if type(item) is type(()):
+ if item[0]==libitem[0]:
+ if item[1] is libitem[1]:
+ return
+ warnings.warn("[0] libraries list contains '%s' with"\
+ " different build_info" % (item[0]))
+ break
+ else:
+ if item==libitem[0]:
+ warnings.warn("[1] libraries list contains '%s' with"\
+ " no build_info" % (item[0]))
+ break
+ else:
+ if type(item) is type(()):
+ if item[0]==libitem:
+ warnings.warn("[2] libraries list contains '%s' with"\
+ " no build_info" % (item[0]))
+ break
+ else:
+ if item==libitem:
+ return
+ libraries.append(item)
+ return
+
+def _check_append_ext_library(libraries, (lib_name,build_info)):
+ import warnings
+ for item in libraries:
+ if type(item) is type(()):
+ if item[0]==lib_name:
+ if item[1] is build_info:
+ return
+ warnings.warn("[3] libraries list contains '%s' with"\
+ " different build_info" % (lib_name))
+ break
+ elif item==lib_name:
+ warnings.warn("[4] libraries list contains '%s' with"\
+ " no build_info" % (lib_name))
+ break
+ libraries.append((lib_name,build_info))
+ return
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
new file mode 100644
index 000000000..92dd5486c
--- /dev/null
+++ b/numpy/distutils/cpuinfo.py
@@ -0,0 +1,687 @@
+#!/usr/bin/env python
+"""
+cpuinfo
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+Note: This should be merged into proc at some point. Perhaps proc should
+be returning classes like this instead of using dictionaries.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Revision: 1.1 $
+$Date: 2005/04/09 19:29:34 $
+Pearu Peterson
+"""
+
+__version__ = "$Id: cpuinfo.py,v 1.1 2005/04/09 19:29:34 pearu Exp $"
+
+__all__ = ['cpu']
+
+import sys,string,re,types
+
+class cpuinfo_base:
+ """Holds CPU information and provides methods for requiring
+ the availability of various CPU features.
+ """
+
+ def _try_call(self,func):
+ try:
+ return func()
+ except:
+ pass
+
+ def __getattr__(self,name):
+ if name[0]!='_':
+ if hasattr(self,'_'+name):
+ attr = getattr(self,'_'+name)
+ if type(attr) is types.MethodType:
+ return lambda func=self._try_call,attr=attr : func(attr)
+ else:
+ return lambda : None
+ raise AttributeError,name
+
+ def _getNCPUs(self):
+ return 1
+
+ def _is_32bit(self):
+ return not self.is_64bit()
+
+class linux_cpuinfo(cpuinfo_base):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ for line in open('/proc/cpuinfo').readlines():
+ name_value = map(string.strip,string.split(line,':',1))
+ if len(name_value)!=2:
+ continue
+ name,value = name_value
+ if not info or info[-1].has_key(name): # next processor
+ info.append({})
+ info[-1][name] = value
+ import commands
+ status,output = commands.getstatusoutput('uname -m')
+ if not status:
+ if not info: info.append({})
+ info[-1]['uname_m'] = string.strip(output)
+ except:
+ print sys.exc_value,'(ignoring)'
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ # Athlon
+
+ def _is_AMD(self):
+ return self.info[0]['vendor_id']=='AuthenticAMD'
+
+ def _is_AthlonK6_2(self):
+ return self._is_AMD() and self.info[0]['model'] == '2'
+
+ def _is_AthlonK6_3(self):
+ return self._is_AMD() and self.info[0]['model'] == '3'
+
+ def _is_AthlonK6(self):
+ return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None
+
+ def _is_AthlonK7(self):
+ return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None
+
+ def _is_AthlonMP(self):
+ return re.match(r'.*?Athlon\(tm\) MP\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Athlon64(self):
+ return re.match(r'.*?Athlon\(tm\) 64\b',
+ self.info[0]['model name']) is not None
+
+ def _is_AthlonHX(self):
+ return re.match(r'.*?Athlon HX\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Opteron(self):
+ return re.match(r'.*?Opteron\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Hammer(self):
+ return re.match(r'.*?Hammer\b',
+ self.info[0]['model name']) is not None
+
+ # Alpha
+
+ def _is_Alpha(self):
+ return self.info[0]['cpu']=='Alpha'
+
+ def _is_EV4(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
+
+ def _is_EV5(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
+
+ def _is_EV56(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
+
+ def _is_PCA56(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
+
+ # Intel
+
+ #XXX
+ _is_i386 = _not_impl
+
+ def _is_Intel(self):
+ return self.info[0]['vendor_id']=='GenuineIntel'
+
+ def _is_i486(self):
+ return self.info[0]['cpu']=='i486'
+
+ def _is_i586(self):
+ return self.is_Intel() and self.info[0]['cpu family'] == '5'
+
+ def _is_i686(self):
+ return self.is_Intel() and self.info[0]['cpu family'] == '6'
+
+ def _is_Celeron(self):
+ return re.match(r'.*?Celeron',
+ self.info[0]['model name']) is not None
+
+ def _is_Pentium(self):
+ return re.match(r'.*?Pentium',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumII(self):
+ return re.match(r'.*?Pentium.*?II\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumPro(self):
+ return re.match(r'.*?PentiumPro\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumMMX(self):
+ return re.match(r'.*?Pentium.*?MMX\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumIII(self):
+ return re.match(r'.*?Pentium.*?III\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumIV(self):
+ return re.match(r'.*?Pentium.*?(IV|4)\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Prescott(self):
+ return self.is_PentiumIV() and self.has_sse3()
+
+ def _is_Nocona(self):
+ return self.is_PentiumIV() and self.is_64bit()
+
+ def _is_Itanium(self):
+ return re.match(r'.*?Itanium\b',
+ self.info[0]['family']) is not None
+
+ def _is_XEON(self):
+ return re.match(r'.*?XEON\b',
+ self.info[0]['model name']) is not None
+
+ _is_Xeon = _is_XEON
+
+ # Varia
+
+ def _is_singleCPU(self):
+ return len(self.info) == 1
+
+ def _getNCPUs(self):
+ return len(self.info)
+
+ def _has_fdiv_bug(self):
+ return self.info[0]['fdiv_bug']=='yes'
+
+ def _has_f00f_bug(self):
+ return self.info[0]['f00f_bug']=='yes'
+
+ def _has_mmx(self):
+ return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None
+
+ def _has_sse(self):
+ return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None
+
+ def _has_sse2(self):
+ return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None
+
+ def _has_sse3(self):
+ return re.match(r'.*?\bsse3\b',self.info[0]['flags']) is not None
+
+ def _has_3dnow(self):
+ return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None
+
+ def _has_3dnowext(self):
+ return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None
+
+ def _is_64bit(self):
+ if self.is_Alpha():
+ return True
+ if self.info[0].get('clflush size','')=='64':
+ return True
+ if self.info[0].get('uname_m','')=='x86_64':
+ return True
+ if self.info[0].get('arch','')=='IA-64':
+ return True
+ return False
+
+ def _is_32bit(self):
+ return not self.is_64bit()
+
+class irix_cpuinfo(cpuinfo_base):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ import commands
+ status,output = commands.getstatusoutput('sysconf')
+ if status not in [0,256]:
+ return
+ for line in output.split('\n'):
+ name_value = map(string.strip,string.split(line,' ',1))
+ if len(name_value)!=2:
+ continue
+ name,value = name_value
+ if not info:
+ info.append({})
+ info[-1][name] = value
+ except:
+ print sys.exc_value,'(ignoring)'
+ self.__class__.info = info
+
+ #print info
+ def _not_impl(self): pass
+
+ def _is_singleCPU(self):
+ return self.info[0].get('NUM_PROCESSORS') == '1'
+
+ def _getNCPUs(self):
+ return int(self.info[0].get('NUM_PROCESSORS'))
+
+ def __cputype(self,n):
+ return self.info[0].get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
+ def _is_r2000(self): return self.__cputype(2000)
+ def _is_r3000(self): return self.__cputype(3000)
+ def _is_r3900(self): return self.__cputype(3900)
+ def _is_r4000(self): return self.__cputype(4000)
+ def _is_r4100(self): return self.__cputype(4100)
+ def _is_r4300(self): return self.__cputype(4300)
+ def _is_r4400(self): return self.__cputype(4400)
+ def _is_r4600(self): return self.__cputype(4600)
+ def _is_r4650(self): return self.__cputype(4650)
+ def _is_r5000(self): return self.__cputype(5000)
+ def _is_r6000(self): return self.__cputype(6000)
+ def _is_r8000(self): return self.__cputype(8000)
+ def _is_r10000(self): return self.__cputype(10000)
+ def _is_r12000(self): return self.__cputype(12000)
+ def _is_rorion(self): return self.__cputype('orion')
+
+ def get_ip(self):
+ try: return self.info[0].get('MACHINE')
+ except: pass
+ def __machine(self,n):
+ return self.info[0].get('MACHINE').lower() == 'ip%s' % (n)
+ def _is_IP19(self): return self.__machine(19)
+ def _is_IP20(self): return self.__machine(20)
+ def _is_IP21(self): return self.__machine(21)
+ def _is_IP22(self): return self.__machine(22)
+ def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
+ def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
+ def _is_IP24(self): return self.__machine(24)
+ def _is_IP25(self): return self.__machine(25)
+ def _is_IP26(self): return self.__machine(26)
+ def _is_IP27(self): return self.__machine(27)
+ def _is_IP28(self): return self.__machine(28)
+ def _is_IP30(self): return self.__machine(30)
+ def _is_IP32(self): return self.__machine(32)
+ def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
+ def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
+
+class darwin_cpuinfo(cpuinfo_base):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ import commands
+ status,output = commands.getstatusoutput('arch')
+ if not status:
+ if not info: info.append({})
+ info[-1]['arch'] = string.strip(output)
+ status,output = commands.getstatusoutput('machine')
+ if not status:
+ if not info: info.append({})
+ info[-1]['machine'] = string.strip(output)
+ status,output = commands.getstatusoutput('sysctl hw')
+ if not status:
+ if not info: info.append({})
+ d = {}
+ for l in string.split(output,'\n'):
+ l = map(string.strip,string.split(l, '='))
+ if len(l)==2:
+ d[l[0]]=l[1]
+ info[-1]['sysctl_hw'] = d
+ except:
+ print sys.exc_value,'(ignoring)'
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _getNCPUs(self):
+ try: return int(self.info[0]['sysctl_hw']['hw.ncpu'])
+ except: return 1
+
+ def _is_Power_Macintosh(self):
+ return self.info[0]['sysctl_hw']['hw.machine']=='Power Macintosh'
+
+ def _is_i386(self):
+ return self.info[0]['arch']=='i386'
+ def _is_ppc(self):
+ return self.info[0]['arch']=='ppc'
+
+ def __machine(self,n):
+ return self.info[0]['machine'] == 'ppc%s'%n
+ def _is_ppc601(self): return self.__machine(601)
+ def _is_ppc602(self): return self.__machine(602)
+ def _is_ppc603(self): return self.__machine(603)
+ def _is_ppc603e(self): return self.__machine('603e')
+ def _is_ppc604(self): return self.__machine(604)
+ def _is_ppc604e(self): return self.__machine('604e')
+ def _is_ppc620(self): return self.__machine(620)
+ def _is_ppc630(self): return self.__machine(630)
+ def _is_ppc740(self): return self.__machine(740)
+ def _is_ppc7400(self): return self.__machine(7400)
+ def _is_ppc7450(self): return self.__machine(7450)
+ def _is_ppc750(self): return self.__machine(750)
+ def _is_ppc403(self): return self.__machine(403)
+ def _is_ppc505(self): return self.__machine(505)
+ def _is_ppc801(self): return self.__machine(801)
+ def _is_ppc821(self): return self.__machine(821)
+ def _is_ppc823(self): return self.__machine(823)
+ def _is_ppc860(self): return self.__machine(860)
+
+class sunos_cpuinfo(cpuinfo_base):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ import commands
+ status,output = commands.getstatusoutput('arch')
+ if not status:
+ if not info: info.append({})
+ info[-1]['arch'] = string.strip(output)
+ status,output = commands.getstatusoutput('mach')
+ if not status:
+ if not info: info.append({})
+ info[-1]['mach'] = string.strip(output)
+ status,output = commands.getstatusoutput('uname -i')
+ if not status:
+ if not info: info.append({})
+ info[-1]['uname_i'] = string.strip(output)
+ status,output = commands.getstatusoutput('uname -X')
+ if not status:
+ if not info: info.append({})
+ d = {}
+ for l in string.split(output,'\n'):
+ l = map(string.strip,string.split(l, '='))
+ if len(l)==2:
+ d[l[0]]=l[1]
+ info[-1]['uname_X'] = d
+ status,output = commands.getstatusoutput('isainfo -b')
+ if not status:
+ if not info: info.append({})
+ info[-1]['isainfo_b'] = string.strip(output)
+ status,output = commands.getstatusoutput('isainfo -n')
+ if not status:
+ if not info: info.append({})
+ info[-1]['isainfo_n'] = string.strip(output)
+ status,output = commands.getstatusoutput('psrinfo -v 0')
+ if not status:
+ if not info: info.append({})
+ for l in string.split(output,'\n'):
+ m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at',l)
+ if m:
+ info[-1]['processor'] = m.group('p')
+ break
+ except:
+ print sys.exc_value,'(ignoring)'
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _is_32bit(self):
+ return self.info[0]['isainfo_b']=='32'
+ def _is_64bit(self):
+ return self.info[0]['isainfo_b']=='64'
+
+ def _is_i386(self):
+ return self.info[0]['isainfo_n']=='i386'
+ def _is_sparc(self):
+ return self.info[0]['isainfo_n']=='sparc'
+ def _is_sparcv9(self):
+ return self.info[0]['isainfo_n']=='sparcv9'
+
+ def _getNCPUs(self):
+ try: return int(self.info[0]['uname_X']['NumCPU'])
+ except: return 1
+
+ def _is_sun4(self):
+ return self.info[0]['arch']=='sun4'
+
+ def _is_SUNW(self):
+ return re.match(r'SUNW',self.info[0]['uname_i']) is not None
+ def _is_sparcstation5(self):
+ return re.match(r'.*SPARCstation-5',self.info[0]['uname_i']) is not None
+ def _is_ultra1(self):
+ return re.match(r'.*Ultra-1',self.info[0]['uname_i']) is not None
+ def _is_ultra250(self):
+ return re.match(r'.*Ultra-250',self.info[0]['uname_i']) is not None
+ def _is_ultra2(self):
+ return re.match(r'.*Ultra-2',self.info[0]['uname_i']) is not None
+ def _is_ultra30(self):
+ return re.match(r'.*Ultra-30',self.info[0]['uname_i']) is not None
+ def _is_ultra4(self):
+ return re.match(r'.*Ultra-4',self.info[0]['uname_i']) is not None
+ def _is_ultra5_10(self):
+ return re.match(r'.*Ultra-5_10',self.info[0]['uname_i']) is not None
+ def _is_ultra5(self):
+ return re.match(r'.*Ultra-5',self.info[0]['uname_i']) is not None
+ def _is_ultra60(self):
+ return re.match(r'.*Ultra-60',self.info[0]['uname_i']) is not None
+ def _is_ultra80(self):
+ return re.match(r'.*Ultra-80',self.info[0]['uname_i']) is not None
+ def _is_ultraenterprice(self):
+ return re.match(r'.*Ultra-Enterprise',self.info[0]['uname_i']) is not None
+ def _is_ultraenterprice10k(self):
+ return re.match(r'.*Ultra-Enterprise-10000',self.info[0]['uname_i']) is not None
+ def _is_sunfire(self):
+ return re.match(r'.*Sun-Fire',self.info[0]['uname_i']) is not None
+ def _is_ultra(self):
+ return re.match(r'.*Ultra',self.info[0]['uname_i']) is not None
+
+ def _is_cpusparcv7(self):
+ return self.info[0]['processor']=='sparcv7'
+ def _is_cpusparcv8(self):
+ return self.info[0]['processor']=='sparcv8'
+ def _is_cpusparcv9(self):
+ return self.info[0]['processor']=='sparcv9'
+
+class win32_cpuinfo(cpuinfo_base):
+
+ info = None
+ pkey = "HARDWARE\\DESCRIPTION\\System\\CentralProcessor"
+ # XXX: what does the value of
+ # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
+ # mean?
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ #XXX: Bad style to use so long `try:...except:...`. Fix it!
+ import _winreg
+ pkey = "HARDWARE\\DESCRIPTION\\System\\CentralProcessor"
+ prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\
+ "\s+stepping\s+(?P<STP>\d+)",re.IGNORECASE)
+ chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,pkey)
+ pnum=0
+ while 1:
+ try:
+ proc=_winreg.EnumKey(chnd,pnum)
+ except _winreg.error:
+ break
+ else:
+ pnum+=1
+ print proc
+ info.append({"Processor":proc})
+ phnd=_winreg.OpenKey(chnd,proc)
+ pidx=0
+ while True:
+ try:
+ name,value,vtpe=_winreg.EnumValue(phnd,pidx)
+ except _winreg.error:
+ break
+ else:
+ pidx=pidx+1
+ info[-1][name]=value
+ if name=="Identifier":
+ srch=prgx.search(value)
+ if srch:
+ info[-1]["Family"]=int(srch.group("FML"))
+ info[-1]["Model"]=int(srch.group("MDL"))
+ info[-1]["Stepping"]=int(srch.group("STP"))
+ except:
+ print sys.exc_value,'(ignoring)'
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ # Athlon
+
+ def _is_AMD(self):
+ return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+ def _is_Am486(self):
+ return self.is_AMD() and self.info[0]['Family']==4
+
+ def _is_Am5x86(self):
+ return self.is_AMD() and self.info[0]['Family']==4
+
+ def _is_AMDK5(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model'] in [0,1,2,3]
+
+ def _is_AMDK6(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model'] in [6,7]
+
+ def _is_AMDK6_2(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==8
+
+ def _is_AMDK6_3(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==9
+
+ def _is_Athlon(self):
+ return self.is_AMD() and self.info[0]['Family']==6
+
+ def _is_Athlon64(self):
+ return self.is_AMD() and self.info[0]['Family']==15 \
+ and self.info[0]['Model']==4
+
+ def _is_Opteron(self):
+ return self.is_AMD() and self.info[0]['Family']==15 \
+ and self.info[0]['Model']==5
+
+ # Intel
+
+ def _is_Intel(self):
+ return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+ def _is_i386(self):
+ return self.info[0]['Family']==3
+
+ def _is_i486(self):
+ return self.info[0]['Family']==4
+
+ def _is_i586(self):
+ return self.is_Intel() and self.info[0]['Family']==5
+
+ def _is_i686(self):
+ return self.is_Intel() and self.info[0]['Family']==6
+
+ def _is_Pentium(self):
+ return self.is_Intel() and self.info[0]['Family']==5
+
+ def _is_PentiumMMX(self):
+ return self.is_Intel() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==4
+
+ def _is_PentiumPro(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model']==1
+
+ def _is_PentiumII(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model'] in [3,5,6]
+
+ def _is_PentiumIII(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model'] in [7,8,9,10,11]
+
+ def _is_PentiumIV(self):
+ return self.is_Intel() and self.info[0]['Family']==15
+
+ # Varia
+
+ def _is_singleCPU(self):
+ return len(self.info) == 1
+
+ def _getNCPUs(self):
+ return len(self.info)
+
+ def _has_mmx(self):
+ if self.is_Intel():
+ return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+ or (self.info[0]['Family'] in [6,15])
+ elif self.is_AMD():
+ return self.info[0]['Family'] in [5,6,15]
+
+ def _has_sse(self):
+ if self.is_Intel():
+ return (self.info[0]['Family']==6 and \
+ self.info[0]['Model'] in [7,8,9,10,11]) \
+ or self.info[0]['Family']==15
+ elif self.is_AMD():
+ return (self.info[0]['Family']==6 and \
+ self.info[0]['Model'] in [6,7,8,10]) \
+ or self.info[0]['Family']==15
+
+ def _has_sse2(self):
+ return self.info[0]['Family']==15
+
+ def _has_3dnow(self):
+ # XXX: does only AMD have 3dnow??
+ return self.is_AMD() and self.info[0]['Family'] in [5,6,15]
+
+ def _has_3dnowext(self):
+ return self.is_AMD() and self.info[0]['Family'] in [6,15]
+
+if sys.platform[:5] == 'linux': # variations: linux2,linux-i386 (any others?)
+ cpuinfo = linux_cpuinfo
+elif sys.platform[:4] == 'irix':
+ cpuinfo = irix_cpuinfo
+elif sys.platform == 'darwin':
+ cpuinfo = darwin_cpuinfo
+elif sys.platform[:5] == 'sunos':
+ cpuinfo = sunos_cpuinfo
+elif sys.platform[:5] == 'win32':
+ cpuinfo = win32_cpuinfo
+elif sys.platform[:6] == 'cygwin':
+ cpuinfo = linux_cpuinfo
+#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
+else:
+ cpuinfo = cpuinfo_base
+
+cpu = cpuinfo()
+
+if __name__ == "__main__":
+
+ cpu.is_blaa()
+ cpu.is_Intel()
+ cpu.is_Alpha()
+
+ print 'CPU information:',
+ for name in dir(cpuinfo):
+ if name[0]=='_' and name[1]!='_':
+ r = getattr(cpu,name[1:])()
+ if r:
+ if r!=1:
+ print '%s=%s' %(name[1:],r),
+ else:
+ print name[1:],
+ print
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
new file mode 100644
index 000000000..6c5554bba
--- /dev/null
+++ b/numpy/distutils/exec_command.py
@@ -0,0 +1,645 @@
+#!/usr/bin/env python
+"""
+exec_command
+
+Implements exec_command function that is (almost) equivalent to
+commands.getstatusoutput function but on NT, DOS systems the
+returned status is actually correct (though, the returned status
+values may be different by a factor). In addition, exec_command
+takes keyword arguments for (re-)defining environment variables.
+
+Provides functions:
+ exec_command --- execute command in a specified directory and
+ in the modified environment.
+ splitcmdline --- inverse of ' '.join(argv)
+ find_executable --- locate a command using info from environment
+ variable PATH. Equivalent to posix `which`
+ command.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+Created: 11 January 2003
+
+Requires: Python 2.x
+
+Successfully tested on:
+ os.name | sys.platform | comments
+ --------+--------------+----------
+ posix | linux2 | Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
+ PyCrust 0.9.3, Idle 1.0.2
+ posix | linux2 | Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
+ posix | sunos5 | SunOS 5.9, Python 2.2, 2.3.2
+ posix | darwin | Darwin 7.2.0, Python 2.3
+ nt | win32 | Windows Me
+ Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
+ Python 2.1.1 Idle 0.8
+ nt | win32 | Windows 98, Python 2.1.1. Idle 0.8
+ nt | win32 | Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
+ fail i.e. redefining environment variables may
+ not work. FIXED: don't use cygwin echo!
+ Comment: also `cmd /c echo` will not work
+ but redefining environment variables do work.
+ posix | cygwin | Cygwin 98-4.10, Python 2.3.3(cygming special)
+ nt | win32 | Windows XP, Python 2.3.3
+
+Known bugs:
+- Tests, that send messages to stderr, fail when executed from MSYS prompt
+ because the messages are lost at some point.
+"""
+
+__all__ = ['exec_command','find_executable']
+
+import os
+import re
+import sys
+import tempfile
+
+############################################################
+
+from log import _global_log as log
+
+############################################################
+
+def get_pythonexe():
+ pythonexe = sys.executable
+ if os.name in ['nt','dos']:
+ fdir,fn = os.path.split(pythonexe)
+ fn = fn.upper().replace('PYTHONW','PYTHON')
+ pythonexe = os.path.join(fdir,fn)
+ assert os.path.isfile(pythonexe),`pythonexe`+' is not a file'
+ return pythonexe
+
+############################################################
+
+def splitcmdline(line):
+ """ Inverse of ' '.join(sys.argv).
+ """
+ log.debug('splitcmdline(%r)' % (line))
+ lst = []
+ flag = 0
+ s,pc,cc = '','',''
+ for nc in line+' ':
+ if flag==0:
+ flag = (pc != '\\' and \
+ ((cc=='"' and 1) or (cc=="'" and 2) or \
+ (cc==' ' and pc!=' ' and -2))) or flag
+ elif flag==1:
+ flag = (cc=='"' and pc!='\\' and nc==' ' and -1) or flag
+ elif flag==2:
+ flag = (cc=="'" and pc!='\\' and nc==' ' and -1) or flag
+ if flag!=-2:
+ s += cc
+ if flag<0:
+ flag = 0
+ s = s.strip()
+ if s:
+ lst.append(s)
+ s = ''
+ pc,cc = cc,nc
+ else:
+ s = s.strip()
+ if s:
+ lst.append(s)
+ log.debug('splitcmdline -> %r' % (lst))
+ return lst
+
+def test_splitcmdline():
+ l = splitcmdline('a b cc')
+ assert l==['a','b','cc'],`l`
+ l = splitcmdline('a')
+ assert l==['a'],`l`
+ l = splitcmdline('a " b cc"')
+ assert l==['a','" b cc"'],`l`
+ l = splitcmdline('"a bcc" -h')
+ assert l==['"a bcc"','-h'],`l`
+ l = splitcmdline(r'"\"a \" bcc" -h')
+ assert l==[r'"\"a \" bcc"','-h'],`l`
+ l = splitcmdline(" 'a bcc' -h")
+ assert l==["'a bcc'",'-h'],`l`
+ l = splitcmdline(r"'\'a \' bcc' -h")
+ assert l==[r"'\'a \' bcc'",'-h'],`l`
+
+############################################################
+
+def find_executable(exe, path=None):
+ """ Return full path of a executable.
+ """
+ log.debug('find_executable(%r)' % exe)
+ orig_exe = exe
+ if path is None:
+ path = os.environ.get('PATH',os.defpath)
+ if os.name=='posix' and sys.version[:3]>'2.1':
+ realpath = os.path.realpath
+ else:
+ realpath = lambda a:a
+ if exe[0]=='"':
+ exe = exe[1:-1]
+ suffices = ['']
+ if os.name in ['nt','dos','os2']:
+ fn,ext = os.path.splitext(exe)
+ extra_suffices = ['.exe','.com','.bat']
+ if ext.lower() not in extra_suffices:
+ suffices = extra_suffices
+ if os.path.isabs(exe):
+ paths = ['']
+ else:
+ paths = map(os.path.abspath, path.split(os.pathsep))
+ if 0 and os.name == 'nt':
+ new_paths = []
+ cygwin_paths = []
+ for path in paths:
+ d,p = os.path.splitdrive(path)
+ if p.lower().find('cygwin') >= 0:
+ cygwin_paths.append(path)
+ else:
+ new_paths.append(path)
+ paths = new_paths + cygwin_paths
+ for path in paths:
+ fn = os.path.join(path,exe)
+ for s in suffices:
+ f_ext = fn+s
+ if not os.path.islink(f_ext):
+ # see comment below.
+ f_ext = realpath(f_ext)
+ if os.path.isfile(f_ext) and os.access(f_ext,os.X_OK):
+ log.debug('Found executable %s' % f_ext)
+ return f_ext
+ if os.path.islink(exe):
+ # Don't follow symbolic links. E.g. when using colorgcc then
+ # gcc -> /usr/bin/colorgcc
+ # g77 -> /usr/bin/colorgcc
+ pass
+ else:
+ exe = realpath(exe)
+ if not os.path.isfile(exe) or os.access(exe,os.X_OK):
+ log.warn('Could not locate executable %s' % orig_exe)
+ return orig_exe
+ return exe
+
+############################################################
+
+def _preserve_environment( names ):
+ log.debug('_preserve_environment(%r)' % (names))
+ env = {}
+ for name in names:
+ env[name] = os.environ.get(name)
+ return env
+
+def _update_environment( **env ):
+ log.debug('_update_environment(...)')
+ for name,value in env.items():
+ os.environ[name] = value or ''
+
+def exec_command( command,
+ execute_in='', use_shell=None, use_tee = None,
+ _with_python = 1,
+ **env ):
+ """ Return (status,output) of executed command.
+
+ command is a concatenated string of executable and arguments.
+ The output contains both stdout and stderr messages.
+ The following special keyword arguments can be used:
+ use_shell - execute `sh -c command`
+ use_tee - pipe the output of command through tee
+ execute_in - before command `cd execute_in` and after `cd -`.
+
+ On NT, DOS systems the returned status is correct for external commands.
+ Wild cards will not work for non-posix systems or when use_shell=0.
+ """
+ log.debug('exec_command(%r,%s)' % (command,\
+ ','.join(['%s=%r'%kv for kv in env.items()])))
+
+ if use_tee is None:
+ use_tee = os.name=='posix'
+ if use_shell is None:
+ use_shell = os.name=='posix'
+ execute_in = os.path.abspath(execute_in)
+ oldcwd = os.path.abspath(os.getcwd())
+
+ if __name__[-12:] == 'exec_command':
+ exec_dir = os.path.dirname(os.path.abspath(__file__))
+ elif os.path.isfile('exec_command.py'):
+ exec_dir = os.path.abspath('.')
+ else:
+ exec_dir = os.path.abspath(sys.argv[0])
+ if os.path.isfile(exec_dir):
+ exec_dir = os.path.dirname(exec_dir)
+
+ if oldcwd!=execute_in:
+ os.chdir(execute_in)
+ log.debug('New cwd: %s' % execute_in)
+ else:
+ log.debug('Retaining cwd: %s' % oldcwd)
+
+ oldenv = _preserve_environment( env.keys() )
+ _update_environment( **env )
+
+ try:
+ # _exec_command is robust but slow, it relies on
+ # usable sys.std*.fileno() descriptors. If they
+ # are bad (like in win32 Idle, PyCrust environments)
+ # then _exec_command_python (even slower)
+ # will be used as a last resort.
+ #
+ # _exec_command_posix uses os.system and is faster
+ # but not on all platforms os.system will return
+ # a correct status.
+ if _with_python and (0 or sys.__stdout__.fileno()==-1):
+ st = _exec_command_python(command,
+ exec_command_dir = exec_dir,
+ **env)
+ elif os.name=='posix':
+ st = _exec_command_posix(command,
+ use_shell=use_shell,
+ use_tee=use_tee,
+ **env)
+ else:
+ st = _exec_command(command, use_shell=use_shell,
+ use_tee=use_tee,**env)
+ finally:
+ if oldcwd!=execute_in:
+ os.chdir(oldcwd)
+ log.debug('Restored cwd to %s' % oldcwd)
+ _update_environment(**oldenv)
+
+ return st
+
+def _exec_command_posix( command,
+ use_shell = None,
+ use_tee = None,
+ **env ):
+ log.debug('_exec_command_posix(...)')
+
+ if type(command) is type([]):
+ command_str = ' '.join(command)
+ else:
+ command_str = command
+
+ tmpfile = tempfile.mktemp()
+ stsfile = None
+ if use_tee:
+ stsfile = tempfile.mktemp()
+ filter = ''
+ if use_tee == 2:
+ filter = r'| tr -cd "\n" | tr "\n" "."; echo'
+ command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\
+ % (command_str,stsfile,tmpfile,filter)
+ else:
+ stsfile = tempfile.mktemp()
+ command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\
+ % (command_str,stsfile,tmpfile)
+ #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile)
+
+ log.debug('Running os.system(%r)' % (command_posix))
+ status = os.system(command_posix)
+
+ if use_tee:
+ if status:
+ # if command_tee fails then fall back to robust exec_command
+ log.warn('_exec_command_posix failed (status=%s)' % status)
+ return _exec_command(command, use_shell=use_shell, **env)
+
+ if stsfile is not None:
+ f = open(stsfile,'r')
+ status_text = f.read()
+ status = int(status_text)
+ f.close()
+ os.remove(stsfile)
+
+ f = open(tmpfile,'r')
+ text = f.read()
+ f.close()
+ os.remove(tmpfile)
+
+ if text[-1:]=='\n':
+ text = text[:-1]
+
+ return status, text
+
+
+def _exec_command_python(command,
+ exec_command_dir='', **env):
+ log.debug('_exec_command_python(...)')
+
+ python_exe = get_pythonexe()
+ cmdfile = tempfile.mktemp()
+ stsfile = tempfile.mktemp()
+ outfile = tempfile.mktemp()
+
+ f = open(cmdfile,'w')
+ f.write('import os\n')
+ f.write('import sys\n')
+ f.write('sys.path.insert(0,%r)\n' % (exec_command_dir))
+ f.write('from exec_command import exec_command\n')
+ f.write('del sys.path[0]\n')
+ f.write('cmd = %r\n' % command)
+ f.write('os.environ = %r\n' % (os.environ))
+ f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env))
+ f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile))
+ f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile))
+ f.close()
+
+ cmd = '%s %s' % (python_exe, cmdfile)
+ status = os.system(cmd)
+ assert not status,`cmd`+' failed'
+ os.remove(cmdfile)
+
+ f = open(stsfile,'r')
+ status = int(f.read())
+ f.close()
+ os.remove(stsfile)
+
+ f = open(outfile,'r')
+ text = f.read()
+ f.close()
+ os.remove(outfile)
+
+ return status, text
+
+def quote_arg(arg):
+ if arg[0]!='"' and ' ' in arg:
+ return '"%s"' % arg
+ return arg
+
+def _exec_command( command, use_shell=None, use_tee = None, **env ):
+ log.debug('_exec_command(...)')
+
+ if use_shell is None:
+ use_shell = os.name=='posix'
+ if use_tee is None:
+ use_tee = os.name=='posix'
+
+ using_command = 0
+ if use_shell:
+ # We use shell (unless use_shell==0) so that wildcards can be
+ # used.
+ sh = os.environ.get('SHELL','/bin/sh')
+ if type(command) is type([]):
+ argv = [sh,'-c',' '.join(command)]
+ else:
+ argv = [sh,'-c',command]
+ else:
+        # On NT, DOS we avoid using command.com as its exit status is
+ # not related to the exit status of a command.
+ if type(command) is type([]):
+ argv = command[:]
+ else:
+ argv = splitcmdline(command)
+
+ if hasattr(os,'spawnvpe'):
+ spawn_command = os.spawnvpe
+ else:
+ spawn_command = os.spawnve
+ argv[0] = find_executable(argv[0])
+ if not os.path.isfile(argv[0]):
+ log.warn('Executable %s does not exist' % (argv[0]))
+ if os.name in ['nt','dos']:
+ # argv[0] might be internal command
+ argv = [os.environ['COMSPEC'],'/C'] + argv
+ using_command = 1
+
+ # sys.__std*__ is used instead of sys.std* because environments
+ # like IDLE, PyCrust, etc overwrite sys.std* commands.
+ so_fileno = sys.__stdout__.fileno()
+ se_fileno = sys.__stderr__.fileno()
+ so_flush = sys.__stdout__.flush
+ se_flush = sys.__stderr__.flush
+ so_dup = os.dup(so_fileno)
+ se_dup = os.dup(se_fileno)
+
+ outfile = tempfile.mktemp()
+ fout = open(outfile,'w')
+ if using_command:
+ errfile = tempfile.mktemp()
+ ferr = open(errfile,'w')
+
+ log.debug('Running %s(%s,%r,%r,os.environ)' \
+ % (spawn_command.__name__,os.P_WAIT,argv[0],argv))
+
+ argv0 = argv[0]
+ if not using_command:
+ argv[0] = quote_arg(argv0)
+
+ so_flush()
+ se_flush()
+ os.dup2(fout.fileno(),so_fileno)
+ if using_command:
+ #XXX: disabled for now as it does not work from cmd under win32.
+ # Tests fail on msys
+ os.dup2(ferr.fileno(),se_fileno)
+ else:
+ os.dup2(fout.fileno(),se_fileno)
+ try:
+ status = spawn_command(os.P_WAIT,argv0,argv,os.environ)
+ except OSError,errmess:
+ status = 999
+ sys.stderr.write('%s: %s'%(errmess,argv[0]))
+
+ so_flush()
+ se_flush()
+ os.dup2(so_dup,so_fileno)
+ os.dup2(se_dup,se_fileno)
+
+ fout.close()
+ fout = open(outfile,'r')
+ text = fout.read()
+ fout.close()
+ os.remove(outfile)
+
+ if using_command:
+ ferr.close()
+ ferr = open(errfile,'r')
+ errmess = ferr.read()
+ ferr.close()
+ os.remove(errfile)
+ if errmess and not status:
+ # Not sure how to handle the case where errmess
+ # contains only warning messages and that should
+ # not be treated as errors.
+ #status = 998
+ if text:
+ text = text + '\n'
+ #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
+ text = text + errmess
+ print errmess
+ if text[-1:]=='\n':
+ text = text[:-1]
+ if status is None:
+ status = 0
+
+ if use_tee:
+ print text
+
+ return status, text
+
+
def test_nt(**kws):
    """ Smoke-test exec_command() on Windows (os.name in ('nt', 'dos')).

    Each check runs a small command through exec_command and asserts on
    the (status, output) pair; the first failing check raises
    AssertionError carrying that pair.
    """
    pythonexe = get_pythonexe()
    echo = find_executable('echo')
    # find_executable returning something other than the bare name means a
    # cygwin echo.exe was found on PATH.
    using_cygwin_echo = echo != 'echo'
    if using_cygwin_echo:
        log.warn('Using cygwin echo in win32 environment is not supported')

        # Environment-variable round trips through a Python child process.
        s,o=exec_command(pythonexe\
                         +' -c "import os;print os.environ.get(\'AAA\',\'\')"')
        assert s==0 and o=='',(s,o)

        # Per-call keyword arguments become environment overrides.
        s,o=exec_command(pythonexe\
                         +' -c "import os;print os.environ.get(\'AAA\')"',
                         AAA='Tere')
        assert s==0 and o=='Tere',(s,o)

        os.environ['BBB'] = 'Hi'
        s,o=exec_command(pythonexe\
                         +' -c "import os;print os.environ.get(\'BBB\',\'\')"')
        assert s==0 and o=='Hi',(s,o)

        # Override must win for the single call only...
        s,o=exec_command(pythonexe\
                         +' -c "import os;print os.environ.get(\'BBB\',\'\')"',
                         BBB='Hey')
        assert s==0 and o=='Hey',(s,o)

        # ...and the process-level value must be restored afterwards.
        s,o=exec_command(pythonexe\
                         +' -c "import os;print os.environ.get(\'BBB\',\'\')"')
        assert s==0 and o=='Hi',(s,o)
    elif 0:
        # NOTE(review): deliberately disabled branch (native 'echo'
        # variable-expansion checks); kept for reference.
        s,o=exec_command('echo Hello')
        assert s==0 and o=='Hello',(s,o)

        s,o=exec_command('echo a%AAA%')
        assert s==0 and o=='a',(s,o)

        s,o=exec_command('echo a%AAA%',AAA='Tere')
        assert s==0 and o=='aTere',(s,o)

        os.environ['BBB'] = 'Hi'
        s,o=exec_command('echo a%BBB%')
        assert s==0 and o=='aHi',(s,o)

        s,o=exec_command('echo a%BBB%',BBB='Hey')
        assert s==0 and o=='aHey', (s,o)
        s,o=exec_command('echo a%BBB%')
        assert s==0 and o=='aHi',(s,o)

    # Failing commands must report a nonzero status and some output.
    s,o=exec_command('this_is_not_a_command')
    assert s and o!='',(s,o)

    s,o=exec_command('type not_existing_file')
    assert s and o!='',(s,o)

    s,o=exec_command('echo path=%path%')
    assert s==0 and o!='',(s,o)

    # stderr must be captured into the returned output.
    s,o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \
                     % pythonexe)
    assert s==0 and o=='win32',(s,o)

    s,o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe)
    assert s==1 and o,(s,o)

    # Interleaved stderr writes must keep their order.
    s,o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\
                     % pythonexe)
    assert s==0 and o=='012',(s,o)

    # Child exit codes are propagated verbatim.
    s,o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe)
    assert s==15 and o=='',(s,o)

    s,o=exec_command('%s -c "print \'Heipa\'"' % pythonexe)
    assert s==0 and o=='Heipa',(s,o)

    print 'ok'
+
+def test_posix(**kws):
+ s,o=exec_command("echo Hello",**kws)
+ assert s==0 and o=='Hello',(s,o)
+
+ s,o=exec_command('echo $AAA',**kws)
+ assert s==0 and o=='',(s,o)
+
+ s,o=exec_command('echo "$AAA"',AAA='Tere',**kws)
+ assert s==0 and o=='Tere',(s,o)
+
+
+ s,o=exec_command('echo "$AAA"',**kws)
+ assert s==0 and o=='',(s,o)
+
+ os.environ['BBB'] = 'Hi'
+ s,o=exec_command('echo "$BBB"',**kws)
+ assert s==0 and o=='Hi',(s,o)
+
+ s,o=exec_command('echo "$BBB"',BBB='Hey',**kws)
+ assert s==0 and o=='Hey',(s,o)
+
+ s,o=exec_command('echo "$BBB"',**kws)
+ assert s==0 and o=='Hi',(s,o)
+
+
+ s,o=exec_command('this_is_not_a_command',**kws)
+ assert s!=0 and o!='',(s,o)
+
+ s,o=exec_command('echo path=$PATH',**kws)
+ assert s==0 and o!='',(s,o)
+
+ s,o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws)
+ assert s==0 and o=='posix',(s,o)
+
+ s,o=exec_command('python -c "raise \'Ignore me.\'"',**kws)
+ assert s==1 and o,(s,o)
+
+ s,o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws)
+ assert s==0 and o=='012',(s,o)
+
+ s,o=exec_command('python -c "import sys;sys.exit(15)"',**kws)
+ assert s==15 and o=='',(s,o)
+
+ s,o=exec_command('python -c "print \'Heipa\'"',**kws)
+ assert s==0 and o=='Heipa',(s,o)
+
+ print 'ok'
+
+def test_execute_in(**kws):
+ pythonexe = get_pythonexe()
+ tmpfile = tempfile.mktemp()
+ fn = os.path.basename(tmpfile)
+ tmpdir = os.path.dirname(tmpfile)
+ f = open(tmpfile,'w')
+ f.write('Hello')
+ f.close()
+
+ s,o = exec_command('%s -c "print \'Ignore the following IOError:\','\
+ 'open(%r,\'r\')"' % (pythonexe,fn),**kws)
+ assert s and o!='',(s,o)
+ s,o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe,fn),
+ execute_in = tmpdir,**kws)
+ assert s==0 and o=='Hello',(s,o)
+ os.remove(tmpfile)
+ print 'ok'
+
+def test_svn(**kws):
+ s,o = exec_command(['svn','status'],**kws)
+ assert s,(s,o)
+ print 'svn ok'
+
+def test_cl(**kws):
+ if os.name=='nt':
+ s,o = exec_command(['cl','/V'],**kws)
+ assert s,(s,o)
+ print 'cl ok'
+
# Select the platform-appropriate self-test at import time.
if os.name=='posix':
    test = test_posix
elif os.name in ['nt','dos']:
    test = test_nt
else:
    raise NotImplementedError,'exec_command tests for '+os.name

############################################################

if __name__ == "__main__":

    # Exercise the self-tests both without and with tee-ing of output.
    test_splitcmdline()
    test(use_tee=0)
    test(use_tee=1)
    test_execute_in(use_tee=0)
    test_execute_in(use_tee=1)
    test_svn(use_tee=1)
    test_cl(use_tee=1)
diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py
new file mode 100644
index 000000000..a1cab52f4
--- /dev/null
+++ b/numpy/distutils/extension.py
@@ -0,0 +1,74 @@
+"""distutils.extension
+
+Provides the Extension class, used to describe C/C++ extension
+modules in setup scripts.
+
+Overridden to support f2py.
+"""
+
+__revision__ = "$Id: extension.py,v 1.1 2005/04/09 19:29:34 pearu Exp $"
+
+from distutils.extension import Extension as old_Extension
+
import re
# Matchers for classifying source files by extension (case-insensitive).
# C++ sources:
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match
# Fortran sources and f2py .pyf signature files:
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match
+
class Extension(old_Extension):
    """ distutils Extension extended for f2py support.

    Differences from distutils.extension.Extension:
      - sources may contain Fortran/.pyf files and non-string source
        objects (resolved later by the build machinery);
      - extra attributes f2py_options and module_dirs are recognized;
      - swig_opts/depends/language are accepted on older Pythons.
    """
    def __init__ (self, name, sources,
                  include_dirs=None,
                  define_macros=None,
                  undef_macros=None,
                  library_dirs=None,
                  libraries=None,
                  runtime_library_dirs=None,
                  extra_objects=None,
                  extra_compile_args=None,
                  extra_link_args=None,
                  export_symbols=None,
                  swig_opts=None,
                  depends=None,
                  language=None,
                  f2py_options=None,
                  module_dirs=None,
                  ):
        old_Extension.__init__(self,name, [],
                               include_dirs,
                               define_macros,
                               undef_macros,
                               library_dirs,
                               libraries,
                               runtime_library_dirs,
                               extra_objects,
                               extra_compile_args,
                               extra_link_args,
                               export_symbols)
        # Sources are assigned after the base __init__ (which received [])
        # to avoid assert statements checking that sources contains strings.
        self.sources = sources

        # Python 2.4 distutils new features
        self.swig_opts = swig_opts or []

        # Python 2.3 distutils new features
        self.depends = depends or []
        self.language = language

        # scipy_distutils features
        self.f2py_options = f2py_options or []
        self.module_dirs = module_dirs or []

        return

    def has_cxx_sources(self):
        """ True if any source file has a C++ extension (.cpp/.cxx/.cc). """
        for source in self.sources:
            if cxx_ext_re(str(source)):
                return True
        return False

    def has_f2py_sources(self):
        """ True if any source is a Fortran file or an f2py .pyf file. """
        for source in self.sources:
            # str(source) for consistency with has_cxx_sources: sources
            # may hold non-string objects (see note in __init__), which
            # would otherwise make the regex call raise.
            if fortran_pyf_ext_re(str(source)):
                return True
        return False

# class Extension
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
new file mode 100644
index 000000000..f7cbc3bad
--- /dev/null
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -0,0 +1,755 @@
+"""scipy.distutils.fcompiler
+
+Contains FCompiler, an abstract base class that defines the interface
+for the scipy.distutils Fortran compiler abstraction model.
+"""
+
+__all__ = ['FCompiler','new_fcompiler','show_fcompilers',
+ 'dummy_fortran_file']
+
+import os
+import sys
+import re
+from types import StringType,NoneType
+from distutils.sysconfig import get_config_var
+from distutils.fancy_getopt import FancyGetopt
+from distutils.errors import DistutilsModuleError,DistutilsArgError,\
+ DistutilsExecError,CompileError,LinkError,DistutilsPlatformError
+from distutils.util import split_quoted
+
+from scipy.distutils.ccompiler import CCompiler, gen_lib_options
+from scipy.distutils import log
+from scipy.distutils.command.config_compiler import config_fc
+from distutils.spawn import _nt_quote_args
+
+class FCompiler(CCompiler):
+ """ Abstract base class to define the interface that must be implemented
+ by real Fortran compiler classes.
+
+ Methods that subclasses may redefine:
+
+ get_version_cmd(), get_linker_so(), get_version()
+ get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
+ get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
+ get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
+ get_flags_arch_f90(), get_flags_debug_f90(),
+ get_flags_fix(), get_flags_linker_so(), get_flags_version()
+
+ DON'T call these methods (except get_version) after
+ constructing a compiler instance or inside any other method.
+ All methods, except get_version_cmd() and get_flags_version(), may
+ call get_version() method.
+
+ After constructing a compiler instance, always call customize(dist=None)
+ method that finalizes compiler construction and makes the following
+ attributes available:
+ compiler_f77
+ compiler_f90
+ compiler_fix
+ linker_so
+ archiver
+ ranlib
+ libraries
+ library_dirs
+ """
+
+
    # Map source-file extension to compiler flavor used for it.
    language_map = {'.f':'f77',
                    '.for':'f77',
                    '.F':'f77',    # XXX: needs preprocessor
                    '.ftn':'f77',
                    '.f77':'f77',
                    '.f90':'f90',
                    '.F90':'f90',  # XXX: needs preprocessor
                    '.f95':'f90',
                    }
    # Preference order when several flavors could handle a source.
    language_order = ['f90','f77']

    # Regex (with a 'version' group) applied to version_cmd output;
    # subclasses override.
    version_pattern = None

    # Default command tables; subclasses override entries.  Each value is
    # [executable, flag, flag, ...] or None when unsupported.
    executables = {
        'version_cmd'  : ["f77","-v"],
        'compiler_f77' : ["f77"],
        'compiler_f90' : ["f90"],
        'compiler_fix' : ["f90","-fixed"],
        'linker_so'    : ["f90","-shared"],
        'linker_exe'   : ["f90"],
        'archiver'     : ["ar","-cr"],
        'ranlib'       : None,
        }

    compile_switch = "-c"
    object_switch = "-o "   # Ending space matters! It will be stripped
                            # but if it is missing then object_switch
                            # will be prefixed to object file name by
                            # string concatenation.
    library_switch = "-o "  # Ditto!

    # Switch to specify where module files are created and searched
    # for USE statement.  Normally it is a string and also here ending
    # space matters. See above.
    module_dir_switch = None

    # Switch to specify where module files are searched for USE statement.
    module_include_switch = '-I'

    pic_flags = []           # Flags to create position-independent code

    src_extensions = ['.for','.ftn','.f77','.f','.f90','.f95','.F','.F90']
    obj_extension = ".o"
    shared_lib_extension = get_config_var('SO')  # or .dll
    static_lib_extension = ".a"  # or .lib
    static_lib_format = "lib%s%s" # or %s%s
    shared_lib_format = "%s%s"
    exe_extension = ""
+
+ ######################################################################
+ ## Methods that subclasses may redefine. But don't call these methods!
+ ## They are private to FCompiler class and may return unexpected
+ ## results if used elsewhere. So, you have been warned..
+
    def get_version_cmd(self):
        """ Compiler command to print out version information.

        If the configured version_cmd executable name coincides with the
        class-level f77 or f90 executable name, substitute the customized
        command from self.compiler_f77/self.compiler_f90 instead, so that
        user overrides are honored when probing the version.
        """
        f77 = self.executables['compiler_f77']
        if f77 is not None:
            f77 = f77[0]
        cmd = self.executables['version_cmd']
        if cmd is not None:
            cmd = cmd[0]
            if cmd==f77:
                cmd = self.compiler_f77[0]
            else:
                f90 = self.executables['compiler_f90']
                if f90 is not None:
                    f90 = f90[0]
                if cmd==f90:
                    cmd = self.compiler_f90[0]
        return cmd
+
    def get_linker_so(self):
        """ Linker command to build shared libraries.

        As with get_version_cmd: when the class-level linker_so name
        matches the f77 or f90 executable name, return the customized
        compiler command so user overrides propagate to linking.
        """
        f77 = self.executables['compiler_f77']
        if f77 is not None:
            f77 = f77[0]
        ln = self.executables['linker_so']
        if ln is not None:
            ln = ln[0]
            if ln==f77:
                ln = self.compiler_f77[0]
            else:
                f90 = self.executables['compiler_f90']
                if f90 is not None:
                    f90 = f90[0]
                if ln==f90:
                    ln = self.compiler_f90[0]
        return ln
+
+ def get_linker_exe(self):
+ """ Linker command to build shared libraries. """
+ f77 = self.executables['compiler_f77']
+ if f77 is not None:
+ f77 = f77[0]
+ ln = self.executables.get('linker_exe')
+ if ln is not None:
+ ln = ln[0]
+ if ln==f77:
+ ln = self.compiler_f77[0]
+ else:
+ f90 = self.executables['compiler_f90']
+ if f90 is not None:
+ f90 = f90[0]
+ if ln==f90:
+ ln = self.compiler_f90[0]
+ return ln
+
    def get_flags(self):
        """ List of flags common to all compiler types. """
        return [] + self.pic_flags
    def get_flags_version(self):
        """ List of compiler flags to print out version information. """
        if self.executables['version_cmd']:
            return self.executables['version_cmd'][1:]
        return []
    def get_flags_f77(self):
        """ List of Fortran 77 specific flags. """
        if self.executables['compiler_f77']:
            return self.executables['compiler_f77'][1:]
        return []
    def get_flags_f90(self):
        """ List of Fortran 90 specific flags. """
        if self.executables['compiler_f90']:
            return self.executables['compiler_f90'][1:]
        return []
    def get_flags_free(self):
        """ List of Fortran 90 free format specific flags. """
        return []
    def get_flags_fix(self):
        """ List of Fortran 90 fixed format specific flags. """
        if self.executables['compiler_fix']:
            return self.executables['compiler_fix'][1:]
        return []
    def get_flags_linker_so(self):
        """ List of linker flags to build a shared library. """
        if self.executables['linker_so']:
            return self.executables['linker_so'][1:]
        return []
    def get_flags_linker_exe(self):
        """ List of linker flags to build an executable. """
        if self.executables['linker_exe']:
            return self.executables['linker_exe'][1:]
        return []
    def get_flags_ar(self):
        """ List of archiver flags. """
        if self.executables['archiver']:
            return self.executables['archiver'][1:]
        return []
    def get_flags_opt(self):
        """ List of architecture independent compiler flags. """
        return []
    def get_flags_arch(self):
        """ List of architecture dependent compiler flags. """
        return []
    def get_flags_debug(self):
        """ List of compiler flags to compile with debugging information. """
        return []

    # By default the f77/f90 flavors share the generic opt/arch/debug
    # flags; subclasses rebind these to differentiate per flavor.
    get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
    get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
    get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug

    def get_libraries(self):
        """ List of compiler libraries (copy, safe to mutate). """
        return self.libraries[:]
    def get_library_dirs(self):
        """ List of compiler library directories (copy, safe to mutate). """
        return self.library_dirs[:]
+
+ ############################################################
+
+ ## Public methods:
+
    def customize(self, dist=None):
        """ Customize Fortran compiler.

        This method gets Fortran compiler specific information from
        (i) class definition, (ii) environment, (iii) distutils config
        files, and (iv) command line.

        This method should be always called after constructing a
        compiler instance. But not in __init__ because Distribution
        instance is needed for (iii) and (iv).
        """
        log.info('customize %s' % (self.__class__.__name__))
        if dist is None:
            # These hooks are for testing only!
            from distutils.dist import Distribution
            dist = Distribution()
            dist.script_name = os.path.basename(sys.argv[0])
            dist.script_args = ['config_fc'] + sys.argv[1:]
            dist.cmdclass['config_fc'] = config_fc
            dist.parse_config_files()
            dist.parse_command_line()
        conf = dist.get_option_dict('config_fc')
        # conf values are (origin, value) pairs; [1] extracts the value.
        noopt = conf.get('noopt',[None,0])[1]
        if 0: # change to `if 1:` when making release.
            # Don't use architecture dependent compiler flags:
            noarch = 1
        else:
            noarch = conf.get('noarch',[None,noopt])[1]
        debug = conf.get('debug',[None,0])[1]

        f77 = self.__get_cmd('compiler_f77','F77',(conf,'f77exec'))
        f90 = self.__get_cmd('compiler_f90','F90',(conf,'f90exec'))
        # Temporarily setting f77,f90 compilers so that
        # version_cmd can use their executables.
        if f77:
            self.set_executables(compiler_f77=[f77])
        if f90:
            self.set_executables(compiler_f90=[f90])

        # Must set version_cmd before others as self.get_flags*
        # methods may call self.get_version.
        vers_cmd = self.__get_cmd(self.get_version_cmd)
        if vers_cmd:
            vflags = self.__get_flags(self.get_flags_version)
            self.set_executables(version_cmd=[vers_cmd]+vflags)

        if f77:
            f77flags = self.__get_flags(self.get_flags_f77,'F77FLAGS',
                                        (conf,'f77flags'))
        if f90:
            f90flags = self.__get_flags(self.get_flags_f90,'F90FLAGS',
                                        (conf,'f90flags'))
            freeflags = self.__get_flags(self.get_flags_free,'FREEFLAGS',
                                         (conf,'freeflags'))
        # XXX Assuming that free format is default for f90 compiler.
        fix = self.__get_cmd('compiler_fix','F90',(conf,'f90exec'))
        if fix:
            fixflags = self.__get_flags(self.get_flags_fix) + f90flags

        # Accumulate optimization/architecture/debug flags, adding the
        # per-flavor variants only when a subclass actually overrides them
        # (identity check against the generic method).
        oflags,aflags,dflags = [],[],[]
        if not noopt:
            oflags = self.__get_flags(self.get_flags_opt,'FOPT',(conf,'opt'))
            if f77 and self.get_flags_opt is not self.get_flags_opt_f77:
                f77flags += self.__get_flags(self.get_flags_opt_f77)
            if f90 and self.get_flags_opt is not self.get_flags_opt_f90:
                f90flags += self.__get_flags(self.get_flags_opt_f90)
            if fix and self.get_flags_opt is not self.get_flags_opt_f90:
                fixflags += self.__get_flags(self.get_flags_opt_f90)
        if not noarch:
            aflags = self.__get_flags(self.get_flags_arch,'FARCH',
                                      (conf,'arch'))
            if f77 and self.get_flags_arch is not self.get_flags_arch_f77:
                f77flags += self.__get_flags(self.get_flags_arch_f77)
            if f90 and self.get_flags_arch is not self.get_flags_arch_f90:
                f90flags += self.__get_flags(self.get_flags_arch_f90)
            if fix and self.get_flags_arch is not self.get_flags_arch_f90:
                fixflags += self.__get_flags(self.get_flags_arch_f90)
        if debug:
            dflags = self.__get_flags(self.get_flags_debug,'FDEBUG')
            if f77 and self.get_flags_debug is not self.get_flags_debug_f77:
                f77flags += self.__get_flags(self.get_flags_debug_f77)
            if f90 and self.get_flags_debug is not self.get_flags_debug_f90:
                f90flags += self.__get_flags(self.get_flags_debug_f90)
            if fix and self.get_flags_debug is not self.get_flags_debug_f90:
                fixflags += self.__get_flags(self.get_flags_debug_f90)

        fflags = self.__get_flags(self.get_flags,'FFLAGS') \
                 + dflags + oflags + aflags

        # Install the final compiler command lines.
        if f77:
            self.set_executables(compiler_f77=[f77]+f77flags+fflags)
        if f90:
            self.set_executables(compiler_f90=[f90]+freeflags+f90flags+fflags)
        if fix:
            self.set_executables(compiler_fix=[fix]+fixflags+fflags)
        #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
        linker_so = self.__get_cmd(self.get_linker_so,'LDSHARED')
        if linker_so:
            linker_so_flags = self.__get_flags(self.get_flags_linker_so,'LDFLAGS')
            self.set_executables(linker_so=[linker_so]+linker_so_flags)

        linker_exe = self.__get_cmd(self.get_linker_exe,'LD')
        if linker_exe:
            linker_exe_flags = self.__get_flags(self.get_flags_linker_exe,'LDFLAGS')
            self.set_executables(linker_exe=[linker_exe]+linker_exe_flags)
        ar = self.__get_cmd('archiver','AR')
        if ar:
            arflags = self.__get_flags(self.get_flags_ar,'ARFLAGS')
            self.set_executables(archiver=[ar]+arflags)

        ranlib = self.__get_cmd('ranlib','RANLIB')
        if ranlib:
            self.set_executables(ranlib=[ranlib])

        self.set_library_dirs(self.get_library_dirs())
        self.set_libraries(self.get_libraries())

        verbose = conf.get('verbose',[None,0])[1]
        if verbose:
            self.dump_properties()
        return
+
    def dump_properties(self):
        """ Print out the attributes of a compiler instance. """
        props = []
        for key in self.executables.keys() + \
                ['version','libraries','library_dirs',
                 'object_switch','compile_switch']:
            if hasattr(self,key):
                v = getattr(self,key)
                props.append((key, None, '= '+`v`))
        props.sort()

        # Abuse FancyGetopt's help formatter for aligned output, then
        # strip the option-style '--' prefix it adds to every line.
        pretty_printer = FancyGetopt(props)
        for l in pretty_printer.generate_help("%s instance properties:" \
                                              % (self.__class__.__name__)):
            if l[:4]=='  --':
                l = '  ' + l[4:]
            print l
        return
+
+ ###################
+
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile 'src' to product 'obj'.

        Chooses between the f77, free-form f90 and fixed-form f90
        command lines based on the source file's extension and header,
        then spawns the compiler.  Raises CompileError on failure.
        """
        if is_f_file(src) and not has_f90_header(src):
            flavor = ':f77'
            compiler = self.compiler_f77
        elif is_free_format(src):
            flavor = ':f90'
            compiler = self.compiler_f90
            if compiler is None:
                raise DistutilsExecError, 'f90 not supported by '\
                      +self.__class__.__name__
        else:
            flavor = ':fix'
            compiler = self.compiler_fix
            if compiler is None:
                raise DistutilsExecError, 'f90 (fixed) not supported by '\
                      +self.__class__.__name__
        # A trailing space in object_switch means the object file name is
        # a separate argument; otherwise it is concatenated to the switch.
        if self.object_switch[-1]==' ':
            o_args = [self.object_switch.strip(),obj]
        else:
            o_args = [self.object_switch.strip()+obj]

        assert self.compile_switch.strip()
        s_args = [self.compile_switch, src]

        if os.name == 'nt':
            compiler = _nt_quote_args(compiler)
        command = compiler + cc_args + s_args + o_args + extra_postargs

        display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
                              src)
        try:
            self.spawn(command,display=display)
        except DistutilsExecError, msg:
            raise CompileError, msg

        return
+
    def module_options(self, module_dirs, module_build_dir):
        """ Build command-line options controlling Fortran module files.

        module_build_dir is where .mod files should be written
        (module_dir_switch); module_dirs are extra directories searched
        for modules referenced by USE (module_include_switch).
        """
        options = []
        if self.module_dir_switch is not None:
            # Trailing space => directory is a separate argument.
            if self.module_dir_switch[-1]==' ':
                options.extend([self.module_dir_switch.strip(),module_build_dir])
            else:
                options.append(self.module_dir_switch.strip()+module_build_dir)
        else:
            print 'XXX: module_build_dir=%r option ignored' % (module_build_dir)
            print 'XXX: Fix module_dir_switch for ',self.__class__.__name__
        if self.module_include_switch is not None:
            for d in [module_build_dir]+module_dirs:
                options.append('%s%s' % (self.module_include_switch, d))
        else:
            print 'XXX: module_dirs=%r option ignored' % (module_dirs)
            print 'XXX: Fix module_include_switch for ',self.__class__.__name__
        return options
+
+ def library_option(self, lib):
+ return "-l" + lib
+ def library_dir_option(self, dir):
+ return "-L" + dir
+
    def link(self, target_desc, objects,
             output_filename, output_dir=None, libraries=None,
             library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """ Link object files into a shared library or executable.

        Follows the distutils CCompiler.link contract; chooses
        linker_exe vs linker_so based on target_desc and skips the link
        when the output is already up to date.  Raises LinkError when
        the spawned linker fails.
        """
        objects, output_dir = self._fix_object_args(objects, output_dir)
        libraries, library_dirs, runtime_library_dirs = \
            self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)

        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
                                   libraries)
        if type(output_dir) not in (StringType, NoneType):
            raise TypeError, "'output_dir' must be a string or None"
        if output_dir is not None:
            output_filename = os.path.join(output_dir, output_filename)

        if self._need_link(objects, output_filename):
            # Trailing space in library_switch => output name is a
            # separate argument (cf. object_switch in _compile).
            if self.library_switch[-1]==' ':
                o_args = [self.library_switch.strip(),output_filename]
            else:
                o_args = [self.library_switch.strip()+output_filename]

            if type(self.objects) is type(''):
                ld_args = objects + [self.objects]
            else:
                ld_args = objects + self.objects
            ld_args = ld_args + lib_opts + o_args
            if debug:
                ld_args[:0] = ['-g']
            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)
            self.mkpath(os.path.dirname(output_filename))
            if target_desc == CCompiler.EXECUTABLE:
                linker = self.linker_exe[:]
            else:
                linker = self.linker_so[:]
            if os.name == 'nt':
                linker = _nt_quote_args(linker)
            command = linker + ld_args
            try:
                self.spawn(command)
            except DistutilsExecError, msg:
                raise LinkError, msg
        else:
            log.debug("skipping %s (up-to-date)", output_filename)
        return
+
+
+ ## Private methods:
+
    def __get_cmd(self, command, envvar=None, confvar=None):
        """ Resolve a command name with increasing precedence:
        class default < environment variable < config file/command line.

        'command' is either a key into self.executables, a callable
        returning the command, or None.  'confvar' is a (conf_dict, key)
        pair whose values are (origin, value) tuples.
        """
        if command is None:
            var = None
        elif type(command) is type(''):
            var = self.executables[command]
            if var is not None:
                var = var[0]
        else:
            var = command()
        if envvar is not None:
            var = os.environ.get(envvar, var)
        if confvar is not None:
            var = confvar[0].get(confvar[1], [None,var])[1]
        return var
+
    def __get_flags(self, command, envvar=None, confvar=None):
        """ Resolve a flag list with the same precedence as __get_cmd.

        Environment/config values arrive as strings and are split with
        distutils' shell-aware split_quoted before being returned.
        """
        if command is None:
            var = []
        elif type(command) is type(''):
            var = self.executables[command][1:]
        else:
            var = command()
        if envvar is not None:
            var = os.environ.get(envvar, var)
        if confvar is not None:
            var = confvar[0].get(confvar[1], [None,var])[1]
        if type(var) is type(''):
            var = split_quoted(var)
        return var
+
+ ## class FCompiler
+
# Registry of known compilers:
# key -> (module name under scipy.distutils.fcompiler, class name,
#         human-readable description).
fcompiler_class = {'gnu':('gnu','GnuFCompiler',
                          "GNU Fortran Compiler"),
                   'gnu95':('gnu','Gnu95FCompiler',
                            "GNU 95 Fortran Compiler"),
                   'g95':('g95','G95FCompiler',
                          "GNU Fortran 95 Compiler"),
                   'pg':('pg','PGroupFCompiler',
                         "Portland Group Fortran Compiler"),
                   'absoft':('absoft','AbsoftFCompiler',
                             "Absoft Corp Fortran Compiler"),
                   'mips':('mips','MipsFCompiler',
                           "MIPSpro Fortran Compiler"),
                   'sun':('sun','SunFCompiler',
                          "Sun|Forte Fortran 95 Compiler"),
                   'intel':('intel','IntelFCompiler',
                            "Intel Fortran Compiler for 32-bit apps"),
                   'intelv':('intel','IntelVisualFCompiler',
                             "Intel Visual Fortran Compiler for 32-bit apps"),
                   'intele':('intel','IntelItaniumFCompiler',
                             "Intel Fortran Compiler for Itanium apps"),
                   'intelev':('intel','IntelItaniumVisualFCompiler',
                              "Intel Visual Fortran Compiler for Itanium apps"),
                   'nag':('nag','NAGFCompiler',
                          "NAGWare Fortran 95 Compiler"),
                   'compaq':('compaq','CompaqFCompiler',
                             "Compaq Fortran Compiler"),
                   'compaqv':('compaq','CompaqVisualFCompiler',
                              "DIGITAL|Compaq Visual Fortran Compiler"),
                   'vast':('vast','VastFCompiler',
                           "Pacific-Sierra Research Fortran 90 Compiler"),
                   'hpux':('hpux','HPUXFCompiler',
                           "HP Fortran 90 Compiler"),
                   'lahey':('lahey','LaheyFCompiler',
                            "Lahey/Fujitsu Fortran 95 Compiler"),
                   'ibm':('ibm','IbmFCompiler',
                          "IBM XL Fortran Compiler"),
                   'f':('f','FFCompiler',
                        "Fortran Company/NAG F Compiler"),
                   'none':('none','NoneFCompiler',"Fake Fortran compiler")
                   }

# Ordered (pattern, preferred compiler keys) pairs; patterns are matched
# against sys.platform first, then os.name (see get_default_fcompiler).
_default_compilers = (
    # Platform mappings
    ('win32',('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')),
    ('cygwin.*',('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')),
    ('linux.*',('gnu','intel','lahey','pg','absoft','nag','vast','compaq',
                'intele','gnu95','g95')),
    ('darwin.*',('nag','absoft','ibm','gnu','gnu95','g95')),
    ('sunos.*',('sun','gnu','gnu95','g95')),
    ('irix.*',('mips','gnu','gnu95',)),
    ('aix.*',('ibm','gnu','gnu95',)),
    # OS mappings
    ('posix',('gnu','gnu95',)),
    ('nt',('gnu','gnu95',)),
    ('mac',('gnu','gnu95',)),
    )
+
+def _find_existing_fcompiler(compilers, osname=None, platform=None):
+ for compiler in compilers:
+ v = None
+ try:
+ c = new_fcompiler(plat=platform, compiler=compiler)
+ c.customize()
+ v = c.get_version()
+ except DistutilsModuleError:
+ pass
+ except Exception, msg:
+ log.warn(msg)
+ if v is not None:
+ return compiler
+ return
+
def get_default_fcompiler(osname=None, platform=None):
    """ Determine the default Fortran compiler to use for the given platform. """
    if osname is None:
        osname = os.name
    if platform is None:
        platform = sys.platform
    # Collect candidate compiler names from every table entry whose
    # pattern matches either the platform string or the OS name,
    # preserving table order.
    candidates = []
    for pattern, names in _default_compilers:
        if re.match(pattern, platform) is None and \
           re.match(pattern, osname) is None:
            continue
        if type(names) is type(()):
            candidates.extend(list(names))
        else:
            candidates.append(names)
    if not candidates:
        candidates.append('gnu')
    # Prefer a candidate that actually works on this system; otherwise
    # fall back to the first (most preferred) candidate.
    found = _find_existing_fcompiler(candidates,
                                     osname=osname,
                                     platform=platform)
    if found is not None:
        return found
    return candidates[0]
+
def new_fcompiler(plat=None,
                  compiler=None,
                  verbose=0,
                  dry_run=0,
                  force=0):
    """ Generate an instance of some FCompiler subclass for the supplied
    platform/compiler combination.

    Raises DistutilsPlatformError for an unknown compiler key and
    DistutilsModuleError when the compiler's module or class cannot be
    loaded.
    """
    if plat is None:
        plat = os.name
    try:
        if compiler is None:
            compiler = get_default_fcompiler(plat)
        (module_name, class_name, long_description) = fcompiler_class[compiler]
    except KeyError:
        msg = "don't know how to compile Fortran code on platform '%s'" % plat
        if compiler is not None:
            msg = msg + " with '%s' compiler." % compiler
        msg = msg + " Supported compilers are: %s)" \
              % (','.join(fcompiler_class.keys()))
        raise DistutilsPlatformError, msg

    try:
        module_name = 'scipy.distutils.fcompiler.'+module_name
        __import__ (module_name)
        module = sys.modules[module_name]
        klass = vars(module)[class_name]
    except ImportError:
        raise DistutilsModuleError, \
              "can't compile Fortran code: unable to load module '%s'" % \
              module_name
    except KeyError:
        raise DistutilsModuleError, \
              ("can't compile Fortran code: unable to find class '%s' " +
               "in module '%s'") % (class_name, module_name)
    compiler = klass(None, dry_run, force)
    log.debug('new_fcompiler returns %s' % (klass))
    return compiler
+
def show_fcompilers(dist = None):
    """ Print list of available compilers (used by the "--help-fcompiler"
    option to "config_fc").

    Compilers are grouped into available (version detected),
    unavailable (instantiated but no version found) and unimplemented
    (module failed to load).
    """
    if dist is None:
        # Build a throwaway Distribution so config-file/command-line
        # options can be honored even outside a setup.py run.
        from distutils.dist import Distribution
        dist = Distribution()
        dist.script_name = os.path.basename(sys.argv[0])
        dist.script_args = ['config_fc'] + sys.argv[1:]
        dist.cmdclass['config_fc'] = config_fc
        dist.parse_config_files()
        dist.parse_command_line()

    compilers = []
    compilers_na = []
    compilers_ni = []
    for compiler in fcompiler_class.keys():
        v = 'N/A'
        try:
            c = new_fcompiler(compiler=compiler)
            c.customize(dist)
            v = c.get_version()
        except DistutilsModuleError:
            pass
        except Exception, msg:
            log.warn(msg)
        # v stays 'N/A' when instantiation itself failed; becomes None
        # when the compiler ran but no version could be parsed.
        if v is None:
            compilers_na.append(("fcompiler="+compiler, None,
                                 fcompiler_class[compiler][2]))
        elif v=='N/A':
            compilers_ni.append(("fcompiler="+compiler, None,
                                 fcompiler_class[compiler][2]))
        else:
            compilers.append(("fcompiler="+compiler, None,
                              fcompiler_class[compiler][2] + ' (%s)' % v))

    compilers.sort()
    compilers_na.sort()
    pretty_printer = FancyGetopt(compilers)
    pretty_printer.print_help("List of available Fortran compilers:")
    pretty_printer = FancyGetopt(compilers_na)
    pretty_printer.print_help("List of unavailable Fortran compilers:")
    if compilers_ni:
        pretty_printer = FancyGetopt(compilers_ni)
        pretty_printer.print_help("List of unimplemented Fortran compilers:")
    print "For compiler details, run 'config_fc --verbose' setup command."
+
def dummy_fortran_file():
    """ Create a tiny Fortran source file in a temporary location and
    return its path without the '.f' extension.  The '.f' file (and any
    '.o' compiled from it) is removed at interpreter exit.
    """
    import atexit
    import tempfile
    # NOTE(review): tempfile.mktemp is race-prone/insecure; tolerated
    # here only because the file is a throwaway compile probe.
    dummy_name = tempfile.mktemp()+'__dummy'
    dummy = open(dummy_name+'.f','w')
    dummy.write("      subroutine dummy()\n      end\n")
    dummy.close()
    def rm_file(name=dummy_name,log_threshold=log._global_log.threshold):
        # Restore the log threshold captured at registration time so the
        # atexit cleanup logs at the same verbosity as its creator.
        save_th = log._global_log.threshold
        log.set_threshold(log_threshold)
        try: os.remove(name+'.f'); log.debug('removed '+name+'.f')
        except OSError: pass
        try: os.remove(name+'.o'); log.debug('removed '+name+'.o')
        except OSError: pass
        log.set_threshold(save_th)
    atexit.register(rm_file)
    return dummy_name
+
# Fortran 77-style source extensions.
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match
# Emacs-style first-line mode headers declaring the dialect explicitly.
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search
# Heuristic: a non-comment character before column 6 hints free format.
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match
def is_free_format(file):
    """Check if file is in free format Fortran.

    Returns 1 (free format) or 0 (fixed format).  An explicit Emacs mode
    header on the first line decides immediately; otherwise up to 15
    non-comment lines are scanned for free-format hints.
    """
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
    result = 0
    f = open(file,'r')
    line = f.readline()
    n = 15 # the number of non-comment lines to scan for hints
    if _has_f_header(line):
        n = 0
    elif _has_f90_header(line):
        n = 0
        result = 1
    while n>0 and line:
        if line[0]!='!':
            n -= 1
            # Statement text starting before column 6, or a trailing '&'
            # continuation marker, are free-format indicators.
            if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&':
                result = 1
                break
        line = f.readline()
    f.close()
    return result
+
def has_f90_header(src):
    """ True if the first line of *src* carries an Emacs-style f90 or
    fixed-form mode header. """
    f = open(src,'r')
    line = f.readline()
    f.close()
    return _has_f90_header(line) or _has_fix_header(line)
+
if __name__ == '__main__':
    # Running this module directly lists the detected Fortran compilers.
    show_fcompilers()
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
new file mode 100644
index 000000000..51c3548db
--- /dev/null
+++ b/numpy/distutils/fcompiler/absoft.py
@@ -0,0 +1,137 @@
+
+# http://www.absoft.com/literature/osxuserguide.pdf
+# http://www.absoft.com/documentation.html
+
+# Notes:
+# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
+# generated extension modules (works for f2py v2.45.241_1936 and up)
+
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from scipy.distutils.misc_util import cyg2win32
+
+class AbsoftFCompiler(FCompiler):
+
+ compiler_type = 'absoft'
+ #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
+ version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version))'+\
+ r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
+
+ # samt5735(8)$ f90 -V -c dummy.f
+ # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
+ # Note that fink installs g77 as f77, so need to use f90 for detection.
+
+ executables = {
+ 'version_cmd' : ["f90", "-V -c %(fname)s.f -o %(fname)s.o" \
+ % {'fname':cyg2win32(dummy_fortran_file())}],
+ 'compiler_f77' : ["f77"],
+ 'compiler_fix' : ["f90"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["f90"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ if os.name=='nt':
+ library_switch = '/out:' #No space after /out:!
+
+ module_dir_switch = None
+ module_include_switch = '-p'
+
+ def get_flags_linker_so(self):
+ if os.name=='nt':
+ opt = ['/dll']
+ # The "-K shared" switches are being left in for pre-9.0 versions
+ # of Absoft though I don't think versions earlier than 9 can
+ # actually be used to build shared libraries. In fact, version
+ # 8 of Absoft doesn't recognize "-K shared" and will fail.
+ elif self.get_version() >= '9.0':
+ opt = ['-shared']
+ else:
+ opt = ["-K","shared"]
+ return opt
+
+ def library_dir_option(self, dir):
+ if os.name=='nt':
+ return ['-link','/PATH:"%s"' % (dir)]
+ return "-L" + dir
+
+ def library_option(self, lib):
+ if os.name=='nt':
+ return '%s.lib' % (lib)
+ return "-l" + lib
+
+ def get_library_dirs(self):
+ opt = FCompiler.get_library_dirs(self)
+ d = os.environ.get('ABSOFT')
+ if d:
+ opt.append(os.path.join(d,'lib'))
+ return opt
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ if self.get_version() >= '8.0':
+ opt.extend(['f90math','fio','f77math','U77'])
+ else:
+ opt.extend(['fio','f90math','fmath','U77'])
+ if os.name =='nt':
+ opt.append('COMDLG32')
+ return opt
+
+ def get_flags(self):
+ opt = FCompiler.get_flags(self)
+ if os.name != 'nt':
+ opt.extend(['-s'])
+ if self.get_version():
+ if self.get_version()>='8.2':
+ opt.append('-fpic')
+ return opt
+
+ def get_flags_f77(self):
+ opt = FCompiler.get_flags_f77(self)
+ opt.extend(['-N22','-N90','-N110'])
+ v = self.get_version()
+ if os.name == 'nt':
+ if v and v>='8.0':
+ opt.extend(['-f','-N15'])
+ else:
+ opt.append('-f')
+ if v:
+ if v<='4.6':
+ opt.append('-B108')
+ else:
+ # Though -N15 is undocumented, it works with
+ # Absoft 8.0 on Linux
+ opt.append('-N15')
+ return opt
+
+ def get_flags_f90(self):
+ opt = FCompiler.get_flags_f90(self)
+ opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX",
+ "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"])
+ if self.get_version():
+ if self.get_version()>'4.6':
+ opt.extend(["-YDEALLOC=ALL"])
+ return opt
+
+ def get_flags_fix(self):
+ opt = FCompiler.get_flags_fix(self)
+ opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX",
+ "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"])
+ opt.extend(["-f","fixed"])
+ return opt
+
+ def get_flags_opt(self):
+ opt = ['-O']
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='absoft')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
new file mode 100644
index 000000000..7abb23ae2
--- /dev/null
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -0,0 +1,94 @@
+
+#http://www.compaq.com/fortran/docs/
+
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class CompaqFCompiler(FCompiler):
+
+ compiler_type = 'compaq'
+ version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
+
+ if sys.platform[:5]=='linux':
+ fc_exe = 'fort'
+ else:
+ fc_exe = 'f90'
+
+ executables = {
+ 'version_cmd' : [fc_exe, "-version"],
+ 'compiler_f77' : [fc_exe, "-f77rtl","-fixed"],
+ 'compiler_fix' : [fc_exe, "-fixed"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : [fc_exe],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ module_dir_switch = '-module ' # not tested
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ return ['-assume no2underscore','-nomixed_str_len_arg']
+ def get_flags_debug(self):
+ return ['-g','-check bounds']
+ def get_flags_opt(self):
+ return ['-O4','-align dcommons','-assume bigarrays',
+ '-assume nozsize','-math_library fast']
+ def get_flags_arch(self):
+ return ['-arch host', '-tune host']
+ def get_flags_linker_so(self):
+ if sys.platform[:5]=='linux':
+ return ['-shared']
+ return ['-shared','-Wl,-expect_unresolved,*']
+
+class CompaqVisualFCompiler(FCompiler):
+
+ compiler_type = 'compaqv'
+ version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\
+ ' Version (?P<version>[^\s]*).*'
+
+ compile_switch = '/compile_only'
+ object_switch = '/object:'
+ library_switch = '/OUT:' #No space after /OUT:!
+
+ static_lib_extension = ".lib"
+ static_lib_format = "%s%s"
+ module_dir_switch = '/module:'
+ module_include_switch = '/I'
+
+ ar_exe = 'lib.exe'
+ fc_exe = 'DF'
+ if sys.platform=='win32':
+ from distutils.msvccompiler import MSVCCompiler
+ ar_exe = MSVCCompiler().lib
+
+ executables = {
+ 'version_cmd' : ['DF', "/what"],
+ 'compiler_f77' : ['DF', "/f77rtl","/fixed"],
+ 'compiler_fix' : ['DF', "/fixed"],
+ 'compiler_f90' : ['DF'],
+ 'linker_so' : ['DF'],
+ 'archiver' : [ar_exe, "/OUT:"],
+ 'ranlib' : None
+ }
+
+ def get_flags(self):
+ return ['/nologo','/MD','/WX','/iface=(cref,nomixed_str_len_arg)',
+ '/names:lowercase','/assume:underscore']
+ def get_flags_opt(self):
+ return ['/Ox','/fast','/optimize:5','/unroll:0','/math_library:fast']
+ def get_flags_arch(self):
+ return ['/threads']
+ def get_flags_debug(self):
+ return ['/debug']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='compaq')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py
new file mode 100644
index 000000000..a3bf374a8
--- /dev/null
+++ b/numpy/distutils/fcompiler/g95.py
@@ -0,0 +1,41 @@
+# http://g95.sourceforge.net/
+
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class G95FCompiler(FCompiler):
+
+ compiler_type = 'g95'
+ version_pattern = r'G95.*\(experimental\) \(g95!\) (?P<version>.*)\).*'
+
+ executables = {
+ 'version_cmd' : ["g95", "--version"],
+ 'compiler_f77' : ["g95", "-ffixed-form"],
+ 'compiler_fix' : ["g95", "-ffixed-form"],
+ 'compiler_f90' : ["g95"],
+ 'linker_so' : ["g95","-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+ module_dir_switch = '-fmod='
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ return ['-fno-second-underscore']
+ def get_flags_opt(self):
+ return ['-O']
+ def get_flags_debug(self):
+ return ['-g']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ #compiler = new_fcompiler(compiler='g95')
+ compiler = G95FCompiler()
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
new file mode 100644
index 000000000..b0072571f
--- /dev/null
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -0,0 +1,244 @@
+
+import re
+import os
+import sys
+import warnings
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+from scipy.distutils.exec_command import exec_command, find_executable
+
+class GnuFCompiler(FCompiler):
+
+ compiler_type = 'gnu'
+ version_pattern = r'GNU Fortran ((\(GCC[^\)]*(\)\)|\)))|)\s*'\
+ '(?P<version>[^\s*\)]+)'
+
+ # 'g77 --version' results
+ # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release)
+ # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)
+ # GNU Fortran (GCC) 3.3.3 (Debian 20040401)
+ # GNU Fortran 0.5.25 20010319 (prerelease)
+ # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5)
+
+ for fc_exe in map(find_executable,['g77','f77']):
+ if os.path.isfile(fc_exe):
+ break
+ executables = {
+ 'version_cmd' : [fc_exe,"--version"],
+ 'compiler_f77' : [fc_exe,"-Wall","-fno-second-underscore"],
+ 'compiler_f90' : None,
+ 'compiler_fix' : None,
+ 'linker_so' : [fc_exe,"-Wall"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"],
+ 'linker_exe' : [fc_exe,"-Wall"]
+ }
+ module_dir_switch = None
+ module_include_switch = None
+
+ # Cygwin: f771: warning: -fPIC ignored for target (all code is position independent)
+ if os.name != 'nt' and sys.platform!='cygwin':
+ pic_flags = ['-fPIC']
+
+ g2c = 'g2c'
+
+ #def get_linker_so(self):
+ # # win32 linking should be handled by standard linker
+ # # Darwin g77 cannot be used as a linker.
+ # #if re.match(r'(darwin)', sys.platform):
+ # # return
+ # return FCompiler.get_linker_so(self)
+
+ def get_flags_linker_so(self):
+ opt = []
+ if sys.platform=='darwin':
+ target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
+ if target is None:
+ target = '10.3'
+ major, minor = target.split('.')
+ if int(minor) < 3:
+ minor = '3'
+ warnings.warn('Environment variable '
+ 'MACOSX_DEPLOYMENT_TARGET reset to 10.3')
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = '%s.%s' % (major,
+ minor)
+
+ opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
+ else:
+ opt.append("-shared")
+ if sys.platform[:5]=='sunos':
+ # SunOS often has dynamically loaded symbols defined in the
+ # static library libg2c.a The linker doesn't like this. To
+ # ignore the problem, use the -mimpure-text flag. It isn't
+ # the safest thing, but seems to work. 'man gcc' says:
+ # ".. Instead of using -mimpure-text, you should compile all
+ # source code with -fpic or -fPIC."
+ opt.append('-mimpure-text')
+ return opt
+
+ def get_libgcc_dir(self):
+ status, output = exec_command('%s -print-libgcc-file-name' \
+ % (self.compiler_f77[0]),use_tee=0)
+ if not status:
+ return os.path.dirname(output)
+ return
+
+ def get_library_dirs(self):
+ opt = []
+ if sys.platform[:5] != 'linux':
+ d = self.get_libgcc_dir()
+ if d:
+ opt.append(d)
+ return opt
+
+ def get_libraries(self):
+ opt = []
+ d = self.get_libgcc_dir()
+ if d is not None:
+ g2c = self.g2c + '-pic'
+ f = self.static_lib_format % (g2c, self.static_lib_extension)
+ if not os.path.isfile(os.path.join(d,f)):
+ g2c = self.g2c
+ else:
+ g2c = self.g2c
+
+ if sys.platform=='win32':
+ opt.append('gcc')
+ if g2c is not None:
+ opt.append(g2c)
+ if sys.platform == 'darwin':
+ opt.append('cc_dynamic')
+ return opt
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ if self.get_version()<='3.3.3':
+ # With this compiler version building Fortran BLAS/LAPACK
+ # with -O3 caused failures in lib.lapack heevr,syevr tests.
+ opt = ['-O2']
+ else:
+ opt = ['-O3']
+ opt.append('-funroll-loops')
+ return opt
+
+ def get_flags_arch(self):
+ opt = []
+ if sys.platform=='darwin':
+ if os.name != 'posix':
+ # this should presumably correspond to Apple
+ if cpu.is_ppc():
+ opt.append('-arch ppc')
+ elif cpu.is_i386():
+ opt.append('-arch i386')
+ for a in '601 602 603 603e 604 604e 620 630 740 7400 7450 750 '\
+ '403 505 801 821 823 860'.split():
+ if getattr(cpu,'is_ppc%s'%a)():
+ opt.append('-mcpu='+a)
+ opt.append('-mtune='+a)
+ break
+ return opt
+ march_flag = 1
+ # 0.5.25 corresponds to 2.95.x
+ if self.get_version() == '0.5.26': # gcc 3.0
+ if cpu.is_AthlonK6():
+ opt.append('-march=k6')
+ elif cpu.is_AthlonK7():
+ opt.append('-march=athlon')
+ else:
+ march_flag = 0
+ # Note: gcc 3.2 on win32 has breakage with -march specified
+ elif self.get_version() >= '3.1.1' \
+ and not sys.platform=='win32': # gcc >= 3.1.1
+ if cpu.is_AthlonK6():
+ opt.append('-march=k6')
+ elif cpu.is_AthlonK6_2():
+ opt.append('-march=k6-2')
+ elif cpu.is_AthlonK6_3():
+ opt.append('-march=k6-3')
+ elif cpu.is_AthlonK7():
+ opt.append('-march=athlon')
+ elif cpu.is_AthlonMP():
+ opt.append('-march=athlon-mp')
+ # there's also: athlon-tbird, athlon-4, athlon-xp
+ elif cpu.is_Nocona():
+ opt.append('-march=nocona')
+ elif cpu.is_Prescott():
+ opt.append('-march=prescott')
+ elif cpu.is_PentiumIV():
+ opt.append('-march=pentium4')
+ elif cpu.is_PentiumIII():
+ opt.append('-march=pentium3')
+ elif cpu.is_PentiumII():
+ opt.append('-march=pentium2')
+ else:
+ march_flag = 0
+ if self.get_version() >= '3.4' and not march_flag:
+ march_flag = 1
+ if cpu.is_Opteron():
+ opt.append('-march=opteron')
+ elif cpu.is_Athlon64():
+ opt.append('-march=athlon64')
+ else:
+ march_flag = 0
+ if cpu.has_mmx(): opt.append('-mmmx')
+ if self.get_version() > '3.2.2':
+ if cpu.has_sse2(): opt.append('-msse2')
+ if cpu.has_sse(): opt.append('-msse')
+ if self.get_version() >= '3.4':
+ if cpu.has_sse3(): opt.append('-msse3')
+ if cpu.has_3dnow(): opt.append('-m3dnow')
+ else:
+ march_flag = 0
+ if march_flag:
+ pass
+ elif cpu.is_i686():
+ opt.append('-march=i686')
+ elif cpu.is_i586():
+ opt.append('-march=i586')
+ elif cpu.is_i486():
+ opt.append('-march=i486')
+ elif cpu.is_i386():
+ opt.append('-march=i386')
+ if cpu.is_Intel():
+ opt.append('-fomit-frame-pointer')
+ if cpu.is_32bit():
+ opt.append('-malign-double')
+ return opt
+
+class Gnu95FCompiler(GnuFCompiler):
+
+ compiler_type = 'gnu95'
+ version_pattern = r'GNU Fortran 95 \(GCC (?P<version>[^\s*\)]+)'
+
+ # 'gfortran --version' results:
+ # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))
+
+ for fc_exe in map(find_executable,['gfortran','f95']):
+ if os.path.isfile(fc_exe):
+ break
+ executables = {
+ 'version_cmd' : [fc_exe,"--version"],
+ 'compiler_f77' : [fc_exe,"-Wall","-ffixed-form","-fno-second-underscore"],
+ 'compiler_f90' : [fc_exe,"-Wall","-fno-second-underscore"],
+ 'compiler_fix' : [fc_exe,"-Wall","-ffixed-form","-fno-second-underscore"],
+ 'linker_so' : [fc_exe,"-Wall"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"],
+ 'linker_exe' : [fc_exe,"-Wall"]
+ }
+ module_dir_switch = '-M'
+ module_include_switch = '-I'
+
+ g2c = 'gfortran'
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ #compiler = new_fcompiler(compiler='gnu')
+ compiler = GnuFCompiler()
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py
new file mode 100644
index 000000000..95f326cea
--- /dev/null
+++ b/numpy/distutils/fcompiler/hpux.py
@@ -0,0 +1,41 @@
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class HPUXFCompiler(FCompiler):
+
+ compiler_type = 'hpux'
+ version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
+
+ executables = {
+ 'version_cmd' : ["f90", "+version"],
+ 'compiler_f77' : ["f90"],
+ 'compiler_fix' : ["f90"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : None,
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = None #XXX: fix me
+ module_include_switch = None #XXX: fix me
+ pic_flags = ['+pic=long']
+ def get_flags(self):
+ return self.pic_flags + ['+ppu']
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_libraries(self):
+ return ['m']
+ def get_version(self, force=0, ok_status=[256,0]):
+ # XXX status==256 may indicate 'unrecognized option' or
+ # 'no input file'. So, version_cmd needs more work.
+ return FCompiler.get_version(self,force,ok_status)
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(10)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='hpux')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py
new file mode 100644
index 000000000..5fc59d1e6
--- /dev/null
+++ b/numpy/distutils/fcompiler/ibm.py
@@ -0,0 +1,80 @@
+import os
+import re
+import sys
+
+from scipy.distutils.fcompiler import FCompiler
+from distutils import log
+
+class IbmFCompiler(FCompiler):
+
+ compiler_type = 'ibm'
+ version_pattern = r'xlf\(1\)\s*IBM XL Fortran (Advanced Edition |)Version (?P<version>[^\s*]*)'
+
+ executables = {
+ 'version_cmd' : ["xlf"],
+ 'compiler_f77' : ["xlf"],
+ 'compiler_fix' : ["xlf90", "-qfixed"],
+ 'compiler_f90' : ["xlf90"],
+ 'linker_so' : ["xlf95"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_version(self,*args,**kwds):
+ version = FCompiler.get_version(self,*args,**kwds)
+ xlf_dir = '/etc/opt/ibmcmp/xlf'
+ if version is None and os.path.isdir(xlf_dir):
+ # If the output of xlf does not contain version info
+ # (that's the case with xlf 8.1, for instance) then
+ # let's try another method:
+ l = os.listdir(xlf_dir)
+ l.sort()
+ l.reverse()
+ l = [d for d in l if os.path.isfile(os.path.join(xlf_dir,d,'xlf.cfg'))]
+ if l:
+ from distutils.version import LooseVersion
+ self.version = version = LooseVersion(l[0])
+ return version
+
+ def get_flags(self):
+ return ['-qextname']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_linker_so(self):
+ opt = []
+ if sys.platform=='darwin':
+ opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
+ else:
+ opt.append('-bshared')
+ version = self.get_version(ok_status=[0,40])
+ if version is not None:
+ import tempfile
+ xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
+ new_cfg = tempfile.mktemp()+'_xlf.cfg'
+ log.info('Creating '+new_cfg)
+ fi = open(xlf_cfg,'r')
+ fo = open(new_cfg,'w')
+ crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
+ for line in fi.readlines():
+ m = crt1_match(line)
+ if m:
+ fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
+ else:
+ fo.write(line)
+ fi.close()
+ fo.close()
+ opt.append('-F'+new_cfg)
+ return opt
+
+ def get_flags_opt(self):
+ return ['-O5']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='ibm')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
new file mode 100644
index 000000000..0e7375943
--- /dev/null
+++ b/numpy/distutils/fcompiler/intel.py
@@ -0,0 +1,174 @@
+# http://developer.intel.com/software/products/compilers/flin/
+
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from scipy.distutils.exec_command import find_executable
+
+class IntelFCompiler(FCompiler):
+
+ compiler_type = 'intel'
+ version_pattern = r'Intel\(R\) Fortran Compiler for 32-bit '\
+ 'applications, Version (?P<version>[^\s*]*)'
+
+ for fc_exe in map(find_executable,['ifort','ifc']):
+ if os.path.isfile(fc_exe):
+ break
+
+ executables = {
+ 'version_cmd' : [fc_exe, "-FI -V -c %(fname)s.f -o %(fname)s.o" \
+ % {'fname':dummy_fortran_file()}],
+ 'compiler_f77' : [fc_exe,"-72","-w90","-w95"],
+ 'compiler_fix' : [fc_exe,"-FI"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : [fc_exe,"-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ pic_flags = ['-KPIC']
+ module_dir_switch = '-module ' # Don't remove ending space!
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = self.pic_flags + ["-cm"]
+ return opt
+
+ def get_flags_free(self):
+ return ["-FR"]
+
+ def get_flags_opt(self):
+ return ['-O3','-unroll']
+
+ def get_flags_arch(self):
+ opt = []
+ if cpu.has_fdiv_bug():
+ opt.append('-fdiv_check')
+ if cpu.has_f00f_bug():
+ opt.append('-0f_check')
+ if cpu.is_PentiumPro() or cpu.is_PentiumII():
+ opt.extend(['-tpp6','-xi'])
+ elif cpu.is_PentiumIII():
+ opt.append('-tpp6')
+ elif cpu.is_Pentium():
+ opt.append('-tpp5')
+ elif cpu.is_PentiumIV() or cpu.is_XEON():
+ opt.extend(['-tpp7','-xW'])
+ if cpu.has_mmx():
+ opt.append('-xM')
+ return opt
+
+ def get_flags_linker_so(self):
+ opt = FCompiler.get_flags_linker_so(self)
+ v = self.get_version()
+ if v and v >= '8.0':
+ opt.append('-nofor_main')
+ return opt
+
+class IntelItaniumFCompiler(IntelFCompiler):
+ compiler_type = 'intele'
+ version_pattern = r'Intel\(R\) Fortran 90 Compiler Itanium\(TM\) Compiler'\
+ ' for the Itanium\(TM\)-based applications,'\
+ ' Version (?P<version>[^\s*]*)'
+
+ for fc_exe in map(find_executable,['ifort','efort','efc']):
+ if os.path.isfile(fc_exe):
+ break
+
+ executables = {
+ 'version_cmd' : [fc_exe, "-FI -V -c %(fname)s.f -o %(fname)s.o" \
+ % {'fname':dummy_fortran_file()}],
+ 'compiler_f77' : [fc_exe,"-FI","-w90","-w95"],
+ 'compiler_fix' : [fc_exe,"-FI"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : [fc_exe,"-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+class IntelVisualFCompiler(FCompiler):
+
+ compiler_type = 'intelv'
+ version_pattern = r'Intel\(R\) Fortran Compiler for 32-bit applications, '\
+ 'Version (?P<version>[^\s*]*)'
+
+ ar_exe = 'lib.exe'
+ fc_exe = 'ifl'
+ if sys.platform=='win32':
+ from distutils.msvccompiler import MSVCCompiler
+ ar_exe = MSVCCompiler().lib
+
+ executables = {
+ 'version_cmd' : [fc_exe, "-FI -V -c %(fname)s.f -o %(fname)s.o" \
+ % {'fname':dummy_fortran_file()}],
+ 'compiler_f77' : [fc_exe,"-FI","-w90","-w95"],
+ 'compiler_fix' : [fc_exe,"-FI","-4L72","-w"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : [fc_exe,"-shared"],
+ 'archiver' : [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib' : None
+ }
+
+ compile_switch = '/c '
+ object_switch = '/Fo' #No space after /Fo!
+ library_switch = '/OUT:' #No space after /OUT:!
+ module_dir_switch = '/module:' #No space after /module:
+ module_include_switch = '/I'
+
+ def get_flags(self):
+ opt = ['/nologo','/MD','/nbs','/Qlowercase','/us']
+ return opt
+
+ def get_flags_free(self):
+ return ["-FR"]
+
+ def get_flags_debug(self):
+ return ['/4Yb','/d2']
+
+ def get_flags_opt(self):
+ return ['/O3','/Qip','/Qipo','/Qipo_obj']
+
+ def get_flags_arch(self):
+ opt = []
+ if cpu.is_PentiumPro() or cpu.is_PentiumII():
+ opt.extend(['/G6','/Qaxi'])
+ elif cpu.is_PentiumIII():
+ opt.extend(['/G6','/QaxK'])
+ elif cpu.is_Pentium():
+ opt.append('/G5')
+ elif cpu.is_PentiumIV():
+ opt.extend(['/G7','/QaxW'])
+ if cpu.has_mmx():
+ opt.append('/QaxM')
+ return opt
+
+class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
+
+ compiler_type = 'intelev'
+ version_pattern = r'Intel\(R\) Fortran 90 Compiler Itanium\(TM\) Compiler'\
+ ' for the Itanium\(TM\)-based applications,'\
+ ' Version (?P<version>[^\s*]*)'
+
+ fc_exe = 'efl' # XXX this is a wild guess
+ ar_exe = IntelVisualFCompiler.ar_exe
+
+ executables = {
+ 'version_cmd' : [fc_exe, "-FI -V -c %(fname)s.f -o %(fname)s.o" \
+ % {'fname':dummy_fortran_file()}],
+ 'compiler_f77' : [fc_exe,"-FI","-w90","-w95"],
+ 'compiler_fix' : [fc_exe,"-FI","-4L72","-w"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : [fc_exe,"-shared"],
+ 'archiver' : [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib' : None
+ }
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='intel')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py
new file mode 100644
index 000000000..9b88cc264
--- /dev/null
+++ b/numpy/distutils/fcompiler/lahey.py
@@ -0,0 +1,46 @@
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class LaheyFCompiler(FCompiler):
+
+ compiler_type = 'lahey'
+ version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
+
+ executables = {
+ 'version_cmd' : ["lf95", "--version"],
+ 'compiler_f77' : ["lf95", "--fix"],
+ 'compiler_fix' : ["lf95", "--fix"],
+ 'compiler_f90' : ["lf95"],
+ 'linker_so' : ["lf95","-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ module_dir_switch = None #XXX Fix me
+ module_include_switch = None #XXX Fix me
+
+ def get_flags_opt(self):
+ return ['-O']
+ def get_flags_debug(self):
+ return ['-g','--chk','--chkglobal']
+ def get_library_dirs(self):
+ opt = []
+ d = os.environ.get('LAHEY')
+ if d:
+ opt.append(os.path.join(d,'lib'))
+ return opt
+ def get_libraries(self):
+ opt = []
+ opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='lahey')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py
new file mode 100644
index 000000000..547e91423
--- /dev/null
+++ b/numpy/distutils/fcompiler/mips.py
@@ -0,0 +1,56 @@
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class MipsFCompiler(FCompiler):
+
+ compiler_type = 'mips'
+ version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)'
+
+ executables = {
+ 'version_cmd' : ["f90", "-version"],
+ 'compiler_f77' : ["f77", "-f77"],
+ 'compiler_fix' : ["f90", "-fixedform"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["f90","-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : None
+ }
+ module_dir_switch = None #XXX: fix me
+ module_include_switch = None #XXX: fix me
+ pic_flags = ['-KPIC']
+
+ def get_flags(self):
+ return self.pic_flags + ['-n32']
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_arch(self):
+ opt = []
+ for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split():
+ if getattr(cpu,'is_IP%s'%a)():
+ opt.append('-TARG:platform=IP%s' % a)
+ break
+ return opt
+ def get_flags_arch_f77(self):
+ r = None
+ if cpu.is_r10000(): r = 10000
+ elif cpu.is_r12000(): r = 12000
+ elif cpu.is_r8000(): r = 8000
+ elif cpu.is_r5000(): r = 5000
+ elif cpu.is_r4000(): r = 4000
+ if r is not None:
+ return ['r%s' % (r)]
+ return []
+ def get_flags_arch_f90(self):
+ r = self.get_flags_arch_f77()
+ if r:
+ r[0] = '-' + r[0]
+ return r
+
+if __name__ == '__main__':
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='mips')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py
new file mode 100644
index 000000000..e17d972c4
--- /dev/null
+++ b/numpy/distutils/fcompiler/nag.py
@@ -0,0 +1,39 @@
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class NAGFCompiler(FCompiler):
+
+ compiler_type = 'nag'
+ version_pattern = r'NAGWare Fortran 95 compiler Release (?P<version>[^\s]*)'
+
+ executables = {
+ 'version_cmd' : ["f95", "-V"],
+ 'compiler_f77' : ["f95", "-fixed"],
+ 'compiler_fix' : ["f95", "-fixed"],
+ 'compiler_f90' : ["f95"],
+ 'linker_so' : ["f95"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_flags_linker_so(self):
+ if sys.platform=='darwin':
+ return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return ["-Wl,shared"]
+ def get_flags_opt(self):
+ return ['-O4']
+ def get_flags_arch(self):
+ return ['-target=native']
+ def get_flags_debug(self):
+ return ['-g','-gline','-g90','-nan','-C']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='nag')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py
new file mode 100644
index 000000000..984ea7dda
--- /dev/null
+++ b/numpy/distutils/fcompiler/none.py
@@ -0,0 +1,24 @@
+
+from scipy.distutils.fcompiler import FCompiler
+
+class NoneFCompiler(FCompiler):
+
+ compiler_type = 'none'
+
+ executables = {'compiler_f77':['/path/to/nowhere/none'],
+ 'compiler_f90':['/path/to/nowhere/none'],
+ 'compiler_fix':['/path/to/nowhere/none'],
+ 'linker_so':['/path/to/nowhere/none'],
+ 'archiver':['/path/to/nowhere/none'],
+ 'ranlib':['/path/to/nowhere/none'],
+ 'version_cmd':['/path/to/nowhere/none'],
+ }
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = NoneFCompiler()
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
new file mode 100644
index 000000000..bfdf9752c
--- /dev/null
+++ b/numpy/distutils/fcompiler/pg.py
@@ -0,0 +1,42 @@
+
+# http://www.pgroup.com
+
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class PGroupFCompiler(FCompiler):
+
+ compiler_type = 'pg'
+ version_pattern = r'\s*pg(f77|f90|hpf) (?P<version>[\d.-]+).*'
+
+ executables = {
+ 'version_cmd' : ["pgf77", "-V 2>/dev/null"],
+ 'compiler_f77' : ["pgf77"],
+ 'compiler_fix' : ["pgf90", "-Mfixed"],
+ 'compiler_f90' : ["pgf90"],
+ 'linker_so' : ["pgf90","-shared","-fpic"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+ module_dir_switch = '-module '
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = ['-Minform=inform','-Mnosecond_underscore']
+ return self.pic_flags + opt
+ def get_flags_opt(self):
+ return ['-fast']
+ def get_flags_debug(self):
+ return ['-g']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='pg')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py
new file mode 100644
index 000000000..0c0599a06
--- /dev/null
+++ b/numpy/distutils/fcompiler/sun.py
@@ -0,0 +1,47 @@
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler import FCompiler
+
+class SunFCompiler(FCompiler):
+
+ compiler_type = 'sun'
+ version_pattern = r'(f90|f95): (Sun|Forte Developer 7|WorkShop 6 update \d+) Fortran 95 (?P<version>[^\s]+).*'
+
+ executables = {
+ 'version_cmd' : ["f90", "-V"],
+ 'compiler_f77' : ["f90"],
+ 'compiler_fix' : ["f90", "-fixed"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["f90","-Bdynamic","-G"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = '-moddir='
+ module_include_switch = '-M'
+ pic_flags = ['-xcode=pic32']
+
+ def get_flags_f77(self):
+ ret = ["-ftrap=%none"]
+ if (self.get_version() or '') >= '7':
+ ret.append("-f77")
+ else:
+ ret.append("-fixed")
+ return ret
+ def get_opt(self):
+ return ['-fast','-dalign']
+ def get_arch(self):
+ return ['-xtarget=generic']
+ def get_libraries(self):
+ opt = []
+ opt.extend(['fsu','sunmath','mvec','f77compat'])
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='sun')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py
new file mode 100644
index 000000000..1c6b40032
--- /dev/null
+++ b/numpy/distutils/fcompiler/vast.py
@@ -0,0 +1,50 @@
+import os
+import sys
+
+from scipy.distutils.cpuinfo import cpu
+from scipy.distutils.fcompiler.gnu import GnuFCompiler
+
+class VastFCompiler(GnuFCompiler):
+
+ compiler_type = 'vast'
+ version_pattern = r'\s*Pacific-Sierra Research vf90 '\
+ '(Personal|Professional)\s+(?P<version>[^\s]*)'
+
+ # VAST f90 does not support -o with -c. So, object files are created
+ # to the current directory and then moved to build directory
+ object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
+
+ executables = {
+ 'version_cmd' : ["vf90", "-v"],
+ 'compiler_f77' : ["g77"],
+ 'compiler_fix' : ["f90", "-Wv,-ya"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["f90"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = None #XXX Fix me
+ module_include_switch = None #XXX Fix me
+
+ def get_version_cmd(self):
+ f90 = self.compiler_f90[0]
+ d,b = os.path.split(f90)
+ vf90 = os.path.join(d,'v'+b)
+ return vf90
+
+ def get_flags_arch(self):
+ vast_version = self.get_version()
+ gnu = GnuFCompiler()
+ gnu.customize()
+ self.version = gnu.get_version()
+ opt = GnuFCompiler.get_flags_arch(self)
+ self.version = vast_version
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from scipy.distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler(compiler='vast')
+ compiler.customize()
+ print compiler.get_version()
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
new file mode 100644
index 000000000..96866bb9b
--- /dev/null
+++ b/numpy/distutils/from_template.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+"""
+
+process_file(filename)
+
+ takes templated file .xxx.src and produces .xxx file where .xxx
+ is .pyf .f90 or .f using the following template rules:
+
+ '<..>' denotes a template.
+
+ All function and subroutine blocks in a source file with names that
+ contain '<..>' will be replicated according to the rules in '<..>'.
+
+ The number of comma-separated words in '<..>' will determine the number of
+ replicates.
+
+ '<..>' may have two different forms, named and short. For example,
+
+ named:
+ <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
+ 'd', 's', 'z', and 'c' for each replicate of the block.
+
+ <_c> is already defined: <_c=s,d,c,z>
+ <_t> is already defined: <_t=real,double precision,complex,double complex>
+
+ short:
+ <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+ a block.
+
+ In general, '<..>' contains a comma separated list of arbitrary
+ expressions. If these expression must contain a comma|leftarrow|rightarrow,
+ then prepend the comma|leftarrow|rightarrow with a backslash.
+
+ If an expression matches '\\<index>' then it will be replaced
+ by <index>-th expression.
+
+ Note that all '<..>' forms in a block must have the same number of
+ comma-separated entries.
+
+ Predefined named template rules:
+ <prefix=s,d,c,z>
+ <ftype=real,double precision,complex,double complex>
+ <ftypereal=real,double precision,\\0,\\1>
+ <ctype=float,double,complex_float,complex_double>
+ <ctypereal=float,double,\\0,\\1>
+
+"""
+
+__all__ = ['process_str','process_file']
+
+import string,os,sys
+if sys.version[:3]>='2.3':
+ import re
+else:
+ import pre as re
+ False = 0
+ True = 1
+if sys.version[:5]=='2.2.1':
+ import re
+
+routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I)
+function_start_re = re.compile(r'\n (\$|\*)\s*function\b',re.I)
+
+def parse_structure(astr):
+ """ Return a list of tuples for each function or subroutine; each
+ tuple is the start and end of a subroutine or function to be
+ expanded.
+ """
+
+ spanlist = []
+ ind = 0
+ while 1:
+ m = routine_start_re.search(astr,ind)
+ if m is None:
+ break
+ start = m.start()
+ if function_start_re.match(astr,start,m.end()):
+ while 1:
+ i = astr.rfind('\n',ind,start)
+ if i==-1:
+ break
+ start = i
+ if astr[i:i+7]!='\n $':
+ break
+ start += 1
+ m = routine_end_re.search(astr,m.end())
+ ind = end = m and m.end()-1 or len(astr)
+ spanlist.append((start,end))
+ return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+ reps = named_re.findall(astr)
+ names = {}
+ for rep in reps:
+ name = rep[0].strip() or unique_key(names)
+ repl = rep[1].replace('\,','@comma@')
+ thelist = conv(repl)
+ names[name] = thelist
+ return names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+ b = astr.split(',')
+ l = [x.strip() for x in b]
+ for i in range(len(l)):
+ m = item_re.match(l[i])
+ if m:
+ j = int(m.group('index'))
+ l[i] = l[j]
+ return ','.join(l)
+
+def unique_key(adict):
+ """ Obtain a unique key given a dictionary."""
+ allkeys = adict.keys()
+ done = False
+ n = 1
+ while not done:
+ newkey = '__l%s' % (n)
+ if newkey in allkeys:
+ n += 1
+ else:
+ done = True
+ return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr,names):
+ substr = substr.replace('\>','@rightarrow@')
+ substr = substr.replace('\<','@leftarrow@')
+ lnames = find_repl_patterns(substr)
+ substr = named_re.sub(r"<\1>",substr) # get rid of definition templates
+
+ def listrepl(mobj):
+ thelist = conv(mobj.group(1).replace('\,','@comma@'))
+ if template_name_re.match(thelist):
+ return "<%s>" % (thelist)
+ name = None
+ for key in lnames.keys(): # see if list is already in dictionary
+ if lnames[key] == thelist:
+ name = key
+ if name is None: # this list is not in the dictionary yet
+ name = unique_key(lnames)
+ lnames[name] = thelist
+ return "<%s>" % name
+
+ substr = list_re.sub(listrepl, substr) # convert all lists to named templates
+ # newnames are constructed as needed
+
+ numsubs = None
+ base_rule = None
+ rules = {}
+ for r in template_re.findall(substr):
+ if not rules.has_key(r):
+ thelist = lnames.get(r,names.get(r,None))
+ if thelist is None:
+ raise ValueError,'No replicates found for <%s>' % (r)
+ if not names.has_key(r) and not thelist.startswith('_'):
+ names[r] = thelist
+ rule = [i.replace('@comma@',',') for i in thelist.split(',')]
+ num = len(rule)
+
+ if numsubs is None:
+ numsubs = num
+ rules[r] = rule
+ base_rule = r
+ elif num == numsubs:
+ rules[r] = rule
+ else:
+ print "Mismatch in number of replacements (base <%s=%s>)"\
+ " for <%s=%s>. Ignoring." % (base_rule,
+ ','.join(rules[base_rule]),
+ r,thelist)
+ if not rules:
+ return substr
+
+ def namerepl(mobj):
+ name = mobj.group(1)
+ return rules.get(name,(k+1)*[name])[k]
+
+ newstr = ''
+ for k in range(numsubs):
+ newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+ newstr = newstr.replace('@rightarrow@','>')
+ newstr = newstr.replace('@leftarrow@','<')
+ return newstr
+
+def process_str(allstr):
+ newstr = allstr
+ writestr = '' #_head # using _head will break free-format files
+
+ struct = parse_structure(newstr)
+
+ oldend = 0
+ names = {}
+ names.update(_special_names)
+ for sub in struct:
+ writestr += newstr[oldend:sub[0]]
+ names.update(find_repl_patterns(newstr[oldend:sub[0]]))
+ writestr += expand_sub(newstr[sub[0]:sub[1]],names)
+ oldend = sub[1]
+ writestr += newstr[oldend:]
+
+ return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]",re.I)
+
+def resolve_includes(source):
+ d = os.path.dirname(source)
+ fid = open(source)
+ lines = []
+ for line in fid.readlines():
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d,fn)
+ if os.path.isfile(fn):
+ print 'Including file',fn
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
+ else:
+ lines.append(line)
+ fid.close()
+ return lines
+
+def process_file(source):
+ lines = resolve_includes(source)
+ return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ctype=float,double,complex_float,complex_double>
+<ftypereal=real,double precision,\\0,\\1>
+<ctypereal=float,double,\\0,\\1>
+''')
+
+if __name__ == "__main__":
+
+ try:
+ file = sys.argv[1]
+ except IndexError:
+ fid = sys.stdin
+ outfile = sys.stdout
+ else:
+ fid = open(file,'r')
+ (base, ext) = os.path.splitext(file)
+ newname = base
+ outfile = open(newname,'w')
+
+ allstr = fid.read()
+ writestr = process_str(allstr)
+ outfile.write(writestr)
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
new file mode 100644
index 000000000..87b7f564b
--- /dev/null
+++ b/numpy/distutils/intelccompiler.py
@@ -0,0 +1,30 @@
+
+import os
+from distutils.unixccompiler import UnixCCompiler
+from scipy.distutils.exec_command import find_executable
+
+class IntelCCompiler(UnixCCompiler):
+
+ """ A modified Intel compiler compatible with a gcc-built Python.
+ """
+
+ compiler_type = 'intel'
+ cc_exe = 'icc'
+
+ def __init__ (self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__ (self, verbose,dry_run, force)
+ compiler = self.cc_exe
+ self.set_executables(compiler=compiler,
+ compiler_so=compiler,
+ compiler_cxx=compiler,
+ linker_exe=compiler,
+ linker_so=compiler + ' -shared')
+
+class IntelItaniumCCompiler(IntelCCompiler):
+ compiler_type = 'intele'
+
+ # On Itanium, the Intel Compiler used to be called ecc, let's search for
+ # it (now it's also icc, so ecc is last in the search).
+ for cc_exe in map(find_executable,['icc','ecc']):
+ if os.path.isfile(cc_exe):
+ break
diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py
new file mode 100644
index 000000000..36c41f0b5
--- /dev/null
+++ b/numpy/distutils/lib2def.py
@@ -0,0 +1,116 @@
+import re
+import sys
+import os
+import string
+
+__doc__ = """This module generates a DEF file from the symbols in
+an MSVC-compiled DLL import library. It correctly discriminates between
+data and functions. The data is collected from the output of the program
+nm(1).
+
+Usage:
+ python lib2def.py [libname.lib] [output.def]
+or
+ python lib2def.py [libname.lib] > output.def
+
+libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
+
+Author: Robert Kern <kernr@mail.ncifcrf.gov>
+Last Update: April 30, 1999
+"""
+
+__version__ = '0.1a'
+
+import sys
+
+py_ver = "%d%d" % tuple(sys.version_info[:2])
+
+DEFAULT_NM = 'nm -Cs'
+
+DEF_HEADER = """LIBRARY python%s.dll
+;CODE PRELOAD MOVEABLE DISCARDABLE
+;DATA PRELOAD SINGLE
+
+EXPORTS
+""" % py_ver
+# the header of the DEF file
+
+FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
+DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
+
+def parse_cmd():
+ """Parses the command-line arguments.
+
+libfile, deffile = parse_cmd()"""
+ if len(sys.argv) == 3:
+ if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
+ libfile, deffile = sys.argv[1:]
+ elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
+ deffile, libfile = sys.argv[1:]
+ else:
+ print "I'm assuming that your first argument is the library"
+ print "and the second is the DEF file."
+ elif len(sys.argv) == 2:
+ if sys.argv[1][-4:] == '.def':
+ deffile = sys.argv[1]
+ libfile = 'python%s.lib' % py_ver
+ elif sys.argv[1][-4:] == '.lib':
+ deffile = None
+ libfile = sys.argv[1]
+ else:
+ libfile = 'python%s.lib' % py_ver
+ deffile = None
+ return libfile, deffile
+
+def getnm(nm_cmd = 'nm -Cs python%s.lib' % py_ver):
+ """Returns the output of nm_cmd via a pipe.
+
+nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
+ f = os.popen(nm_cmd)
+ nm_output = f.read()
+ f.close()
+ return nm_output
+
+def parse_nm(nm_output):
+ """Returns a tuple of lists: dlist for the list of data
+symbols and flist for the list of function symbols.
+
+dlist, flist = parse_nm(nm_output)"""
+ data = DATA_RE.findall(nm_output)
+ func = FUNC_RE.findall(nm_output)
+
+ flist = []
+ for sym in data:
+ if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
+ flist.append(sym)
+
+ dlist = []
+ for sym in data:
+ if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):
+ dlist.append(sym)
+
+ dlist.sort()
+ flist.sort()
+ return dlist, flist
+
+def output_def(dlist, flist, header, file = sys.stdout):
+ """Outputs the final DEF file to a file defaulting to stdout.
+
+output_def(dlist, flist, header, file = sys.stdout)"""
+ for data_sym in dlist:
+ header = header + '\t%s DATA\n' % data_sym
+ header = header + '\n' # blank line
+ for func_sym in flist:
+ header = header + '\t%s\n' % func_sym
+ file.write(header)
+
+if __name__ == '__main__':
+ libfile, deffile = parse_cmd()
+ if deffile is None:
+ deffile = sys.stdout
+ else:
+ deffile = open(deffile, 'w')
+ nm_cmd = '%s %s' % (DEFAULT_NM, libfile)
+ nm_output = getnm(nm_cmd)
+ dlist, flist = parse_nm(nm_output)
+ output_def(dlist, flist, DEF_HEADER, deffile)
diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py
new file mode 100644
index 000000000..4f30af06a
--- /dev/null
+++ b/numpy/distutils/line_endings.py
@@ -0,0 +1,75 @@
+""" Functions for converting from DOS to UNIX line endings
+"""
+
+import sys, re, os
+
+def dos2unix(file):
+ "Replace CRLF with LF in argument files. Print names of changed files."
+ if os.path.isdir(file):
+ print file, "Directory!"
+ return
+
+ data = open(file, "rb").read()
+ if '\0' in data:
+ print file, "Binary!"
+ return
+
+ newdata = re.sub("\r\n", "\n", data)
+ if newdata != data:
+ print 'dos2unix:', file
+ f = open(file, "wb")
+ f.write(newdata)
+ f.close()
+ return file
+ else:
+ print file, 'ok'
+
+def dos2unix_one_dir(modified_files,dir_name,file_names):
+ for file in file_names:
+ full_path = os.path.join(dir_name,file)
+ file = dos2unix(full_path)
+ if file is not None:
+ modified_files.append(file)
+
+def dos2unix_dir(dir_name):
+ modified_files = []
+ os.path.walk(dir_name,dos2unix_one_dir,modified_files)
+ return modified_files
+#----------------------------------
+
+def unix2dos(file):
+ "Replace LF with CRLF in argument files. Print names of changed files."
+ if os.path.isdir(file):
+ print file, "Directory!"
+ return
+
+ data = open(file, "rb").read()
+ if '\0' in data:
+ print file, "Binary!"
+ return
+ newdata = re.sub("\r\n", "\n", data)
+ newdata = re.sub("\n", "\r\n", newdata)
+ if newdata != data:
+ print 'unix2dos:', file
+ f = open(file, "wb")
+ f.write(newdata)
+ f.close()
+ return file
+ else:
+ print file, 'ok'
+
+def unix2dos_one_dir(modified_files,dir_name,file_names):
+ for file in file_names:
+ full_path = os.path.join(dir_name,file)
+ unix2dos(full_path)
+ if file is not None:
+ modified_files.append(file)
+
+def unix2dos_dir(dir_name):
+ modified_files = []
+ os.path.walk(dir_name,unix2dos_one_dir,modified_files)
+ return modified_files
+
+if __name__ == "__main__":
+ import sys
+ dos2unix_dir(sys.argv[1])
diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py
new file mode 100644
index 000000000..6d72f9222
--- /dev/null
+++ b/numpy/distutils/log.py
@@ -0,0 +1,47 @@
+# Colored log, requires Python 2.3 or up.
+
+import sys
+from distutils.log import *
+from distutils.log import Log as old_Log
+from distutils.log import _global_log
+from misc_util import red_text, yellow_text, cyan_text
+
+
+def _fix_args(args,flag=1):
+ if type(args) is type(''):
+ return args.replace('%','%%')
+ if flag and type(args) is type(()):
+ return tuple([_fix_args(a,flag=0) for a in args])
+ return args
+
+class Log(old_Log):
+ def _log(self, level, msg, args):
+ if level>= self.threshold:
+ if args:
+ print _global_color_map[level](msg % _fix_args(args))
+ else:
+ print _global_color_map[level](msg)
+ sys.stdout.flush()
+_global_log.__class__ = Log
+
+def set_verbosity(v):
+ prev_level = _global_log.threshold
+ if v<0:
+ set_threshold(ERROR)
+ elif v == 0:
+ set_threshold(WARN)
+ elif v == 1:
+ set_threshold(INFO)
+ elif v >= 2:
+ set_threshold(DEBUG)
+ return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1)
+
+_global_color_map = {
+ DEBUG:cyan_text,
+ INFO:yellow_text,
+ WARN:red_text,
+ ERROR:red_text,
+ FATAL:red_text
+}
+
+set_verbosity(1)
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
new file mode 100644
index 000000000..fe7b4bd98
--- /dev/null
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -0,0 +1,219 @@
+"""
+Support code for building Python extensions on Windows.
+
+ # NT stuff
+ # 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+ # 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+ # 3. Force windows to use g77
+
+"""
+
+import os
+import sys
+import log
+
+# Overwrite certain distutils.ccompiler functions:
+import scipy.distutils.ccompiler
+
+# NT stuff
+# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+# --> this is done in scipy/distutils/ccompiler.py
+# 3. Force windows to use g77
+
+import distutils.cygwinccompiler
+from distutils.version import StrictVersion
+from scipy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
+from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
+
+from distutils.unixccompiler import UnixCCompiler
+
+# the same as cygwin plus some additional parameters
+class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
+ """ A modified MingW32 compiler compatible with an MSVC built Python.
+
+ """
+
+ compiler_type = 'mingw32'
+
+ def __init__ (self,
+ verbose=0,
+ dry_run=0,
+ force=0):
+
+ distutils.cygwinccompiler.CygwinCCompiler.__init__ (self,
+ verbose,dry_run, force)
+
+ # we need to support 3.2 which doesn't match the standard
+ # get_versions methods regex
+ if self.gcc_version is None:
+ import re
+ out = os.popen('gcc -dumpversion','r')
+ out_string = out.read()
+ out.close()
+ result = re.search('(\d+\.\d+)',out_string)
+ if result:
+ self.gcc_version = StrictVersion(result.group(1))
+
+ # A real mingw32 doesn't need to specify a different entry point,
+ # but cygwin 2.91.57 in no-cygwin-mode needs it.
+ if self.gcc_version <= "2.91.57":
+ entry_point = '--entry _DllMain@12'
+ else:
+ entry_point = ''
+
+ if self.linker_dll == 'dllwrap':
+ self.linker = 'dllwrap --driver-name g++'
+ elif self.linker_dll == 'gcc':
+ self.linker = 'g++'
+
+ # **changes: eric jones 4/11/01
+ # 1. Check for import library on Windows. Build if it doesn't exist.
+
+ build_import_library()
+
+ # **changes: eric jones 4/11/01
+ # 2. increased optimization and turned off all warnings
+ # 3. also added --driver-name g++
+ #self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
+ # compiler_so='gcc -mno-cygwin -mdll -O2 -w',
+ # linker_exe='gcc -mno-cygwin',
+ # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s'
+ # % (self.linker, entry_point))
+ if self.gcc_version <= "3.0.0":
+ self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
+ compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes',
+ linker_exe='g++ -mno-cygwin',
+ linker_so='%s -mno-cygwin -mdll -static %s'
+ % (self.linker, entry_point))
+ else:
+ self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
+ compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
+ linker_exe='g++ ',
+ linker_so='g++ -shared')
+ # added for python2.3 support
+ # we can't pass it through set_executables because pre 2.2 would fail
+ self.compiler_cxx = ['g++']
+
+ # Maybe we should also append -mthreads, but then the finished
+ # dlls need another dll (mingwm10.dll see Mingw32 docs)
+ # (-mthreads: Support thread-safe exception handling on `Mingw32')
+
+ # no additional libraries needed -- maybe need msvcr71
+ #self.dll_libraries=[]
+ return
+
+ # __init__ ()
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir,
+ libraries,
+ library_dirs,
+ runtime_library_dirs,
+ export_symbols = None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ if sys.version[:3] > '2.3':
+ if libraries:
+ libraries.append('msvcr71')
+ else:
+ libraries = ['msvcr71']
+ args = (self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir,
+ libraries,
+ library_dirs,
+ runtime_library_dirs,
+ None, #export_symbols, we do this in our def-file
+ debug,
+ extra_preargs,
+ extra_postargs,
+ build_temp,
+ target_lang)
+ if self.gcc_version < "3.0.0":
+ func = distutils.cygwinccompiler.CygwinCCompiler.link
+ else:
+ func = UnixCCompiler.link
+ func(*args[:func.im_func.func_code.co_argcount])
+ return
+
+ def object_filenames (self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+ (base, ext) = os.path.splitext (os.path.normcase(src_name))
+
+ # added these lines to strip off windows drive letters
+ # without it, .o files are placed next to .c files
+ # instead of the build directory
+ drv,base = os.path.splitdrive(base)
+ if drv:
+ base = base[1:]
+
+ if ext not in (self.src_extensions + ['.rc','.res']):
+ raise UnknownFileError, \
+ "unknown file type '%s' (from '%s')" % \
+ (ext, src_name)
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext == '.res' or ext == '.rc':
+ # these need to be compiled to object files
+ obj_names.append (os.path.join (output_dir,
+ base + ext + self.obj_extension))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+ # object_filenames ()
+
+
+def build_import_library():
+ """ Build the import libraries for Mingw32-gcc on Windows
+ """
+ if os.name != 'nt':
+ return
+ lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
+ lib_file = os.path.join(sys.prefix,'libs',lib_name)
+ out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
+ out_file = os.path.join(sys.prefix,'libs',out_name)
+ if not os.path.isfile(lib_file):
+ log.warn('Cannot build import library: "%s" not found' % (lib_file))
+ return
+ if os.path.isfile(out_file):
+ log.debug('Skip building import library: "%s" exists' % (out_file))
+ return
+ log.info('Building import library: "%s"' % (out_file))
+
+ from scipy.distutils import lib2def
+
+ def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+ def_file = os.path.join(sys.prefix,'libs',def_name)
+ nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
+ nm_output = lib2def.getnm(nm_cmd)
+ dlist, flist = lib2def.parse_nm(nm_output)
+ lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
+
+ dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
+ args = (dll_name,def_file,out_file)
+ cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
+ status = os.system(cmd)
+ # for now, fail silently
+ if status:
+ log.warn('Failed to build import library for gcc. Linking will fail.')
+ #if not success:
+ # msg = "Couldn't find import library, and failed to build it."
+ # raise DistutilsPlatformError, msg
+ return
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
new file mode 100644
index 000000000..f94220c74
--- /dev/null
+++ b/numpy/distutils/misc_util.py
@@ -0,0 +1,988 @@
+import os
+import re
+import sys
+import imp
+import copy
+import types
+import glob
+
+def allpath(name):
+ "Convert a /-separated pathname to one using the OS's path separator."
+ splitted = name.split('/')
+ return os.path.join(*splitted)
+
+def get_path(mod_name,parent_path=None):
+ """ Return path of the module.
+
+ Returned path is relative to parent_path when given,
+ otherwise it is absolute path.
+ """
+ if mod_name == '__main__':
+ d = os.path.abspath('.')
+ elif mod_name == '__builtin__':
+ #builtin if/then added by Pearu for use in core.run_setup.
+ d = os.path.dirname(os.path.abspath(sys.argv[0]))
+ else:
+ __import__(mod_name)
+ mod = sys.modules[mod_name]
+ file = mod.__file__
+ d = os.path.dirname(os.path.abspath(file))
+ if parent_path is not None:
+ pd = os.path.abspath(parent_path)
+ if pd==d[:len(pd)]:
+ d = d[len(pd)+1:]
+ return d or '.'
+
+# Hooks for colored terminal output.
+# See also http://www.livinglogic.de/Python/ansistyle
+def terminal_has_colors():
+ if sys.platform=='cygwin' and not os.environ.has_key('USE_COLOR'):
+ # Avoid importing curses that causes illegal operation
+ # with a message:
+ # PYTHON2 caused an invalid page fault in
+ # module CYGNURSES7.DLL as 015f:18bbfc28
+ # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
+ # ssh to Win32 machine from debian
+ # curses.version is 2.2
+ # CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
+ return 0
+ if hasattr(sys.stdout,'isatty') and sys.stdout.isatty():
+ try:
+ import curses
+ curses.setupterm()
+ if (curses.tigetnum("colors") >= 0
+ and curses.tigetnum("pairs") >= 0
+ and ((curses.tigetstr("setf") is not None
+ and curses.tigetstr("setb") is not None)
+ or (curses.tigetstr("setaf") is not None
+ and curses.tigetstr("setab") is not None)
+ or curses.tigetstr("scp") is not None)):
+ return 1
+ except Exception,msg:
+ pass
+ return 0
+
+if terminal_has_colors():
+ def red_text(s): return '\x1b[31m%s\x1b[0m'%s
+ def green_text(s): return '\x1b[32m%s\x1b[0m'%s
+ def yellow_text(s): return '\x1b[33m%s\x1b[0m'%s
+ def blue_text(s): return '\x1b[34m%s\x1b[0m'%s
+ def cyan_text(s): return '\x1b[35m%s\x1b[0m'%s
+else:
+ def red_text(s): return s
+ def green_text(s): return s
+ def yellow_text(s): return s
+ def cyan_text(s): return s
+ def blue_text(s): return s
+
+#########################
+
+def cyg2win32(path):
+ if sys.platform=='cygwin' and path.startswith('/cygdrive'):
+ path = path[10] + ':' + os.path.normcase(path[11:])
+ return path
+
+#########################
+
+#XXX need support for .C that is also C++
+cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match
+fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match
+f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match
+f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)',re.I).match
+def _get_f90_modules(source):
+ """ Return a list of Fortran f90 module names that
+ given source file defines.
+ """
+ if not f90_ext_match(source):
+ return []
+ modules = []
+ f = open(source,'r')
+ f_readlines = getattr(f,'xreadlines',f.readlines)
+ for line in f_readlines():
+ m = f90_module_name_match(line)
+ if m:
+ name = m.group('name')
+ modules.append(name)
+ # break # XXX can we assume that there is one module per file?
+ f.close()
+ return modules
+
+def all_strings(lst):
+ """ Return True if all items in lst are string objects. """
+ for item in lst:
+ if type(item) is not types.StringType:
+ return False
+ return True
+
+def has_f_sources(sources):
+ """ Return True if sources contains Fortran files """
+ for source in sources:
+ if fortran_ext_match(source):
+ return True
+ return False
+
+def has_cxx_sources(sources):
+ """ Return True if sources contains C++ files """
+ for source in sources:
+ if cxx_ext_match(source):
+ return True
+ return False
+
+def filter_sources(sources):
+ """ Return four lists of filenames containing
+ C, C++, Fortran, and Fortran 90 module sources,
+ respectively.
+ """
+ c_sources = []
+ cxx_sources = []
+ f_sources = []
+ fmodule_sources = []
+ for source in sources:
+ if fortran_ext_match(source):
+ modules = _get_f90_modules(source)
+ if modules:
+ fmodule_sources.append(source)
+ else:
+ f_sources.append(source)
+ elif cxx_ext_match(source):
+ cxx_sources.append(source)
+ else:
+ c_sources.append(source)
+ return c_sources, cxx_sources, f_sources, fmodule_sources
+
+
+def _get_headers(directory_list):
+ # get *.h files from list of directories
+ headers = []
+ for dir in directory_list:
+ head = glob.glob(os.path.join(dir,"*.h")) #XXX: *.hpp files??
+ headers.extend(head)
+ return headers
+
+def _get_directories(list_of_sources):
+ # get unique directories from list of sources.
+ direcs = []
+ for file in list_of_sources:
+ dir = os.path.split(file)
+ if dir[0] != '' and not dir[0] in direcs:
+ direcs.append(dir[0])
+ return direcs
+
+def get_dependencies(sources):
+ #XXX scan sources for include statements
+ return _get_headers(_get_directories(sources))
+
+def is_local_src_dir(directory):
+ """ Return true if directory is local directory.
+ """
+ if type(directory) is not type(''):
+ return False
+ abs_dir = os.path.abspath(directory)
+ c = os.path.commonprefix([os.getcwd(),abs_dir])
+ new_dir = abs_dir[len(c):].split(os.sep)
+ if new_dir and not new_dir[0]:
+ new_dir = new_dir[1:]
+ if new_dir and new_dir[0]=='build':
+ return False
+ new_dir = os.sep.join(new_dir)
+ return os.path.isdir(new_dir)
+
+def _gsf_visit_func(filenames,dirname,names):
+ if os.path.basename(dirname) in ['CVS','.svn','build']:
+ names[:] = []
+ return
+ for name in names:
+ if name[-1] in "~#":
+ continue
+ fullname = os.path.join(dirname,name)
+ ext = os.path.splitext(fullname)[1]
+ if ext and ext in ['.pyc','.o']:
+ continue
+ if os.path.isfile(fullname):
+ filenames.append(fullname)
+
+def get_ext_source_files(ext):
+ # Get sources and any include files in the same directory.
+ filenames = []
+ sources = filter(lambda s:type(s) is types.StringType,ext.sources)
+ filenames.extend(sources)
+ filenames.extend(get_dependencies(sources))
+ for d in ext.depends:
+ if is_local_src_dir(d):
+ os.path.walk(d,_gsf_visit_func,filenames)
+ elif os.path.isfile(d):
+ filenames.append(d)
+ return filenames
+
+def get_script_files(scripts):
+ scripts = filter(lambda s:type(s) is types.StringType,scripts)
+ return scripts
+
+def get_lib_source_files(lib):
+ filenames = []
+ sources = lib[1].get('sources',[])
+ sources = filter(lambda s:type(s) is types.StringType,sources)
+ filenames.extend(sources)
+ filenames.extend(get_dependencies(sources))
+ depends = lib[1].get('depends',[])
+ for d in depends:
+ if is_local_src_dir(d):
+ os.path.walk(d,_gsf_visit_func,filenames)
+ elif os.path.isfile(d):
+ filenames.append(d)
+ return filenames
+
+def get_data_files(data):
+ if type(data) is types.StringType:
+ return [data]
+ sources = data[1]
+ filenames = []
+ for s in sources:
+ if callable(s):
+ s = s()
+ if s is None:
+ continue
+ if is_local_src_dir(s):
+ os.path.walk(s,_gsf_visit_func,filenames)
+ elif type(s) is type(''):
+ if os.path.isfile(s):
+ filenames.append(s)
+ else:
+ print 'Not existing data file:',s
+ else:
+ raise TypeError,`s`
+ return filenames
+
+def dot_join(*args):
+ return '.'.join(filter(None,args))
+
+def get_frame(level=0):
+ try:
+ return sys._getframe(level+1)
+ except AttributeError:
+ frame = sys.exc_info()[2].tb_frame
+ for i in range(level+1):
+ frame = frame.f_back
+ return frame
+
+######################
+
+class Configuration:
+
+ _list_keys = ['packages','ext_modules','data_files','include_dirs',
+ 'libraries','headers','scripts']
+ _dict_keys = ['package_dir']
+
+ scipy_include_dirs = []
+
    def __init__(self,
                 package_name=None,
                 parent_name=None,
                 top_path=None,
                 package_path=None,
                 **attrs):
        """ Construct configuration instance of a package.

        Extra keyword arguments are stored as instance attributes and
        tracked in list_keys/dict_keys/extra_keys so that todict() and
        dict_append() can round-trip them.
        """
        self.name = dot_join(parent_name, package_name)

        # Locate the directory of the calling setup file so relative
        # paths can be resolved against it.
        caller_frame = get_frame(1)
        caller_name = eval('__name__',caller_frame.f_globals,caller_frame.f_locals)

        self.local_path = get_path(caller_name, top_path)
        if top_path is None:
            top_path = self.local_path
        if package_path is None:
            package_path = self.local_path
        elif os.path.isdir(os.path.join(self.local_path,package_path)):
            # Relative package_path is interpreted relative to local_path.
            package_path = os.path.join(self.local_path,package_path)
        assert os.path.isdir(package_path),`package_path`
        self.top_path = top_path

        # Per-instance copies of the known distutils keyword containers.
        self.list_keys = copy.copy(self._list_keys)
        self.dict_keys = copy.copy(self._dict_keys)

        for n in self.list_keys:
            setattr(self,n,copy.copy(attrs.get(n,[])))

        for n in self.dict_keys:
            setattr(self,n,copy.copy(attrs.get(n,{})))

        # Unknown keywords: classify by value type so later merges know
        # whether to extend, update, or overwrite them.
        known_keys = self.list_keys + self.dict_keys
        self.extra_keys = []
        for n in attrs.keys():
            if n in known_keys:
                continue
            a = attrs[n]
            setattr(self,n,a)
            if type(a) is types.ListType:
                self.list_keys.append(n)
            elif type(a) is types.DictType:
                self.dict_keys.append(n)
            else:
                self.extra_keys.append(n)

        # Register this package itself when it looks like a Python package.
        if os.path.exists(os.path.join(package_path,'__init__.py')):
            self.packages.append(self.name)
            self.package_dir[self.name] = package_path
        return
+
    def todict(self):
        """ Return configuration dictionary suitable for passing
        to distutils.core.setup() function.
        """
        d = {}
        # Only non-empty values are emitted.
        for n in self.list_keys + self.dict_keys + self.extra_keys:
            a = getattr(self,n)
            if a:
                d[n] = a
        if self.name:
            d['name'] = self.name
        return d
+
    # NOTE(review): defining __dict__ as a method is unusual and shadows
    # the normal attribute-dictionary name on the class; it is kept
    # verbatim for backward compatibility -- confirm before changing.
    def __dict__(self):
        return self.todict()
+
    def get_distribution(self):
        """Return distutils' current Distribution object (may be None
        when setup() has not been invoked yet -- callers check for None)."""
        import distutils.core
        dist = distutils.core._setup_distribution
        return dist
+
    def get_subpackage(self,subpackage_name,subpackage_path=None):
        """ Return subpackage configuration.

        Looks for setup_<name>.py, then setup.py, in the subpackage
        directory and calls its configuration() function; falls back to a
        default Configuration when neither defines one.
        """
        if subpackage_name is None:
            # Derive the package name from the last path component.
            assert subpackage_path is not None
            subpackage_name = os.path.basename(subpackage_path)
        assert '.' not in subpackage_name,`subpackage_name`
        if subpackage_path is None:
            subpackage_path = os.path.join(self.local_path,subpackage_name)
        else:
            subpackage_path = self._fix_paths([subpackage_path])[0]

        # Prefer setup_<name>.py over plain setup.py.
        setup_py = os.path.join(subpackage_path,'setup_%s.py' % (subpackage_name))
        if not os.path.isfile(setup_py):
            setup_py = os.path.join(subpackage_path,'setup.py')
        if not os.path.isfile(setup_py):
            print 'Assuming default configuration '\
                  '(%s/{setup_%s,setup}.py was not found)' \
                  % (os.path.dirname(setup_py),subpackage_name)
            config = Configuration(subpackage_name,self.name,
                                   self.top_path,subpackage_path)
        else:
            # In case setup_py imports local modules:
            sys.path.insert(0,os.path.dirname(setup_py))
            try:
                info = (open(setup_py),setup_py,('.py','U',1))
                setup_name = os.path.splitext(os.path.basename(setup_py))[0]
                n = dot_join(self.name,setup_name)
                setup_module = imp.load_module('_'.join(n.split('.')),*info)

                if not hasattr(setup_module,'configuration'):
                    print 'Assuming default configuration '\
                          '(%s does not define configuration())' % (setup_module)
                    config = Configuration(subpackage_name,self.name,
                                           self.top_path,subpackage_path)
                else:
                    # Pass top_path only when configuration() accepts a
                    # second argument.
                    args = (self.name,)
                    if setup_module.configuration.func_code.co_argcount>1:
                        args = args + (self.top_path,)
                    config = setup_module.configuration(*args)

            finally:
                del sys.path[0]

        return config
+
    def add_subpackage(self,subpackage_name,subpackage_path=None):
        """ Add subpackage to configuration.
        """
        config = self.get_subpackage(subpackage_name,subpackage_path)

        if not config:
            print 'No configuration returned, assuming unavailable.'
        else:

            # configuration() may return a Configuration or a plain dict.
            if isinstance(config,Configuration):
                print 'Appending %s configuration to %s' % (config.name,self.name)
                self.dict_append(**config.todict())
            else:
                print 'Appending %s configuration to %s' % (config.get('name'),self.name)
                self.dict_append(**config)

        dist = self.get_distribution()
        if dist is not None:
            print 'distutils distribution has been initialized, it may be too late to add a subpackage', subpackage_name
        return
+
    def add_data_dir(self,data_path):
        """ Recursively add files under data_path to data_files list.
        Argument can be either
        - 2-sequence (<datadir suffix>,<path to data directory>)
        - path to data directory where python datadir suffix defaults
          to package dir.
        If path is not absolute then its datadir suffix is
        package dir + subdirname of the path.
        """
        if type(data_path) is type(()):
            assert len(data_path)==2,`data_path`
            d,data_path = data_path
        else:
            d = None
        assert type(data_path) is type(''),`data_path`
        for path in self.paths(data_path):
            if not os.path.exists(path):
                print 'Not existing data path',path
                continue
            # Collect every file below path.
            filenames = []
            os.path.walk(path, _gsf_visit_func,filenames)
            if not os.path.isabs(path):
                if d is None:
                    # Default suffix: package dir + the relative data path.
                    ds = os.path.join(*(self.name.split('.')+[data_path]))
                else:
                    ds = os.path.join(d,data_path)
                self.add_data_files((ds,filenames))
            else:
                if d is None:
                    self.add_data_files(*filenames)
                else:
                    self.add_data_files((d,filenames))
        return
+
    def add_data_files(self,*files):
        """ Add data files to configuration data_files.
        Argument(s) can be either
        - 2-sequence (<datadir prefix>,<path to data file(s)>)
        - paths to data files where python datadir prefix defaults
          to package dir.
        If path is not absolute then its datadir prefix is
        package dir + dirname of the path.
        """
        data_dict = {}
        new_files = []
        # Normalize every argument to a (prefix, pattern) pair.
        for p in files:
            if type(p) is not type(()):
                d = os.path.join(*(self.name.split('.')))
                if type(p) is type('') and not os.path.isabs(p):
                    d = appendpath(d,os.path.dirname(p))
                p = (d,p)
            new_files.append(p)
        files = []
        for prefix,filepattern in new_files:
            if type(filepattern) is type(''):
                file_list = self.paths(filepattern)
            elif callable(filepattern):
                # Callables are source generators, resolved at build time.
                file_list = [filepattern]
            else:
                file_list = self.paths(*filepattern)

            # Strip the leading path components shared by all files so the
            # remaining subdirectory structure is kept under the prefix.
            nof_path_components = [len(f.split(os.sep)) \
                                   for f in file_list if type(f) is type('')]
            if nof_path_components:
                min_path_components = min(nof_path_components)-1
            else:
                min_path_components = 0

            for f in file_list:
                if type(f) is type(''):
                    extra_path_components = f.split(os.sep)[min_path_components:-1]
                    p = os.path.join(*([prefix]+extra_path_components))
                else:
                    p = prefix
                if not data_dict.has_key(p):
                    data_dict[p] = [f]
                else:
                    data_dict[p].append(f)

        # When setup() is already running, update the live distribution.
        dist = self.get_distribution()
        if dist is not None:
            dist.data_files.extend(data_dict.items())
        else:
            self.data_files.extend(data_dict.items())
        return
+
    def add_include_dirs(self,*paths):
        """ Add paths to configuration include directories.
        """
        include_dirs = self._fix_paths(paths)
        # When setup() is already running, update the live distribution.
        dist = self.get_distribution()
        if dist is not None:
            dist.include_dirs.extend(include_dirs)
        else:
            self.include_dirs.extend(include_dirs)
        return
+
    def add_headers(self,*files):
        """ Add installable headers to configuration.
        Argument(s) can be either
        - 2-sequence (<includedir suffix>,<path to header file(s)>)
        - path(s) to header file(s) where python includedir suffix will default
          to package name.
        """
        headers = []
        for path in files:
            if type(path) is type(''):
                [headers.append((self.name,p)) for p in self.paths(path)]
            else:
                assert type(path) in [type(()),type([])] and len(path)==2,`path`
                [headers.append((path[0],p)) for p in self.paths(path[1])]
        dist = self.get_distribution()
        if dist is not None:
            dist.headers.extend(headers)
        else:
            self.headers.extend(headers)
        return
+
    def _fix_paths(self,paths):
        """Expand glob patterns and resolve relative paths against
        local_path; non-string entries are passed through untouched."""
        new_paths = []
        for n in paths:
            if isinstance(n,str):
                if '*' in n or '?' in n:
                    p = glob.glob(n)
                    p2 = glob.glob(os.path.join(self.local_path,n))
                    # Matches relative to local_path take precedence over
                    # cwd matches; an unmatched pattern is kept verbatim.
                    if p2:
                        new_paths.extend(p2)
                    elif p:
                        new_paths.extend(p)
                    else:
                        new_paths.append(n)
                else:
                    n2 = os.path.join(self.local_path,n)
                    if os.path.exists(n2):
                        new_paths.append(n2)
                    else:
                        new_paths.append(n)
            else:
                new_paths.append(n)
        return new_paths
+
    def paths(self,*paths):
        """ Apply glob to paths and prepend local_path if needed.
        """
        # Public wrapper around _fix_paths for variadic arguments.
        return self._fix_paths(paths)
+
    def add_extension(self,name,sources,**kw):
        """ Add extension to configuration.

        Keywords:
          include_dirs, define_macros, undef_macros,
          library_dirs, libraries, runtime_library_dirs,
          extra_objects, extra_compile_args, extra_link_args,
          export_symbols, swig_opts, depends, language,
          f2py_options, module_dirs
          extra_info - dict or list of dict of keywords to be
          appended to keywords.
        """
        ext_args = copy.copy(kw)
        ext_args['name'] = dot_join(self.name,name)
        ext_args['sources'] = sources

        # Fold extra_info dict(s) into the regular keyword arguments.
        if ext_args.has_key('extra_info'):
            extra_info = ext_args['extra_info']
            del ext_args['extra_info']
            if type(extra_info) is type({}):
                extra_info = [extra_info]
            for info in extra_info:
                assert type(info) is type({}),`info`
                dict_append(ext_args,**info)

        # Resolve path-like keyword values relative to local_path.
        for k in ext_args.keys():
            v = ext_args[k]
            if k in ['sources','depends','include_dirs','library_dirs',
                     'module_dirs','extra_objects']:
                new_v = self._fix_paths(v)
                ext_args[k] = new_v

        # Resolve out-of-tree dependencies
        libraries = ext_args.get('libraries',[])
        libnames = []
        ext_args['libraries'] = []
        for libname in libraries:
            if '@' in libname:
                # '<libname>@<relative path>' pulls in the build info of a
                # library defined by another sub-package's configuration.
                lname,lpath = libname.split('@',1)
                lpath = os.path.abspath(os.path.join(self.local_path,lpath))
                if os.path.isdir(lpath):
                    c = self.get_subpackage(None,lpath)
                    if isinstance(c,Configuration):
                        c = c.todict()
                    for l in [l[0] for l in c.get('libraries',[])]:
                        llname = l.split('__OF__',1)[0]
                        if llname == lname:
                            c.pop('name',None)
                            dict_append(ext_args,**c)
                            break
                    continue
            libnames.append(libname)

        ext_args['libraries'] = libnames + ext_args['libraries']

        from scipy.distutils.core import Extension
        ext = Extension(**ext_args)
        self.ext_modules.append(ext)

        dist = self.get_distribution()
        if dist is not None:
            print 'distutils distribution has been initialized, it may be too late to add an extension', name
        return ext
+
    def add_library(self,name,sources,**build_info):
        """ Add library to configuration.

        Valid keywords for build_info:
          depends
          macros
          include_dirs
          extra_compiler_args
          f2py_options
        """
        build_info = copy.copy(build_info)
        name = name #+ '__OF__' + self.name
        build_info['sources'] = sources

        # Resolve path-like build_info values relative to local_path.
        for k in build_info.keys():
            v = build_info[k]
            if k in ['sources','depends']:
                new_v = self._fix_paths(v)
                build_info[k] = new_v
        self.libraries.append((name,build_info))

        dist = self.get_distribution()
        if dist is not None:
            print 'distutils distribution has been initialized, it may be too late to add a library', name
        return
+
    def add_scripts(self,*files):
        """ Add scripts to configuration.
        """
        scripts = self._fix_paths(files)
        # When setup() is already running, update the live distribution.
        dist = self.get_distribution()
        if dist is not None:
            dist.scripts.extend(scripts)
        else:
            self.scripts.extend(scripts)
        return
+
    def dict_append(self,**dict):
        """Merge another configuration's keyword dictionary into this
        instance: list keys are extended, dict keys are updated, unknown
        keys become new attributes."""
        # NOTE: the parameter name shadows the builtin 'dict'; kept as-is
        # since it is part of the historical interface.
        for key in self.list_keys:
            a = getattr(self,key)
            a.extend(dict.get(key,[]))
        for key in self.dict_keys:
            a = getattr(self,key)
            a.update(dict.get(key,{}))
        known_keys = self.list_keys + self.dict_keys + self.extra_keys
        for key in dict.keys():
            if key not in known_keys and not hasattr(self,key):
                print 'Inheriting attribute %r from %r' \
                      % (key,dict.get('name','?'))
                setattr(self,key,dict[key])
                self.extra_keys.append(key)
        return
+
    def __str__(self):
        """Return a human-readable dump of all non-empty configuration keys."""
        known_keys = self.list_keys + self.dict_keys + self.extra_keys
        s = '<'+5*'-' + '\n'
        s += 'Configuration of '+self.name+':\n'
        for k in known_keys:
            a = getattr(self,k,None)
            if a:
                s += '%s = %r\n' % (k,a)
        s += 5*'-' + '>'
        return s
+
    def get_config_cmd(self):
        """Return a finalized, quiet distutils 'config' command object."""
        cmd = get_cmd('config')
        cmd.ensure_finalized()
        cmd.dump_source = 0
        cmd.noisy = 0
        old_path = os.environ.get('PATH')
        if old_path:
            # NOTE(review): '.' is prepended to PATH and never restored;
            # this mutates the process environment permanently.
            path = os.pathsep.join(['.',old_path])
            os.environ['PATH'] = path
        return cmd
+
    def get_build_temp_dir(self):
        """Return the temporary build directory of the 'build' command."""
        cmd = get_cmd('build')
        cmd.ensure_finalized()
        return cmd.build_temp
+
    def have_f77c(self):
        """ Check for availability of Fortran 77 compiler.
        Use it inside source generating function to ensure that
        setup distribution instance has been initialized.
        """
        simple_fortran_subroutine = '''
        subroutine simple
        end
        '''
        config_cmd = self.get_config_cmd()
        flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77')
        return flag
+
    def have_f90c(self):
        """ Check for availability of Fortran 90 compiler.
        Use it inside source generating function to ensure that
        setup distribution instance has been initialized.
        """
        simple_fortran_subroutine = '''
        subroutine simple
        end
        '''
        config_cmd = self.get_config_cmd()
        flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90')
        return flag
+
    def append_to(self, extlib):
        """ Append libraries, include_dirs to extension or library item.
        """
        if type(extlib) is type(()):
            # (name, build_info) library tuple: merge into build_info.
            lib_name, build_info = extlib
            dict_append(build_info,
                        libraries=self.libraries,
                        include_dirs=self.include_dirs)
        else:
            from scipy.distutils.core import Extension
            assert isinstance(extlib,Extension),`extlib`
            extlib.libraries.extend(self.libraries)
            extlib.include_dirs.extend(self.include_dirs)
        return
+
    def _get_svn_revision(self,path):
        """ Return path's SVN revision number.

        Parses the revision attribute out of the .svn/entries file;
        returns None when the file is absent or has no revision.
        """
        entries = os.path.join(path,'.svn','entries')
        revision = None
        if os.path.isfile(entries):
            f = open(entries)
            m = re.search(r'revision="(?P<revision>\d+)"',f.read())
            f.close()
            if m:
                revision = int(m.group('revision'))
        return revision
+
    def get_version(self):
        """ Try to get version string of a package.

        Order: cached self.version, then candidate version files in
        local_path, finally the SVN revision number.
        """
        version = getattr(self,'version',None)
        if version is not None:
            return version

        # Get version from version file.
        files = ['__version__.py',
                 self.name.split('.')[-1]+'_version.py',
                 'version.py',
                 '__svn_version__.py']
        version_vars = ['version',
                        '__version__',
                        self.name.split('.')[-1]+'_version']
        for f in files:
            fn = os.path.join(self.local_path,f)
            if os.path.isfile(fn):
                info = (open(fn),fn,('.py','U',1))
                name = os.path.splitext(os.path.basename(fn))[0]
                n = dot_join(self.name,name)
                try:
                    version_module = imp.load_module('_'.join(n.split('.')),*info)
                except ImportError,msg:
                    print msg
                    version_module = None
                if version_module is None:
                    continue

                for a in version_vars:
                    version = getattr(version_module,a,None)
                    if version is not None:
                        break
                if version is not None:
                    break

        if version is not None:
            # Cache for subsequent calls.
            self.version = version
            return version

        # Get version as SVN revision number
        revision = self._get_svn_revision(self.local_path)
        if revision is not None:
            version = str(revision)
            self.version = version

        return version
+
    def make_svn_version_py(self):
        """ Generate package __svn_version__.py file from SVN revision number,
        it will be removed after python exits but will be available
        when sdist, etc commands are executed.

        If __svn_version__.py existed before, nothing is done.
        """
        target = os.path.join(self.local_path,'__svn_version__.py')
        if os.path.isfile(target):
            return

        def generate_svn_version_py():
            # Write the version file and schedule its removal at exit.
            if not os.path.isfile(target):
                revision = self._get_svn_revision(self.local_path)
                assert revision is not None,'hmm, why I am not inside SVN tree???'
                version = str(revision)
                print 'Creating %s (version=%r)' % (target,version)
                f = open(target,'w')
                f.write('version = %r\n' % (version))
                f.close()

            import atexit
            def rm_file(f=target):
                # Also remove the compiled .pyc alongside the source file.
                try: os.remove(f); print 'removed',f
                except OSError: pass
                try: os.remove(f+'c'); print 'removed',f+'c'
                except OSError: pass
            atexit.register(rm_file)

            return target

        d = os.path.join(*(self.name.split('.')))
        self.add_data_files((d,generate_svn_version_py()))
        return
+
    def make_config_py(self,name='__config__'):
        """ Generate package __config__.py file containing system_info
        information used during building the package.
        """
        # The generator function is passed as the extension 'source'; the
        # build machinery calls it to produce the actual .py file.
        self.add_extension(name,[generate_config_py])
        return
+
def get_cmd(cmdname,_cache={}):
    """Return (and cache) the distutils command object named *cmdname*.

    NOTE: the mutable default argument is used deliberately as a
    per-process cache shared across calls.
    """
    if not _cache.has_key(cmdname):
        import distutils.core
        dist = distutils.core._setup_distribution
        if dist is None:
            from distutils.errors import DistutilsInternalError
            raise DistutilsInternalError,\
                  'setup distribution instance not initialized'
        cmd = dist.get_command_obj(cmdname)
        _cache[cmdname] = cmd
    return _cache[cmdname]
+
def get_scipy_include_dirs():
    """Return the list of directories containing scipy core headers."""
    # scipy_include_dirs are set by scipy/base/setup.py, otherwise []
    include_dirs = Configuration.scipy_include_dirs[:]
    if not include_dirs:
        import scipy
        if scipy.show_core_config is None:
            # running from scipy_core source directory
            include_dirs.append(os.path.join(os.path.dirname(scipy.__file__),
                                             'base','include'))
        else:
            # using installed scipy core headers
            import scipy.base as base
            include_dirs.append(os.path.join(os.path.dirname(base.__file__),'include'))
    # else running scipy/base/setup.py
    return include_dirs
+
+#########################
+
def default_config_dict(name = None, parent_name = None, local_path=None):
    """ Return a configuration dictionary for usage in
    configuration() function defined in file setup_<name>.py.

    Deprecated: construct a Configuration instance instead.
    """
    import warnings
    warnings.warn('Use Configuration(%s,%s,top_path=%s) instead of '\
                  'deprecated default_config_dict(%s,%s,%s)' \
                  % (`name`,`parent_name`,`local_path`,
                     `name`,`parent_name`,`local_path`,
                     ))
    c = Configuration(name, parent_name, local_path)
    return c.todict()
+
+
def dict_append(d,**kws):
    """Merge keyword values into dict *d*, extending existing entries.

    NOTE(review): a key that is absent from *d* stores the passed object
    itself (no copy); existing entries are assumed to be lists.
    """
    for k,v in kws.items():
        if d.has_key(k):
            d[k].extend(v)
        else:
            d[k] = v
+
def appendpath(prefix, path):
    """Join *path* onto *prefix*; for an absolute *path*, first strip the
    part it shares with the absolute form of *prefix*."""
    if os.path.sep != '/':
        # Normalize '/'-separated inputs to the native separator.
        prefix = prefix.replace('/', os.path.sep)
        path = path.replace('/', os.path.sep)
    drive = ''
    if not os.path.isabs(path):
        subpath = path
    else:
        drive = os.path.splitdrive(prefix)[0]
        absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
        pathdrive, path = os.path.splitdrive(path)
        common = os.path.commonprefix([absprefix, path])
        invalid = os.path.join(absprefix[:len(common)],
                               absprefix[len(common):]) != absprefix
        invalid = invalid or os.path.join(path[:len(common)],
                                          path[len(common):]) != path
        if invalid:
            # commonprefix works character-wise and may cut inside a path
            # component; back off to the last complete directory.
            common = os.path.dirname(common)
        subpath = path[len(common):]
        if os.path.isabs(subpath):
            subpath = subpath[1:]
    return os.path.normpath(os.path.join(drive + prefix, subpath))
+
def generate_config_py(extension, build_dir):
    """ Generate <package>/config.py file containing system_info
    information used during building the package.

    Usage:\
        ext = Extension(dot_join(config['name'],'config'),
                        sources=[generate_config_py])
        config['ext_modules'].append(ext)
    """
    from scipy.distutils.system_info import system_info
    from distutils.dir_util import mkpath
    # Target file path mirrors the dotted extension name under build_dir.
    target = os.path.join(*([build_dir]+extension.name.split('.'))) + '.py'
    mkpath(os.path.dirname(target))
    f = open(target,'w')
    f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
    f.write('# It contains system_info results at the time of building this package.\n')
    f.write('__all__ = ["get_info","show"]\n\n')
    # Dump every saved system_info result as a module-level dict.
    for k,i in system_info.saved_results.items():
        f.write('%s=%r\n' % (k,i))
    f.write('\ndef get_info(name): g=globals(); return g.get(name,g.get(name+"_info",{}))\n')
    f.write('''
def show():
    for name,info_dict in globals().items():
        if name[0]=="_" or type(info_dict) is not type({}): continue
        print name+":"
        if not info_dict:
            print "  NOT AVAILABLE"
        for k,v in info_dict.items():
            v = str(v)
            if k==\'sources\' and len(v)>200: v = v[:60]+\' ...\\n... \'+v[-60:]
            print \'    %s = %s\'%(k,v)
        print
    return
    ''')

    f.close()
    return target
+
def generate_svn_version_py(extension, build_dir):
    """ Generate __svn_version__.py file containing SVN
    revision number of a module.

    To use, add the following codelet to setup
    configuration(..) function

      ext = Extension(dot_join(config['name'],'__svn_version__'),
                      sources=[generate_svn_version_py])
      ext.local_path = local_path
      config['ext_modules'].append(ext)

    """
    from distutils import dep_util
    local_path = extension.local_path
    target = os.path.join(build_dir, '__svn_version__.py')
    entries = os.path.join(local_path,'.svn','entries')
    if os.path.isfile(entries):
        # Regenerate only when the SVN entries file is newer than target.
        if not dep_util.newer(entries, target):
            return target
    elif os.path.isfile(target):
        return target

    # NOTE(review): get_svn_revision is not defined anywhere in this
    # module's visible code -- confirm it is provided elsewhere, otherwise
    # this raises NameError at build time.
    revision = get_svn_revision(local_path)
    f = open(target,'w')
    f.write('revision=%s\n' % (revision))
    f.close()
    return target
diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py
new file mode 100644
index 000000000..8fe472eaf
--- /dev/null
+++ b/numpy/distutils/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+from scipy.distutils.core import setup
+from scipy.distutils.misc_util import Configuration
+
def configuration(parent_package='',top_path=None):
    """Build the scipy.distutils package configuration dictionary."""
    config = Configuration('distutils',parent_package,top_path)
    config.add_subpackage('command')
    config.add_subpackage('fcompiler')
    config.add_data_dir('tests')
    config.make_config_py()
    return config.todict()
+
# Allow running this setup.py directly from the package directory.
if __name__ == '__main__':
    setup(**configuration(top_path=''))
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
new file mode 100644
index 000000000..f0cfff1d0
--- /dev/null
+++ b/numpy/distutils/system_info.py
@@ -0,0 +1,1644 @@
+#!/usr/bin/env python
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Currently, the following
+classes are available:
+
+ atlas_info
+ atlas_threads_info
+ atlas_blas_info
+ atlas_blas_threads_info
+ lapack_atlas_info
+ blas_info
+ lapack_info
+ blas_opt_info # usage recommended
+ lapack_opt_info # usage recommended
+ fftw_info,dfftw_info,sfftw_info
+ fftw_threads_info,dfftw_threads_info,sfftw_threads_info
+ djbfft_info
+ x11_info
+ lapack_src_info
+ blas_src_info
+ numpy_info
+ numarray_info
+ scipy_info
+ boost_python_info
+ agg2_info
+ wx_info
+ gdk_pixbuf_xlib_2_info
+ gdk_pixbuf_2_info
+ gdk_x11_2_info
+ gtkp_x11_2_info
+ gtkp_2_info
+ xft_info
+ freetype2_info
+
+Usage:
+ info_dict = get_info(<name>)
+ where <name> is a string 'atlas','x11','fftw','lapack','blas',
+ 'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+ see the definition of get_info() function below.
+
+ Returned info_dict is a dictionary which is compatible with
+ distutils.setup keyword arguments. If info_dict == {}, then the
+ asked resource is not available (system_info could not find it).
+
+ Several *_info classes specify an environment variable to specify
+ the locations of software. When setting the corresponding environment
+ variable to 'None' then the software will be ignored, even when it
+ is available in system.
+
+Global parameters:
+ system_info.search_static_first - search static libraries (.a)
+ in precedence to shared ones (.so, .sl) if enabled.
+ system_info.verbosity - output the results to stdout if enabled.
+
+The file 'site.cfg' in the same directory as this module is read
+for configuration options. The format is that used by ConfigParser (i.e.,
+Windows .INI style). The section DEFAULT has options that are the default
+for each section. The available sections are fftw, atlas, and x11. Appropriate
+defaults are used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. DEFAULT section in site.cfg
+Only the first complete match is returned.
+
+Example:
+----------
+[DEFAULT]
+library_dirs = /usr/lib:/usr/local/lib:/opt/lib
+include_dirs = /usr/include:/usr/local/include:/opt/include
+src_dirs = /usr/local/src:/opt/src
+# search static libraries (.a) in preference to shared ones (.so)
+search_static_first = 0
+
+[fftw]
+fftw_libs = rfftw, fftw
+fftw_opt_libs = rfftw_threaded, fftw_threaded
+# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
+
+[atlas]
+library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
+# for overriding the names of the atlas libraries
+atlas_libs = lapack, f77blas, cblas, atlas
+
+[x11]
+library_dirs = /usr/X11R6/lib
+include_dirs = /usr/X11R6/include
+----------
+
+Authors:
+ Pearu Peterson <pearu@cens.ioc.ee>, February 2002
+ David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+
+__revision__ = '$Id: system_info.py,v 1.1 2005/04/09 19:29:35 pearu Exp $'
+import sys,os,re,types
+import warnings
+from distutils.errors import DistutilsError
+from glob import glob
+import ConfigParser
+from exec_command import find_executable, exec_command, get_pythonexe
+
+from distutils.sysconfig import get_config_vars
+
# Platform-dependent default search directories for libraries, headers
# and sources; overridable via site.cfg or environment variables.
if sys.platform == 'win32':
    default_lib_dirs = ['C:\\'] # probably not very helpful...
    default_include_dirs = []
    default_src_dirs = ['.']
    default_x11_lib_dirs = []
    default_x11_include_dirs = []
    default_intel_dirs = []
else:
    default_lib_dirs = ['/usr/local/lib', '/opt/lib', '/usr/lib',
                        '/sw/lib']
    default_include_dirs = ['/usr/local/include',
                            '/opt/include', '/usr/include',
                            '/sw/include']
    default_src_dirs = ['.','/usr/local/src', '/opt/src','/sw/src']
    default_x11_lib_dirs = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
    default_x11_include_dirs = ['/usr/X11R6/include','/usr/X11/include',
                                '/usr/include']
    default_intel_dirs = [os.environ.get('HOME','.'),
                          '/opt/intel','/usr/local/intel']

# Give the running interpreter's prefix priority in the search paths.
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
    default_lib_dirs.insert(0,os.path.join(sys.prefix, 'lib'))
    default_include_dirs.append(os.path.join(sys.prefix, 'include'))
    default_src_dirs.append(os.path.join(sys.prefix, 'src'))

# Keep only directories that actually exist (filter returns a list
# under Python 2).
default_lib_dirs = filter(os.path.isdir, default_lib_dirs)
default_include_dirs = filter(os.path.isdir, default_include_dirs)
default_src_dirs = filter(os.path.isdir, default_src_dirs)

# Shared-library filename extension, e.g. '.so'.
so_ext = get_config_vars('SO')[0] or ''
+
def get_info(name,notfound_action=0):
    """Return the resource dictionary for *name* (e.g. 'atlas', 'x11').

    notfound_action:
      0 - do nothing
      1 - display warning message
      2 - raise error
    Unknown names fall back to the base system_info class (empty result).
    """
    cl = {'atlas':atlas_info, # use lapack_opt or blas_opt instead
          'atlas_threads':atlas_threads_info, # ditto
          'atlas_blas':atlas_blas_info,
          'atlas_blas_threads':atlas_blas_threads_info,
          'lapack_atlas':lapack_atlas_info, # use lapack_opt instead
          'lapack_atlas_threads':lapack_atlas_threads_info, # ditto
          'mkl':mkl_info,
          'lapack_mkl':lapack_mkl_info, # use lapack_opt instead
          'blas_mkl':blas_mkl_info, # use blas_opt instead
          'x11':x11_info,
          'fftw':fftw_info,
          'dfftw':dfftw_info,
          'sfftw':sfftw_info,
          'fftw_threads':fftw_threads_info,
          'dfftw_threads':dfftw_threads_info,
          'sfftw_threads':sfftw_threads_info,
          'djbfft':djbfft_info,
          'blas':blas_info, # use blas_opt instead
          'lapack':lapack_info, # use lapack_opt instead
          'lapack_src':lapack_src_info,
          'blas_src':blas_src_info,
          'numpy':numpy_info,
          'numeric':numpy_info, # alias to numpy, for build_ext --backends support
          'numarray':numarray_info,
          'scipy':scipy_info,
          'lapack_opt':lapack_opt_info,
          'blas_opt':blas_opt_info,
          'boost_python':boost_python_info,
          'agg2':agg2_info,
          'wx':wx_info,
          'gdk_pixbuf_xlib_2':gdk_pixbuf_xlib_2_info,
          'gdk-pixbuf-xlib-2.0':gdk_pixbuf_xlib_2_info,
          'gdk_pixbuf_2':gdk_pixbuf_2_info,
          'gdk-pixbuf-2.0':gdk_pixbuf_2_info,
          'gdk':gdk_info,
          'gdk_2':gdk_2_info,
          'gdk-2.0':gdk_2_info,
          'gdk_x11_2':gdk_x11_2_info,
          'gdk-x11-2.0':gdk_x11_2_info,
          'gtkp_x11_2':gtkp_x11_2_info,
          'gtk+-x11-2.0':gtkp_x11_2_info,
          'gtkp_2':gtkp_2_info,
          'gtk+-2.0':gtkp_2_info,
          'xft':xft_info,
          'freetype2':freetype2_info,
          }.get(name.lower(),system_info)
    return cl().get_info(notfound_action)
+
# The following exception classes double as user-facing messages:
# system_info.get_info() passes their __doc__ to warnings.warn() (or raises
# with it), so the docstring text is part of runtime behavior and must not
# be reworded.
class NotFoundError(DistutilsError):
    """Some third-party program or library is not found."""

class AtlasNotFoundError(NotFoundError):
    """
    Atlas (http://math-atlas.sourceforge.net/) libraries not found.
    Directories to search for the libraries can be specified in the
    scipy_distutils/site.cfg file (section [atlas]) or by setting
    the ATLAS environment variable."""

class LapackNotFoundError(NotFoundError):
    """
    Lapack (http://www.netlib.org/lapack/) libraries not found.
    Directories to search for the libraries can be specified in the
    scipy_distutils/site.cfg file (section [lapack]) or by setting
    the LAPACK environment variable."""

class LapackSrcNotFoundError(LapackNotFoundError):
    """
    Lapack (http://www.netlib.org/lapack/) sources not found.
    Directories to search for the sources can be specified in the
    scipy_distutils/site.cfg file (section [lapack_src]) or by setting
    the LAPACK_SRC environment variable."""

class BlasNotFoundError(NotFoundError):
    """
    Blas (http://www.netlib.org/blas/) libraries not found.
    Directories to search for the libraries can be specified in the
    scipy_distutils/site.cfg file (section [blas]) or by setting
    the BLAS environment variable."""

class BlasSrcNotFoundError(BlasNotFoundError):
    """
    Blas (http://www.netlib.org/blas/) sources not found.
    Directories to search for the sources can be specified in the
    scipy_distutils/site.cfg file (section [blas_src]) or by setting
    the BLAS_SRC environment variable."""

class FFTWNotFoundError(NotFoundError):
    """
    FFTW (http://www.fftw.org/) libraries not found.
    Directories to search for the libraries can be specified in the
    scipy_distutils/site.cfg file (section [fftw]) or by setting
    the FFTW environment variable."""

class DJBFFTNotFoundError(NotFoundError):
    """
    DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
    Directories to search for the libraries can be specified in the
    scipy_distutils/site.cfg file (section [djbfft]) or by setting
    the DJBFFT environment variable."""

class F2pyNotFoundError(NotFoundError):
    """
    f2py2e (http://cens.ioc.ee/projects/f2py2e/) module not found.
    Get it from above location, install it, and retry setup.py."""

class NumericNotFoundError(NotFoundError):
    """
    Numeric (http://www.numpy.org/) module not found.
    Get it from above location, install it, and retry setup.py."""

class X11NotFoundError(NotFoundError):
    """X11 libraries not found."""
+
+class system_info:
+
+ """ get_info() is the only public method. Don't use others.
+ """
+ section = 'DEFAULT'
+ dir_env_var = None
+ search_static_first = 0 # XXX: disabled by default, may disappear in
+ # future unless it is proved to be useful.
+ verbosity = 1
+ saved_results = {}
+
+ notfounderror = NotFoundError
+
+ def __init__ (self,
+ default_lib_dirs=default_lib_dirs,
+ default_include_dirs=default_include_dirs,
+ verbosity = 1,
+ ):
+ self.__class__.info = {}
+ self.local_prefixes = []
+ defaults = {}
+ defaults['libraries'] = ''
+ defaults['library_dirs'] = os.pathsep.join(default_lib_dirs)
+ defaults['include_dirs'] = os.pathsep.join(default_include_dirs)
+ defaults['src_dirs'] = os.pathsep.join(default_src_dirs)
+ defaults['search_static_first'] = str(self.search_static_first)
+ self.cp = ConfigParser.ConfigParser(defaults)
+ try:
+ f = __file__
+ except NameError,msg:
+ f = sys.argv[0]
+ cf = os.path.join(os.path.split(os.path.abspath(f))[0],
+ 'site.cfg')
+ self.cp.read([cf])
+ if not self.cp.has_section(self.section):
+ self.cp.add_section(self.section)
+ self.search_static_first = self.cp.getboolean(self.section,
+ 'search_static_first')
+ assert isinstance(self.search_static_first, type(0))
+
+ def calc_libraries_info(self):
+ libs = self.get_libraries()
+ dirs = self.get_lib_dirs()
+ info = {}
+ for lib in libs:
+ i = None
+ for d in dirs:
+ i = self.check_libs(d,[lib])
+ if i is not None:
+ break
+ if i is not None:
+ dict_append(info,**i)
+ else:
+ print 'Library %s was not found. Ignoring' % (lib)
+ return info
+
+ def set_info(self,**info):
+ if info:
+ lib_info = self.calc_libraries_info()
+ dict_append(info,**lib_info)
+ self.saved_results[self.__class__.__name__] = info
+
+ def has_info(self):
+ return self.saved_results.has_key(self.__class__.__name__)
+
+    def get_info(self,notfound_action=0):
+        """ Return a dictionary with items that are compatible
+        with scipy_distutils.setup keyword arguments.
+
+        notfound_action:  0 - do nothing when the resource is missing,
+                          1 - issue a warning,
+                          2 - raise the class' notfounderror exception.
+        Detection (calc_info) runs at most once; the result is cached.
+        """
+        flag = 0
+        if not self.has_info():
+            # First request for this resource: run subclass detection.
+            flag = 1
+            if self.verbosity>0:
+                print self.__class__.__name__ + ':'
+            if hasattr(self, 'calc_info'):
+                self.calc_info()
+            if notfound_action:
+                if not self.has_info():
+                    if notfound_action==1:
+                        warnings.warn(self.notfounderror.__doc__)
+                    elif notfound_action==2:
+                        raise self.notfounderror,self.notfounderror.__doc__
+                    else:
+                        raise ValueError,`notfound_action`
+
+            if self.verbosity>0:
+                if not self.has_info():
+                    print ' NOT AVAILABLE'
+                    # NOTE(review): the empty (negative) result is only
+                    # cached when verbosity>0 — confirm this is intended.
+                    self.set_info()
+                else:
+                    print ' FOUND:'
+
+        res = self.saved_results.get(self.__class__.__name__)
+        if self.verbosity>0 and flag:
+            # Pretty-print the freshly detected configuration,
+            # truncating very long source lists.
+            for k,v in res.items():
+                v = str(v)
+                if k=='sources' and len(v)>200: v = v[:60]+' ...\n... '+v[-60:]
+                print ' %s = %s'%(k,v)
+            print
+
+        return res
+
+    def get_paths(self, section, key):
+        """Return an ordered, de-duplicated list of existing directories
+        for *key* ('library_dirs', 'include_dirs', or 'src_dirs').
+
+        Precedence: directories derived from the dir_env_var environment
+        variable, then the [section] config value, then the DEFAULT
+        section.  Setting the environment variable to the string 'None'
+        disables this resource entirely.
+        """
+        dirs = self.cp.get(section, key).split(os.pathsep)
+        env_var = self.dir_env_var
+        if env_var:
+            if type(env_var) is type([]):
+                # A list of candidate env vars: use the first one that is
+                # actually set, falling back to the last entry.
+                e0 = env_var[-1]
+                for e in env_var:
+                    if os.environ.has_key(e):
+                        e0 = e
+                        break
+                if not env_var[0]==e0:
+                    print 'Setting %s=%s' % (env_var[0],e0)
+                env_var = e0
+        if env_var and os.environ.has_key(env_var):
+            d = os.environ[env_var]
+            if d=='None':
+                print 'Disabled',self.__class__.__name__,'(%s is None)' \
+                      % (self.dir_env_var)
+                return []
+            if os.path.isfile(d):
+                # Env var points at a library file: use its directory and,
+                # if a single _lib_names entry exists, infer the library
+                # name from the file's basename (stripping 'lib' prefix).
+                dirs = [os.path.dirname(d)] + dirs
+                l = getattr(self,'_lib_names',[])
+                if len(l)==1:
+                    b = os.path.basename(d)
+                    b = os.path.splitext(b)[0]
+                    if b[:3]=='lib':
+                        print 'Replacing _lib_names[0]==%r with %r' \
+                              % (self._lib_names[0], b[3:])
+                        self._lib_names[0] = b[3:]
+            else:
+                # Env var holds one or more directories; also consider
+                # their 'include' and 'lib' subdirectories.
+                ds = d.split(os.pathsep)
+                ds2 = []
+                for d in ds:
+                    if os.path.isdir(d):
+                        ds2.append(d)
+                        for dd in ['include','lib']:
+                            d1 = os.path.join(d,dd)
+                            if os.path.isdir(d1):
+                                ds2.append(d1)
+                dirs = ds2 + dirs
+        default_dirs = self.cp.get('DEFAULT', key).split(os.pathsep)
+        dirs.extend(default_dirs)
+        ret = []
+        # Keep only existing directories, preserving first-seen order.
+        [ret.append(d) for d in dirs if os.path.isdir(d) and d not in ret]
+        if self.verbosity>1:
+            print '(',key,'=',':'.join(ret),')'
+        return ret
+
+    def get_lib_dirs(self, key='library_dirs'):
+        """Return the candidate library directories for this section."""
+        return self.get_paths(self.section, key)
+
+    def get_include_dirs(self, key='include_dirs'):
+        """Return the candidate header directories for this section."""
+        return self.get_paths(self.section, key)
+
+    def get_src_dirs(self, key='src_dirs'):
+        """Return the candidate source directories for this section."""
+        return self.get_paths(self.section, key)
+
+    def get_libs(self, key, default):
+        """Return the library names from config option *key*, or *default*.
+
+        The config value is a comma-separated string; *default* may be a
+        string (wrapped in a list), a list, or empty/false (yields []).
+        """
+        try:
+            libs = self.cp.get(self.section, key)
+        except ConfigParser.NoOptionError:
+            if not default:
+                return []
+            if type(default) is type(''):
+                return [default]
+            return default
+        # Split on commas, strip whitespace, drop empty entries.
+        return [b for b in [a.strip() for a in libs.split(',')] if b]
+
+    def get_libraries(self, key='libraries'):
+        """Return the 'libraries' config option as a list (default [])."""
+        return self.get_libs(key,'')
+
+    def check_libs(self,lib_dir,libs,opt_libs =[]):
+        """ If static or shared libraries are available then return
+        their info dictionary.
+
+        Each extension is tried on its own, in the order given by
+        search_static_first, so all libraries must share one extension.
+        Returns None when no extension satisfies every name in *libs*.
+        """
+        if self.search_static_first:
+            exts = ['.a',so_ext]
+        else:
+            exts = [so_ext,'.a']
+        if sys.platform=='cygwin':
+            # Cygwin import libraries.
+            exts.append('.dll.a')
+        for ext in exts:
+            info = self._check_libs(lib_dir,libs,opt_libs,[ext])
+            if info is not None: return info
+        return
+
+    def check_libs2(self,lib_dir,libs,opt_libs =[]):
+        """ If static or shared libraries are available then return
+        their info dictionary.
+
+        Unlike check_libs, all extensions are accepted simultaneously,
+        so the found libraries may mix static and shared files.
+        """
+        if self.search_static_first:
+            exts = ['.a',so_ext]
+        else:
+            exts = [so_ext,'.a']
+        if sys.platform=='cygwin':
+            exts.append('.dll.a')
+        info = self._check_libs(lib_dir,libs,opt_libs,exts)
+        if info is not None: return info
+        return
+
+    def _lib_list(self, lib_dir, libs, exts):
+        """Return the files 'lib<name><ext>' found in lib_dir.
+
+        For each name the first matching extension wins; names with no
+        match are silently omitted from the result.
+        """
+        assert type(lib_dir) is type('')
+        liblist = []
+        for l in libs:
+            for ext in exts:
+                p = self.combine_paths(lib_dir, 'lib'+l+ext)
+                if p:
+                    assert len(p)==1
+                    liblist.append(p[0])
+                    break
+        return liblist
+
+    def _extract_lib_names(self,libs):
+        """Map library file paths back to linker names.
+
+        Strips the directory, the extension, and the leading 'lib'
+        prefix (the [3:] slice).
+        """
+        return [os.path.splitext(os.path.basename(p))[0][3:] \
+                for p in libs]
+
+    def _check_libs(self,lib_dir,libs, opt_libs, exts):
+        """Return an info dict if ALL of *libs* exist in lib_dir.
+
+        Optional libraries are appended only when every one of them is
+        also present; otherwise they are ignored.  Returns None when any
+        required library is missing.
+        """
+        found_libs = self._lib_list(lib_dir, libs, exts)
+        if len(found_libs) == len(libs):
+            found_libs = self._extract_lib_names(found_libs)
+            info = {'libraries' : found_libs, 'library_dirs' : [lib_dir]}
+            opt_found_libs = self._lib_list(lib_dir, opt_libs, exts)
+            if len(opt_found_libs) == len(opt_libs):
+                opt_found_libs = self._extract_lib_names(opt_found_libs)
+                info['libraries'].extend(opt_found_libs)
+            return info
+
+    def combine_paths(self,*args):
+        """Glob-join path components via the module-level combine_paths,
+        forwarding this instance's verbosity."""
+        return combine_paths(*args,**{'verbosity':self.verbosity})
+
+
+class fftw_info(system_info):
+    """Detect FFTW, preferring version 3 and falling back to version 2."""
+    #variables to override
+    section = 'fftw'
+    dir_env_var = 'FFTW'
+    notfounderror = FFTWNotFoundError
+    # Candidate versions, tried in order by calc_info; each entry gives
+    # the libraries, headers, and macros defining a usable installation.
+    ver_info = [ { 'name':'fftw3',
+                    'libs':['fftw3'],
+                    'includes':['fftw3.h'],
+                    'macros':[('SCIPY_FFTW3_H',None)]},
+                  { 'name':'fftw2',
+                    'libs':['rfftw', 'fftw'],
+                    'includes':['fftw.h','rfftw.h'],
+                    'macros':[('SCIPY_FFTW_H',None)]}]
+
+    def __init__(self):
+        system_info.__init__(self)
+
+    def calc_ver_info(self,ver_param):
+        """Returns True on successful version detection, else False"""
+        lib_dirs = self.get_lib_dirs()
+        incl_dirs = self.get_include_dirs()
+        incl_dir = None
+        libs = self.get_libs(self.section+'_libs', ver_param['libs'])
+        info = None
+        # First directory holding every required library wins.
+        for d in lib_dirs:
+            r = self.check_libs(d,libs)
+            if r is not None:
+                info = r
+                break
+        if info is not None:
+            # Headers must all live in a single include directory.
+            flag = 0
+            for d in incl_dirs:
+                if len(self.combine_paths(d,ver_param['includes']))==len(ver_param['includes']):
+                    dict_append(info,include_dirs=[d])
+                    flag = 1
+                    incl_dirs = [d]
+                    incl_dir = d
+                    break
+            if flag:
+                dict_append(info,define_macros=ver_param['macros'])
+            else:
+                info = None
+        if info is not None:
+            self.set_info(**info)
+            return True
+        else:
+            if self.verbosity>0:
+                print ' %s not found' % (ver_param['name'])
+            return False
+
+    def calc_info(self):
+        # Try each candidate version until one is fully detected.
+        for i in self.ver_info:
+            if self.calc_ver_info(i):
+                break
+
+class dfftw_info(fftw_info):
+    """Detect the double-precision FFTW 2 libraries (dfftw/drfftw)."""
+    section = 'fftw'
+    dir_env_var = 'FFTW'
+    ver_info = [ { 'name':'dfftw',
+                    'libs':['drfftw','dfftw'],
+                    'includes':['dfftw.h','drfftw.h'],
+                    'macros':[('SCIPY_DFFTW_H',None)]} ]
+
+class sfftw_info(fftw_info):
+    """Detect the single-precision FFTW 2 libraries (sfftw/srfftw)."""
+    section = 'fftw'
+    dir_env_var = 'FFTW'
+    ver_info = [ { 'name':'sfftw',
+                    'libs':['srfftw','sfftw'],
+                    'includes':['sfftw.h','srfftw.h'],
+                    'macros':[('SCIPY_SFFTW_H',None)]} ]
+
+class fftw_threads_info(fftw_info):
+    """Detect the threaded FFTW 2 libraries."""
+    section = 'fftw'
+    dir_env_var = 'FFTW'
+    ver_info = [ { 'name':'fftw threads',
+                    'libs':['rfftw_threads','fftw_threads'],
+                    'includes':['fftw_threads.h','rfftw_threads.h'],
+                    'macros':[('SCIPY_FFTW_THREADS_H',None)]} ]
+
+class dfftw_threads_info(fftw_info):
+    """Detect the threaded double-precision FFTW 2 libraries."""
+    section = 'fftw'
+    dir_env_var = 'FFTW'
+    ver_info = [ { 'name':'dfftw threads',
+                    'libs':['drfftw_threads','dfftw_threads'],
+                    'includes':['dfftw_threads.h','drfftw_threads.h'],
+                    'macros':[('SCIPY_DFFTW_THREADS_H',None)]} ]
+
+class sfftw_threads_info(fftw_info):
+    """Detect the threaded single-precision FFTW 2 libraries."""
+    section = 'fftw'
+    dir_env_var = 'FFTW'
+    ver_info = [ { 'name':'sfftw threads',
+                    'libs':['srfftw_threads','sfftw_threads'],
+                    'includes':['sfftw_threads.h','srfftw_threads.h'],
+                    'macros':[('SCIPY_SFFTW_THREADS_H',None)]} ]
+
+class djbfft_info(system_info):
+    """Detect D. J. Bernstein's djbfft library."""
+    section = 'djbfft'
+    dir_env_var = 'DJBFFT'
+    notfounderror = DJBFFTNotFoundError
+
+    def get_paths(self, section, key):
+        # Also look inside 'djbfft' subdirectories of each base path.
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend(self.combine_paths(d,['djbfft'])+[d])
+        return [ d for d in dirs if os.path.isdir(d) ]
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+        incl_dirs = self.get_include_dirs()
+        info = None
+        # djbfft ships either as a bare 'djbfft.a' object archive or as
+        # a conventionally named 'libdjbfft.a'.
+        for d in lib_dirs:
+            p = self.combine_paths (d,['djbfft.a'])
+            if p:
+                info = {'extra_objects':p}
+                break
+            p = self.combine_paths (d,['libdjbfft.a'])
+            if p:
+                info = {'libraries':['djbfft'],'library_dirs':[d]}
+                break
+        if info is None:
+            return
+        # Require both headers in one include directory.
+        for d in incl_dirs:
+            if len(self.combine_paths(d,['fftc8.h','fftfreq.h']))==2:
+                dict_append(info,include_dirs=[d],
+                            define_macros=[('SCIPY_DJBFFT_H',None)])
+                self.set_info(**info)
+                return
+        return
+
+class mkl_info(system_info):
+    """Detect Intel MKL, locating its root via MKLROOT or the default
+    Intel install directories and choosing a CPU-specific library."""
+    section = 'mkl'
+    dir_env_var = 'MKL'
+    _lib_mkl = ['mkl','vml','guide']
+
+    def get_mkl_rootdir(self):
+        """Return the MKL installation root, or None if not found."""
+        mklroot = os.environ.get('MKLROOT',None)
+        if mklroot is not None:
+            return mklroot
+        # Scan known Intel install prefixes for an mkl*/lib directory.
+        for d in default_intel_dirs:
+            dirs = glob(os.path.join(d,'mkl','*')) + glob(os.path.join(d,'mkl*'))
+            for d in dirs:
+                if os.path.isdir(os.path.join(d,'lib')):
+                    return d
+        return None
+
+    def __init__(self):
+        mklroot = self.get_mkl_rootdir()
+        if mklroot is None:
+            system_info.__init__(self)
+        else:
+            # Pick the platform subdirectory and CPU-specific library.
+            from cpuinfo import cpu
+            if cpu.is_Itanium():
+                plt = '64'
+                l = 'mkl_ipf'
+            elif cpu.is_Xeon():
+                plt = 'em64t'
+                l = 'mkl_em64t'
+            else:
+                plt = '32'
+                l = 'mkl_ia32'
+            if l not in self._lib_mkl:
+                self._lib_mkl.insert(0,l)
+            system_info.__init__(self,
+                default_lib_dirs=[os.path.join(mklroot,'lib',plt)],
+                default_include_dirs=[os.path.join(mklroot,'include')])
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+        incl_dirs = self.get_include_dirs()
+        mkl_libs = self.get_libs('mkl_libs',self._lib_mkl)
+        mkl = None
+        for d in lib_dirs:
+            # check_libs2: static and shared may be mixed.
+            mkl = self.check_libs2(d,mkl_libs)
+            if mkl is not None:
+                break
+        if mkl is None:
+            return
+        info = {}
+        dict_append(info,**mkl)
+        # MKL requires pthreads at link time.
+        dict_append(info,libraries = ['pthread'], include_dirs = incl_dirs)
+        self.set_info(**info)
+
+class lapack_mkl_info(mkl_info):
+    """LAPACK flavour of MKL: base MKL info plus mkl_lapack library."""
+
+    def calc_info(self):
+        # Reuse (and require) the base MKL detection result.
+        mkl = get_info('mkl')
+        if not mkl:
+            return
+        lapack_libs = self.get_libs('lapack_libs',['mkl_lapack'])
+        info = {'libraries': lapack_libs}
+        dict_append(info,**mkl)
+        self.set_info(**info)
+
+class blas_mkl_info(mkl_info):
+    # BLAS flavour of MKL; the base mkl_info detection already suffices.
+    pass
+
+class atlas_info(system_info):
+    """Detect ATLAS together with a (possibly ATLAS-provided) LAPACK."""
+    section = 'atlas'
+    dir_env_var = 'ATLAS'
+    _lib_names = ['f77blas','cblas']
+    # FreeBSD ships the reentrant variants under different names.
+    if sys.platform[:7]=='freebsd':
+        _lib_atlas = ['atlas_r']
+        _lib_lapack = ['alapack_r']
+    else:
+        _lib_atlas = ['atlas']
+        _lib_lapack = ['lapack']
+
+    notfounderror = AtlasNotFoundError
+
+    def get_paths(self, section, key):
+        # Also search ATLAS' conventional arch-specific subdirectories.
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend(self.combine_paths(d,['atlas*','ATLAS*',
+                                              'sse','3dnow','sse2'])+[d])
+        return [ d for d in dirs if os.path.isdir(d) ]
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+        info = {}
+        atlas_libs = self.get_libs('atlas_libs',
+                                   self._lib_names + self._lib_atlas)
+        lapack_libs = self.get_libs('lapack_libs',self._lib_lapack)
+        atlas = None
+        lapack = None
+        atlas_1 = None
+        for d in lib_dirs:
+            atlas = self.check_libs(d,atlas_libs,[])
+            # NOTE(review): lapack_atlas is computed but never used here;
+            # the 'lapack_atlas' membership test below inspects
+            # atlas['libraries'] instead.
+            lapack_atlas = self.check_libs(d,['lapack_atlas'],[])
+            if atlas is not None:
+                # Look for LAPACK next to or inside the ATLAS directory.
+                lib_dirs2 = self.combine_paths(d,['atlas*','ATLAS*'])+[d]
+                for d2 in lib_dirs2:
+                    lapack = self.check_libs(d2,lapack_libs,[])
+                    if lapack is not None:
+                        break
+                else:
+                    lapack = None
+                if lapack is not None:
+                    break
+            if atlas:
+                # Remember the first ATLAS hit in case no directory
+                # yields both ATLAS and LAPACK.
+                atlas_1 = atlas
+        # NOTE(review): leftover debug output.
+        print self.__class__
+        if atlas is None:
+            atlas = atlas_1
+        if atlas is None:
+            return
+        include_dirs = self.get_include_dirs()
+        # cblas.h presence switches the reported language to C.
+        h = (self.combine_paths(lib_dirs+include_dirs,'cblas.h') or [None])[0]
+        if h:
+            h = os.path.dirname(h)
+            dict_append(info,include_dirs=[h])
+        info['language'] = 'c'
+        if lapack is not None:
+            dict_append(info,**lapack)
+            dict_append(info,**atlas)
+        elif 'lapack_atlas' in atlas['libraries']:
+            # ATLAS' own partial LAPACK replacement is present.
+            dict_append(info,**atlas)
+            dict_append(info,define_macros=[('ATLAS_WITH_LAPACK_ATLAS',None)])
+            self.set_info(**info)
+            return
+        else:
+            dict_append(info,**atlas)
+            dict_append(info,define_macros=[('ATLAS_WITHOUT_LAPACK',None)])
+            message = """
+*********************************************************************
+    Could not find lapack library within the ATLAS installation.
+*********************************************************************
+"""
+            warnings.warn(message)
+            self.set_info(**info)
+            return
+
+        # Check if lapack library is complete, only warn if it is not.
+        # A full LAPACK build is expected to exceed ~4 MB; smaller files
+        # are usually the ATLAS subset only.
+        lapack_dir = lapack['library_dirs'][0]
+        lapack_name = lapack['libraries'][0]
+        lapack_lib = None
+        for e in ['.a',so_ext]:
+            fn = os.path.join(lapack_dir,'lib'+lapack_name+e)
+            if os.path.exists(fn):
+                lapack_lib = fn
+                break
+        if lapack_lib is not None:
+            sz = os.stat(lapack_lib)[6]
+            if sz <= 4000*1024:
+                message = """
+*********************************************************************
+    Lapack library (from ATLAS) is probably incomplete:
+      size of %s is %sk (expected >4000k)
+
+    Follow the instructions in the KNOWN PROBLEMS section of the file
+    scipy/INSTALL.txt.
+*********************************************************************
+""" % (lapack_lib,sz/1024)
+                warnings.warn(message)
+            else:
+                info['language'] = 'f77'
+
+        self.set_info(**info)
+
+class atlas_blas_info(atlas_info):
+    """BLAS-only ATLAS detection (no LAPACK search)."""
+    _lib_names = ['f77blas','cblas']
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+        info = {}
+        atlas_libs = self.get_libs('atlas_libs',
+                                   self._lib_names + self._lib_atlas)
+        atlas = None
+        for d in lib_dirs:
+            atlas = self.check_libs(d,atlas_libs,[])
+            if atlas is not None:
+                break
+        if atlas is None:
+            return
+        include_dirs = self.get_include_dirs()
+        # cblas.h presence marks the interface as C.
+        h = (self.combine_paths(lib_dirs+include_dirs,'cblas.h') or [None])[0]
+        if h:
+            h = os.path.dirname(h)
+            dict_append(info,include_dirs=[h])
+        info['language'] = 'c'
+
+        dict_append(info,**atlas)
+
+        self.set_info(**info)
+        return
+
+
+class atlas_threads_info(atlas_info):
+    """Threaded ATLAS variant (pt-prefixed libraries, PTATLAS env var)."""
+    dir_env_var = ['PTATLAS','ATLAS']
+    _lib_names = ['ptf77blas','ptcblas']
+
+class atlas_blas_threads_info(atlas_blas_info):
+    """Threaded BLAS-only ATLAS variant."""
+    dir_env_var = ['PTATLAS','ATLAS']
+    _lib_names = ['ptf77blas','ptcblas']
+
+class lapack_atlas_info(atlas_info):
+    """ATLAS variant that also requires the lapack_atlas library."""
+    _lib_names = ['lapack_atlas'] + atlas_info._lib_names
+
+class lapack_atlas_threads_info(atlas_threads_info):
+    """Threaded ATLAS variant that also requires lapack_atlas."""
+    _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
+
+class lapack_info(system_info):
+    """Detect a plain (Fortran 77) LAPACK library."""
+    section = 'lapack'
+    dir_env_var = 'LAPACK'
+    _lib_names = ['lapack']
+    notfounderror = LapackNotFoundError
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+
+        lapack_libs = self.get_libs('lapack_libs', self._lib_names)
+        # for/else: bail out when no directory holds the library.
+        for d in lib_dirs:
+            lapack = self.check_libs(d,lapack_libs,[])
+            if lapack is not None:
+                info = lapack
+                break
+        else:
+            return
+        info['language'] = 'f77'
+        self.set_info(**info)
+
+class lapack_src_info(system_info):
+    """Locate a LAPACK source tree and enumerate the Fortran files
+    needed to build it (fallback when no binary LAPACK is found)."""
+    section = 'lapack_src'
+    dir_env_var = 'LAPACK_SRC'
+    notfounderror = LapackSrcNotFoundError
+
+    def get_paths(self, section, key):
+        # Also descend into LAPACK*/SRC and SRC subdirectories.
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d,['LAPACK*/SRC','SRC']))
+        return [ d for d in dirs if os.path.isdir(d) ]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        # dgesv.f is used as the marker of a LAPACK source directory.
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d,'dgesv.f')):
+                src_dir = d
+                break
+        if not src_dir:
+            #XXX: Get sources from netlib. May be ask first.
+            return
+        # The following is extracted from LAPACK-3.0/SRC/Makefile
+        # Routine name stems; precision prefixes are added below.
+        allaux='''
+        ilaenv ieeeck lsame lsamen xerbla
+        ''' # *.f
+        laux = '''
+        bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
+        laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
+        lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
+        larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
+        lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
+        lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
+        stebz stedc steqr sterf
+        ''' # [s|d]*.f
+        lasrc = '''
+        gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
+        gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
+        gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
+        geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
+        gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
+        gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
+        ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
+        hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
+        lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
+        lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
+        laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
+        lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
+        latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
+        pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
+        potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
+        pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
+        spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
+        sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
+        tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
+        trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
+        tzrqf tzrzf
+        ''' # [s|c|d|z]*.f
+        sd_lasrc = '''
+        laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
+        org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
+        orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
+        ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
+        sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
+        stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
+        sygvx sytd2 sytrd
+        ''' # [s|d]*.f
+        cz_lasrc = '''
+        bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
+        heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
+        hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
+        hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
+        hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
+        laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
+        laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
+        spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
+        ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
+        unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
+        ''' # [c|z]*.f
+        #######
+        # Assemble per-precision file lists by prefixing the stems.
+        sclaux = laux + ' econd ' # s*.f
+        dzlaux = laux + ' secnd ' # d*.f
+        slasrc = lasrc + sd_lasrc # s*.f
+        dlasrc = lasrc + sd_lasrc # d*.f
+        clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
+        zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
+        oclasrc = ' icmax1 scsum1 ' # *.f
+        ozlasrc = ' izmax1 dzsum1 ' # *.f
+        sources = ['s%s.f'%f for f in (sclaux+slasrc).split()] \
+                  + ['d%s.f'%f for f in (dzlaux+dlasrc).split()] \
+                  + ['c%s.f'%f for f in (clasrc).split()] \
+                  + ['z%s.f'%f for f in (zlasrc).split()] \
+                  + ['%s.f'%f for f in (allaux+oclasrc+ozlasrc).split()]
+        sources = [os.path.join(src_dir,f) for f in sources]
+        #XXX: should we check here actual existence of source files?
+        info = {'sources':sources,'language':'f77'}
+        self.set_info(**info)
+
+# C source for a tiny throwaway extension module: importing it makes
+# ATLAS print its build info via ATL_buildinfo(), which
+# get_atlas_version() below captures and parses.
+atlas_version_c_text = r'''
+/* This file is generated from scipy_distutils/system_info.py */
+#ifdef __CPLUSPLUS__
+extern "C" {
+#endif
+#include "Python.h"
+static PyMethodDef module_methods[] = { {NULL,NULL} };
+DL_EXPORT(void) initatlas_version(void) {
+  void ATL_buildinfo(void);
+  ATL_buildinfo();
+  Py_InitModule("atlas_version", module_methods);
+}
+#ifdef __CPLUSCPLUS__
+}
+#endif
+'''
+
+def _get_build_temp():
+    """Return distutils' platform-specific build/temp.* directory path."""
+    from distutils.util import get_platform
+    plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+    return os.path.join('build','temp'+plat_specifier)
+
+def get_atlas_version(**config):
+    """Determine the installed ATLAS version string, or None.
+
+    Builds and imports (in a subprocess) a tiny extension module that
+    calls ATL_buildinfo(), then parses the version out of its output.
+    *config* is passed through as Extension keyword arguments (library
+    dirs etc. from the detected ATLAS info).
+    """
+    os.environ['NO_SCIPY_IMPORT']='get_atlas_version'
+    from core import Extension, setup
+    from misc_util import get_cmd
+    import log
+    # Hash of the config makes the generated source name unique per
+    # configuration, so different ATLAS setups don't reuse stale builds.
+    magic = hex(hash(`config`))
+    def atlas_version_c(extension, build_dir,magic=magic):
+        # Source generator callback: (re)write the probe C file unless a
+        # newer-than-this-module copy already exists.
+        source = os.path.join(build_dir,'atlas_version_%s.c' % (magic))
+        if os.path.isfile(source):
+            from distutils.dep_util import newer
+            if newer(source,__file__):
+                return source
+        f = open(source,'w')
+        f.write(atlas_version_c_text)
+        f.close()
+        return source
+    ext = Extension('atlas_version',
+                    sources=[atlas_version_c],
+                    **config)
+    build_dir = _get_build_temp()
+    extra_args = ['--build-lib',build_dir]
+    # Forward any --compiler= option from the original command line.
+    for a in sys.argv:
+        if re.match('[-][-]compiler[=]',a):
+            extra_args.append(a)
+    import distutils.core
+    # Temporarily clear the global distribution so the nested setup()
+    # call does not interfere with the outer build.
+    old_dist = distutils.core._setup_distribution
+    distutils.core._setup_distribution = None
+    try:
+        dist = setup(ext_modules=[ext],
+                     script_name = 'get_atlas_version',
+                     script_args = ['build_src','build_ext']+extra_args)
+    except Exception,msg:
+        print "##### msg: %s" % msg
+        if not msg:
+            msg = "Unknown Exception"
+        log.warn(msg)
+        return None
+    distutils.core._setup_distribution = old_dist
+
+    from distutils.sysconfig import get_config_var
+    so_ext = get_config_var('SO')
+    target = os.path.join(build_dir,'atlas_version'+so_ext)
+    # Import the built module in a subprocess and capture its output.
+    cmd = [get_pythonexe(),'-c',
+           '"import imp,os;os.environ[\\"NO_SCIPY_IMPORT\\"]='\
+           '\\"system_info.get_atlas_version:load atlas_version\\";'\
+           'imp.load_dynamic(\\"atlas_version\\",\\"%s\\")"'\
+           % (os.path.basename(target))]
+    s,o = exec_command(cmd,execute_in=os.path.dirname(target),use_tee=0)
+    atlas_version = None
+    if not s:
+        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o)
+        if m:
+            atlas_version = m.group('version')
+    if atlas_version is None:
+        # Very old ATLAS builds lack ATL_buildinfo entirely.
+        if re.search(r'undefined symbol: ATL_buildinfo',o,re.M):
+            atlas_version = '3.2.1_pre3.3.6'
+        else:
+            print 'Command:',' '.join(cmd)
+            print 'Status:',s
+            print 'Output:',o
+    return atlas_version
+
+
+class lapack_opt_info(system_info):
+    """Best available LAPACK: Accelerate/vecLib (Darwin), then MKL,
+    then ATLAS (threaded preferred), falling back to plain LAPACK or
+    building it from source."""
+
+    def calc_info(self):
+
+        # On Mac OS X, prefer the system frameworks unless the user
+        # explicitly asked for ATLAS via the environment.
+        if sys.platform=='darwin' and not os.environ.get('ATLAS',None):
+            args = []
+            link_args = []
+            if os.path.exists('/System/Library/Frameworks/Accelerate.framework/'):
+                args.extend(['-faltivec'])
+                link_args.extend(['-Wl,-framework','-Wl,Accelerate'])
+            elif os.path.exists('/System/Library/Frameworks/vecLib.framework/'):
+                args.extend(['-faltivec'])
+                link_args.extend(['-Wl,-framework','-Wl,vecLib'])
+            if args:
+                self.set_info(extra_compile_args=args,
+                              extra_link_args=link_args,
+                              define_macros=[('NO_ATLAS_INFO',3)])
+                return
+
+        lapack_mkl_info = get_info('lapack_mkl')
+        if lapack_mkl_info:
+            self.set_info(**lapack_mkl_info)
+            return
+
+        atlas_info = get_info('atlas_threads')
+        if not atlas_info:
+            atlas_info = get_info('atlas')
+        #atlas_info = {} ## uncomment for testing
+        atlas_version = None
+        need_lapack = 0
+        need_blas = 0
+        info = {}
+        if atlas_info:
+            # Record the detected ATLAS version (or its absence) as a
+            # preprocessor macro for the C sources.
+            version_info = atlas_info.copy()
+            atlas_version = get_atlas_version(**version_info)
+            if not atlas_info.has_key('define_macros'):
+                atlas_info['define_macros'] = []
+            if atlas_version is None:
+                atlas_info['define_macros'].append(('NO_ATLAS_INFO',2))
+            else:
+                atlas_info['define_macros'].append(('ATLAS_INFO',
+                                                    '"\\"%s\\""' % atlas_version))
+            if atlas_version=='3.2.1_pre3.3.6':
+                atlas_info['define_macros'].append(('NO_ATLAS_INFO',4))
+            l = atlas_info.get('define_macros',[])
+            # ATLAS without a full LAPACK still needs a real LAPACK.
+            if ('ATLAS_WITH_LAPACK_ATLAS',None) in l \
+                   or ('ATLAS_WITHOUT_LAPACK',None) in l:
+                need_lapack = 1
+            info = atlas_info
+        else:
+            warnings.warn(AtlasNotFoundError.__doc__)
+            need_blas = 1
+            need_lapack = 1
+            dict_append(info,define_macros=[('NO_ATLAS_INFO',1)])
+
+        if need_lapack:
+            lapack_info = get_info('lapack')
+            #lapack_info = {} ## uncomment for testing
+            if lapack_info:
+                dict_append(info,**lapack_info)
+            else:
+                warnings.warn(LapackNotFoundError.__doc__)
+                # Last resort: build LAPACK from a source checkout.
+                lapack_src_info = get_info('lapack_src')
+                if not lapack_src_info:
+                    warnings.warn(LapackSrcNotFoundError.__doc__)
+                    return
+                dict_append(info,libraries=[('flapack_src',lapack_src_info)])
+
+        if need_blas:
+            blas_info = get_info('blas')
+            #blas_info = {} ## uncomment for testing
+            if blas_info:
+                dict_append(info,**blas_info)
+            else:
+                warnings.warn(BlasNotFoundError.__doc__)
+                # Last resort: build BLAS from a source checkout.
+                blas_src_info = get_info('blas_src')
+                if not blas_src_info:
+                    warnings.warn(BlasSrcNotFoundError.__doc__)
+                    return
+                dict_append(info,libraries=[('fblas_src',blas_src_info)])
+
+        self.set_info(**info)
+        return
+
+
+class blas_opt_info(system_info):
+    """Best available BLAS: Accelerate/vecLib (Darwin), then MKL, then
+    ATLAS (threaded preferred), falling back to plain BLAS or building
+    it from source."""
+
+    def calc_info(self):
+
+        # On Mac OS X, prefer the system frameworks unless the user
+        # explicitly asked for ATLAS via the environment.
+        if sys.platform=='darwin' and not os.environ.get('ATLAS',None):
+            args = []
+            link_args = []
+            if os.path.exists('/System/Library/Frameworks/Accelerate.framework/'):
+                args.extend(['-faltivec',
+                             '-I/System/Library/Frameworks/vecLib.framework/Headers',
+                             ])
+                link_args.extend(['-Wl,-framework','-Wl,Accelerate'])
+            elif os.path.exists('/System/Library/Frameworks/vecLib.framework/'):
+                args.extend(['-faltivec',
+                             '-I/System/Library/Frameworks/vecLib.framework/Headers',
+                             ])
+                link_args.extend(['-Wl,-framework','-Wl,vecLib'])
+            if args:
+                self.set_info(extra_compile_args=args,
+                              extra_link_args=link_args,
+                              define_macros=[('NO_ATLAS_INFO',3)])
+                return
+
+        blas_mkl_info = get_info('blas_mkl')
+        if blas_mkl_info:
+            self.set_info(**blas_mkl_info)
+            return
+
+        atlas_info = get_info('atlas_blas_threads')
+        if not atlas_info:
+            atlas_info = get_info('atlas_blas')
+        atlas_version = None
+        need_blas = 0
+        info = {}
+        if atlas_info:
+            # Record the detected ATLAS version (or its absence) as a
+            # preprocessor macro for the C sources.
+            version_info = atlas_info.copy()
+            atlas_version = get_atlas_version(**version_info)
+            if not atlas_info.has_key('define_macros'):
+                atlas_info['define_macros'] = []
+            if atlas_version is None:
+                atlas_info['define_macros'].append(('NO_ATLAS_INFO',2))
+            else:
+                atlas_info['define_macros'].append(('ATLAS_INFO',
+                                                    '"\\"%s\\""' % atlas_version))
+            info = atlas_info
+        else:
+            warnings.warn(AtlasNotFoundError.__doc__)
+            need_blas = 1
+            dict_append(info,define_macros=[('NO_ATLAS_INFO',1)])
+
+        if need_blas:
+            blas_info = get_info('blas')
+            if blas_info:
+                dict_append(info,**blas_info)
+            else:
+                warnings.warn(BlasNotFoundError.__doc__)
+                # Last resort: build BLAS from a source checkout.
+                blas_src_info = get_info('blas_src')
+                if not blas_src_info:
+                    warnings.warn(BlasSrcNotFoundError.__doc__)
+                    return
+                dict_append(info,libraries=[('fblas_src',blas_src_info)])
+
+        self.set_info(**info)
+        return
+
+
+class blas_info(system_info):
+    """Detect a plain (Fortran 77) BLAS library."""
+    section = 'blas'
+    dir_env_var = 'BLAS'
+    _lib_names = ['blas']
+    notfounderror = BlasNotFoundError
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+
+        blas_libs = self.get_libs('blas_libs', self._lib_names)
+        # for/else: bail out when no directory holds the library.
+        for d in lib_dirs:
+            blas = self.check_libs(d,blas_libs,[])
+            if blas is not None:
+                info = blas
+                break
+        else:
+            return
+        info['language'] = 'f77' # XXX: is it generally true?
+        self.set_info(**info)
+
+
+class blas_src_info(system_info):
+    """Locate a reference-BLAS source tree and enumerate the Fortran
+    files needed to build it (fallback when no binary BLAS exists)."""
+    section = 'blas_src'
+    dir_env_var = 'BLAS_SRC'
+    notfounderror = BlasSrcNotFoundError
+
+    def get_paths(self, section, key):
+        # Also descend into 'blas' subdirectories.
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d,['blas']))
+        return [ d for d in dirs if os.path.isdir(d) ]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        # daxpy.f is used as the marker of a BLAS source directory.
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d,'daxpy.f')):
+                src_dir = d
+                break
+        if not src_dir:
+            #XXX: Get sources from netlib. May be ask first.
+            return
+        # Routine names by BLAS level (vector, matrix-vector, matrix).
+        blas1 = '''
+        caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
+        dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
+        srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
+        dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
+        snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
+        '''
+        blas2 = '''
+        cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
+        chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
+        dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
+        sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
+        stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
+        zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
+        ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
+        '''
+        blas3 = '''
+        cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
+        dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
+        ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
+        '''
+        sources = [os.path.join(src_dir,f+'.f') \
+                   for f in (blas1+blas2+blas3).split()]
+        #XXX: should we check here actual existence of source files?
+        info = {'sources':sources,'language':'f77'}
+        self.set_info(**info)
+
+class x11_info(system_info):
+    """Detect the X11 client library and its headers (non-Windows only)."""
+    section = 'x11'
+    notfounderror = X11NotFoundError
+
+    def __init__(self):
+        system_info.__init__(self,
+                             default_lib_dirs=default_x11_lib_dirs,
+                             default_include_dirs=default_x11_include_dirs)
+
+    def calc_info(self):
+        if sys.platform in ['win32']:
+            return
+        lib_dirs = self.get_lib_dirs()
+        include_dirs = self.get_include_dirs()
+        x11_libs = self.get_libs('x11_libs', ['X11'])
+        # for/else: bail out when libX11 is nowhere to be found.
+        for lib_dir in lib_dirs:
+            info = self.check_libs(lib_dir, x11_libs, [])
+            if info is not None:
+                break
+        else:
+            return
+        # X11/X.h marks a usable include directory.
+        inc_dir = None
+        for d in include_dirs:
+            if self.combine_paths(d, 'X11/X.h'):
+                inc_dir = d
+                break
+        if inc_dir is not None:
+            dict_append(info, include_dirs=[inc_dir])
+        self.set_info(**info)
+
+class numpy_info(system_info):
+    """Detect an installed array module (Numeric by default; subclasses
+    override *modulename*) by importing it and locating its headers."""
+    section = 'numpy'
+    modulename = 'Numeric'
+    notfounderror = NumericNotFoundError
+
+    def __init__(self):
+        from distutils.sysconfig import get_python_inc
+        include_dirs = []
+        try:
+            # Derive the install prefix from the imported module's path
+            # (everything before the 'lib' component) and add the
+            # corresponding Python include directory.
+            module = __import__(self.modulename)
+            prefix = []
+            for name in module.__file__.split(os.sep):
+                if name=='lib':
+                    break
+                prefix.append(name)
+            include_dirs.append(get_python_inc(prefix=os.sep.join(prefix)))
+        except ImportError:
+            pass
+        py_incl_dir = get_python_inc()
+        include_dirs.append(py_incl_dir)
+        # Also consider e.g. <default>/python2.x style directories.
+        for d in default_include_dirs:
+            d = os.path.join(d, os.path.basename(py_incl_dir))
+            if d not in include_dirs:
+                include_dirs.append(d)
+        system_info.__init__(self,
+                             default_lib_dirs=[],
+                             default_include_dirs=include_dirs)
+
+    def calc_info(self):
+        try:
+            module = __import__(self.modulename)
+        except ImportError:
+            return
+        info = {}
+        macros = []
+        # Export the module's version (first attribute found) as macros.
+        for v in ['__version__','version']:
+            vrs = getattr(module,v,None)
+            if vrs is None:
+                continue
+            macros = [(self.modulename.upper()+'_VERSION',
+                       '"\\"%s\\""' % (vrs)),
+                      (self.modulename.upper(),None)]
+            break
+## try:
+## macros.append(
+## (self.modulename.upper()+'_VERSION_HEX',
+## hex(vstr2hex(module.__version__))),
+## )
+## except Exception,msg:
+## print msg
+        dict_append(info, define_macros = macros)
+        include_dirs = self.get_include_dirs()
+        # <modulename>/arrayobject.h marks a usable include directory.
+        inc_dir = None
+        for d in include_dirs:
+            if self.combine_paths(d,
+                                  os.path.join(self.modulename,
+                                               'arrayobject.h')):
+                inc_dir = d
+                break
+        if inc_dir is not None:
+            dict_append(info, include_dirs=[inc_dir])
+        if info:
+            self.set_info(**info)
+        return
+
+class numarray_info(numpy_info):
+    """Detect the numarray package (same mechanism as numpy_info)."""
+    section = 'numarray'
+    modulename = 'numarray'
+
+class scipy_info(numpy_info):
+    """Detect the scipy package (same mechanism as numpy_info)."""
+    section = 'scipy'
+    modulename = 'scipy'
+
+
+class boost_python_info(system_info):
+    """Locate a Boost.Python source tree and expose it as a buildable
+    source library ('boost_python_src')."""
+    section = 'boost_python'
+    dir_env_var = 'BOOST'
+
+    def get_paths(self, section, key):
+        # Also descend into versioned 'boost*' subdirectories.
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d,['boost*']))
+        return [ d for d in dirs if os.path.isdir(d) ]
+
+    def calc_info(self):
+        from distutils.sysconfig import get_python_inc
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        # libs/python/src/module.cpp marks a Boost source tree.
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d,'libs','python','src','module.cpp')):
+                src_dir = d
+                break
+        if not src_dir:
+            return
+        py_incl_dir = get_python_inc()
+        srcs_dir = os.path.join(src_dir,'libs','python','src')
+        bpl_srcs = glob(os.path.join(srcs_dir,'*.cpp'))
+        bpl_srcs += glob(os.path.join(srcs_dir,'*','*.cpp'))
+        info = {'libraries':[('boost_python_src',{'include_dirs':[src_dir,py_incl_dir],
+                                                  'sources':bpl_srcs})],
+                'include_dirs':[src_dir],
+                }
+        if info:
+            self.set_info(**info)
+        return
+
+class agg2_info(system_info):
+    """Locate an Anti-Grain Geometry (AGG2) source tree and expose it
+    as a buildable source library ('agg2_src')."""
+    section = 'agg2'
+    dir_env_var = 'AGG2'
+
+    def get_paths(self, section, key):
+        # Also descend into versioned 'agg2*' subdirectories.
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d,['agg2*']))
+        return [ d for d in dirs if os.path.isdir(d) ]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        # src/agg_affine_matrix.cpp marks an AGG2 source tree.
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d,'src','agg_affine_matrix.cpp')):
+                src_dir = d
+                break
+        if not src_dir:
+            return
+        # Platform backend: win32 bitmap support, else the X11 backend.
+        if sys.platform=='win32':
+            agg2_srcs = glob(os.path.join(src_dir,'src','platform','win32','agg_win32_bmp.cpp'))
+        else:
+            agg2_srcs = glob(os.path.join(src_dir,'src','*.cpp'))
+            agg2_srcs += [os.path.join(src_dir,'src','platform','X11','agg_platform_support.cpp')]
+
+        info = {'libraries':[('agg2_src',{'sources':agg2_srcs,
+                                          'include_dirs':[os.path.join(src_dir,'include')],
+                                          })],
+                'include_dirs':[os.path.join(src_dir,'include')],
+                }
+        if info:
+            self.set_info(**info)
+        return
+
+class _pkg_config_info(system_info):
+    """Base class for resources described by a pkg-config style tool.
+
+    Subclasses set *section*, the config executable details, and macro
+    names; calc_info then shells out for version, libs and cflags and
+    translates the output into setup keyword arguments.
+    """
+    section = None
+    config_env_var = 'PKG_CONFIG'
+    default_config_exe = 'pkg-config'
+    append_config_exe = ''
+    version_macro_name = None
+    release_macro_name = None
+    version_flag = '--modversion'
+    cflags_flag = '--cflags'
+
+    def get_config_exe(self):
+        # Environment override wins over the class default.
+        if os.environ.has_key(self.config_env_var):
+            return os.environ[self.config_env_var]
+        return self.default_config_exe
+    def get_config_output(self, config_exe, option):
+        # Returns the command output, or None on a non-zero exit status.
+        s,o = exec_command(config_exe+' '+self.append_config_exe+' '+option,use_tee=0)
+        if not s:
+            return o
+
+    def calc_info(self):
+        config_exe = find_executable(self.get_config_exe())
+        if not os.path.isfile(config_exe):
+            print 'File not found: %s. Cannot determine %s info.' \
+                  % (config_exe, self.section)
+            return
+        info = {}
+        macros = []
+        libraries = []
+        library_dirs = []
+        include_dirs = []
+        extra_link_args = []
+        extra_compile_args = []
+        version = self.get_config_output(config_exe,self.version_flag)
+        if version:
+            # e.g. WX_INFO='"1.2.3"' plus optional WX_VERSION_1_2_3 /
+            # WX_RELEASE_... feature macros.
+            macros.append((self.__class__.__name__.split('.')[-1].upper(),
+                           '"\\"%s\\""' % (version)))
+            if self.version_macro_name:
+                macros.append((self.version_macro_name+'_%s' % (version.replace('.','_')),None))
+        if self.release_macro_name:
+            release = self.get_config_output(config_exe,'--release')
+            if release:
+                macros.append((self.release_macro_name+'_%s' % (release.replace('.','_')),None))
+        # Split linker flags into -l / -L / everything else.
+        opts = self.get_config_output(config_exe,'--libs')
+        if opts:
+            for opt in opts.split():
+                if opt[:2]=='-l':
+                    libraries.append(opt[2:])
+                elif opt[:2]=='-L':
+                    library_dirs.append(opt[2:])
+                else:
+                    extra_link_args.append(opt)
+        # Split compile flags into -I / -D / everything else.
+        opts = self.get_config_output(config_exe,self.cflags_flag)
+        if opts:
+            for opt in opts.split():
+                if opt[:2]=='-I':
+                    include_dirs.append(opt[2:])
+                elif opt[:2]=='-D':
+                    if '=' in opt:
+                        n,v = opt[2:].split('=')
+                        macros.append((n,v))
+                    else:
+                        macros.append((opt[2:],None))
+                else:
+                    extra_compile_args.append(opt)
+        if macros: dict_append(info, define_macros = macros)
+        if libraries: dict_append(info, libraries = libraries)
+        if library_dirs: dict_append(info, library_dirs = library_dirs)
+        if include_dirs: dict_append(info, include_dirs = include_dirs)
+        if extra_link_args: dict_append(info, extra_link_args = extra_link_args)
+        if extra_compile_args: dict_append(info, extra_compile_args = extra_compile_args)
+        if info:
+            self.set_info(**info)
+        return
+
+class wx_info(_pkg_config_info):
+ section = 'wx'
+ config_env_var = 'WX_CONFIG'
+ default_config_exe = 'wx-config'
+ append_config_exe = ''
+ version_macro_name = 'WX_VERSION'
+ release_macro_name = 'WX_RELEASE'
+ version_flag = '--version'
+ cflags_flag = '--cxxflags'
+
+class gdk_pixbuf_xlib_2_info(_pkg_config_info):
+ section = 'gdk_pixbuf_xlib_2'
+ append_config_exe = 'gdk-pixbuf-xlib-2.0'
+ version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
+
+class gdk_pixbuf_2_info(_pkg_config_info):
+ section = 'gdk_pixbuf_2'
+ append_config_exe = 'gdk-pixbuf-2.0'
+ version_macro_name = 'GDK_PIXBUF_VERSION'
+
+class gdk_x11_2_info(_pkg_config_info):
+ section = 'gdk_x11_2'
+ append_config_exe = 'gdk-x11-2.0'
+ version_macro_name = 'GDK_X11_VERSION'
+
+class gdk_2_info(_pkg_config_info):
+ section = 'gdk_2'
+ append_config_exe = 'gdk-2.0'
+ version_macro_name = 'GDK_VERSION'
+
+class gdk_info(_pkg_config_info):
+ section = 'gdk'
+ append_config_exe = 'gdk'
+ version_macro_name = 'GDK_VERSION'
+
+class gtkp_x11_2_info(_pkg_config_info):
+ section = 'gtkp_x11_2'
+ append_config_exe = 'gtk+-x11-2.0'
+ version_macro_name = 'GTK_X11_VERSION'
+
+
+class gtkp_2_info(_pkg_config_info):
+ section = 'gtkp_2'
+ append_config_exe = 'gtk+-2.0'
+ version_macro_name = 'GTK_VERSION'
+
+class xft_info(_pkg_config_info):
+ section = 'xft'
+ append_config_exe = 'xft'
+ version_macro_name = 'XFT_VERSION'
+
+class freetype2_info(_pkg_config_info):
+ section = 'freetype2'
+ append_config_exe = 'freetype2'
+ version_macro_name = 'FREETYPE2_VERSION'
+
+## def vstr2hex(version):
+## bits = []
+## n = [24,16,8,4,0]
+## r = 0
+## for s in version.split('.'):
+## r |= int(s) << n[0]
+## del n[0]
+## return r
+
+#--------------------------------------------------------------------
+
+def combine_paths(*args,**kws):
+ """ Return a list of existing paths composed by all combinations of
+ items from arguments.
+ """
+ r = []
+ for a in args:
+ if not a: continue
+ if type(a) is types.StringType:
+ a = [a]
+ r.append(a)
+ args = r
+ if not args: return []
+ if len(args)==1:
+ result = reduce(lambda a,b:a+b,map(glob,args[0]),[])
+ elif len (args)==2:
+ result = []
+ for a0 in args[0]:
+ for a1 in args[1]:
+ result.extend(glob(os.path.join(a0,a1)))
+ else:
+ result = combine_paths(*(combine_paths(args[0],args[1])+args[2:]))
+ verbosity = kws.get('verbosity',1)
+ if verbosity>1 and result:
+ print '(','paths:',','.join(result),')'
+ return result
+
+language_map = {'c':0,'c++':1,'f77':2,'f90':3}
+inv_language_map = {0:'c',1:'c++',2:'f77',3:'f90'}
+def dict_append(d,**kws):
+ languages = []
+ for k,v in kws.items():
+ if k=='language':
+ languages.append(v)
+ continue
+ if d.has_key(k):
+ if k in ['library_dirs','include_dirs','define_macros']:
+ [d[k].append(vv) for vv in v if vv not in d[k]]
+ else:
+ d[k].extend(v)
+ else:
+ d[k] = v
+ if languages:
+ l = inv_language_map[max([language_map.get(l,0) for l in languages])]
+ d['language'] = l
+ return
+
+def show_all():
+ import system_info
+ import pprint
+ match_info = re.compile(r'.*?_info').match
+ show_only = []
+ for n in sys.argv[1:]:
+ if n[-5:] != '_info':
+ n = n + '_info'
+ show_only.append(n)
+ show_all = not show_only
+ for n in filter(match_info,dir(system_info)):
+ if n in ['system_info','get_info']: continue
+ if not show_all:
+ if n not in show_only: continue
+ del show_only[show_only.index(n)]
+ c = getattr(system_info,n)()
+ c.verbosity = 2
+ r = c.get_info()
+ if show_only:
+ print 'Info classes not defined:',','.join(show_only)
+if __name__ == "__main__":
+ show_all()
diff --git a/numpy/distutils/tests/f2py_ext/__init__.py b/numpy/distutils/tests/f2py_ext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/distutils/tests/f2py_ext/__init__.py
diff --git a/numpy/distutils/tests/f2py_ext/setup.py b/numpy/distutils/tests/f2py_ext/setup.py
new file mode 100644
index 000000000..6b786a97e
--- /dev/null
+++ b/numpy/distutils/tests/f2py_ext/setup.py
@@ -0,0 +1,12 @@
+
+import os
+from scipy.distutils.core import setup, Extension
+
+ext = Extension('f2py_ext.fib2',['src/fib2.pyf','src/fib1.f'])
+
+setup(
+ name = 'f2py_ext',
+ ext_modules = [ext],
+ packages = ['f2py_ext.tests','f2py_ext'],
+ package_dir = {'f2py_ext':'.'})
+
diff --git a/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy/distutils/tests/f2py_ext/src/fib1.f
new file mode 100644
index 000000000..cfbb1eea0
--- /dev/null
+++ b/numpy/distutils/tests/f2py_ext/src/fib1.f
@@ -0,0 +1,18 @@
+C FILE: FIB1.F
+ SUBROUTINE FIB(A,N)
+C
+C CALCULATE FIRST N FIBONACCI NUMBERS
+C
+ INTEGER N
+ REAL*8 A(N)
+ DO I=1,N
+ IF (I.EQ.1) THEN
+ A(I) = 0.0D0
+ ELSEIF (I.EQ.2) THEN
+ A(I) = 1.0D0
+ ELSE
+ A(I) = A(I-1) + A(I-2)
+ ENDIF
+ ENDDO
+ END
+C END FILE FIB1.F
diff --git a/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy/distutils/tests/f2py_ext/src/fib2.pyf
new file mode 100644
index 000000000..90a8cf00c
--- /dev/null
+++ b/numpy/distutils/tests/f2py_ext/src/fib2.pyf
@@ -0,0 +1,9 @@
+! -*- f90 -*-
+python module fib2
+ interface
+ subroutine fib(a,n)
+ real*8 dimension(n),intent(out),depend(n) :: a
+ integer intent(in) :: n
+ end subroutine fib
+ end interface
+end python module fib2
diff --git a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
new file mode 100644
index 000000000..633e2ba20
--- /dev/null
+++ b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
@@ -0,0 +1,13 @@
+import sys
+from scipy.base.testing import *
+set_package_path()
+from f2py_ext import fib2
+del sys.path[0]
+
+class test_fib2(ScipyTestCase):
+
+ def check_fib(self):
+ assert_array_equal(fib2.fib(6),[0,1,1,2,3,5])
+
+if __name__ == "__main__":
+ ScipyTest(fib2).run()
diff --git a/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy/distutils/tests/f2py_f90_ext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/distutils/tests/f2py_f90_ext/__init__.py
diff --git a/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy/distutils/tests/f2py_f90_ext/include/body.f90
new file mode 100644
index 000000000..90b44e29d
--- /dev/null
+++ b/numpy/distutils/tests/f2py_f90_ext/include/body.f90
@@ -0,0 +1,5 @@
+ subroutine bar13(a)
+ !f2py intent(out) a
+ integer a
+ a = 13
+ end subroutine bar13
diff --git a/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy/distutils/tests/f2py_f90_ext/setup.py
new file mode 100644
index 000000000..f3ab45045
--- /dev/null
+++ b/numpy/distutils/tests/f2py_f90_ext/setup.py
@@ -0,0 +1,16 @@
+
+import os
+from scipy_distutils.core import setup, Extension
+
+package = 'f2py_f90_ext'
+
+ext = Extension(package+'.foo',['src/foo_free.f90'],
+ include_dirs=['include'],
+ f2py_options=['--include_paths','include'])
+
+setup(
+ name = package,
+ ext_modules = [ext],
+ packages = [package+'.tests',package],
+ package_dir = {package:'.'})
+
diff --git a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 b/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90
new file mode 100644
index 000000000..c7713be59
--- /dev/null
+++ b/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90
@@ -0,0 +1,6 @@
+module foo_free
+contains
+
+include "body.f90"
+
+end module foo_free
diff --git a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
new file mode 100644
index 000000000..544f94ab4
--- /dev/null
+++ b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
@@ -0,0 +1,13 @@
+import sys
+from scipy.base.testing import *
+set_package_path()
+from f2py_f90_ext import foo
+del sys.path[0]
+
+class test_foo(ScipyTestCase):
+
+ def check_foo_free(self):
+ assert_equal(foo.foo_free.bar13(),13)
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/distutils/tests/gen_ext/__init__.py b/numpy/distutils/tests/gen_ext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/distutils/tests/gen_ext/__init__.py
diff --git a/numpy/distutils/tests/gen_ext/setup.py b/numpy/distutils/tests/gen_ext/setup.py
new file mode 100644
index 000000000..7b12c1f55
--- /dev/null
+++ b/numpy/distutils/tests/gen_ext/setup.py
@@ -0,0 +1,47 @@
+
+import os
+from scipy.distutils.core import setup, Extension
+from distutils.dep_util import newer
+
+fib3_f = '''
+C FILE: FIB3.F
+ SUBROUTINE FIB(A,N)
+C
+C CALCULATE FIRST N FIBONACCI NUMBERS
+C
+ INTEGER N
+ REAL*8 A(N)
+Cf2py intent(in) n
+Cf2py intent(out) a
+Cf2py depend(n) a
+ DO I=1,N
+ IF (I.EQ.1) THEN
+ A(I) = 0.0D0
+ ELSEIF (I.EQ.2) THEN
+ A(I) = 1.0D0
+ ELSE
+ A(I) = A(I-1) + A(I-2)
+ ENDIF
+ ENDDO
+ END
+C END FILE FIB3.F
+'''
+
+package = 'gen_ext'
+
+def source_func(ext, src_dir):
+ source = os.path.join(src_dir,'fib3.f')
+ if newer(__file__, source):
+ f = open(source,'w')
+ f.write(fib3_f)
+ f.close()
+ return [source]
+
+ext = Extension(package+'.fib3',[source_func])
+
+setup(
+ name = package,
+ ext_modules = [ext],
+ packages = [package+'.tests',package],
+ package_dir = {package:'.'})
+
diff --git a/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy/distutils/tests/gen_ext/tests/test_fib3.py
new file mode 100644
index 000000000..c8ee2441c
--- /dev/null
+++ b/numpy/distutils/tests/gen_ext/tests/test_fib3.py
@@ -0,0 +1,13 @@
+import sys
+from scipy.base.testing import *
+set_package_path()
+from gen_ext import fib3
+del sys.path[0]
+
+class test_fib3(ScipyTestCase):
+
+ def check_fib(self):
+ assert_array_equal(fib3.fib(6),[0,1,1,2,3,5])
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/distutils/tests/swig_ext/__init__.py b/numpy/distutils/tests/swig_ext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/__init__.py
diff --git a/numpy/distutils/tests/swig_ext/setup.py b/numpy/distutils/tests/swig_ext/setup.py
new file mode 100644
index 000000000..b6fe8eed4
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/setup.py
@@ -0,0 +1,14 @@
+
+import os
+from scipy_distutils.core import setup, Extension
+
+ext_c = Extension('swig_ext._example',['src/example.i','src/example.c'])
+ext_cpp = Extension('swig_ext._example2',['src/zoo.i','src/zoo.cc'],
+ depends=['src/zoo.h'],include_dirs=['src'])
+
+setup(
+ name = 'swig_ext',
+ ext_modules = [ext_c,ext_cpp],
+ packages = ['swig_ext.tests','swig_ext'],
+ package_dir = {'swig_ext':'.'})
+
diff --git a/numpy/distutils/tests/swig_ext/src/example.c b/numpy/distutils/tests/swig_ext/src/example.c
new file mode 100644
index 000000000..7bbb661dd
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/src/example.c
@@ -0,0 +1,14 @@
+/* File : example.c */
+
+double My_variable = 3.0;
+
+/* Compute factorial of n */
+int fact(int n) {
+ if (n <= 1) return 1;
+ else return n*fact(n-1);
+}
+
+/* Compute n mod m */
+int my_mod(int n, int m) {
+ return(n % m);
+}
diff --git a/numpy/distutils/tests/swig_ext/src/example.i b/numpy/distutils/tests/swig_ext/src/example.i
new file mode 100644
index 000000000..6d61062b3
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/src/example.i
@@ -0,0 +1,11 @@
+/* -*- c -*- */
+
+/* File : example.i */
+%module example
+%{
+/* Put headers and other declarations here */
+%}
+
+extern double My_variable;
+extern int fact(int);
+extern int my_mod(int n, int m);
diff --git a/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy/distutils/tests/swig_ext/src/zoo.cc
new file mode 100644
index 000000000..0a643d1e5
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/src/zoo.cc
@@ -0,0 +1,23 @@
+#include "zoo.h"
+#include <cstdio>
+#include <cstring>
+
+Zoo::Zoo()
+{
+ n = 0;
+}
+
+void Zoo::shut_up(char *animal)
+{
+ if (n < 10) {
+ strcpy(animals[n], animal);
+ n++;
+ }
+}
+
+void Zoo::display()
+{
+ int i;
+ for(i = 0; i < n; i++)
+ printf("%s\n", animals[i]);
+}
diff --git a/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy/distutils/tests/swig_ext/src/zoo.h
new file mode 100644
index 000000000..cb26e6cef
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/src/zoo.h
@@ -0,0 +1,9 @@
+
+class Zoo{
+ int n;
+ char animals[10][50];
+public:
+ Zoo();
+ void shut_up(char *animal);
+ void display();
+};
diff --git a/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy/distutils/tests/swig_ext/src/zoo.i
new file mode 100644
index 000000000..a029c03e8
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/src/zoo.i
@@ -0,0 +1,10 @@
+// -*- c++ -*-
+// Example copied from http://linuxgazette.net/issue49/pramode.html
+
+%module example2
+
+%{
+#include "zoo.h"
+%}
+
+%include "zoo.h"
diff --git a/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy/distutils/tests/swig_ext/tests/test_example.py
new file mode 100644
index 000000000..baedec642
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/tests/test_example.py
@@ -0,0 +1,18 @@
+import sys
+from scipy.base.testing import *
+set_package_path()
+from swig_ext import example
+del sys.path[0]
+
+class test_example(ScipyTestCase):
+
+ def check_fact(self):
+ assert_equal(example.fact(10),3628800)
+
+ def check_cvar(self):
+ assert_equal(example.cvar.My_variable,3.0)
+ example.cvar.My_variable = 5
+ assert_equal(example.cvar.My_variable,5.0)
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy/distutils/tests/swig_ext/tests/test_example2.py
new file mode 100644
index 000000000..af066be68
--- /dev/null
+++ b/numpy/distutils/tests/swig_ext/tests/test_example2.py
@@ -0,0 +1,17 @@
+import sys
+from scipy.base.testing import *
+set_package_path()
+from swig_ext import example2
+del sys.path[0]
+
+class test_example2(ScipyTestCase):
+
+ def check_zoo(self):
+ z = example2.Zoo()
+ z.shut_up('Tiger')
+ z.shut_up('Lion')
+ z.display()
+
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
new file mode 100644
index 000000000..4ca21ea13
--- /dev/null
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -0,0 +1,33 @@
+import sys
+from scipy.testing import *
+from scipy.distutils.misc_util import appendpath
+from os.path import join, sep
+
+ajoin = lambda *paths: join(*((sep,)+paths))
+
+class test_appendpath(ScipyTestCase):
+
+ def check_1(self):
+ assert_equal(appendpath('prefix','name'),join('prefix','name'))
+ assert_equal(appendpath('/prefix','name'),ajoin('prefix','name'))
+ assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name'))
+ assert_equal(appendpath('prefix','/name'),join('prefix','name'))
+
+ def check_2(self):
+ assert_equal(appendpath('prefix/sub','name'),
+ join('prefix','sub','name'))
+ assert_equal(appendpath('prefix/sub','sup/name'),
+ join('prefix','sub','sup','name'))
+ assert_equal(appendpath('/prefix/sub','/prefix/name'),
+ ajoin('prefix','sub','name'))
+
+ def check_3(self):
+ assert_equal(appendpath('/prefix/sub','/prefix/sup/name'),
+ ajoin('prefix','sub','sup','name'))
+ assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'),
+ ajoin('prefix','sub','sub2','sup','sup2','name'))
+ assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'),
+ ajoin('prefix','sub','sub2','sup','name'))
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
new file mode 100644
index 000000000..214763cef
--- /dev/null
+++ b/numpy/distutils/unixccompiler.py
@@ -0,0 +1,65 @@
+"""
+unixccompiler - can handle very long argument lists for ar.
+"""
+
+import os
+import sys
+import new
+
+from distutils.errors import DistutilsExecError, LinkError, CompileError
+from distutils.unixccompiler import *
+
+
+import log
+
+# Note that UnixCCompiler._compile appeared in Python 2.3
+def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ display = '%s: %s' % (os.path.basename(self.compiler_so[0]),src)
+ try:
+ self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
+ extra_postargs, display = display)
+ except DistutilsExecError, msg:
+ raise CompileError, msg
+UnixCCompiler._compile = new.instancemethod(UnixCCompiler__compile,
+ None,
+ UnixCCompiler)
+
+
+def UnixCCompile_create_static_lib(self, objects, output_libname,
+ output_dir=None, debug=0, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+
+ output_filename = \
+ self.library_filename(output_libname, output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ self.mkpath(os.path.dirname(output_filename))
+ tmp_objects = objects + self.objects
+ while tmp_objects:
+ objects = tmp_objects[:50]
+ tmp_objects = tmp_objects[50:]
+ display = '%s: adding %d object files to %s' % (os.path.basename(self.archiver[0]),
+ len(objects),output_filename)
+ self.spawn(self.archiver + [output_filename] + objects,
+ display = display)
+
+ # Not many Unices required ranlib anymore -- SunOS 4.x is, I
+ # think the only major Unix that does. Maybe we need some
+ # platform intelligence here to skip ranlib if it's not
+ # needed -- or maybe Python's configure script took care of
+ # it for us, hence the check for leading colon.
+ if self.ranlib:
+ display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
+ output_filename)
+ try:
+ self.spawn(self.ranlib + [output_filename],
+ display = display)
+ except DistutilsExecError, msg:
+ raise LibError, msg
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+ return
+
+UnixCCompiler.create_static_lib = \
+ new.instancemethod(UnixCCompile_create_static_lib,
+ None,UnixCCompiler)
diff --git a/numpy/doc/CAPI.txt b/numpy/doc/CAPI.txt
new file mode 100644
index 000000000..f8bfd3981
--- /dev/null
+++ b/numpy/doc/CAPI.txt
@@ -0,0 +1,317 @@
+Author: Travis Oliphant
+Discussions to: scipy-dev@scipy.org
+Created: October 2005
+
+The CAPI of SciPy is (mostly) backward compatible with Numeric.
+
+There are a few non-standard Numeric usages (that were not really part
+of the API) that will need to be changed:
+
+ * If you used any of the function pointers in the PyArray_Descr
+ structure you will have to modify your usage of those. First,
+ the pointers are all under the member named f. So descr->cast is now
+ descr->f->cast. In addition, the
+ casting functions have eliminated the strides argument (use
+ PyArray_CastTo if you need strided casting). All functions have
+ one or two PyArrayObject * arguments at the end. This allows the
+ flexible arrays and mis-behaved arrays to be handled.
+
+ * The descr->zero and descr->one constants have been replaced with
+ function calls, PyArray_Zero, and PyArray_One (be sure to read the
+     code and free the resulting memory if you use these calls).
+
+ * If you passed array->dimensions and array->strides around to
+ functions, you will need to fix some code. These are now intp* pointers.
+ On 32-bit systems there won't be a problem. However, on 64-bit systems, you will
+ need to make changes to avoid errors and segfaults.
+
+
+The header files arrayobject.h and ufuncobject.h contain many defines
+that you may find useful. The files __ufunc_api.h and
+__multiarray_api.h contain the available C-API function calls with
+their function signatures.
+
+All of these headers are installed to
+
+<YOUR_PYTHON_LOCATION>/site-packages/scipy/base/include
+
+
+Getting arrays in C-code
+=========================
+
+All new arrays can be created using PyArray_NewFromDescr. A simple interface
+equivalent to PyArray_FromDims is PyArray_SimpleNew(nd, dims, typenum)
+and to PyArray_FromDimsAndData is PyArray_SimpleNewFromData(nd, dims, typenum, data)
+
+This is a very flexible function.
+
+PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr,
+ int nd, intp *dims,
+ intp *strides, char *data,
+ int flags, PyObject *obj);
+
+
+subtype : The subtype that should be created (either pass in
+ &PyArray_Type, &PyBigArray_Type, or obj->ob_type,
+ where obj is a an instance of a subtype (or subclass) of
+ PyArray_Type or PyBigArray_Type).
+
+descr : The type descriptor for the array. This is a Python Object
+ (this function steals a reference to it). The easiest way
+ to get one is using PyArray_DescrFromType(<typenum>). If
+ you want to use a flexible size array, then you need to use
+ PyArray_DescrNewFromType(<flexible typenum>) and set its elsize
+        parameter to the desired size.  The typenum in both of these
+ cases is one of the PyArray_XXXX enumerated types.
+
+nd : The number of dimensions (<MAX_DIMS)
+
+*dims : A pointer to the size in each dimension. Information will be
+ copied from here.
+
+*strides : The strides this array should have. For new arrays created
+ by this routine, this should be NULL. If you pass in
+ memory for this array to use, then you can pass in the
+ strides information as well (otherwise it will be created for
+ you and default to C-contiguous or Fortran contiguous).
+ Any strides will be copied into the array structure.
+ Do not pass in bad strides information!!!!
+
+ PyArray_CheckStrides(...) can help but you must call it if you are
+ unsure. You cannot pass in strides information when data is NULL
+ and this routine is creating its own memory.
+
+*data : NULL for creating brand-new memory. If you want this array
+ to wrap another memory area, then pass the pointer here.
+ You are responsible for deleting the memory in that case,
+ but do not do so until the new array object has been
+ deleted. The best way to handle that is to get the memory
+ from another Python object, INCREF that Python object after
+        passing its data pointer to this routine, and set the
+ ->base member of the returned array to the Python object.
+ *You are responsible for* setting PyArray_BASE(ret) to the
+ base object. Failure to do so will create a memory leak.
+
+ If you pass in a data buffer, the flags argument will be
+ the flags of the new array. If you create a new array, a
+ non-zero flags argument indicates that you want the array
+ to be in FORTRAN order.
+
+flags : Either the flags showing how to interpret the data buffer
+ passed in. Or if a new array is created, nonzero to
+ indicate a FORTRAN order array. See below for an explanation of
+ the flags.
+
+obj : If subtypes is &PyArray_Type or &PyBigArray_Type, this
+ argument is ignored. Otherwise, the __array_finalize__
+ method of the subtype is called (if present) and passed
+ this object. This is usually an array of the type to be
+ created (so the __array_finalize__ method must handle an
+ array argument. But, it can be anything...)
+
+Note: The returned array object will be uninitialized unless the type is
+PyArray_OBJECT in which case the memory will be set to NULL.
+
+PyArray_SimpleNew(nd, dims, typenum) is a drop-in replacement for
+PyArray_FromDims (except it takes intp* dims instead of int* dims which
+ matters on 64-bit systems) and it does not initialize
+ the memory to zero.
+
+PyArray_SimpleNew is just a macro for PyArray_New with default arguments.
+Use PyArray_FILLWBYTE(arr, 0) to fill with zeros.
+
+The PyArray_FromDims and family of functions are still available and
+are loose wrappers around this function. These functions still take
+int * arguments. This should be fine on 32-bit systems, but on 64-bit
+systems you may run into trouble if you frequently passed
+PyArray_FromDims the dimensions member of the old PyArrayObject structure
+because sizeof(intp) != sizeof(int).
+
+
+Getting an arrayobject from an arbitrary Python object
+==============================================================
+
+PyArray_FromAny(...)
+
+This function replaces PyArray_ContiguousFromObject and friends (those
+function calls still remain but they are loose wrappers around the
+PyArray_FromAny call).
+
+static PyObject *
+PyArray_FromAny(PyObject *op, PyArray_Descr *dtype, int min_depth,
+ int max_depth, int requires)
+
+
+op : The Python object to "convert" to an array object
+
+dtype : The desired data-type descriptor. This can be NULL, if the descriptor
+ should be determined by the object. Unless
+ FORCECAST is present in flags, this call will generate
+ an error if the data type cannot be safely obtained from
+ the object.
+
+min_depth : The minimum depth of array needed or 0 if doesn't matter
+
+max_depth : The maximum depth of array allowed or 0 if doesn't matter
+
+requires : A flag indicating the "requirements" of the returned array.
+
+
+From the code comments, the requires flag is explained.
+
+requires can be any of
+
+ CONTIGUOUS,
+ FORTRAN,
+ ALIGNED,
+ WRITEABLE,
+ ENSURECOPY,
+ ENSUREARRAY,
+ UPDATEIFCOPY,
+ FORCECAST,
+
+ or'd (|) together
+
+ Any of these flags present means that the returned array should
+ guarantee that aspect of the array. Otherwise the returned array
+ won't guarantee it -- it will depend on the object as to whether or
+ not it has such features.
+
+ Note that ENSURECOPY is enough to guarantee CONTIGUOUS, ALIGNED,
+ and WRITEABLE and therefore it is redundant to include those as well.
+
+ BEHAVED_FLAGS == ALIGNED | WRITEABLE
+ BEHAVED_FLAGS_RO == ALIGNED
+ CARRAY_FLAGS = CONTIGUOUS | BEHAVED_FLAGS
+ FARRAY_FLAGS = FORTRAN | BEHAVED_FLAGS
+
+ By default, if the object is an array (or any subclass) and requires is 0,
+ the array will just be INCREF'd and returned.
+
+ ENSUREARRAY makes sure a base-class ndarray is returned (If the object is a
+ bigndarray it will also be returned).
+
+ UPDATEIFCOPY flag sets this flag in the returned array *if a copy is
+ made*. The base argument of the returned array points to the misbehaved
+ array (which is set to READONLY in that case).
+ When the new array is deallocated, the original array held in base
+ is updated with the contents of the new array. This is useful,
+ if you don't want to deal with a possibly mis-behaved array, but want
+ to update it easily using a local contiguous copy.
+
+ FORCECAST will cause a cast to occur regardless of whether or not
+ it is safe.
+
+
+PyArray_ContiguousFromAny(op, typenum, min_depth, max_depth) is equivalent
+to PyArray_ContiguousFromObject(...) (which is still available), except
+it will return the subclass if op is already a subclass of the ndarray.
+The ContiguousFromObject version will always return an ndarray (or a bigndarray).
+
+Passing Data Type information to C-code
+============================================
+
+All Data-types are handled using the PyArray_Descr * structure.
+This structure can be obtained from a Python object using
+PyArray_DescrConverter and PyArray_DescrConverter2. The former
+returns the default PyArray_LONG descriptor when the input object
+is None, while the latter returns NULL when the input object is None.
+
+See the arraymethods.c and multiarraymodule.c files for many examples of usage.
+
+Getting at the structure of the array.
+
+You should use the #defines provided to access array structure portions:
+
+PyArray_DATA(obj)
+PyArray_ITEMSIZE(obj)
+PyArray_NDIM(obj)
+PyArray_DIMS(obj)
+PyArray_DIM(obj, n)
+PyArray_STRIDES(obj)
+PyArray_STRIDE(obj,n)
+PyArray_DESCR(obj)
+PyArray_BASE(obj)
+
+
+see more in arrayobject.h
+
+
+NDArray Flags
+==========================
+
+The flags attribute of the PyArrayObject structure contains important
+information about the memory used by the array (pointed to by the data member)
+This flags information must be kept accurate or strange results and even
+segfaults may result.
+
+There are 7 (binary) flags that describe the memory area used by the
+data buffer. These constants are defined in arrayobject.h and
+determine the bit-position of the flag. Python exposes a nice dictionary
+interface for getting (and, if appropriate, setting) these flags.
+
+Memory areas of all kinds can be pointed to by an ndarray, necessitating
+these flags. If you get an arbitrary PyArrayObject in C-code,
+you need to be aware of the flags that are set.
+If you need to guarantee a certain kind of array
+(like CONTIGUOUS and BEHAVED), then pass these requirements into the
+PyArray_FromAny function.
+
+
+CONTIGUOUS : True if the array is (C-style) contiguous in memory.
+FORTRAN : True if the array is (Fortran-style) contiguous in memory.
+
+Notice that 1-d arrays are always both FORTRAN contiguous and C contiguous.
+Both of these flags can be checked and are convenience flags only as whether
+or not an array is CONTIGUOUS or FORTRAN can be determined by the strides,
+dimensions, and itemsize variables.
+
+OWNDATA : True if the array owns the memory (it will try and free it
+ using PyDataMem_FREE() on deallocation ---
+ so it better really own it).
+
+These three flags facilitate using a data pointer that is a memory-mapped
+array, or part of some larger record array. But, they may have other uses...
+
+ALIGNED : True if the data buffer is aligned for the type. This
+ can be checked.
+WRITEABLE : True only if the data buffer can be "written" to.
+
+
+UPDATEIFCOPY : This is a special flag that is set if this array represents
+ a copy made because a user required certain FLAGS in
+ PyArray_FromAny and a copy had to be made of some
+ other array (and the user asked for this flag to be set in
+ such a situation). The base attribute then points to the
+ "misbehaved" array (which is set read_only).
+ When the array with this flag set is deallocated,
+ it will copy its contents back to the "misbehaved" array
+ (casting if necessary) and will reset the "misbehaved"
+ array to WRITEABLE. If the "misbehaved" array
+ was not WRITEABLE to begin with then PyArray_FromAny would
+ have returned an error because UPDATEIFCOPY would not
+ have been possible.
+
+
+PyArray_UpdateFlags(obj, FLAGS) will update the obj->flags for FLAGS
+ which can be any of CONTIGUOUS FORTRAN ALIGNED or WRITEABLE
+
+Some useful combinations of these flags:
+
+BEHAVED = ALIGNED | WRITEABLE
+BEHAVED_RO = ALIGNED
+CARRAY_FLAGS = CONTIGUOUS | BEHAVED
+FARRAY_FLAGS = FORTRAN | BEHAVED
+
+The macro PyArray_CHECKFLAGS(obj, FLAGS) can test any combination of flags.
+There are several default combinations defined as macros already
+(see arrayobject.h)
+
+In particular, there are ISBEHAVED, ISBEHAVED_RO, ISCARRAY and ISFARRAY macros
+that also check to make sure the array is in native byte order (as determined)
+by the data-type descriptor.
+
+There are more C-API enhancements which you can discover in the code,
+ or buy the book (http://www.trelgol.com)
+
+
diff --git a/numpy/doc/DISTUTILS.txt b/numpy/doc/DISTUTILS.txt
new file mode 100644
index 000000000..a08c50edf
--- /dev/null
+++ b/numpy/doc/DISTUTILS.txt
@@ -0,0 +1,509 @@
+.. -*- rest -*-
+
+Scipy Distutils - Users Guide
+=============================
+
+:Author: Pearu Peterson <pearu@cens.ioc.ee>
+:Discussions to: scipy-dev@scipy.org
+:Created: October 2005
+:Revision: $LastChangedRevision$
+:SVN source: $HeadURL$
+
+Scipy structure
+'''''''''''''''
+
+Currently Scipy project consists of two packages:
+
+- Scipy core --- it provides packages like:
+
+ + scipy.distutils - extension to Python distutils
+ + scipy.f2py - a tool to bind Fortran/C codes to Python
+ + scipy.weave - a tool to bind C++ codes to Python
+ + scipy.base - future replacement of Numeric and numarray packages
+ + scipy.testing - scipy-style tools for unit testing
+ + etc
+
+- Scipy --- a collection of Scientific tools for Python.
+
+The aim of this document is to describe how to add new tools to Scipy.
+
+
+Requirements for SciPy packages
+'''''''''''''''''''''''''''''''
+
+Scipy consists of Python packages, called Scipy packages, that are
+available to Python users via ``scipy`` name space. Each Scipy package
+may contain other Scipy packages. And so on. So, Scipy directory tree
+is a tree of packages with arbitrary depth and width. Any Scipy
+package may depend on Scipy core packages but the dependence on other
+Scipy packages should be kept minimal or zero.
+
+A Scipy package contains in addition to its sources, the following
+files and directories:
+
+ ``setup.py`` --- building script
+ ``info.py`` --- contains documentation and import flags
+ ``__init__.py`` --- package initializer
+ ``tests/`` --- directory of unittests
+
+Their contents will be described below.
+
+The ``setup.py`` file
+'''''''''''''''''''''
+
+In order to add a Python package to Scipy, its building script (the
+``setup.py`` file) must meet certain requirements. The minimal and the
+most important one is that it must define a function
+``configuration(parent_package='',top_path=None)`` that returns a
+dictionary suitable for passing to ``scipy.distutils.core.setup(..)``
+function. In order to simplify the construction of such a dictionary,
+``scipy.distutils.misc_util`` provides a class ``Configuration``, the
+usage of which will be described below.
+
+Scipy pure Python package example
+---------------------------------
+
+Here follows a minimal example for a pure Python Scipy package
+``setup.py`` file that will be explained in detail below::
+
+ #!/usr/bin/env python
+ def configuration(parent_package='',top_path=None):
+ from scipy.distutils.misc_util import Configuration
+ config = Configuration('mypackage',parent_package,top_path)
+ return config
+
+ if __name__ == "__main__":
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
+
+The first argument ``parent_package`` of the main configuration
+function will contain a name of the parent Scipy package and the
+second argument ``top_path`` contains the name of the directory where
+the main ``setup.py`` script is located. Both arguments should be
+passed to the ``Configuration`` constructor after the name of the
+current package.
+
+The ``Configuration`` constructor also has a fourth optional argument,
+``package_path``, that can be used when package files are located in
+some other location than the directory of the ``setup.py`` file.
+
+Remaining ``Configuration`` arguments are all keyword arguments that will
+be used to initialize attributes of ``Configuration``
+instance. Usually, these keywords are the same as the ones that
+``setup(..)`` function would expect, for example, ``packages``,
+``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``,
+``headers``, ``scripts``, ``package_dir``, etc. However, the direct
+specification of these keywords is not recommended as the content of
+these keyword arguments will not be processed or checked for the
+consistency of Scipy building system.
+
+Finally, ``Configuration`` has ``.todict()`` method that returns all
+the configuration data as a dictionary suitable for passing on to the
+``setup(..)`` function.
+
+``Configuration`` instance attributes
+-------------------------------------
+
+In addition to attributes that can be specified via keyword arguments
+to ``Configuration`` constructor, ``Configuration`` instance (let us
+denote as ``config``) has the following attributes that can be useful
+in writing setup scripts:
+
++ ``config.name`` - full name of the current package. The names of parent
+ packages can be extracted as ``config.name.split('.')``.
+
++ ``config.local_path`` - path to the location of current ``setup.py`` file.
+
++ ``config.top_path`` - path to the location of main ``setup.py`` file.
+
+``Configuration`` instance methods
+----------------------------------
+
++ ``config.todict()`` --- returns configuration dictionary suitable for
+ passing to ``scipy.distutils.core.setup(..)`` function.
+
++ ``config.paths(*paths)`` --- applies ``glob.glob(..)`` to items of
+ ``paths`` if necessary. Fixes ``paths`` item that is relative to
+ ``config.local_path``.
+
++ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` ---
+ returns Scipy subpackage configuration. Subpackage is looked in the
+ current directory under the name ``subpackage_name`` but the path
+ can be specified also via optional ``subpackage_path`` argument.
+ If ``subpackage_name`` is specified as ``None`` then the subpackage
+  name will be taken as the basename of ``subpackage_path``.
+
++ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` ---
+ add Scipy subpackage configuration to the current one. The meaning
+ and usage of arguments is explained above, see
+ ``config.get_subpackage()`` method.
+
++ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files``
+ list. If ``files`` item is a tuple then its first element defines
+ the suffix of where data files are copied relative to package installation
+ directory and the second element specifies the path to data
+ files. By default data files are copied under package installation
+ directory. For example,
+
+ ::
+
+ config.add_data_files('foo.dat',
+ ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']),
+ 'bar/car.dat'.
+ '/full/path/to/can.dat',
+ )
+
+ will install data files to the following locations::
+
+ <installation path of config.name package>/
+ foo.dat
+ fun/
+ gun.dat
+ nun/
+ pun.dat
+ sun.dat
+ bar/
+ car.dat
+ can.dat
+
+ Path to data files can be a function taking no arguments and
+  returning path(s) to data files -- this is useful when data files
+ are generated while building the package. (XXX: explain the step
+  when these functions are called exactly)
+
++ ``config.add_data_dir(data_path)`` --- add directory ``data_path``
+ recursively to ``data_files``. The whole directory tree starting at
+ ``data_path`` will be copied under package installation directory.
+ If ``data_path`` is a tuple then its first element defines
+ the suffix of where data files are copied relative to package installation
+ directory and the second element specifies the path to data directory.
+  By default data directories are copied under package installation
+ directory. For example,
+
+ ::
+
+ config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat
+ config.add_data_dir(('sun','fun'))
+ config.add_data_dir(('gun','/full/path/to/fun'))
+
+ will install data files to the following locations
+
+ <installation path of config.name package>/
+ fun/
+ foo.dat
+ bar/
+ car.dat
+ sun/
+ foo.dat
+ bar/
+ car.dat
+ gun/
+ foo.dat
+ car.dat
+
++ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to
+ ``include_dirs`` list. This list will be visible to all extension
+ modules of the current package.
+
++ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers``
+ list. By default, headers will be installed under
+ ``<prefix>/include/pythonX.X/<config.name.replace('.','/')>/``
+  directory. If ``files`` item is a tuple then its first argument
+ specifies the installation suffix relative to
+ ``<prefix>/include/pythonX.X/`` path.
+
++ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts``
+ list. Scripts will be installed under ``<prefix>/bin/`` directory.
+
++ ``config.add_extension(name,sources,**kw)`` --- create and add an
+ ``Extension`` instance to ``ext_modules`` list. The first argument
+ ``name`` defines the name of the extension module that will be
+ installed under ``config.name`` package. The second argument is
+ a list of sources. ``add_extension`` method takes also keyword
+ arguments that are passed on to the ``Extension`` constructor.
+ The list of allowed keywords is the following: ``include_dirs``,
+ ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``,
+ ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``,
+ ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``,
+ ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``.
+
+ Note that ``config.paths`` method is applied to all lists that
+ may contain paths. ``extra_info`` is a dictionary or a list
+ of dictionaries that content will be appended to keyword arguments.
+ The list ``depends`` contains paths to files or directories
+ that the sources of the extension module depend on. If any path
+ in the ``depends`` list is newer than the extension module, then
+ the module will be rebuilt.
+
+ The list of sources may contain functions ('source generators')
+ with a pattern ``def <funcname>(ext, build_dir): return
+ <source(s) or None>``. If ``funcname`` returns ``None``, no sources
+ are generated. And if the ``Extension`` instance has no sources
+ after processing all source generators, no extension module will
+ be built. This is the recommended way to conditionally define
+ extension modules. Source generator functions are called by the
+ ``build_src`` command of ``scipy.distutils``.
+
+ For example, here is a typical source generator function::
+
+ def generate_source(ext,build_dir):
+ import os
+ from distutils.dep_util import newer
+ target = os.path.join(build_dir,'somesource.c')
+ if newer(target,__file__):
+ # create target file
+ return target
+
+ The first argument contains the Extension instance that can be
+ useful to access its attributes like ``depends``, ``sources``,
+ etc. lists and modify them during the building process.
+ The second argument gives a path to a build directory that must
+ be used when creating files to a disk.
+
++ ``config.add_library(name, sources, **build_info)`` --- add
+ a library to ``libraries`` list. Allowed keywords arguments
+ are ``depends``, ``macros``, ``include_dirs``,
+ ``extra_compiler_args``, ``f2py_options``. See ``.add_extension()``
+ method for more information on arguments.
+
++ ``config.have_f77c()`` --- return True if Fortran 77 compiler is
+  available (read: a simple Fortran 77 code compiled successfully).
+
++ ``config.have_f90c()`` --- return True if Fortran 90 compiler is
+  available (read: a simple Fortran 90 code compiled successfully).
+
++ ``config.get_version()`` --- return version string of the current package,
+ ``None`` if version information could not be detected. This methods
+ scans files ``__version__.py``, ``<packagename>_version.py``,
+ ``version.py``, ``__svn_version__.py`` for string variables
+ ``version``, ``__version__``, ``<packagename>_version``.
+
++ ``config.make_svn_version_py()`` --- appends a data function to
+ ``data_files`` list that will generate ``__svn_version__.py`` file
+ to the current package directory. The file will be removed from
+ the source directory when Python exits.
+
++ ``config.get_build_temp_dir()`` --- return a path to a temporary
+ directory. This is the place where one should build temporary
+ files.
+
++ ``config.get_distribution()`` --- return distutils ``Distribution``
+ instance.
+
++ ``config.get_config_cmd()`` --- returns ``scipy.distutils`` config
+ command instance.
+
+Template files
+--------------
+
+XXX: Describe how files with extensions ``.f.src``, ``.pyf.src``,
+``.c.src``, etc. are pre-processed by the ``build_src`` command.
+
+Useful functions in ``scipy.distutils.misc_util``
+-------------------------------------------------
+
++ ``get_scipy_include_dirs()`` --- return a list of Scipy base
+ include directories. Scipy base include directories contain
+ header files such as ``scipy/arrayobject.h``, ``scipy/funcobject.h``
+ etc. For installed Scipy core the returned list has length 1
+ but when building Scipy core the list may contain more directories,
+ for example, a path to ``config.h`` file that
+ ``scipy/base/setup.py`` file generates and is used by ``scipy``
+ header files.
+
++ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``.
+
++ ``def get_cmd(cmdname,_cache={})`` --- returns ``scipy.distutils``
+ command instance.
+
++ ``all_strings(lst)``
+
++ ``has_f_sources(sources)``
+
++ ``has_cxx_sources(sources)``
+
++ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources,
+ f_sources, fmodule_sources``
+
++ ``get_dependencies(sources)``
+
++ ``is_local_src_dir(directory)``
+
++ ``get_ext_source_files(ext)``
+
++ ``get_script_files(scripts)``
+
++ ``get_lib_source_files(lib)``
+
++ ``get_data_files(data)``
+
++ ``dot_join(*args)``
+
++ ``get_frame(level=0)``
+
++ ``cyg2win32(path)``
+
++ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``,
+ ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)``
+
++ ``get_path(mod_name,parent_path=None)``
+
++ ``allpath(name)``
+
++ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``,
+ ``f90_module_name_match``
+
+``scipy.distutils.system_info`` module
+--------------------------------------
+
++ ``get_info(name,notfound_action=0)``
++ ``combine_paths(*args,**kws)``
++ ``show_all()``
+
+``scipy.distutils.cpuinfo`` module
+----------------------------------
+
++ ``cpuinfo``
+
+``scipy.distutils.log`` module
+------------------------------
+
++ ``set_verbosity(v)``
+
+
+``scipy.distutils.exec_command`` module
+---------------------------------------
+
++ ``get_pythonexe()``
++ ``splitcmdline(line)``
++ ``find_executable(exe, path=None)``
++ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )``
+
+The ``info.py`` file
+''''''''''''''''''''
+
+Scipy package import hooks assume that each Scipy package contains
+``info.py`` file that contains overall documentation about the package
+and some variables defining the order of package imports, dependence
+relations between packages, etc.
+
+The following information will be looked in the ``info.py`` file:
+
+__doc__
+ The documentation string of the package.
+
+__doc_title__
+ The title of the package. If not defined then the first non-empty
+ line of ``__doc__`` will be used.
+
+__all__
+ List of symbols that package exports. Optional.
+
+global_symbols
+ List of names that should be imported to scipy name space. To import
+ all symbols to ``scipy`` namespace, define ``global_symbols=['*']``.
+
+depends
+ List of names that the package depends on. Prefix ``scipy.``
+ will be automatically added to package names. For example,
+ use ``testing`` to indicate dependence on ``scipy.testing``
+ package. Default value is ``[]``.
+
+postpone_import
+ Boolean variable indicating that importing the package should be
+ postponed until the first attempt of its usage. Default value is ``False``.
+  Deprecated.
+
+The ``__init__.py`` file
+''''''''''''''''''''''''
+
+To speed up the import time as well as to minimize memory usage, scipy
+uses ppimport hooks to transparently postpone importing large modules
+that might not be used during the Scipy usage session. But in order to
+have an access to the documentation of all Scipy packages, including
+of the postponed packages, the documentation string of a package (that would
+usually reside in ``__init__.py`` file) should be copied also
+to ``info.py`` file.
+
+So, the header of a typical ``__init__.py`` file is::
+
+ #
+ # Package ... - ...
+ #
+
+ from info import __doc__
+ ...
+
+ from scipy.testing import ScipyTest
+ test = ScipyTest().test
+
+The ``tests/`` directory
+''''''''''''''''''''''''
+
+Ideally, every Python code, extension module, or subpackage in Scipy
+package directory should have the corresponding ``test_<name>.py``
+file in ``tests/`` directory. This file should define classes
+derived from ``ScipyTestCase`` (or from ``unittest.TestCase``) class
+and have names starting with ``test``. The methods of these classes
+whose names start with ``bench``, ``check``, or ``test``, are passed
+on to unittest machinery. In addition, the value of the first optional
+argument of these methods determine the level of the corresponding
+test. Default level is 1.
+
+A minimal example of a ``test_yyy.py`` file that implements tests for
+a Scipy package module ``scipy.xxx.yyy`` containing a function
+``zzz()``, is shown below::
+
+ import sys
+ from scipy.testing import *
+
+ set_package_path()
+ # import xxx symbols
+ from xxx.yyy import zzz
+ restore_path()
+
+ #Optional:
+ set_local_path()
+ # import modules that are located in the same directory as this file.
+ restore_path()
+
+ class test_zzz(ScipyTestCase):
+ def check_simple(self, level=1):
+ assert zzz()=='Hello from zzz'
+ #...
+
+ if __name__ == "__main__":
+ ScipyTest().run()
+
+``ScipyTestCase`` is derived from ``unittest.TestCase`` and it
+basically only implements an additional method ``measure(self,
+code_str, times=1)``.
+
+``scipy.testing`` module provides also the following convenience
+functions::
+
+ assert_equal(actual,desired,err_msg='',verbose=1)
+ assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=1)
+ assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=1)
+ assert_array_equal(x,y,err_msg='')
+ assert_array_almost_equal(x,y,decimal=6,err_msg='')
+ rand(*shape) # returns random array with a given shape
+
+``ScipyTest`` can be used for running ``tests/test_*.py`` scripts.
+For instance, to run all test scripts of the module ``xxx``, execute
+in Python:
+
+ >>> ScipyTest('xxx').test(level=1,verbosity=1)
+
+or equivalently,
+
+ >>> import xxx
+ >>> ScipyTest(xxx).test(level=1,verbosity=1)
+
+To run only tests for ``xxx.yyy`` module, execute:
+
+ >>> ScipyTest('xxx.yyy').test(level=1,verbosity=1)
+
+To take the level and verbosity parameters for tests from
+``sys.argv``, use ``ScipyTest.run()`` method (this is supported only
+when ``optparse`` is installed).
diff --git a/numpy/doc/README.txt b/numpy/doc/README.txt
new file mode 100644
index 000000000..f63508669
--- /dev/null
+++ b/numpy/doc/README.txt
@@ -0,0 +1,15 @@
+Very complete documentation is available from the primary developer of
+SciPy Core for a small fee. After a brief period, that documentation
+will become freely available. See http://www.trelgol.com for
+details. The fee and restriction period is intended to allow people
+and to encourage companies to easily contribute to the development of
+SciPy.
+
+This directory will contain all public documentation that becomes available.
+
+Very good documentation is also available using Python's (and
+especially IPython's) own help system. Most of the functions have
+docstrings that provide usage assistance.
+
+
+
diff --git a/numpy/doc/records.txt b/numpy/doc/records.txt
new file mode 100644
index 000000000..faa0bfe3b
--- /dev/null
+++ b/numpy/doc/records.txt
@@ -0,0 +1,86 @@
+
+The ndarray supports records intrinsically.
+None of the default descriptors have fields defined, but you can create new
+descriptors easily. The ndarray even supports nested arrays of records inside
+of a record. Any record that the array protocol can describe can be represented.
+The ndarray also supports partial field descriptors. Not every byte has to be
+accounted for.
+
+This was done by adding to the established PyArray_Descr * structure:
+
+1) a PyObject *fields member which contains a dictionary of
+"field name" : (PyArray_Descr *field-type, offset, [optional field title]).
+If a title is given, then it is also inserted into the dictionary and used to
+key the same entry.
+
+2) A byteorder member. By default this is '=' (native), or '|' (not-applicable).
+
+3) An additional PyArray_ArrayDescr *member of the structure which
+contains a simple representation of an array of another base-type.
+The PyArray_ArrayDescr structure has members PyArray_Descr *,
+PyObject *, for holding a reference to the base-type and the shape of
+the sub-array.
+
+4) The PyArray_Descr * as official Python object that fully describes
+a region of memory for the data
+
+
+Data type conversions: We can support additional data-type
+conversions. The data-type passed in is converted to a PyArray_Descr*
+object.
+
+New possibilities for the "data-type"
+
+List [data-type 1, data-type 2, ..., data-type n]
+===============================
+Equivalent to {'names':['f1','f2',...,'fn'],
+ 'formats': [data-type 1, data-type 2, ..., data-type n]}
+
+This is a quick way to specify a record format with default field names.
+
+
+Tuple (flexible type, itemsize)
+ (fixed type, shape)
+===============================
+
+Get converted to a new PyArray_Descr * object with a flexible
+type. The latter structure also sets the PyArray_ArrayDescr field of the
+returned PyArray_Descr *.
+
+
+Dictionary (keys "names", "titles", and "formats")
+===============================
+
+This will be converted to a PyArray_VOID type with corresponding
+fields parameter (the formats list will be converted to actual
+PyArray_Descr * objects).
+
+
+Objects (anything with an .itemsize and .fields attribute)
+===============================
+
+If it is an instance of (a sub-class of) void type, then a new
+PyArray_Descr* structure is created corresponding to its typeobject
+(and PyArray_VOID) typenumber. If the type is registered, then the
+registered type-number is used.
+
+Otherwise, a new PyArray_VOID PyArray_Descr* structure is created
+with ->elsize and ->fields filled in appropriately.
+
+The itemsize attribute must return a number > 0
+The fields attribute must return a dictionary with at least
+"names" and "formats" entries. The "formats" entry will be
+converted to a "proper" descr->fields entry (all generic data-types
+converted to PyArray_Descr * structure).
+
+
+Reference counting for PyArray_Descr * objects.
+
+Most functions that take PyArray_Descr * as arguments and return a PyObject *
+steal the reference unless otherwise noted in the code:
+
+Functions that return PyArray_Descr * objects return a new reference.
+
+There is a new function and a new method of array objects both labelled
+dtypescr which can be used to try out the PyArray_DescrConverter.
+
diff --git a/numpy/doc/ufuncs.txt b/numpy/doc/ufuncs.txt
new file mode 100644
index 000000000..2d4fa5048
--- /dev/null
+++ b/numpy/doc/ufuncs.txt
@@ -0,0 +1,98 @@
+
+BUFFERED General Ufunc explanation:
+
+We need to optimize the section of ufunc code that handles mixed-type
+and misbehaved arrays. In particular, we need to fix it so that items
+are not copied into the buffer if they don't have to be.
+
+Right now, all data is copied into the buffers (even scalars are copied
+multiple times into the buffers even if they are not going to be cast).
+
+Some benchmarks show that this results in a significant slow-down
+(factor of 4) over similar numarray code.
+
+The approach is therefore, to loop over the largest-dimension (just like
+the NO_BUFFER) portion of the code. All arrays will either have N or
+1 in this last dimension (or there would be a mis-match error). The
+buffer size is B.
+
+If N <= B (and only if needed), we copy the entire last-dimension into
+the buffer as fast as possible using the single-stride information.
+
+Also we only copy into output arrays if needed as well (otherwise the
+output arrays are used directly in the ufunc code).
+
+Call the function using the appropriate strides information from all the input
+arrays. Only set the strides to the element-size for arrays that will be copied.
+
+If N > B, then we have to do the above operation in a loop (with an extra loop
+at the end with a different buffer size).
+
+Both of these cases are handled with the following code:
+
+Compute N = quotient * B + remainder.
+ quotient = N / B # integer math
+ (store quotient + 1) as the number of innerloops
+ remainder = N % B # integer remainder
+
+On the inner-dimension we will have (quotient + 1) loops where
+the size of the inner function is B for all but the last when the niter size is
+remainder.
+
+So, the code looks very similar to NOBUFFER_LOOP except the inner loop is
+replaced with...
+
+for(k=0; k<quotient+1; k++) {
+  if (k==quotient) make itersize remainder size
+ copy only needed items to buffer.
+ swap input buffers if needed
+ cast input buffers if needed
+ call function()
+ cast outputs in buffers if needed
+ swap outputs in buffers if needed
+ copy only needed items back to output arrays.
+ update all data-pointers by strides*niter
+}
+
+
+Reference counting for OBJECT arrays:
+
+If there are object arrays involved then loop->obj gets set to 1. Then there are two cases:
+
+1) The loop function is an object loop:
+
+ Inputs:
+ - castbuf starts as NULL and then gets filled with new references.
+ - function gets called and doesn't alter the reference count in castbuf
+ - on the next iteration (next value of k), the casting function will
+ DECREF what is present in castbuf already and place a new object.
+
+ - At the end of the inner loop (for loop over k), the final new-references
+ in castbuf must be DECREF'd. If its a scalar then a single DECREF suffices
+ Otherwise, "bufsize" DECREF's are needed (unless there was only one
+ loop, then "remainder" DECREF's are needed).
+
+ Outputs:
+ - castbuf contains a new reference as the result of the function call. This
+    gets converted to the type of interest. This new reference in castbuf
+ will be DECREF'd by later calls to the function. Thus, only after the
+ inner most loop do we need to DECREF the remaining references in castbuf.
+
+2) The loop function is of a different type:
+
+ Inputs:
+
+ - The PyObject input is copied over to buffer which receives a "borrowed"
+ reference. This reference is then used but not altered by the cast
+ call. Nothing needs to be done.
+
+ Outputs:
+
+ - The buffer[i] memory receives the PyObject input after the cast. This is
+ a new reference which will be "stolen" as it is copied over into memory.
+ The only problem is that what is presently in memory must be DECREF'd first.
+
+
+
+
+ \ No newline at end of file
diff --git a/numpy/f2py/BUGS.txt b/numpy/f2py/BUGS.txt
new file mode 100644
index 000000000..ee08863bb
--- /dev/null
+++ b/numpy/f2py/BUGS.txt
@@ -0,0 +1,55 @@
+December 1, 2002:
+
+C FILE: STRING.F
+ SUBROUTINE FOO
+ END
+C END OF FILE STRING.F
+does not build with
+ f2py -c -m string string.f
+Cause: string is mapped to string_bn
+**************************************************************************
+August 16, 2001:
+1) re in Python 2.x is **three** times slower than the re in Python 1.5.
+**************************************************************************
+HP-UX B.10.20 A 9000/780:
+Fortran function returning character*(*) (id=7) ... failed(core dump)
+Fortran function returning logical*8 (id=21) ... expected .true. but got 0
+Callback function returning real (id=45) ... expected 34.56 but got 14087495680.0
+Callback function returning real*4 (id=46) ... expected 34.56 but got 14087495680.0
+Callback function returning logical*8 (id=55) ... expected .true. but got 0
+ C compiler: gcc ('gcc 2.x.x' 2.95.2) (from .f2py_get_compiler_CC)
+ Fortran compiler: g77 ('g77 2.x.x' 2.95.2) (from .f2py_get_compiler_FC)
+ Linker: ld ('HP-UX ld' 92453-07 linker linker ld B.10.24 961204) (from .f2py_get_compiler_LD)
+**************************************************************************
+Linux 2.2.13-0.9 #1 Thu Dec 9 17:03:57 EST 1999 alpha unknown:
+Fortran function returning character*(*) (id=7) ... expected 'abcdefgh' but got 'abcdefgh \201' (o?k)
+Callback function returning complex (id=48) ... failed(core dump)
+ Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump)
+Callback function returning complex*8 (id=49) ... failed(core dump)
+ Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump)
+Callback function returning complex*16 (id=50) ... failed(core dump)
+ Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump)
+ C compiler: cc ('Compaq C' V6.2-002) (from .f2py_get_compiler_CC)
+ Fortran compiler: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_FC)
+ Linker: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_LD)
+**************************************************************************
+Linux 2.2.14-15mdk #1 Tue Jan 4 22:24:20 CET 2000 i686 unknown:
+Callback function returning logical*8 (id=55) ... failed
+ C compiler: cc ('gcc 2.x.x' 2.95.2)
+ Fortran compiler: f90 ('Absoft F90' 3.0)
+ Linker: ld ('GNU ld' 2.9.5)
+**************************************************************************
+IRIX64 6.5 04151556 IP30:
+Testing integer, intent(inout) ...failed # not f2py problem
+Testing integer, intent(inout,out) ...failed
+Testing integer*1, intent(inout) ...failed
+Testing integer*1, intent(inout,out) ...failed
+Testing integer*8, intent(inout) ...failed
+Testing integer*8, intent(inout,out) ...failed
+cc-1140 cc: WARNING File = genmodule.c, Line = 114
+ A value of type "void *" cannot be used to initialize an entity of type
+ "void (*)()".
+ {"foo",-1,{-1},0,(char *)F_FUNC(foo,FOO),(void *)gen_foo,doc_gen_foo},
+ C compiler: cc ('MIPSpro 7 Compilers' 7.30)
+ Fortran compiler: f77 ('MIPSpro 7 Compilers' 7.30)
+ Linker: ld ('Linker for MIPSpro 7 Compilers' 7.30.)
diff --git a/numpy/f2py/Makefile b/numpy/f2py/Makefile
new file mode 100644
index 000000000..91c199f99
--- /dev/null
+++ b/numpy/f2py/Makefile
@@ -0,0 +1,173 @@
+# Makefile for f2py2e
+#
+# Use GNU make for making.
+# $Revision: 1.46 $
+# $Date: 2005/01/30 17:22:55 $
+# Pearu Peterson <pearu@ioc.ee>
+
+PYTHON=python
+MAJOR=2
+F2PY2E_CVSROOT=:pserver:anonymous@cens.ioc.ee:/home/cvs
+SCIPY_CVSROOT=:pserver:anonymous@scipy.org:/home/cvsroot
+
+UPLOADCMD = scp -r
+UPLOADDIR = pearu@kev.ioc.ee:/net/cens/home/www/unsecure/projects/f2py2e/
+
+REV=`python -c 'from __version__ import *;print version'`
+SCIPY_DISTUTILS_REV=`cd scipy_distutils && $(PYTHON) -c 'from scipy_distutils_version import *;print scipy_distutils_version' && cd ..`
+
+SRC_FILES = F2PY-$(MAJOR)-latest.tar.gz scipy_distutils-latest.tar.gz F2PY-$(MAJOR)-latest.win32.exe scipy_distutils-latest.win32.exe
+
+HTML_FILES = index.html FAQ.html HISTORY.html THANKS.html TESTING.html OLDNEWS.html
+FAQ_DEPS = simple.f pytest.py pyforttest.pyf simple_session.dat
+README_DEPS = hello.f
+UG_FILES = index.html f2py_usersguide.pdf
+UG_FILES_DEP = $(shell cd docs/usersguide && ls *.{f,f90,dat,pyf,py})
+
+WWW_SRC_FILES = $(SRC_FILES:%=upload/www/$(MAJOR).x/%)
+WWW_WEB_FILES = $(HTML_FILES:%=upload/www/%) $(README_DEPS:%=upload/www/%)
+WWW_UG_FILES = $(UG_FILES:%=upload/www/usersguide/%) $(UG_FILES_DEP:%=upload/www/usersguide/%)
+
+TMP_WEB_FILES = $(HTML_FILES:%=upload/tmp/%) $(README_DEPS:%=upload/tmp/%)
+
+##############################################################################
+
+all:
+ @echo "Use 'make install' to install f2py"
+ @echo "Use 'make generate' to build f2py docs to upload/tmp"
+install:
+ $(PYTHON) setup.py install
+test:
+ cd tests && $(PYTHON) run_all.py
+
+##############################################################################
+# Create F2PY tar-balls
+##############################################################################
+f2py2e:
+ test -d f2py2e && (cd f2py2e && cvs -d $(F2PY2E_CVSROOT) -z7 update -Pd && cd -) || cvs -d $(F2PY2E_CVSROOT) checkout f2py2e
+
+upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz: f2py2e
+ cd f2py2e && python setup.py sdist -f
+ mkdir -p upload/tmp/$(MAJOR).x
+ cp f2py2e/dist/F2PY-$(REV).tar.gz upload/tmp/$(MAJOR).x
+ ln -sf F2PY-$(REV).tar.gz F2PY-$(MAJOR)-latest.tar.gz
+ mv F2PY-$(MAJOR)-latest.tar.gz upload/tmp/$(MAJOR).x
+upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe: f2py2e
+ cd f2py2e && python setup.py bdist_wininst
+ mkdir -p upload/tmp/$(MAJOR).x
+ cp f2py2e/dist/F2PY-$(REV).win32.exe upload/tmp/$(MAJOR).x
+ ln -sf F2PY-$(REV).win32.exe F2PY-$(MAJOR)-latest.win32.exe
+ mv F2PY-$(MAJOR)-latest.win32.exe upload/tmp/$(MAJOR).x
+f2py2e_latest: upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe
+
+##############################################################################
+# Create Scipy_distutils tar-balls
+##############################################################################
+
+scipy_distutils:
+ test -d scipy_distutils && (cd scipy_distutils && cvs -d $(SCIPY_CVSROOT) -z7 update -Pd && cd -) || cvs -d $(SCIPY_CVSROOT) checkout scipy_distutils
+
+upload/tmp/$(MAJOR).x/scipy_distutils-latest.tar.gz: scipy_distutils
+ cd scipy_distutils && python setup.py sdist -f
+ mkdir -p upload/tmp/$(MAJOR).x
+ cp scipy_distutils/dist/scipy_distutils-$(SCIPY_DISTUTILS_REV).tar.gz upload/tmp/$(MAJOR).x
+ ln -sf scipy_distutils-$(SCIPY_DISTUTILS_REV).tar.gz scipy_distutils-latest.tar.gz
+ mv scipy_distutils-latest.tar.gz upload/tmp/$(MAJOR).x
+upload/tmp/$(MAJOR).x/scipy_distutils-latest.win32.exe: scipy_distutils
+ cd scipy_distutils && python setup.py bdist_wininst
+ mkdir -p upload/tmp/$(MAJOR).x
+ cp scipy_distutils/dist/scipy_distutils-$(SCIPY_DISTUTILS_REV).win32.exe upload/tmp/$(MAJOR).x
+ ln -sf scipy_distutils-$(SCIPY_DISTUTILS_REV).win32.exe scipy_distutils-latest.win32.exe
+ mv scipy_distutils-latest.win32.exe upload/tmp/$(MAJOR).x
+
+scipy_distutils_latest: upload/tmp/$(MAJOR).x/scipy_distutils-latest.tar.gz upload/tmp/$(MAJOR).x/scipy_distutils-latest.win32.exe
+
+latest: f2py2e_latest scipy_distutils_latest
+
+##############################################################################
+# Upload files.
+##############################################################################
+
+upload/www/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz: upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.tar.gz
+ -mkdir -p `dirname $@`
+ cp -P upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.tar.gz upload/www/$(MAJOR).x
+ $(UPLOADCMD) upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.tar.gz $(UPLOADDIR)/$(MAJOR).x/
+upload/www/$(MAJOR).x/scipy_distutils-latest.tar.gz: upload/tmp/$(MAJOR).x/scipy_distutils-latest.tar.gz
+ -mkdir -p `dirname $@`
+ cp -P upload/tmp/$(MAJOR).x/scipy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.tar.gz upload/www/$(MAJOR).x/
+ $(UPLOADCMD) upload/tmp/$(MAJOR).x/scipy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.tar.gz $(UPLOADDIR)/$(MAJOR).x
+upload/www/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe: upload/tmp/$(MAJOR).x/F2PY-$(MAJOR)-latest.win32.exe
+ -mkdir -p `dirname $@`
+ cp -P upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.win32.exe upload/www/$(MAJOR).x
+ $(UPLOADCMD) upload/tmp/$(MAJOR).x/F2PY-{$(MAJOR)-latest,$(REV)}.win32.exe $(UPLOADDIR)/$(MAJOR).x/
+upload/www/$(MAJOR).x/scipy_distutils-latest.win32.exe: upload/tmp/$(MAJOR).x/scipy_distutils-latest.win32.exe
+ -mkdir -p `dirname $@`
+ cp -P upload/tmp/$(MAJOR).x/scipy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.win32.exe upload/www/$(MAJOR).x
+ $(UPLOADCMD) upload/tmp/$(MAJOR).x/scipy_distutils-{latest,$(SCIPY_DISTUTILS_REV)}.win32.exe $(UPLOADDIR)/$(MAJOR).x/
+
+upload/tmp/usersguide/index.html: docs/usersguide/index.txt $(UG_FILES_DEP:%=upload/www/usersguide/%)
+ -mkdir -p upload/tmp/usersguide
+ rest2html $< $@
+upload/tmp/usersguide/f2py_usersguide.tex: docs/usersguide/index.txt $(UG_FILES_DEP:%=upload/www/usersguide/%)
+ -mkdir -p upload/tmp/usersguide
+ rest2latex $< $@
+upload/tmp/usersguide/f2py_usersguide.pdf: upload/tmp/usersguide/f2py_usersguide.tex
+ cd `dirname $@` && pdflatex `basename $<`
+upload/tmp/usersguide/%.f: docs/usersguide/%.f
+ -mkdir -p upload/tmp/usersguide
+ cp $< $@
+upload/tmp/usersguide/%.f90: docs/usersguide/%.f90
+ -mkdir -p upload/tmp/usersguide
+ cp $< $@
+upload/tmp/usersguide/%.dat: docs/usersguide/%.dat
+ -mkdir -p upload/tmp/usersguide
+ cp $< $@
+upload/tmp/usersguide/%.pyf: docs/usersguide/%.pyf
+ -mkdir -p upload/tmp/usersguide
+ cp $< $@
+upload/tmp/usersguide/%.py: docs/usersguide/%.py
+ -mkdir -p upload/tmp/usersguide
+ cp $< $@
+upload/www/usersguide/%: upload/tmp/usersguide/%
+ -mkdir -p `dirname $@`
+ cp -P $< $@
+ $(UPLOADCMD) $@ $(UPLOADDIR)/usersguide
+
+upload/tmp/FAQ.html: docs/FAQ.txt $(FAQ_DEPS:%=docs/%)
+ -mkdir -p upload/tmp
+ rest2html $< $@
+upload/tmp/index.html: docs/README.txt $(README_DEPS:%=docs/%)
+ -mkdir -p upload/tmp
+ rest2html $< $@
+upload/tmp/%.f: docs/%.f
+ -mkdir -p upload/tmp
+ cp $< $@
+upload/tmp/%.html: docs/%.txt
+ -mkdir -p upload/tmp
+ rest2html $< $@
+upload/www/%: upload/tmp/%
+ -mkdir -p `dirname $@`
+ cp -P $< $@
+ $(UPLOADCMD) $@ $(UPLOADDIR)/
+
+upload_web: $(WWW_WEB_FILES)
+upload_ug: $(WWW_UG_FILES)
+upload_src: $(WWW_SRC_FILES)
+upload: upload_src upload_ug upload_web
+
+generate_web: $(TMP_WEB_FILES)
+generate: generate_web
+
+##############################################################################
+# Clean up
+##############################################################################
+clean:
+ rm -f {tests/,tests/{f77,f90,mixed}/,docs/,docs/usersguide/,}*.{o,a,so,sl,pyc}
+ rm -f {tests/,tests/{f77,f90,mixed}/,docs/,docs/usersguide/,}*~
+distclean: clean
+ rm -f {tests/,src/,}*~
+ rm -f tests/*.{f,f90}
+ rm -rf dist {docs/,docs/usersguide/,}build f2py2e scipy_distutils upload
+ rm -f MANIFEST f2py?.? f2py
+
+.PHONY: install test
diff --git a/numpy/f2py/NEWS.txt b/numpy/f2py/NEWS.txt
new file mode 100644
index 000000000..a4a254405
--- /dev/null
+++ b/numpy/f2py/NEWS.txt
@@ -0,0 +1,2 @@
+
+Read docs/HISTORY.txt \ No newline at end of file
diff --git a/numpy/f2py/README.txt b/numpy/f2py/README.txt
new file mode 100644
index 000000000..ebe7e8c88
--- /dev/null
+++ b/numpy/f2py/README.txt
@@ -0,0 +1,5 @@
+======================================================================
+ F2PY - Fortran to Python Interface Generator
+======================================================================
+
+Read docs/README.txt
diff --git a/numpy/f2py/TODO.txt b/numpy/f2py/TODO.txt
new file mode 100644
index 000000000..fd655f2e2
--- /dev/null
+++ b/numpy/f2py/TODO.txt
@@ -0,0 +1,67 @@
+Determine fixed/free format Fortran 90 dialect from the
+contents of Fortran files. See scipy_distutils/command/build_flib.py.
+
+[DONE]
+========================================================================
+Wrapping F90 code as follows:
+
+subroutine foo
+print*,"In foo"
+end subroutine foo
+subroutine bar(func)
+ interface aa ! bug: this interface block is ignored
+ subroutine foo
+ end subroutine foo
+ end interface
+ !external foo
+ external func
+ call func(foo)
+end subroutine bar
+subroutine gun(a)
+ external a
+ call a()
+end subroutine gun
+subroutine fun
+ call bar(gun)
+end subroutine fun
+
+=========================================================================
+Users Guide needs major revision.
+
+[DONE]
+=========================================================================
+On Thu, 27 Sep 2001, José Luis Gómez Dans wrote:
+
+> Hi,
+> just one question: does f2py supporte derived types in F90 code?
+> Stuff like something%or and things like that.
+
+Not yet.
+
+=========================================================================
+Date: Tue, 28 Aug 2001 22:23:04 -0700
+From: Patrick LeGresley <plegresl@ape.stanford.edu>
+To: f2py-users@cens.ioc.ee
+Subject: [f2py] Strange initialization of allocatable arrays
+
+I've noticed an odd behavior when setting an allocatable, multidimensional
+array in a module. If the rank of the array is odd, the initialization is
+fine. However, if the rank is even only the first element of the array is
+set properly. See the attached sample code for example.
+
+=========================================================================
+On Wed, 22 Aug 2001, Patrick LeGresley wrote:
+
+> I've noticed that if a parameter is defined in terms of another parameter,
+> that the parameter is replaced not by a number but by another parameter
+> (try the attached subroutine for example). Is there any way to have f2py
+> automatically recognize the dependencies and generate a signature file
+> without parameter variables ?
+
+It is certainly possible. In fact, f2py has only a basic support for
+PARAMETER statements and it fails in your 'advanced' example to produce a
+robust signature file.
+I am sorry but you have to wait until I'll get back from my travel tour
+(somewhere in the middle of September) and get a chance to work on it.
+
+[DONE]
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
new file mode 100644
index 000000000..5c2c3e927
--- /dev/null
+++ b/numpy/f2py/__init__.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+__all__ = ['run_main','compile','f2py_testing']
+
+import os
+import tempfile
+import sys
+import commands
+
+import f2py2e
+run_main = f2py2e.run_main
+main = f2py2e.main
+import f2py_testing
+
+def compile(source,
+ modulename = 'untitled',
+ extra_args = '',
+ verbose = 1,
+ source_fn = None
+ ):
+ ''' Build extension module from processing source with f2py.
+ Read the source of this function for more information.
+ '''
+ from scipy.distutils.exec_command import exec_command
+ if source_fn is None:
+ fname = os.path.join(tempfile.mktemp()+'.f')
+ else:
+ fname = source_fn
+
+ f = open(fname,'w')
+ f.write(source)
+ f.close()
+
+ args = ' -c -m %s %s %s'%(modulename,fname,extra_args)
+ c = '%s -c "import scipy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable,args)
+ s,o = exec_command(c)
+ if source_fn is None:
+ try: os.remove(fname)
+ except OSError: pass
+ return s
diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py
new file mode 100644
index 000000000..11fb7b3d2
--- /dev/null
+++ b/numpy/f2py/__version__.py
@@ -0,0 +1,9 @@
+major = 2
+
+try:
+ from __svn_version__ import version
+ version_info = (major,version)
+ version = '%s_%s' % version_info
+except Exception,msg:
+ print msg
+ version = '%s_?' % (major)
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
new file mode 100644
index 000000000..51eae0517
--- /dev/null
+++ b/numpy/f2py/auxfuncs.py
@@ -0,0 +1,489 @@
+#!/usr/bin/env python
+"""
+
+Auxiliary functions for f2py2e.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/07/24 19:01:55 $
+Pearu Peterson
+"""
+__version__ = "$Revision: 1.65 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+import pprint
+import sys,string,time,types,os
+import cfuncs
+
+
+errmess=sys.stderr.write
+#outmess=sys.stdout.write
+show=pprint.pprint
+
+options={}
+debugoptions=[]
+wrapfuncs = 1
+
+def outmess(t):
+ if options.get('verbose',1):
+ sys.stdout.write(t)
+
+def debugcapi(var): return 'capi' in debugoptions
+def _isstring(var):
+ return var.has_key('typespec') and var['typespec']=='character' and (not isexternal(var))
+def isstring(var):
+ return _isstring(var) and not isarray(var)
+def ischaracter(var):
+ return isstring(var) and not (var.has_key('charselector'))
+def isstringarray(var):
+ return isarray(var) and _isstring(var)
+def isarrayofstrings(var):
+ # leaving out '*' for now so that
+ # `character*(*) a(m)` and `character a(m,*)`
+ # are treated differently. Luckily `character**` is illegal.
+ return isstringarray(var) and var['dimension'][-1]=='(*)'
+def isarray(var): return var.has_key('dimension') and (not isexternal(var))
+def isscalar(var): return not (isarray(var) or isstring(var) or isexternal(var))
+def iscomplex(var):
+ return isscalar(var) and var.get('typespec') in ['complex','double complex']
+def islogical(var):
+ return isscalar(var) and var.get('typespec')=='logical'
+def isinteger(var):
+ return isscalar(var) and var.get('typespec')=='integer'
+def isreal(var):
+ return isscalar(var) and var.get('typespec')=='real'
+def get_kind(var):
+ try: return var['kindselector']['*']
+ except KeyError:
+ try: return var['kindselector']['kind']
+ except KeyError: pass
+def islong_long(var):
+ if not isscalar(var): return 0
+ if var.get('typespec') not in ['integer','logical']: return 0
+ return get_kind(var)=='8'
+def isunsigned_char(var):
+ if not isscalar(var): return 0
+ if var.get('typespec') != 'integer': return 0
+ return get_kind(var)=='-1'
+def isunsigned_short(var):
+ if not isscalar(var): return 0
+ if var.get('typespec') != 'integer': return 0
+ return get_kind(var)=='-2'
+def isunsigned(var):
+ if not isscalar(var): return 0
+ if var.get('typespec') != 'integer': return 0
+ return get_kind(var)=='-4'
+def isunsigned_long_long(var):
+ if not isscalar(var): return 0
+ if var.get('typespec') != 'integer': return 0
+ return get_kind(var)=='-8'
+def isdouble(var):
+ if not isscalar(var): return 0
+ if not var.get('typespec')=='real': return 0
+ return get_kind(var)=='8'
+def islong_double(var):
+ if not isscalar(var): return 0
+ if not var.get('typespec')=='real': return 0
+ return get_kind(var)=='16'
+def islong_complex(var):
+ if not iscomplex(var): return 0
+ return get_kind(var)=='32'
+
+def iscomplexarray(var): return isarray(var) and var.get('typespec') in ['complex','double complex']
+def isint1array(var): return isarray(var) and var.get('typespec')=='integer' \
+ and get_kind(var)=='1'
+def isunsigned_chararray(var): return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-1'
+def isunsigned_shortarray(var): return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-2'
+def isunsignedarray(var): return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-4'
+def isunsigned_long_longarray(var): return isarray(var) and var.get('typespec')=='integer' and get_kind(var)=='-8'
+def isallocatable(var):
+ return var.has_key('attrspec') and 'allocatable' in var['attrspec']
+def ismutable(var): return not (not var.has_key('dimension') or isstring(var))
+def ismoduleroutine(rout): return rout.has_key('modulename')
+def ismodule(rout): return (rout.has_key('block') and 'module'==rout['block'])
+def isfunction(rout): return (rout.has_key('block') and 'function'==rout['block'])
+#def isfunction_wrap(rout):
+# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout))
+def isfunction_wrap(rout):
+ if isintent_c(rout): return 0
+ return wrapfuncs and isfunction(rout) and (not isexternal(rout))
+def issubroutine(rout): return (rout.has_key('block') and 'subroutine'==rout['block'])
+def isroutine(rout): return isfunction(rout) or issubroutine(rout)
+def islogicalfunction(rout):
+ if not isfunction(rout): return 0
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if rout['vars'].has_key(a): return islogical(rout['vars'][a])
+ return 0
+def islong_longfunction(rout):
+ if not isfunction(rout): return 0
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if rout['vars'].has_key(a): return islong_long(rout['vars'][a])
+ return 0
+def islong_doublefunction(rout):
+ if not isfunction(rout): return 0
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if rout['vars'].has_key(a): return islong_double(rout['vars'][a])
+ return 0
+def iscomplexfunction(rout):
+ if not isfunction(rout): return 0
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if rout['vars'].has_key(a): return iscomplex(rout['vars'][a])
+ return 0
+def iscomplexfunction_warn(rout):
+ if iscomplexfunction(rout):
+ outmess("""\
+ **************************************************************
+ Warning: code with a function returning complex value
+ may not work correctly with your Fortran compiler.
+ Run the following test before using it in your applications:
+ $(f2py install dir)/test-site/{b/runme_scalar,e/runme}
+ When using GNU gcc/g77 compilers, codes should work correctly.
+ **************************************************************\n""")
+ return 1
+ return 0
+def isstringfunction(rout):
+ if not isfunction(rout): return 0
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if rout['vars'].has_key(a): return isstring(rout['vars'][a])
+ return 0
+def hasexternals(rout): return rout.has_key('externals') and rout['externals']
+def isthreadsafe(rout): return rout.has_key('f2pyenhancements') and rout['f2pyenhancements'].has_key('threadsafe')
+def hasvariables(rout): return rout.has_key('vars') and rout['vars']
+def isoptional(var): return (var.has_key('attrspec') and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var)
+def isexternal(var): return (var.has_key('attrspec') and 'external' in var['attrspec'])
+def isrequired(var): return not isoptional(var) and isintent_nothide(var)
+def isintent_in(var):
+ if not var.has_key('intent'): return 1
+ if 'hide' in var['intent']: return 0
+ if 'inplace' in var['intent']: return 0
+ if 'in' in var['intent']: return 1
+ if 'out' in var['intent']: return 0
+ if 'inout' in var['intent']: return 0
+ if 'outin' in var['intent']: return 0
+ return 1
+def isintent_inout(var): return var.has_key('intent') and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent']
+def isintent_out(var):
+ return 'out' in var.get('intent',[])
+def isintent_hide(var): return (var.has_key('intent') and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var)))))
+def isintent_nothide(var): return not isintent_hide(var)
+def isintent_c(var):
+ return 'c' in var.get('intent',[])
+# def isintent_f(var):
+# return not isintent_c(var)
+def isintent_cache(var):
+ return 'cache' in var.get('intent',[])
+def isintent_copy(var):
+ return 'copy' in var.get('intent',[])
+def isintent_overwrite(var):
+ return 'overwrite' in var.get('intent',[])
+def isintent_callback(var):
+ return 'callback' in var.get('intent',[])
+def isintent_inplace(var):
+ return 'inplace' in var.get('intent',[])
+def isintent_aux(var):
+ return 'aux' in var.get('intent',[])
+
+isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT',
+ isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE',
+ isintent_cache:'INTENT_CACHE',
+ isintent_c:'INTENT_C',isoptional:'OPTIONAL',
+ isintent_inplace:'INTENT_INPLACE'
+ }
+
+def isprivate(var):
+ return var.has_key('attrspec') and 'private' in var['attrspec']
+
+def hasinitvalue(var): return var.has_key('=')
+def hasinitvalueasstring(var):
+ if not hasinitvalue(var): return 0
+ return var['='][0] in ['"',"'"]
+def hasnote(var):
+ return var.has_key('note')
+def hasresultnote(rout):
+ if not isfunction(rout): return 0
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if rout['vars'].has_key(a): return hasnote(rout['vars'][a])
+ return 0
+def hascommon(rout):
+ return rout.has_key('common')
+def containscommon(rout):
+ if hascommon(rout): return 1
+ if hasbody(rout):
+ for b in rout['body']:
+ if containscommon(b): return 1
+ return 0
+def containsmodule(block):
+ if ismodule(block): return 1
+ if not hasbody(block): return 0
+ ret = []
+ for b in block['body']:
+ if containsmodule(b): return 1
+ return 0
+def hasbody(rout):
+ return rout.has_key('body')
+def hascallstatement(rout):
+ return getcallstatement(rout) is not None
+
+def istrue(var): return 1
+def isfalse(var): return 0
+
+class F2PYError(Exception):
+ pass
+
+class throw_error:
+ def __init__(self,mess):
+ self.mess = mess
+ def __call__(self,var):
+ mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess)
+ raise F2PYError,mess
+
+def l_and(*f):
+ l,l2='lambda v',[]
+ for i in range(len(f)):
+ l='%s,f%d=f[%d]'%(l,i,i)
+ l2.append('f%d(v)'%(i))
+ return eval('%s:%s'%(l,string.join(l2,' and ')))
+def l_or(*f):
+ l,l2='lambda v',[]
+ for i in range(len(f)):
+ l='%s,f%d=f[%d]'%(l,i,i)
+ l2.append('f%d(v)'%(i))
+ return eval('%s:%s'%(l,string.join(l2,' or ')))
+def l_not(f):
+ return eval('lambda v,f=f:not f(v)')
+
+def isdummyroutine(rout):
+ try:
+ return rout['f2pyenhancements']['fortranname']==''
+ except KeyError:
+ return 0
+
+def getfortranname(rout):
+ try:
+ name = rout['f2pyenhancements']['fortranname']
+ if name=='':
+ raise KeyError
+ if not name:
+ errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements']))
+ raise KeyError
+ except KeyError:
+ name = rout['name']
+ return name
+
+def getmultilineblock(rout,blockname,comment=1,counter=0):
+ try:
+ r = rout['f2pyenhancements'].get(blockname)
+ except KeyError:
+ return
+ if not r: return
+ if counter>0 and type(r) is type(''):
+ return
+ if type(r) is type([]):
+ if counter>=len(r): return
+ r = r[counter]
+ if r[:3]=="'''":
+ if comment:
+ r = '\t/* start ' + blockname + ' multiline ('+`counter`+') */\n' + r[3:]
+ else:
+ r = r[3:]
+ if r[-3:]=="'''":
+ if comment:
+ r = r[:-3] + '\n\t/* end multiline ('+`counter`+')*/'
+ else:
+ r = r[:-3]
+ else:
+ errmess("%s multiline block should end with `'''`: %s\n" \
+ % (blockname,repr(r)))
+ return r
+
+def getcallstatement(rout):
+ return getmultilineblock(rout,'callstatement')
+
+def getcallprotoargument(rout,cb_map={}):
+ r = getmultilineblock(rout,'callprotoargument',comment=0)
+ if r: return r
+ if hascallstatement(rout):
+ outmess('warning: callstatement is defined without callprotoargument\n')
+ return
+ from capi_maps import getctype
+ arg_types,arg_types2 = [],[]
+ if l_and(isstringfunction,l_not(isfunction_wrap))(rout):
+ arg_types.extend(['char*','size_t'])
+ for n in rout['args']:
+ var = rout['vars'][n]
+ if isintent_callback(var):
+ continue
+ if cb_map.has_key(n):
+ ctype = cb_map[n]+'_typedef'
+ else:
+ ctype = getctype(var)
+ if l_and(isintent_c,l_or(isscalar,iscomplex))(var):
+ pass
+ elif isstring(var):
+ pass
+ #ctype = 'void*'
+ else:
+ ctype = ctype+'*'
+ if isstring(var) or isarrayofstrings(var):
+ arg_types2.append('size_t')
+ arg_types.append(ctype)
+
+ proto_args = string.join(arg_types+arg_types2,',')
+ if not proto_args:
+ proto_args = 'void'
+ #print proto_args
+ return proto_args
+
+def getusercode(rout):
+ return getmultilineblock(rout,'usercode')
+def getusercode1(rout):
+ return getmultilineblock(rout,'usercode',counter=1)
+
+def getpymethoddef(rout):
+ return getmultilineblock(rout,'pymethoddef')
+
+def getargs(rout):
+ sortargs,args=[],[]
+ if rout.has_key('args'):
+ args=rout['args']
+ if rout.has_key('sortvars'):
+ for a in rout['sortvars']:
+ if a in args: sortargs.append(a)
+ for a in args:
+ if a not in sortargs:
+ sortargs.append(a)
+ else: sortargs=rout['args']
+ return args,sortargs
+
+def getargs2(rout):
+ sortargs,args=[],rout.get('args',[])
+ auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\
+ and a not in args]
+ args = auxvars + args
+ if rout.has_key('sortvars'):
+ for a in rout['sortvars']:
+ if a in args: sortargs.append(a)
+ for a in args:
+ if a not in sortargs:
+ sortargs.append(a)
+ else: sortargs=auxvars + rout['args']
+ return args,sortargs
+
+def getrestdoc(rout):
+ if not rout.has_key('f2pymultilines'):
+ return None
+ k = None
+ if rout['block']=='python module':
+ k = rout['block'],rout['name']
+ return rout['f2pymultilines'].get(k,None)
+
+def gentitle(name):
+ l=(80-len(name)-6)/2
+ return '/*%s %s %s*/'%(l*'*',name,l*'*')
+def flatlist(l):
+ if type(l)==types.ListType:
+ return reduce(lambda x,y,f=flatlist:x+f(y),l,[])
+ return [l]
+def stripcomma(s):
+ if s and s[-1]==',': return s[:-1]
+ return s
+def replace(str,dict,defaultsep=''):
+ if type(dict)==types.ListType:
+ return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),dict)
+ if type(str)==types.ListType:
+ return map(lambda s,f=replace,sep=defaultsep,d=dict:f(s,d,sep),str)
+ for k in 2*dict.keys():
+ if k=='separatorsfor': continue
+ if dict.has_key('separatorsfor') and dict['separatorsfor'].has_key(k):
+ sep=dict['separatorsfor'][k]
+ else:
+ sep=defaultsep
+ if type(dict[k])==types.ListType:
+ str=string.replace(str,'#%s#'%(k),string.join(flatlist(dict[k]),sep))
+ else:
+ str=string.replace(str,'#%s#'%(k),dict[k])
+ return str
+
+def dictappend(rd,ar):
+ if type(ar)==types.ListType:
+ for a in ar: rd=dictappend(rd,a)
+ return rd
+ for k in ar.keys():
+ if k[0]=='_': continue
+ if rd.has_key(k):
+ if type(rd[k])==types.StringType: rd[k]=[rd[k]]
+ if type(rd[k])==types.ListType:
+ if type(ar[k])==types.ListType: rd[k]=rd[k]+ar[k]
+ else: rd[k].append(ar[k])
+ elif type(rd[k])==types.DictType:
+ if type(ar[k])==types.DictType:
+ if k=='separatorsfor':
+ for k1 in ar[k].keys():
+ if not rd[k].has_key(k1): rd[k][k1]=ar[k][k1]
+ else: rd[k]=dictappend(rd[k],ar[k])
+ else: rd[k]=ar[k]
+ return rd
+
+def applyrules(rules,dict,var={}):
+ ret={}
+ if type(rules)==types.ListType:
+ for r in rules:
+ rr=applyrules(r,dict,var)
+ ret=dictappend(ret,rr)
+ if rr.has_key('_break'): break
+ return ret
+ if rules.has_key('_check') and (not rules['_check'](var)): return ret
+ if rules.has_key('need'):
+ res = applyrules({'needs':rules['need']},dict,var)
+ if res.has_key('needs'):
+ cfuncs.append_needs(res['needs'])
+
+ for k in rules.keys():
+ if k=='separatorsfor': ret[k]=rules[k]; continue
+ if type(rules[k])==types.StringType:
+ ret[k]=replace(rules[k],dict)
+ elif type(rules[k])==types.ListType:
+ ret[k]=[]
+ for i in rules[k]:
+ ar=applyrules({k:i},dict,var)
+ if ar.has_key(k): ret[k].append(ar[k])
+ elif k[0]=='_':
+ continue
+ elif type(rules[k])==types.DictType:
+ ret[k]=[]
+ for k1 in rules[k].keys():
+ if type(k1)==types.FunctionType and k1(var):
+ if type(rules[k][k1])==types.ListType:
+ for i in rules[k][k1]:
+ if type(i)==types.DictType:
+ res=applyrules({'supertext':i},dict,var)
+ if res.has_key('supertext'): i=res['supertext']
+ else: i=''
+ ret[k].append(replace(i,dict))
+ else:
+ i=rules[k][k1]
+ if type(i)==types.DictType:
+ res=applyrules({'supertext':i},dict)
+ if res.has_key('supertext'): i=res['supertext']
+ else: i=''
+ ret[k].append(replace(i,dict))
+ else:
+ errmess('applyrules: ignoring rule %s.\n'%`rules[k]`)
+ if type(ret[k])==types.ListType:
+ if len(ret[k])==1: ret[k]=ret[k][0]
+ if ret[k]==[]: del ret[k]
+ return ret
+
+
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
new file mode 100644
index 000000000..e4a542eaa
--- /dev/null
+++ b/numpy/f2py/capi_maps.py
@@ -0,0 +1,723 @@
+#!/usr/bin/env python
+"""
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 10:57:33 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.60 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+import string,copy,re,os
+from auxfuncs import *
+from crackfortran import markoutercomma
+import cb_rules
+
+# Numarray and Numeric users should set this to False
+using_newcore = True
+
+depargs=[]
+lcb_map={}
+lcb2_map={}
+# forced casting: mainly caused by the fact that Python or Numeric
+# C/APIs do not support the corresponding C types.
+c2py_map={'double':'float',
+ 'float':'float', # forced casting
+ 'long_double':'float', # forced casting
+ 'char':'int', # forced casting
+ 'signed_char':'int', # forced casting
+ 'unsigned_char':'int', # forced casting
+ 'short':'int', # forced casting
+ 'unsigned_short':'int', # forced casting
+ 'int':'int', # (forced casting)
+ 'long':'int',
+ 'long_long':'long',
+ 'unsigned':'int', # forced casting
+ 'complex_float':'complex', # forced casting
+ 'complex_double':'complex',
+ 'complex_long_double':'complex', # forced casting
+ 'string':'string',
+ }
+c2capi_map={'double':'PyArray_DOUBLE',
+ 'float':'PyArray_FLOAT',
+ 'long_double':'PyArray_DOUBLE', # forced casting
+ 'char':'PyArray_CHAR',
+ 'unsigned_char':'PyArray_UBYTE',
+ 'signed_char':'PyArray_SBYTE',
+ 'short':'PyArray_SHORT',
+ 'unsigned_short':'PyArray_USHORT',
+ 'int':'PyArray_INT',
+ 'unsigned':'PyArray_UINT',
+ 'long':'PyArray_LONG',
+ 'long_long':'PyArray_LONG', # forced casting
+ 'complex_float':'PyArray_CFLOAT',
+ 'complex_double':'PyArray_CDOUBLE',
+ 'complex_long_double':'PyArray_CDOUBLE', # forced casting
+ 'string':'PyArray_CHAR'}
+
+#These new maps aren't used anywhere yet, but should be by default
+# unless building numeric or numarray extensions.
+if using_newcore:
+ c2capi_map={'double':'PyArray_DOUBLE',
+ 'float':'PyArray_FLOAT',
+ 'long_double':'PyArray_LONGDOUBLE',
+ 'char':'PyArray_BYTE',
+ 'unsigned_char':'PyArray_UBYTE',
+ 'signed_char':'PyArray_BYTE',
+ 'short':'PyArray_SHORT',
+ 'unsigned_short':'PyArray_USHORT',
+ 'int':'PyArray_INT',
+ 'unsigned':'PyArray_UINT',
+ 'long':'PyArray_LONG',
+ 'unsigned_long':'PyArray_ULONG',
+ 'long_long':'PyArray_LONGLONG',
+ 'unsigned_long_long':'Pyarray_ULONGLONG',
+ 'complex_float':'PyArray_CFLOAT',
+ 'complex_double':'PyArray_CDOUBLE',
+ 'complex_long_double':'PyArray_CDOUBLE',
+ 'string':'PyArray_STRING'}
+c2pycode_map={'double':'d',
+ 'float':'f',
+ 'long_double':'d', # forced casting
+ 'char':'1',
+ 'signed_char':'1',
+ 'unsigned_char':'b',
+ 'short':'s',
+ 'unsigned_short':'w',
+ 'int':'i',
+ 'unsigned':'u',
+ 'long':'l',
+ 'long_long':'L',
+ 'complex_float':'F',
+ 'complex_double':'D',
+ 'complex_long_double':'D', # forced casting
+ 'string':'c'
+ }
+if using_newcore:
+ c2pycode_map={'double':'d',
+ 'float':'f',
+ 'long_double':'g',
+ 'char':'b',
+ 'unsigned_char':'B',
+ 'signed_char':'b',
+ 'short':'h',
+ 'unsigned_short':'H',
+ 'int':'i',
+ 'unsigned':'I',
+ 'long':'l',
+ 'unsigned_long':'L',
+ 'long_long':'q',
+ 'unsigned_long_long':'Q',
+ 'complex_float':'F',
+ 'complex_double':'D',
+ 'complex_long_double':'G',
+ 'string':'S'}
+c2buildvalue_map={'double':'d',
+ 'float':'f',
+ 'char':'b',
+ 'signed_char':'b',
+ 'short':'h',
+ 'int':'i',
+ 'long':'l',
+ 'long_long':'L',
+ 'complex_float':'N',
+ 'complex_double':'N',
+ 'complex_long_double':'N',
+ 'string':'z'}
+if using_newcore:
+ #c2buildvalue_map=???
+ pass
+
+f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'},
+ 'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long',
+ '-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned',
+ '-8':'unsigned_long_long'},
+ 'complex':{'':'complex_float','8':'complex_float',
+ '16':'complex_double','24':'complex_long_double',
+ '32':'complex_long_double'},
+ 'complexkind':{'':'complex_float','4':'complex_float',
+ '8':'complex_double','12':'complex_long_double',
+ '16':'complex_long_double'},
+ 'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'},
+ 'double complex':{'':'complex_double'},
+ 'double precision':{'':'double'},
+ 'byte':{'':'char'},
+ 'character':{'':'string'}
+ }
+
+if os.path.isfile('.f2py_f2cmap'):
+ # User defined additions to f2cmap_all.
+ # .f2py_f2cmap must contain a dictionary of dictionaries, only.
+ # For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
+ # interpreted as C 'float'.
+ # This feature is useful for F90/95 users if they use PARAMETERs
+ # in type specifications.
+ try:
+ outmess('Reading .f2py_f2cmap ...\n')
+ f = open('.f2py_f2cmap','r')
+ d = eval(f.read(),{},{})
+ f.close()
+ for k,d1 in d.items():
+ for k1 in d1.keys():
+ d1[string.lower(k1)] = d1[k1]
+ d[string.lower(k)] = d[k]
+ for k in d.keys():
+ if not f2cmap_all.has_key(k): f2cmap_all[k]={}
+ for k1 in d[k].keys():
+ if c2py_map.has_key(d[k][k1]):
+ if f2cmap_all[k].has_key(k1):
+ outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k,k1,f2cmap_all[k][k1],d[k][k1]))
+ f2cmap_all[k][k1] = d[k][k1]
+ outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k,k1,d[k][k1]))
+ else:
+ errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k,k1,d[k][k1],d[k][k1],c2py_map.keys()))
+ outmess('Succesfully applied user defined changes from .f2py_f2cmap\n')
+ except:
+ errmess('Failed to apply user defined changes from .f2py_f2cmap. Skipping.\n')
+cformat_map={'double':'%g',
+ 'float':'%g',
+ 'long_double':'%Lg',
+ 'char':'%d',
+ 'signed_char':'%d',
+ 'unsigned_char':'%hhu',
+ 'short':'%hd',
+ 'unsigned_short':'%hu',
+ 'int':'%d',
+ 'unsigned':'%u',
+ 'long':'%ld',
+ 'unsigned_long':'%lu',
+ 'long_long':'%ld',
+ 'complex_float':'(%g,%g)',
+ 'complex_double':'(%g,%g)',
+ 'complex_long_double':'(%Lg,%Lg)',
+ 'string':'%s',
+ }
+
+############### Auxiliary functions
+def getctype(var):
+ """
+ Determines C type
+ """
+ ctype='void'
+ if isfunction(var):
+ if var.has_key('result'): a=var['result']
+ else: a=var['name']
+ if var['vars'].has_key(a): return getctype(var['vars'][a])
+ else: errmess('getctype: function %s has no return value?!\n'%a)
+ elif issubroutine(var):
+ return ctype
+ elif var.has_key('typespec') and f2cmap_all.has_key(string.lower(var['typespec'])):
+ typespec = string.lower(var['typespec'])
+ f2cmap=f2cmap_all[typespec]
+ ctype=f2cmap[''] # default type
+ if var.has_key('kindselector'):
+ if var['kindselector'].has_key('*'):
+ try:
+ ctype=f2cmap[var['kindselector']['*']]
+ except KeyError:
+ errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'],'*',var['kindselector']['*']))
+ elif var['kindselector'].has_key('kind'):
+ if f2cmap_all.has_key(typespec+'kind'):
+ f2cmap=f2cmap_all[typespec+'kind']
+ try:
+ ctype=f2cmap[var['kindselector']['kind']]
+ except KeyError:
+ if f2cmap_all.has_key(typespec):
+ f2cmap=f2cmap_all[typespec]
+ try:
+ ctype=f2cmap[str(var['kindselector']['kind'])]
+ except KeyError:
+ errmess('getctype: "%s(kind=%s)" not supported (use .f2py_f2cmap).\n'\
+ %(typespec,var['kindselector']['kind']))
+
+ else:
+ if not isexternal(var):
+ errmess('getctype: No C-type found in "%s", assuming void.\n'%var)
+ return ctype
+def getstrlength(var):
+ if isstringfunction(var):
+ if var.has_key('result'): a=var['result']
+ else: a=var['name']
+ if var['vars'].has_key(a): return getstrlength(var['vars'][a])
+ else: errmess('getstrlength: function %s has no return value?!\n'%a)
+ if not isstring(var):
+ errmess('getstrlength: expected a signature of a string but got: %s\n'%(`var`))
+ len='1'
+ if var.has_key('charselector'):
+ a=var['charselector']
+ if a.has_key('*'): len=a['*']
+ elif a.has_key('len'): len=a['len']
+ if re.match(r'\(\s*([*]|[:])\s*\)',len) or re.match(r'([*]|[:])',len):
+ #if len in ['(*)','*','(:)',':']:
+ if isintent_hide(var):
+ errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(`var`))
+ len='-1'
+ return len
+def getarrdims(a,var,verbose=0):
+ global depargs
+ ret={}
+ if isstring(var) and not isarray(var):
+ ret['dims']=getstrlength(var)
+ ret['size']=ret['dims']
+ ret['rank']='1'
+ elif isscalar(var):
+ ret['size']='1'
+ ret['rank']='0'
+ ret['dims']=''
+ elif isarray(var):
+# if not isintent_c(var):
+# var['dimension'].reverse()
+ dim=copy.copy(var['dimension'])
+ ret['size']=string.join(dim,'*')
+ try: ret['size']=`eval(ret['size'])`
+ except: pass
+ ret['dims']=string.join(dim,',')
+ ret['rank']=`len(dim)`
+ ret['rank*[-1]']=`len(dim)*[-1]`[1:-1]
+ for i in range(len(dim)): # solve dim for dependencies
+ v=[]
+ if dim[i] in depargs: v=[dim[i]]
+ else:
+ for va in depargs:
+ if re.match(r'.*?\b%s\b.*'%va,dim[i]):
+ v.append(va)
+ for va in v:
+ if depargs.index(va)>depargs.index(a):
+ dim[i]='*'
+ break
+ ret['setdims'],i='',-1
+ for d in dim:
+ i=i+1
+ if d not in ['*',':','(*)','(:)']:
+ ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'],i,d)
+ if ret['setdims']: ret['setdims']=ret['setdims'][:-1]
+ ret['cbsetdims'],i='',-1
+ for d in var['dimension']:
+ i=i+1
+ if d not in ['*',':','(*)','(:)']:
+ ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,d)
+ elif verbose :
+ errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(`a`,`d`))
+ if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1]
+# if not isintent_c(var):
+# var['dimension'].reverse()
+ return ret
+def getpydocsign(a,var):
+ global lcb_map
+ if isfunction(var):
+ if var.has_key('result'): af=var['result']
+ else: af=var['name']
+ if var['vars'].has_key(af): return getpydocsign(af,var['vars'][af])
+ else: errmess('getctype: function %s has no return value?!\n'%af)
+ return '',''
+ sig,sigout=a,a
+ opt=''
+ if isintent_in(var): opt='input'
+ elif isintent_inout(var): opt='in/output'
+ out_a = a
+ if isintent_out(var):
+ for k in var['intent']:
+ if k[:4]=='out=':
+ out_a = k[4:]
+ break
+ init=''
+ ctype=getctype(var)
+
+ if hasinitvalue(var):
+ init,showinit=getinit(a,var)
+ init='= %s'%(showinit)
+ if isscalar(var):
+ if isintent_inout(var):
+ sig='%s :%s %s rank-0 array(%s,\'%s\')'%(a,init,opt,c2py_map[ctype],
+ c2pycode_map[ctype],)
+ else:
+ sig='%s :%s %s %s'%(a,init,opt,c2py_map[ctype])
+ sigout='%s : %s'%(out_a,c2py_map[ctype])
+ elif isstring(var):
+ if isintent_inout(var):
+ sig='%s :%s %s rank-0 array(string(len=%s),\'c\')'%(a,init,opt,getstrlength(var))
+ else:
+ sig='%s :%s %s string(len=%s)'%(a,init,opt,getstrlength(var))
+ sigout='%s : string(len=%s)'%(out_a,getstrlength(var))
+ elif isarray(var):
+ dim=var['dimension']
+ rank=`len(dim)`
+ sig='%s :%s %s rank-%s array(\'%s\') with bounds (%s)'%(a,init,opt,rank,
+ c2pycode_map[ctype],
+ string.join(dim,','))
+ if a==out_a:
+ sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\
+ %(a,rank,c2pycode_map[ctype],string.join(dim,','))
+ else:
+ sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
+ %(out_a,rank,c2pycode_map[ctype],string.join(dim,','),a)
+ elif isexternal(var):
+ ua=''
+ if lcb_map.has_key(a) and lcb2_map.has_key(lcb_map[a]) and lcb2_map[lcb_map[a]].has_key('argname'):
+ ua=lcb2_map[lcb_map[a]]['argname']
+ if not ua==a: ua=' => %s'%ua
+ else: ua=''
+ sig='%s : call-back function%s'%(a,ua)
+ sigout=sig
+ else:
+ errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a)
+ return sig,sigout
+def getarrdocsign(a,var):
+ ctype=getctype(var)
+ if isstring(var) and (not isarray(var)):
+ sig='%s : rank-0 array(string(len=%s),\'c\')'%(a,getstrlength(var))
+ elif isscalar(var):
+ sig='%s : rank-0 array(%s,\'%s\')'%(a,c2py_map[ctype],
+ c2pycode_map[ctype],)
+ elif isarray(var):
+ dim=var['dimension']
+ rank=`len(dim)`
+ sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a,rank,
+ c2pycode_map[ctype],
+ string.join(dim,','))
+ return sig
+
+def getinit(a,var):
+ if isstring(var): init,showinit='""',"''"
+ else: init,showinit='',''
+ if hasinitvalue(var):
+ init=var['=']
+ showinit=init
+ if iscomplex(var) or iscomplexarray(var):
+ ret={}
+
+ try:
+ v = var["="]
+ if ',' in v:
+ ret['init.r'],ret['init.i']=string.split(markoutercomma(v[1:-1]),'@,@')
+ else:
+ v = eval(v,{},{})
+ ret['init.r'],ret['init.i']=str(v.real),str(v.imag)
+ except: raise 'sign2map: expected complex number `(r,i)\' but got `%s\' as initial value of %s.'%(init,`a`)
+ if isarray(var):
+ init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'],ret['init.i'])
+ elif isstring(var):
+ if not init: init,showinit='""',"''"
+ if init[0]=="'":
+ init='"%s"'%(string.replace(init[1:-1],'"','\\"'))
+ if init[0]=='"': showinit="'%s'"%(init[1:-1])
+ return init,showinit
+
+def sign2map(a,var):
+ """
+ varname,ctype,atype
+ init,init.r,init.i,pytype
+ vardebuginfo,vardebugshowvalue,varshowvalue
+ varrfromat
+ intent
+ """
+ global lcb_map,cb_map
+ out_a = a
+ if isintent_out(var):
+ for k in var['intent']:
+ if k[:4]=='out=':
+ out_a = k[4:]
+ break
+ ret={'varname':a,'outvarname':out_a}
+ ret['ctype']=getctype(var)
+ intent_flags = []
+ for f,s in isintent_dict.items():
+ if f(var): intent_flags.append('F2PY_%s'%s)
+ if intent_flags:
+ #XXX: Evaluate intent_flags here.
+ ret['intent'] = string.join(intent_flags,'|')
+ else:
+ ret['intent'] = 'F2PY_INTENT_IN'
+ if isarray(var): ret['varrformat']='N'
+ elif c2buildvalue_map.has_key(ret['ctype']):
+ ret['varrformat']=c2buildvalue_map[ret['ctype']]
+ else: ret['varrformat']='O'
+ ret['init'],ret['showinit']=getinit(a,var)
+ if hasinitvalue(var) and iscomplex(var) and not isarray(var):
+ ret['init.r'],ret['init.i'] = string.split(markoutercomma(ret['init'][1:-1]),'@,@')
+ if isexternal(var):
+ ret['cbnamekey']=a
+ if lcb_map.has_key(a):
+ ret['cbname']=lcb_map[a]
+ ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs']
+ ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs']
+ ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr']
+ ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr']
+ else:
+ ret['cbname']=a
+ errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a,lcb_map.keys()))
+ if isstring(var):
+ ret['length']=getstrlength(var)
+ if isarray(var):
+ ret=dictappend(ret,getarrdims(a,var))
+ dim=copy.copy(var['dimension'])
+ if c2capi_map.has_key(ret['ctype']): ret['atype']=c2capi_map[ret['ctype']]
+ # Debug info
+ if debugcapi(var):
+ il=[isintent_in,'input',isintent_out,'output',
+ isintent_inout,'inoutput',isrequired,'required',
+ isoptional,'optional',isintent_hide,'hidden',
+ iscomplex,'complex scalar',
+ l_and(isscalar,l_not(iscomplex)),'scalar',
+ isstring,'string',isarray,'array',
+ iscomplexarray,'complex array',isstringarray,'string array',
+ iscomplexfunction,'complex function',
+ l_and(isfunction,l_not(iscomplexfunction)),'function',
+ isexternal,'callback',
+ isintent_callback,'callback',
+ isintent_aux,'auxiliary',
+ #ismutable,'mutable',l_not(ismutable),'immutable',
+ ]
+ rl=[]
+ for i in range(0,len(il),2):
+ if il[i](var): rl.append(il[i+1])
+ if isstring(var):
+ rl.append('slen(%s)=%s'%(a,ret['length']))
+ if isarray(var):
+# if not isintent_c(var):
+# var['dimension'].reverse()
+ ddim=string.join(map(lambda x,y:'%s|%s'%(x,y),var['dimension'],dim),',')
+ rl.append('dims(%s)'%ddim)
+# if not isintent_c(var):
+# var['dimension'].reverse()
+ if isexternal(var):
+ ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a,ret['cbname'],string.join(rl,','))
+ else:
+ ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'],a,ret['showinit'],string.join(rl,','))
+ if isscalar(var):
+ if cformat_map.has_key(ret['ctype']):
+ ret['vardebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']])
+ if isstring(var):
+ ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a)
+ if isexternal(var):
+ ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a)
+ if cformat_map.has_key(ret['ctype']):
+ ret['varshowvalue']='#name#:%s=%s'%(a,cformat_map[ret['ctype']])
+ ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
+ if isstring(var):
+ ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a,a)
+ ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var)
+ if hasnote(var):
+ ret['note']=var['note']
+ return ret
+
+def routsign2map(rout):
+ """
+ name,NAME,begintitle,endtitle
+ rname,ctype,rformat
+ routdebugshowvalue
+ """
+ global lcb_map
+ name = rout['name']
+ fname = getfortranname(rout)
+ ret={'name':name,
+ 'texname':string.replace(name,'_','\\_'),
+ 'name_lower':string.lower(name),
+ 'NAME':string.upper(name),
+ 'begintitle':gentitle(name),
+ 'endtitle':gentitle('end of %s'%name),
+ 'fortranname':fname,
+ 'FORTRANNAME':string.upper(fname),
+ 'callstatement':getcallstatement(rout) or '',
+ 'usercode':getusercode(rout) or '',
+ 'usercode1':getusercode1(rout) or '',
+ }
+ if '_' in fname:
+ ret['F_FUNC'] = 'F_FUNC_US'
+ else:
+ ret['F_FUNC'] = 'F_FUNC'
+ if '_' in name:
+ ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US'
+ else:
+ ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC'
+ lcb_map={}
+ if rout.has_key('use'):
+ for u in rout['use'].keys():
+ if cb_rules.cb_map.has_key(u):
+ for un in cb_rules.cb_map[u]:
+ ln=un[0]
+ if rout['use'][u].has_key('map'):
+ for k in rout['use'][u]['map'].keys():
+ if rout['use'][u]['map'][k]==un[0]: ln=k;break
+ lcb_map[ln]=un[1]
+ #else:
+ # errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u))
+ elif rout.has_key('externals') and rout['externals']:
+ errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'],`rout['externals']`))
+ ret['callprotoargument'] = getcallprotoargument(rout,lcb_map) or ''
+ if isfunction(rout):
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ ret['rname']=a
+ ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout)
+ ret['ctype']=getctype(rout['vars'][a])
+ if hasresultnote(rout):
+ ret['resultnote']=rout['vars'][a]['note']
+ rout['vars'][a]['note']=['See elsewhere.']
+ if c2buildvalue_map.has_key(ret['ctype']):
+ ret['rformat']=c2buildvalue_map[ret['ctype']]
+ else:
+ ret['rformat']='O'
+ errmess('routsign2map: no c2buildvalue key for type %s\n'%(`ret['ctype']`))
+ if debugcapi(rout):
+ if cformat_map.has_key(ret['ctype']):
+ ret['routdebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']])
+ if isstringfunction(rout):
+ ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a)
+ if isstringfunction(rout):
+ ret['rlength']=getstrlength(rout['vars'][a])
+ if ret['rlength']=='-1':
+ errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(`rout['name']`))
+ ret['rlength']='10'
+ if hasnote(rout):
+ ret['note']=rout['note']
+ rout['note']=['See elsewhere.']
+ return ret
+
+def modsign2map(m):
+ """
+ modulename
+ """
+ if ismodule(m):
+ ret={'f90modulename':m['name'],
+ 'F90MODULENAME':string.upper(m['name']),
+ 'texf90modulename':string.replace(m['name'],'_','\\_')}
+ else:
+ ret={'modulename':m['name'],
+ 'MODULENAME':string.upper(m['name']),
+ 'texmodulename':string.replace(m['name'],'_','\\_')}
+ ret['restdoc'] = getrestdoc(m) or []
+ if hasnote(m):
+ ret['note']=m['note']
+ #m['note']=['See elsewhere.']
+ ret['usercode'] = getusercode(m) or ''
+ ret['usercode1'] = getusercode1(m) or ''
+ if m['body']:
+ ret['interface_usercode'] = getusercode(m['body'][0]) or ''
+ else:
+ ret['interface_usercode'] = ''
+ ret['pymethoddef'] = getpymethoddef(m) or ''
+ return ret
+
+def cb_sign2map(a,var):
+ ret={'varname':a}
+ ret['ctype']=getctype(var)
+ if c2capi_map.has_key(ret['ctype']):
+ ret['atype']=c2capi_map[ret['ctype']]
+ if cformat_map.has_key(ret['ctype']):
+ ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
+ if isarray(var):
+ ret=dictappend(ret,getarrdims(a,var))
+ ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var)
+ if hasnote(var):
+ ret['note']=var['note']
+ var['note']=['See elsewhere.']
+ return ret
+
+def cb_routsign2map(rout,um):
+ """
+ name,begintitle,endtitle,argname
+ ctype,rctype,maxnofargs,nofoptargs,returncptr
+ """
+ ret={'name':'cb_%s_in_%s'%(rout['name'],um),
+ 'returncptr':''}
+ if isintent_callback(rout):
+ if '_' in rout['name']:
+ F_FUNC='F_FUNC_US'
+ else:
+ F_FUNC='F_FUNC'
+ ret['callbackname'] = '%s(%s,%s)' \
+ % (F_FUNC,
+ rout['name'].lower(),
+ rout['name'].upper(),
+ )
+ ret['static'] = 'extern'
+ else:
+ ret['callbackname'] = ret['name']
+ ret['static'] = 'static'
+ ret['argname']=rout['name']
+ ret['begintitle']=gentitle(ret['name'])
+ ret['endtitle']=gentitle('end of %s'%ret['name'])
+ ret['ctype']=getctype(rout)
+ ret['rctype']='void'
+ if ret['ctype']=='string': ret['rctype']='void'
+ else:
+ ret['rctype']=ret['ctype']
+ if ret['rctype']!='void':
+ if iscomplexfunction(rout):
+ ret['returncptr'] = """
+#ifdef F2PY_CB_RETURNCOMPLEX
+return_value=
+#endif
+"""
+ else:
+ ret['returncptr'] = 'return_value='
+ if cformat_map.has_key(ret['ctype']):
+ ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
+ if isstringfunction(rout):
+ ret['strlength']=getstrlength(rout)
+ if isfunction(rout):
+ if rout.has_key('result'): a=rout['result']
+ else: a=rout['name']
+ if hasnote(rout['vars'][a]):
+ ret['note']=rout['vars'][a]['note']
+ rout['vars'][a]['note']=['See elsewhere.']
+ ret['rname']=a
+ ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout)
+ if iscomplexfunction(rout):
+ ret['rctype']="""
+#ifdef F2PY_CB_RETURNCOMPLEX
+#ctype#
+#else
+void
+#endif
+"""
+ else:
+ if hasnote(rout):
+ ret['note']=rout['note']
+ rout['note']=['See elsewhere.']
+ nofargs=0
+ nofoptargs=0
+ if rout.has_key('args') and rout.has_key('vars'):
+ for a in rout['args']:
+ var=rout['vars'][a]
+ if l_or(isintent_in,isintent_inout)(var):
+ nofargs=nofargs+1
+ if isoptional(var):
+ nofoptargs=nofoptargs+1
+ ret['maxnofargs']=`nofargs`
+ ret['nofoptargs']=`nofoptargs`
+ if hasnote(rout) and isfunction(rout) and rout.has_key('result'):
+ ret['routnote']=rout['note']
+ rout['note']=['See elsewhere.']
+ return ret
+
+def common_sign2map(a,var): # obsolete
+ ret={'varname':a}
+ ret['ctype']=getctype(var)
+ if isstringarray(var): ret['ctype']='char'
+ if c2capi_map.has_key(ret['ctype']):
+ ret['atype']=c2capi_map[ret['ctype']]
+ if cformat_map.has_key(ret['ctype']):
+ ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
+ if isarray(var):
+ ret=dictappend(ret,getarrdims(a,var))
+ elif isstring(var):
+ ret['size']=getstrlength(var)
+ ret['rank']='1'
+ ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var)
+ if hasnote(var):
+ ret['note']=var['note']
+ var['note']=['See elsewhere.']
+ ret['arrdocstr']=getarrdocsign(a,var) # for strings this returns 0-rank but actually is 1-rank
+ return ret
+
+
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
new file mode 100644
index 000000000..666c1c6c8
--- /dev/null
+++ b/numpy/f2py/cb_rules.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+"""
+
+Build call-back mechanism for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/07/20 11:27:58 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.53 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+
+import pprint
+import sys,string,time,types,copy
+errmess=sys.stderr.write
+outmess=sys.stdout.write
+show=pprint.pprint
+
+from auxfuncs import *
+import capi_maps
+#from capi_maps import *
+import cfuncs
+
+################## Rules for callback function ##############
+
+cb_routine_rules={
+ 'cbtypedefs':'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
+ 'body':"""
+#begintitle#
+PyObject *#name#_capi = NULL;/*was Py_None*/
+PyTupleObject *#name#_args_capi = NULL;
+int #name#_nofargs = 0;
+jmp_buf #name#_jmpbuf;
+/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
+#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
+\tPyTupleObject *capi_arglist = #name#_args_capi;
+\tPyObject *capi_return = NULL;
+\tPyObject *capi_tmp = NULL;
+\tint capi_j,capi_i = 0;
+\tint capi_longjmp_ok = 1;
+#decl#
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_start_clock();
+#endif
+\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
+\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi);
+\tif (#name#_capi==NULL) {
+\t\tcapi_longjmp_ok = 0;
+\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
+\t}
+\tif (#name#_capi==NULL) {
+\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
+\t\tgoto capi_fail;
+\t}
+\tif (PyCObject_Check(#name#_capi)) {
+\t#name#_typedef #name#_cptr;
+\t#name#_cptr = PyCObject_AsVoidPtr(#name#_capi);
+\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm#);
+\t#return#
+\t}
+\tif (capi_arglist==NULL) {
+\t\tcapi_longjmp_ok = 0;
+\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
+\t\tif (capi_tmp) {
+\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+\t\t\tif (capi_arglist==NULL) {
+\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
+\t\t\t\tgoto capi_fail;
+\t\t\t}
+\t\t} else {
+\t\t\tPyErr_Clear();
+\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\");
+\t\t}
+\t}
+\tif (capi_arglist == NULL) {
+\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\");
+\t\tgoto capi_fail;
+\t}
+#setdims#
+#pyobjfrom#
+\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist);
+\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\");
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_start_call_clock();
+#endif
+\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist);
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_stop_call_clock();
+#endif
+\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return);
+\tif (capi_return == NULL) {
+\t\tfprintf(stderr,\"capi_return is NULL\\n\");
+\t\tgoto capi_fail;
+\t}
+\tif (capi_return == Py_None) {
+\t\tPy_DECREF(capi_return);
+\t\tcapi_return = Py_BuildValue(\"()\");
+\t}
+\telse if (!PyTuple_Check(capi_return)) {
+\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return);
+\t}
+\tcapi_j = PyTuple_Size(capi_return);
+\tcapi_i = 0;
+#frompyobj#
+\tCFUNCSMESS(\"cb:#name#:successful\\n\");
+\tPy_DECREF(capi_return);
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_stop_clock();
+#endif
+\tgoto capi_return_pt;
+capi_fail:
+\tfprintf(stderr,\"Call-back #name# failed.\\n\");
+\tPy_XDECREF(capi_return);
+\tif (capi_longjmp_ok)
+\t\tlongjmp(#name#_jmpbuf,-1);
+capi_return_pt:
+\t;
+#return#
+}
+#endtitle#
+""",
+ 'need':['setjmp.h','CFUNCSMESS'],
+ 'maxnofargs':'#maxnofargs#',
+ 'nofoptargs':'#nofoptargs#',
+ 'docstr':"""\
+\tdef #argname#(#docsignature#): return #docreturn#\\n\\
+#docstrsigns#""",
+ 'latexdocstr':"""
+{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
+#routnote#
+
+#latexdocstrsigns#""",
+ 'docstrshort':'def #argname#(#docsignature#): return #docreturn#'
+ }
+cb_rout_rules=[
+ {# Init
+ 'separatorsfor':{'decl':'\n',
+ 'args':',','optargs':'','pyobjfrom':'\n','freemem':'\n',
+ 'args_td':',','optargs_td':'',
+ 'args_nm':',','optargs_nm':'',
+ 'frompyobj':'\n','setdims':'\n',
+ 'docstrsigns':'\\n"\n"',
+ 'latexdocstrsigns':'\n',
+ 'latexdocstrreq':'\n','latexdocstropt':'\n',
+ 'latexdocstrout':'\n','latexdocstrcbs':'\n',
+ },
+ 'decl':'/*decl*/','pyobjfrom':'/*pyobjfrom*/','frompyobj':'/*frompyobj*/',
+ 'args':[],'optargs':'','return':'','strarglens':'','freemem':'/*freemem*/',
+ 'args_td':[],'optargs_td':'','strarglens_td':'',
+ 'args_nm':[],'optargs_nm':'','strarglens_nm':'',
+ 'noargs':'',
+ 'setdims':'/*setdims*/',
+ 'docstrsigns':'','latexdocstrsigns':'',
+ 'docstrreq':'\tRequired arguments:',
+ 'docstropt':'\tOptional arguments:',
+ 'docstrout':'\tReturn objects:',
+ 'docstrcbs':'\tCall-back functions:',
+ 'docreturn':'','docsign':'','docsignopt':'',
+ 'latexdocstrreq':'\\noindent Required arguments:',
+ 'latexdocstropt':'\\noindent Optional arguments:',
+ 'latexdocstrout':'\\noindent Return objects:',
+ 'latexdocstrcbs':'\\noindent Call-back functions:',
+ 'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
+ },{ # Function
+ 'decl':'\t#ctype# return_value;',
+ 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'},
+ '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");',
+ {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'}
+ ],
+ 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},'GETSCALARFROMPYTUPLE'],
+ 'return':'\treturn return_value;',
+ '_check':l_and(isfunction,l_not(isstringfunction),l_not(iscomplexfunction))
+ },
+ {# String function
+ 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'},
+ 'args':'#ctype# return_value,int return_value_len',
+ 'args_nm':'return_value,&return_value_len',
+ 'args_td':'#ctype# ,int',
+ 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'},
+ """\tif (capi_j>capi_i)
+\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""",
+ {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'}
+ ],
+ 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},
+ 'string.h','GETSTRFROMPYTUPLE'],
+ 'return':'return;',
+ '_check':isstringfunction
+ },
+ {# Complex function
+ 'optargs':"""
+#ifndef F2PY_CB_RETURNCOMPLEX
+#ctype# *return_value
+#endif
+""",
+ 'optargs_nm':"""
+#ifndef F2PY_CB_RETURNCOMPLEX
+return_value
+#endif
+""",
+ 'optargs_td':"""
+#ifndef F2PY_CB_RETURNCOMPLEX
+#ctype# *
+#endif
+""",
+ 'decl':"""
+#ifdef F2PY_CB_RETURNCOMPLEX
+\t#ctype# return_value;
+#endif
+""",
+ 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'},
+ """\
+\tif (capi_j>capi_i)
+#ifdef F2PY_CB_RETURNCOMPLEX
+\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
+#else
+\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
+#endif
+""",
+ {debugcapi:"""
+#ifdef F2PY_CB_RETURNCOMPLEX
+\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i);
+#else
+\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i);
+#endif
+
+"""}
+ ],
+ 'return':"""
+#ifdef F2PY_CB_RETURNCOMPLEX
+\treturn return_value;
+#else
+\treturn;
+#endif
+""",
+ 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},
+ 'string.h','GETSCALARFROMPYTUPLE','#ctype#'],
+ '_check':iscomplexfunction
+ },
+ {'docstrout':'\t\t#pydocsignout#',
+ 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {hasnote:'--- #note#'}],
+ 'docreturn':'#rname#,',
+ '_check':isfunction},
+ {'_check':issubroutine,'return':'return;'}
+ ]
+
+cb_arg_rules=[
+ { # Doc
+ 'docstropt':{l_and(isoptional,isintent_nothide):'\t\t#pydocsign#'},
+ 'docstrreq':{l_and(isrequired,isintent_nothide):'\t\t#pydocsign#'},
+ 'docstrout':{isintent_out:'\t\t#pydocsignout#'},
+ 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote:'--- #note#'}]},
+ 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote:'--- #note#'}]},
+ 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {l_and(hasnote,isintent_hide):'--- #note#',
+ l_and(hasnote,isintent_nothide):'--- See above.'}]},
+ 'docsign':{l_and(isrequired,isintent_nothide):'#varname#,'},
+ 'docsignopt':{l_and(isoptional,isintent_nothide):'#varname#,'},
+ 'depend':''
+ },
+ {
+ 'args':{
+ l_and (isscalar,isintent_c):'#ctype# #varname#',
+ l_and (isscalar,l_not(isintent_c)):'#ctype# *#varname#_cb_capi',
+ isarray:'#ctype# *#varname#',
+ isstring:'#ctype# #varname#'
+ },
+ 'args_nm':{
+ l_and (isscalar,isintent_c):'#varname#',
+ l_and (isscalar,l_not(isintent_c)):'#varname#_cb_capi',
+ isarray:'#varname#',
+ isstring:'#varname#'
+ },
+ 'args_td':{
+ l_and (isscalar,isintent_c):'#ctype#',
+ l_and (isscalar,l_not(isintent_c)):'#ctype# *',
+ isarray:'#ctype# *',
+ isstring:'#ctype#'
+ },
+ 'strarglens':{isstring:',int #varname#_cb_len'}, # untested with multiple args
+ 'strarglens_td':{isstring:',int'}, # untested with multiple args
+
+ },
+ { # Scalars
+ 'decl':{l_not(isintent_c):'\t#ctype# #varname#=(*#varname#_cb_capi);'},
+ 'error': {l_and(isintent_c,isintent_out,
+ throw_error('intent(c,out) is forbidden for callback scalar arguments')):\
+ ''},
+ 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'},
+ {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'},
+ {l_and(debugcapi,l_and(l_not(iscomplex),isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname#);'},
+ {l_and(debugcapi,l_and(l_not(iscomplex),l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname#_cb_capi);'},
+ {l_and(debugcapi,l_and(iscomplex,isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname#).r,(#varname#).i);'},
+ {l_and(debugcapi,l_and(iscomplex,l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname#_cb_capi).r,(*#varname#_cb_capi).i);'},
+ ],
+ 'need':[{isintent_out:['#ctype#_from_pyobj','GETSCALARFROMPYTUPLE']},
+ {debugcapi:'CFUNCSMESS'}],
+ '_check':isscalar
+ },{
+ 'pyobjfrom':[{isintent_in:"""\
+\tif (#name#_nofargs>capi_i)
+\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname#)))
+\t\t\tgoto capi_fail;"""},
+ {isintent_inout:"""\
+\tif (#name#_nofargs>capi_i)
+\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname#_cb_capi)))
+\t\t\tgoto capi_fail;"""}],
+ 'need':[{isintent_in:'pyobj_from_#ctype#1'},
+ {isintent_inout:'pyarr_from_p_#ctype#1'},
+ {iscomplex:'#ctype#'}],
+ '_check':l_and(isscalar,isintent_nothide),
+ '_optional':''
+ },{# String
+ 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'},
+ """\tif (capi_j>capi_i)
+\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname#,#varname#_cb_len);""",
+ {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname#,#varname#_cb_len);'},
+ ],
+ 'need':['#ctype#','GETSTRFROMPYTUPLE',
+ {debugcapi:'CFUNCSMESS'},'string.h'],
+ '_check':l_and(isstring,isintent_out)
+ },{
+ 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname#,#varname#_cb_len);'},
+ {isintent_in:"""\
+\tif (#name#_nofargs>capi_i)
+\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname#)))
+\t\t\tgoto capi_fail;"""},
+ {isintent_inout:"""\
+\tif (#name#_nofargs>capi_i) {
+\t\tint #varname#_cb_dims[] = {#varname#_cb_len};
+\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname#,#varname#_cb_dims)))
+\t\t\tgoto capi_fail;
+\t}"""}],
+ 'need':[{isintent_in:'pyobj_from_#ctype#1'},
+ {isintent_inout:'pyarr_from_p_#ctype#1'}],
+ '_check':l_and(isstring,isintent_nothide),
+ '_optional':''
+ },
+# Array ...
+ {
+ 'decl':'\tintp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ 'setdims':'\t#cbsetdims#;',
+ '_check':isarray,
+ '_depend':''
+ },
+ {
+ 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'},
+ {isintent_c:"""\
+\tif (#name#_nofargs>capi_i) {
+\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname#_Dims,#atype#,NULL,(char*)#varname#,0,CARRAY_FLAGS,NULL); /*XXX: Hmm, what will destroy this array??? */
+""",
+ l_not(isintent_c):"""\
+\tif (#name#_nofargs>capi_i) {
+\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname#_Dims,#atype#,NULL,(char*)#varname#,0,FARRAY_FLAGS,NULL); /*XXX: Hmm, what will destroy this array??? */
+""",
+ },
+ """
+\t\tif (tmp_arr==NULL)
+\t\t\tgoto capi_fail;
+\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr))
+\t\t\tgoto capi_fail;
+}"""],
+ '_check':l_and(isarray,isintent_nothide,l_or(isintent_in,isintent_inout)),
+ '_optional':'',
+ },{
+ 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'},
+ """\tif (capi_j>capi_i) {
+\t\tPyArrayObject *rv_cb_arr = NULL;
+\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail;
+\t\trv_cb_arr = array_from_pyobj(#atype#,#varname#_Dims,#rank#,F2PY_INTENT_IN""",
+ {isintent_c:'|F2PY_INTENT_C'},
+ """,capi_tmp);
+\t\tif (rv_cb_arr == NULL) {
+\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\");
+\t\t\tgoto capi_fail;
+\t\t}
+\t\tMEMCOPY(#varname#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr));
+\t\tif (capi_tmp != (PyObject *)rv_cb_arr) {
+\t\t\tPy_DECREF(rv_cb_arr);
+\t\t}
+\t}""",
+ {debugcapi:'\tfprintf(stderr,"<-.\\n");'},
+ ],
+ 'need':['MEMCOPY',{iscomplexarray:'#ctype#'}],
+ '_check':l_and(isarray,isintent_out)
+ },{
+ 'docreturn':'#varname#,',
+ '_check':isintent_out
+ }
+ ]
+
+################## Build call-back module #############
+cb_map={}
+def buildcallbacks(m):
+ global cb_map
+ cb_map[m['name']]=[]
+ for bi in m['body']:
+ if bi['block']=='interface':
+ for b in bi['body']:
+ if b:
+ buildcallback(b,m['name'])
+ else:
+ errmess('warning: empty body for %s\n' % (m['name']))
+
+def buildcallback(rout,um):
+ global cb_map
+ outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'],um))
+ args,depargs=getargs(rout)
+ capi_maps.depargs=depargs
+ var=rout['vars']
+ vrd=capi_maps.cb_routsign2map(rout,um)
+ rd=dictappend({},vrd)
+ cb_map[um].append([rout['name'],rd['name']])
+ for r in cb_rout_rules:
+ if (r.has_key('_check') and r['_check'](rout)) or (not r.has_key('_check')):
+ ar=applyrules(r,vrd,rout)
+ rd=dictappend(rd,ar)
+ savevrd={}
+ for a in args:
+ vrd=capi_maps.cb_sign2map(a,var[a])
+ savevrd[a]=vrd
+ for r in cb_arg_rules:
+ if r.has_key('_depend'): continue
+ if r.has_key('_optional') and isoptional(var[a]): continue
+ if (r.has_key('_check') and r['_check'](var[a])) or (not r.has_key('_check')):
+ ar=applyrules(r,vrd,var[a])
+ rd=dictappend(rd,ar)
+ if r.has_key('_break'): break
+ for a in args:
+ vrd=savevrd[a]
+ for r in cb_arg_rules:
+ if r.has_key('_depend'): continue
+ if (not r.has_key('_optional')) or (r.has_key('_optional') and isrequired(var[a])): continue
+ if (r.has_key('_check') and r['_check'](var[a])) or (not r.has_key('_check')):
+ ar=applyrules(r,vrd,var[a])
+ rd=dictappend(rd,ar)
+ if r.has_key('_break'): break
+ for a in depargs:
+ vrd=savevrd[a]
+ for r in cb_arg_rules:
+ if not r.has_key('_depend'): continue
+ if r.has_key('_optional'): continue
+ if (r.has_key('_check') and r['_check'](var[a])) or (not r.has_key('_check')):
+ ar=applyrules(r,vrd,var[a])
+ rd=dictappend(rd,ar)
+ if r.has_key('_break'): break
+ if rd.has_key('args') and rd.has_key('optargs'):
+ if type(rd['optargs'])==type([]):
+ rd['optargs']=rd['optargs']+["""
+#ifndef F2PY_CB_RETURNCOMPLEX
+,
+#endif
+"""]
+ rd['optargs_nm']=rd['optargs_nm']+["""
+#ifndef F2PY_CB_RETURNCOMPLEX
+,
+#endif
+"""]
+ rd['optargs_td']=rd['optargs_td']+["""
+#ifndef F2PY_CB_RETURNCOMPLEX
+,
+#endif
+"""]
+ if type(rd['docreturn'])==types.ListType:
+ rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))
+ optargs=stripcomma(replace('#docsignopt#',
+ {'docsignopt':rd['docsignopt']}
+ ))
+ if optargs=='':
+ rd['docsignature']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
+ else:
+ rd['docsignature']=replace('#docsign#[#docsignopt#]',
+ {'docsign':rd['docsign'],
+ 'docsignopt':optargs,
+ })
+ rd['latexdocsignature']=string.replace(rd['docsignature'],'_','\\_')
+ rd['latexdocsignature']=string.replace(rd['latexdocsignature'],',',', ')
+ rd['docstrsigns']=[]
+ rd['latexdocstrsigns']=[]
+ for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
+ if rd.has_key(k) and type(rd[k])==types.ListType:
+ rd['docstrsigns']=rd['docstrsigns']+rd[k]
+ k='latex'+k
+ if rd.has_key(k) and type(rd[k])==types.ListType:
+ rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
+ ['\\begin{description}']+rd[k][1:]+\
+ ['\\end{description}']
+ if not rd.has_key('args'):
+ rd['args']=''
+ rd['args_td']=''
+ rd['args_nm']=''
+ if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')):
+ rd['noargs'] = 'void'
+
+ ar=applyrules(cb_routine_rules,rd)
+ cfuncs.callbacks[rd['name']]=ar['body']
+ if type(ar['need'])==types.StringType:
+ ar['need']=[ar['need']]
+
+ if rd.has_key('need'):
+ for t in cfuncs.typedefs.keys():
+ if t in rd['need']:
+ ar['need'].append(t)
+
+ cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs']
+ ar['need'].append(rd['name']+'_typedef')
+ cfuncs.needs[rd['name']]=ar['need']
+
+ capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'],
+ 'nofoptargs':ar['nofoptargs'],
+ 'docstr':ar['docstr'],
+ 'latexdocstr':ar['latexdocstr'],
+ 'argname':rd['argname']
+ }
+ outmess('\t %s\n'%(ar['docstrshort']))
+ #print ar['body']
+ return
+################## Build call-back function #############
+
+
+
+
+
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
new file mode 100644
index 000000000..aec1509ff
--- /dev/null
+++ b/numpy/f2py/cfuncs.py
@@ -0,0 +1,1134 @@
+#!/usr/bin/env python
+"""
+
+C declarations, CPP macros, and C functions for f2py2e.
+Only required declarations/macros/functions will be used.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 11:42:34 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.75 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+import types,sys,copy,os
+errmess=sys.stderr.write
+
+##################### Definitions ##################
+
+outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[],
+ 'userincludes':[],
+ 'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[],
+ 'commonhooks':[]}
+needs={}
+includes0={'includes0':'/*need_includes0*/'}
+includes={'includes':'/*need_includes*/'}
+userincludes={'userincludes':'/*need_userincludes*/'}
+typedefs={'typedefs':'/*need_typedefs*/'}
+typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'}
+cppmacros={'cppmacros':'/*need_cppmacros*/'}
+cfuncs={'cfuncs':'/*need_cfuncs*/'}
+callbacks={'callbacks':'/*need_callbacks*/'}
+f90modhooks={'f90modhooks':'/*need_f90modhooks*/',
+ 'initf90modhooksstatic':'/*initf90modhooksstatic*/',
+ 'initf90modhooksdynamic':'/*initf90modhooksdynamic*/',
+ }
+commonhooks={'commonhooks':'/*need_commonhooks*/',
+ 'initcommonhooks':'/*need_initcommonhooks*/',
+ }
+
+############ Includes ###################
+
+includes0['math.h']='#include <math.h>'
+includes0['string.h']='#include <string.h>'
+includes0['setjmp.h']='#include <setjmp.h>'
+
+includes['Python.h']='#include "Python.h"'
+needs['arrayobject.h']=['Python.h']
+includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
+#include "arrayobject.h"'''
+
+includes['arrayobject.h']='#include "fortranobject.h"'
+
+############# Type definitions ###############
+
+typedefs['unsigned_char']='typedef unsigned char unsigned_char;'
+typedefs['unsigned_short']='typedef unsigned short unsigned_short;'
+typedefs['unsigned_long']='typedef unsigned long unsigned_long;'
+typedefs['signed_char']='typedef signed char signed_char;'
+typedefs['long_long']="""\
+#ifdef _WIN32
+typedef __int64 long_long;
+#else
+typedef long long long_long;
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['unsigned_long_long']="""\
+#ifdef _WIN32
+typedef unsigned __int64 unsigned_long_long;
+#else
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['long_double']="""\
+#ifndef _LONG_DOUBLE
+typedef long double long_double;
+#endif
+"""
+typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;'
+typedefs['complex_float']='typedef struct {float r,i;} complex_float;'
+typedefs['complex_double']='typedef struct {double r,i;} complex_double;'
+typedefs['string']="""typedef char * string;"""
+
+
+############### CPP macros ####################
+cppmacros['CFUNCSMESS']="""\
+#ifdef DEBUGCFUNCS
+#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
+#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
+\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+\tfprintf(stderr,\"\\n\");
+#else
+#define CFUNCSMESS(mess)
+#define CFUNCSMESSPY(mess,obj)
+#endif
+"""
+cppmacros['F_FUNC']="""\
+#if defined(PREPEND_FORTRAN)
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) _##F
+#else
+#define F_FUNC(f,F) _##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) _##F##_
+#else
+#define F_FUNC(f,F) _##f##_
+#endif
+#endif
+#else
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F
+#else
+#define F_FUNC(f,F) f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F##_
+#else
+#define F_FUNC(f,F) f##_
+#endif
+#endif
+#endif
+#if defined(UNDERSCORE_G77)
+#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
+#else
+#define F_FUNC_US(f,F) F_FUNC(f,F)
+#endif
+"""
+cppmacros['F_WRAPPEDFUNC']="""\
+#if defined(PREPEND_FORTRAN)
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
+#else
+#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
+#else
+#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
+#endif
+#endif
+#else
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
+#else
+#define F_WRAPPEDFUNC(f,F) f2pywrap##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
+#else
+#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
+#endif
+#endif
+#endif
+#if defined(UNDERSCORE_G77)
+#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
+#else
+#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
+#endif
+"""
+cppmacros['F_MODFUNC']="""\
+#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
+#if defined(NO_APPEND_FORTRAN)
+#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
+#else
+#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
+#endif
+#endif
+
+#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
+#if defined(NO_APPEND_FORTRAN)
+#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
+#else
+#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
+#endif
+#endif
+
+#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
+#if defined(NO_APPEND_FORTRAN)
+#define F_MODFUNCNAME(m,f) f ## .in. ## m
+#else
+#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
+#endif
+#endif
+/*
+#if defined(UPPERCASE_FORTRAN)
+#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
+#else
+#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
+#endif
+*/
+
+#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
+"""
+cppmacros['SWAPUNSAFE']="""\
+#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
+ (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
+ (size_t)(a) = ((size_t)(a) ^ (size_t)(b))
+"""
+cppmacros['SWAP']="""\
+#define SWAP(a,b,t) {\\
+\tt *c;\\
+\tc = a;\\
+\ta = b;\\
+\tb = c;}
+"""
+#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS)'
+cppmacros['PRINTPYOBJERR']="""\
+#define PRINTPYOBJERR(obj)\\
+\tfprintf(stderr,\"#modulename#.error is related to \");\\
+\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+\tfprintf(stderr,\"\\n\");
+"""
+cppmacros['MINMAX']="""\
+#ifndef MAX
+#define MAX(a,b) ((a > b) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a,b) ((a < b) ? (a) : (b))
+#endif
+"""
+cppmacros['len..']="""\
+#define rank(var) var ## _Rank
+#define shape(var,dim) var ## _Dims[dim]
+#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd)
+#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim])
+#define fshape(var,dim) shape(var,rank(var)-dim-1)
+#define len(var) shape(var,0)
+#define flen(var) fshape(var,0)
+#define size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp))
+/* #define index(i) capi_i ## i */
+#define slen(var) capi_ ## var ## _len
+"""
+
+cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))'
+cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))'
+needs['pyobj_from_int1']=['signed_char']
+cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))'
+cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))'
+needs['pyobj_from_long_long1']=['long_long']
+cppmacros['pyobj_from_long_long1']="""\
+#ifdef HAVE_LONG_LONG
+#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
+#else
+#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
+#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
+#endif
+"""
+needs['pyobj_from_long_double1']=['long_double']
+cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
+cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
+cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
+needs['pyobj_from_complex_long_double1']=['complex_long_double']
+cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
+needs['pyobj_from_complex_double1']=['complex_double']
+cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
+needs['pyobj_from_complex_float1']=['complex_float']
+cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
+needs['pyobj_from_string1']=['string']
+cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))'
+needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR']
+cppmacros['TRYPYARRAYTEMPLATE']="""\
+/* New SciPy */
+#define TRYPYARRAYTEMPLATECHAR case PyArray_STRING: *(char *)(arr->data)=*v; break;
+#define TRYPYARRAYTEMPLATELONG case PyArray_LONG: *(long *)(arr->data)=*v; break;
+#define TRYPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break;
+
+#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
+ PyArrayObject *arr = NULL;\\
+ if (!obj) return -2;\\
+ if (!PyArray_Check(obj)) return -1;\\
+ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
+ if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\
+ switch (arr->descr->type_num) {\\
+ case PyArray_DOUBLE: *(double *)(arr->data)=*v; break;\\
+ case PyArray_INT: *(int *)(arr->data)=*v; break;\\
+ case PyArray_LONG: *(long *)(arr->data)=*v; break;\\
+ case PyArray_FLOAT: *(float *)(arr->data)=*v; break;\\
+ case PyArray_CDOUBLE: *(double *)(arr->data)=*v; break;\\
+ case PyArray_CFLOAT: *(float *)(arr->data)=*v; break;\\
+ case PyArray_BOOL: *(Bool *)(arr->data)=(*v!=0); break;\\
+ case PyArray_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\
+ case PyArray_BYTE: *(signed char *)(arr->data)=*v; break;\\
+ case PyArray_SHORT: *(short *)(arr->data)=*v; break;\\
+ case PyArray_USHORT: *(ushort *)(arr->data)=*v; break;\\
+ case PyArray_UINT: *(uint *)(arr->data)=*v; break;\\
+ case PyArray_ULONG: *(ulong *)(arr->data)=*v; break;\\
+ case PyArray_LONGLONG: *(longlong *)(arr->data)=*v; break;\\
+ case PyArray_ULONGLONG: *(ulonglong *)(arr->data)=*v; break;\\
+ case PyArray_LONGDOUBLE: *(longdouble *)(arr->data)=*v; break;\\
+ case PyArray_CLONGDOUBLE: *(longdouble *)(arr->data)=*v; break;\\
+ case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\
+ default: return -2;\\
+ };\\
+ return 1
+"""
+
+needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR']
+cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
+#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;
+#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
+ PyArrayObject *arr = NULL;\\
+ if (!obj) return -2;\\
+ if (!PyArray_Check(obj)) return -1;\\
+ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
+ if (arr->descr->type==typecode) {\\
+ *(ctype *)(arr->data)=(*v).r;\\
+ *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\
+ return 1;\\
+ }\\
+ switch (arr->descr->type_num) {\\
+ case PyArray_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\
+ case PyArray_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\
+ case PyArray_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\
+ case PyArray_LONG: *(long *)(arr->data)=(*v).r; break;\\
+ case PyArray_FLOAT: *(float *)(arr->data)=(*v).r; break;\\
+ case PyArray_INT: *(int *)(arr->data)=(*v).r; break;\\
+ /* PyArray_SHORT, PyArray_UBYTE and PyArray_BYTE were duplicated here */\\
+ /* as well as in the group below; duplicate case labels are invalid C, */\\
+ /* so these three cases are handled once, after PyArray_BOOL. */\\
+ case PyArray_BOOL: *(Bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\
+ case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\
+ case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\
+ case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\
+ case PyArray_USHORT: *(ushort *)(arr->data)=(*v).r; break;\\
+ case PyArray_UINT: *(uint *)(arr->data)=(*v).r; break;\\
+ case PyArray_ULONG: *(ulong *)(arr->data)=(*v).r; break;\\
+ case PyArray_LONGLONG: *(longlong *)(arr->data)=(*v).r; break;\\
+ case PyArray_ULONGLONG: *(ulonglong *)(arr->data)=(*v).r; break;\\
+ case PyArray_LONGDOUBLE: *(longdouble *)(arr->data)=(*v).r; break;\\
+ case PyArray_CLONGDOUBLE: *(longdouble *)(arr->data)=(*v).r;*(longdouble *)(arr->data+sizeof(longdouble))=(*v).i;break;\\
+ case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\
+ default: return -2;\\
+ };\\
+ return -1;
+"""
+## cppmacros['NUMFROMARROBJ']="""\
+## #define NUMFROMARROBJ(typenum,ctype) \\
+## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+## \tif (arr) {\\
+## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
+## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
+## \t\t\tgoto capi_fail;\\
+## \t\t} else {\\
+## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\
+## \t\t}\\
+## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+## \t\treturn 1;\\
+## \t}
+## """
+## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
+## cppmacros['CNUMFROMARROBJ']="""\
+## #define CNUMFROMARROBJ(typenum,ctype) \\
+## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+## \tif (arr) {\\
+## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
+## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
+## \t\t\tgoto capi_fail;\\
+## \t\t} else {\\
+## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\
+## \t\t}\\
+## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+## \t\treturn 1;\\
+## \t}
+## """
+
+
+needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR']
+cppmacros['GETSTRFROMPYTUPLE']="""\
+#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
+\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
+\t\tif (rv_cb_str == NULL)\\
+\t\t\tgoto capi_fail;\\
+\t\tif (PyString_Check(rv_cb_str)) {\\
+\t\t\tstr[len-1]='\\0';\\
+\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
+\t\t} else {\\
+\t\t\tPRINTPYOBJERR(rv_cb_str);\\
+\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\
+\t\t\tgoto capi_fail;\\
+\t\t}\\
+\t}
+"""
+cppmacros['GETSCALARFROMPYTUPLE']="""\
+#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
+\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
+\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
+\t\t\tgoto capi_fail;\\
+\t}
+"""
+
+needs['MEMCOPY']=['string.h']
+cppmacros['MEMCOPY']="""\
+#define MEMCOPY(to,from,n)\\
+\tif ((memcpy(to,from,n)) == NULL) {\\
+\t\tPyErr_SetString(PyExc_MemoryError, \"memcpy failed\");\\
+\t\tgoto capi_fail;\\
+\t}
+"""
+cppmacros['STRINGMALLOC']="""\
+#define STRINGMALLOC(str,len)\\
+\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
+\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
+\t\tgoto capi_fail;\\
+\t} else {\\
+\t\t(str)[len] = '\\0';\\
+\t}
+"""
+cppmacros['STRINGFREE']="""\
+#define STRINGFREE(str)\\
+\tif (!(str == NULL)) free(str);
+"""
+needs['STRINGCOPYN']=['string.h']
+cppmacros['STRINGCOPYN']="""\
+#define STRINGCOPYN(to,from,n)\\
+\tif ((strncpy(to,from,sizeof(char)*(n))) == NULL) {\\
+\t\tPyErr_SetString(PyExc_MemoryError, \"strncpy failed\");\\
+\t\tgoto capi_fail;\\
+\t} else if (strlen(to)<(n)) {\\
+\t\tmemset((to)+strlen(to), ' ', (n)-strlen(to));\\
+\t} /* Padding with spaces instead of nulls. */
+"""
+needs['STRINGCOPY']=['string.h']
+cppmacros['STRINGCOPY']="""\
+#define STRINGCOPY(to,from)\\
+\tif ((strcpy(to,from)) == NULL) {\\
+\t\tPyErr_SetString(PyExc_MemoryError, \"strcpy failed\");\\
+\t\tgoto capi_fail;\\
+\t}
+"""
+cppmacros['CHECKGENERIC']="""\
+#define CHECKGENERIC(check,tcheck,name) \\
+\tif (!(check)) {\\
+\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+cppmacros['CHECKARRAY']="""\
+#define CHECKARRAY(check,tcheck,name) \\
+\tif (!(check)) {\\
+\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+cppmacros['CHECKSTRING']="""\
+#define CHECKSTRING(check,tcheck,name,show,var)\\
+\tif (!(check)) {\\
+\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+\t\tfprintf(stderr,show\"\\n\",slen(var),var);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+cppmacros['CHECKSCALAR']="""\
+#define CHECKSCALAR(check,tcheck,name,show,var)\\
+\tif (!(check)) {\\
+\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+\t\tfprintf(stderr,show\"\\n\",var);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+## cppmacros['CHECKDIMS']="""\
+## #define CHECKDIMS(dims,rank) \\
+## \tfor (int i=0;i<(rank);i++)\\
+## \t\tif (dims[i]<0) {\\
+## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
+## \t\t\tgoto capi_fail;\\
+## \t\t}
+## """
+cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
+cppmacros['OLDPYNUM']="""\
+#ifdef OLDPYNUM
+#error You need to install Numeric Python version 13 or higher. Get it from http://sourceforge.net/project/?group_id=1369
+#endif
+"""
+################# C functions ###############
+
+cfuncs['calcarrindex']="""\
+static int calcarrindex(int *i,PyArrayObject *arr) {
+\tint k,ii = i[0];
+\tfor (k=1; k < arr->nd; k++)
+\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */
+\treturn ii;
+}"""
+cfuncs['calcarrindextr']="""\
+static int calcarrindextr(int *i,PyArrayObject *arr) {
+\tint k,ii = i[arr->nd-1];
+\tfor (k=1; k < arr->nd; k++)
+\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */
+\treturn ii;
+}"""
+cfuncs['forcomb']="""\
+static struct { int nd;intp *d;int *i,*i_tr,tr; } forcombcache;
+static int initforcomb(intp *dims,int nd,int tr) {
+ int k;
+ if (dims==NULL) return 0;
+ if (nd<0) return 0;
+ forcombcache.nd = nd;
+ forcombcache.d = dims;
+ forcombcache.tr = tr;
+ if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+ if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+ for (k=1;k<nd;k++) {
+ forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
+ }
+ forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
+ return 1;
+}
+static int *nextforcomb(void) {
+ int j,*i,*i_tr,k;
+ int nd=forcombcache.nd;
+ if ((i=forcombcache.i) == NULL) return NULL;
+ if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
+ if (forcombcache.d == NULL) return NULL;
+ i[0]++;
+ if (i[0]==forcombcache.d[0]) {
+ j=1;
+ while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
+ if (j==nd) {
+ free(i);
+ free(i_tr);
+ return NULL;
+ }
+ for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
+ i[j]++;
+ i_tr[nd-j-1]++;
+ } else
+ i_tr[nd-1]++;
+ if (forcombcache.tr) return i_tr;
+ return i;
+}"""
+needs['try_pyarr_from_string']=['STRINGCOPYN','PRINTPYOBJERR','string']
+cfuncs['try_pyarr_from_string']="""\
+static int try_pyarr_from_string(PyObject *obj,const string str) {
+\tPyArrayObject *arr = NULL;
+\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
+\t\t{ STRINGCOPYN(arr->data,str,PyArray_NBYTES(arr)); }
+\treturn 1;
+capi_fail:
+\tPRINTPYOBJERR(obj);
+\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
+\treturn 0;
+}
+"""
+needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN']
+cfuncs['string_from_pyobj']="""\
+static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
+\tPyArrayObject *arr = NULL;
+\tPyObject *tmp = NULL;
+#ifdef DEBUGCFUNCS
+fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
+#endif
+\tif (obj == Py_None) {
+\t\tif (*len == -1)
+\t\t\t*len = strlen(inistr); /* Will this cause problems? */
+\t\tSTRINGMALLOC(*str,*len);
+\t\tSTRINGCOPYN(*str,inistr,*len);
+\t\treturn 1;
+\t}
+\tif (PyArray_Check(obj)) {
+\t\tif ((arr = (PyArrayObject *)obj) == NULL)
+\t\t\tgoto capi_fail;
+\t\tif (!ISCONTIGUOUS(arr)) {
+\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
+\t\t\tgoto capi_fail;
+\t\t}
+\t\tif (*len == -1)
+\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr);
+\t\tSTRINGMALLOC(*str,*len);
+\t\tSTRINGCOPYN(*str,arr->data,*len);
+\t\treturn 1;
+\t}
+\tif (PyString_Check(obj)) {
+\t\ttmp = obj;
+\t\tPy_INCREF(tmp);
+\t}
+\telse
+\t\ttmp = PyObject_Str(obj);
+\tif (tmp == NULL) goto capi_fail;
+\tif (*len == -1)
+\t\t*len = PyString_GET_SIZE(tmp);
+\tSTRINGMALLOC(*str,*len);
+\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len);
+\tPy_DECREF(tmp);
+\treturn 1;
+capi_fail:
+\tPy_XDECREF(tmp);
+\t{
+\t\tPyObject* err = PyErr_Occurred();
+\t\tif (err==NULL) err = #modulename#_error;
+\t\tPyErr_SetString(err,errmess);
+\t}
+\treturn 0;
+}
+"""
# --- Python object -> C integer conversions -------------------------------
# Each <ctype>_from_pyobj fills *v and returns 1 on success, 0 on failure
# (with a Python exception set from errmess).  The narrow integer types
# delegate to int_from_pyobj and truncate with a plain C cast.
needs['char_from_pyobj']=['int_from_pyobj']
cfuncs['char_from_pyobj']="""\
static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char']
cfuncs['signed_char_from_pyobj']="""\
static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (signed_char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['short_from_pyobj']=['int_from_pyobj']
cfuncs['short_from_pyobj']="""\
static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (short)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# int_from_pyobj also accepts complex objects (takes the real part) and
# sequences (recurses into the first element); strings deliberately fall
# through to the error branch.
cfuncs['int_from_pyobj']="""\
static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = (int)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# long/long long conversions follow the same cascade as int_from_pyobj:
# exact int, PyNumber coercion, complex real part / first sequence
# element, and finally the error branch.
cfuncs['long_from_pyobj']="""\
static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# long_long may overflow, so the PyLong paths check PyErr_Occurred()
# after PyLong_AsLongLong instead of returning 1 unconditionally.
needs['long_long_from_pyobj']=['long_long']
cfuncs['long_long_from_pyobj']="""\
static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyLong_Check(obj)) {
\t\t*v = PyLong_AsLongLong(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyInt_Check(obj)) {
\t\t*v = (long_long)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Long(obj);
\tif (tmp) {
\t\t*v = PyLong_AsLongLong(tmp);
\t\tPy_DECREF(tmp);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_double_from_pyobj']=['double_from_pyobj','long_double']
# long double is converted exactly (via PyArray_ScalarAsCtype) when obj
# is a numpy long-double scalar or 0-dim array; any other object goes
# through a Python float and loses the extra precision.
cfuncs['long_double_from_pyobj']="""\
static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
\tdouble d=0;
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, LongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_LONGDOUBLE) {
\t\t\t/* BUG FIX: this statement lacked its terminating semicolon,
\t\t\t   so the generated C module failed to compile whenever this
\t\t\t   conversion was emitted. */
\t\t\t(*v) = *((longdouble *)PyArray_DATA(obj));
\t\t\treturn 1;
\t\t}
\t}
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (long_double)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# double_from_pyobj: floats pass straight through; everything else is
# coerced with PyNumber_Float, then the complex-real-part / first
# sequence-element fallbacks shared with the integer converters.
# NOTE(review): the __sgi branches call the checked PyFloat_AsDouble
# instead of the PyFloat_AS_DOUBLE macro -- presumably a platform
# workaround; confirm before removing.
cfuncs['double_from_pyobj']="""\
static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(obj);
#else
\t\t*v = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\ttmp = PyNumber_Float(obj);
\tif (tmp) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(tmp);
#else
\t\t*v = PyFloat_AS_DOUBLE(tmp);
#endif
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# float is converted through double and narrowed with a cast.
needs['float_from_pyobj']=['double_from_pyobj']
cfuncs['float_from_pyobj']="""\
static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
\tdouble d=0.0;
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (float)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# --- Complex conversions ---------------------------------------------------
# complex_long_double and complex_float are layered on top of
# complex_double_from_pyobj, which understands Python complex, numpy
# complex scalars, 0-dim arrays, plain numbers and sequences.
needs['complex_long_double_from_pyobj']=['complex_long_double','long_double',
 'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj']="""\
static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, CLongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_CLONGDOUBLE) {
\t\t\t(*v).r = ((clongdouble *)PyArray_DATA(obj))->real;
\t\t\t(*v).i = ((clongdouble *)PyArray_DATA(obj))->imag;
\t\t\treturn 1;
\t\t}
\t}
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (long_double)cd.r;
\t\t(*v).i = (long_double)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['complex_double_from_pyobj']=['complex_double']
cfuncs['complex_double_from_pyobj']="""\
static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
\tPy_complex c;
\tif (PyComplex_Check(obj)) {
\t\tc=PyComplex_AsCComplex(obj);
\t\t(*v).r=c.real, (*v).i=c.imag;
\t\treturn 1;
\t}
\tif (PyArray_IsScalar(obj, ComplexFloating)) {
\t\tif (PyArray_IsScalar(obj, CFloat)) {
\t\t\tcfloat new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse if (PyArray_IsScalar(obj, CLongDouble)) {
\t\t\tclongdouble new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t}
\t\treturn 1;
\t}
\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
\t\tPyObject *arr;
\t\tif (PyArray_Check(obj)) {
\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, PyArray_CDOUBLE);
\t\t}
\t\telse {
\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(PyArray_CDOUBLE));
\t\t}
\t\tif (arr==NULL) return 0;
\t\t(*v).r = ((cdouble *)PyArray_DATA(arr))->real;
\t\t(*v).i = ((cdouble *)PyArray_DATA(arr))->imag;
\t\treturn 1;
\t}
\t/* Python does not provide PyNumber_Complex function :-( */
\t(*v).i=0.0;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t(*v).r = PyFloat_AsDouble(obj);
#else
\t\t(*v).r = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\tif (PyInt_Check(obj)) {
\t\t(*v).r = (double)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\tif (PyLong_Check(obj)) {
\t\t(*v).r = PyLong_AsDouble(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PySequence_Check(obj) && (!PyString_Check(obj))) {
\t\tPyObject *tmp = PySequence_GetItem(obj,0);
\t\tif (tmp) {
\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) {
\t\t\t\tPy_DECREF(tmp);
\t\t\t\treturn 1;
\t\t\t}
\t\t\tPy_DECREF(tmp);
\t\t}
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL)
\t\t\terr = PyExc_TypeError;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj']="""\
static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (float)cd.r;
\t\t(*v).i = (float)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# try_pyarr_from_<ctype>(obj,v): if obj is a suitable array object, copy
# the scalar *v back into it (propagates intent(inout)-style scalar
# results); implemented by the TRYPYARRAYTEMPLATE macros.
needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
# BUG FIX: this entry was registered under the key
# 'try_pyarr_from_signed_char' (a copy-paste slip); the signed_char
# assignment two lines below overwrote it, so
# try_pyarr_from_unsigned_char never had its dependencies recorded.
needs['try_pyarr_from_unsigned_char']=['TRYPYARRAYTEMPLATE','unsigned_char']
cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char']
cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long']
cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float']
cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double']
cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
+
# create_cb_arglist builds the Python argument tuple for a user-supplied
# call-back: it introspects the callable to find the total/optional
# argument counts, appends the extra-arguments tuple xa, and reports the
# number of "real" arguments via *nofargs.
needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX']
cfuncs['create_cb_arglist']="""\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
\tPyObject *tmp = NULL;
\tPyObject *tmp_fun = NULL;
\tint tot,opt,ext,siz,i,di=0;
\tCFUNCSMESS(\"create_cb_arglist\\n\");
\ttot=opt=ext=siz=0;
\t/* Get the total number of arguments */
\tif (PyFunction_Check(fun))
\t\ttmp_fun = fun;
\telse {
\t\tdi = 1;
\t\tif (PyObject_HasAttrString(fun,\"im_func\")) {
\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\");
\t\t}
\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) {
\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\");
\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\"))
\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
\t\t\telse {
\t\t\t\ttmp_fun = fun; /* built-in function */
\t\t\t\ttot = maxnofargs;
\t\t\t\tif (xa != NULL)
\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\t}
\t\t\tPy_XDECREF(tmp);
\t\t}
\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\ttmp_fun = fun;
\t\t}
\t\telse if (PyCObject_Check(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\text = PyTuple_Size((PyObject *)xa);
\t\t\tif(ext>0) {
\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t\ttmp_fun = fun;
\t\t}
\t}
if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":fun->ob_type->tp_name));
goto capi_fail;
}
\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
\t\t/* TODO(review): the co_argcount attribute object returned by
\t\t   PyObject_GetAttrString below leaks one reference. */
\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
\t\tPy_XDECREF(tmp);
\t}
\t/* Get the number of optional arguments */
\t/* BUG FIX: braces added -- without them the Py_XDECREF(tmp) ran
\t   unconditionally and, when func_defaults was absent, decref'ed the
\t   stale pointer left over from the func_code block above (double
\t   decref / premature free). */
\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
\t\t\topt = PyTuple_Size(tmp);
\t\tPy_XDECREF(tmp);
\t}
\t/* Get the number of extra arguments */
\tif (xa != NULL)
\t\text = PyTuple_Size((PyObject *)xa);
\t/* Calculate the size of call-backs argument list */
\tsiz = MIN(maxnofargs+ext,tot);
\t*nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
#endif
\tif (siz<tot-opt) {
\t\tfprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
\t\tgoto capi_fail;
\t}
\t/* Initialize argument list */
\t*args = (PyTupleObject *)PyTuple_New(siz);
\tfor (i=0;i<*nofargs;i++) {
\t\tPy_INCREF(Py_None);
\t\tPyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
\t}
\tif (xa != NULL)
\t\tfor (i=(*nofargs);i<siz;i++) {
\t\t\ttmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
\t\t\tPy_INCREF(tmp);
\t\t\tPyTuple_SET_ITEM(*args,i,tmp);
\t\t}
\tCFUNCSMESS(\"create_cb_arglist-end\\n\");
\treturn 1;
capi_fail:
\tif ((PyErr_Occurred())==NULL)
\t\tPyErr_SetString(#modulename#_error,errmess);
\treturn 0;
}
"""
+
def buildcfuncs():
    """Register the pyarr_from_p_<ctype>1 C macros for every C type known
    to capi_maps, plus a one-dimensional variant for strings."""
    from capi_maps import c2capi_map
    for ctype in c2capi_map.keys():
        macro = 'pyarr_from_p_%s1' % ctype
        cppmacros[macro] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (macro, c2capi_map[ctype])
    # Strings get a rank-1 macro taking an explicit dims argument.
    macro = 'pyarr_from_p_string1'
    cppmacros[macro] = '#define %s(v,dims) (PyArray_SimpleNewFromData(1,dims,PyArray_CHAR,(char *)v))' % (macro)
+
+
+############ Auxiliary functions for sorting needs ###################
+
def append_needs(need,flag=1):
    # Record `need` (a key of one of the category tables below, or a list
    # of such keys) in the global `outneeds` registry, first pulling in
    # its transitive dependencies from `needs` so that dependencies end
    # up before their dependents.  With flag=0 nothing is committed to
    # `outneeds`; the collected needs are returned as a
    # {category: [names]} dict instead (used for the recursive calls).
    global outneeds,needs
    if type(need)==types.ListType:
        for n in need:
            append_needs(n,flag)
    elif type(need)==types.StringType:
        if not need: return
        # Determine which category table defines this need.
        if includes0.has_key(need): n = 'includes0'
        elif includes.has_key(need): n = 'includes'
        elif typedefs.has_key(need): n = 'typedefs'
        elif typedefs_generated.has_key(need): n = 'typedefs_generated'
        elif cppmacros.has_key(need): n = 'cppmacros'
        elif cfuncs.has_key(need): n = 'cfuncs'
        elif callbacks.has_key(need): n = 'callbacks'
        elif f90modhooks.has_key(need): n = 'f90modhooks'
        elif commonhooks.has_key(need): n = 'commonhooks'
        else:
            errmess('append_needs: unknown need %s\n'%(`need`))
            return
        if need in outneeds[n]: return
        if flag:
            # Collect all transitive dependencies with flag=0 ...
            tmp={}
            if needs.has_key(need):
                for nn in needs[need]:
                    t=append_needs(nn,0)
                    if type(t)==types.DictType:
                        for nnn in t.keys():
                            if tmp.has_key(nnn): tmp[nnn]=tmp[nnn]+t[nnn]
                            else: tmp[nnn]=t[nnn]
            # ... then prepend them so they precede `need` in outneeds.
            for nn in tmp.keys():
                for nnn in tmp[nn]:
                    if nnn not in outneeds[nn]:
                        outneeds[nn]=[nnn]+outneeds[nn]
            outneeds[n].append(need)
        else:
            # Dry run: accumulate into a dict and hand it back upwards.
            tmp={}
            if needs.has_key(need):
                for nn in needs[need]:
                    t=append_needs(nn,flag)
                    if type(t)==types.DictType:
                        for nnn in t.keys():
                            if tmp.has_key(nnn): tmp[nnn]=t[nnn]+tmp[nnn]
                            else: tmp[nnn]=t[nnn]
            if not tmp.has_key(n): tmp[n]=[]
            tmp[n].append(need)
            return tmp
    else:
        errmess('append_needs: expected list or string but got :%s\n'%(`need`))
+
def get_needs():
    # Topologically sort every outneeds list so each entry appears after
    # the entries it depends on (dependencies are given by `needs`).
    # Entries that still have a pending dependency are rotated to the
    # back; no progress over a full pass means a dependency cycle.
    global outneeds,needs
    res={}
    for n in outneeds.keys():
        out=[]
        saveout=copy.copy(outneeds[n])
        while len(outneeds[n])>0:
            if not needs.has_key(outneeds[n][0]):
                # No dependencies at all: emit immediately.
                out.append(outneeds[n][0])
                del outneeds[n][0]
            else:
                flag=0
                for k in outneeds[n][1:]:
                    if k in needs[outneeds[n][0]]:
                        flag=1
                        break
                if flag:
                    # A dependency is still queued behind us: rotate.
                    outneeds[n]=outneeds[n][1:]+[outneeds[n][0]]
                else:
                    out.append(outneeds[n][0])
                    del outneeds[n][0]
            if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])):
                # The queue is unchanged since last pass: circular deps.
                print n,saveout
                errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
                out=out+saveout
                break
            saveout=copy.copy(outneeds[n])
        if out==[]: out=[n]
        res[n]=out
    return res
diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py
new file mode 100644
index 000000000..e7b4decca
--- /dev/null
+++ b/numpy/f2py/common_rules.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+"""
+
+Build common block mechanism for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 10:57:33 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.19 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+import pprint
+import sys,string,time,types,copy
+errmess=sys.stderr.write
+outmess=sys.stdout.write
+show=pprint.pprint
+
+from auxfuncs import *
+import capi_maps
+import cfuncs
+import func2subr
+from crackfortran import rmbadname
+##############
+
def findcommonblocks(block,top=1):
    """Recursively collect the COMMON blocks declared in `block` and its
    body, as (name, variable-name-list, {name: vardef}) triples.  At the
    top level only the first occurrence of each block name is kept."""
    found = []
    if hascommon(block):
        for cname in block['common'].keys():
            cvars = {}
            for vname in block['common'][cname]:
                cvars[vname] = block['vars'][vname]
            found.append((cname, block['common'][cname], cvars))
    elif hasbody(block):
        for sub in block['body']:
            found = found + findcommonblocks(sub, 0)
    if not top:
        return found
    # Deduplicate by block name, preserving first-seen order.
    seen = []
    unique = []
    for item in found:
        if item[0] not in seen:
            seen.append(item[0])
            unique.append(item)
    return unique
+
def buildhooks(m):
    """Build the C, Fortran and LaTeX support code for every COMMON block
    found in module block `m`; returns (rules-dict, fortran-wrapper-source)."""
    ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']}
    # One-element-list accumulators so the nested adders can mutate them.
    fwrap = ['']
    def fadd(line,s=fwrap): s[0] = '%s\n      %s'%(s[0],line)
    chooks = ['']
    def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line)
    ihooks = ['']
    def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line)
    doc = ['']
    def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line)
    for (name,vnames,vars) in findcommonblocks(m):
        lower_name = string.lower(name)
        hnames,inames = [],[]
        for n in vnames:
            if isintent_hide(vars[n]): hnames.append(n)
            else: inames.append(n)
        if hnames:
            outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t  %s\n\t\t  Hidden: %s\n'%(name,string.join(inames,','),string.join(hnames,',')))
        else:
            outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t  %s\n'%(name,string.join(inames,',')))
        # Fortran side: a f2pyinit<name> subroutine that declares the
        # common block and hands the member addresses to setupfunc.
        fadd('subroutine f2pyinit%s(setupfunc)'%name)
        fadd('external setupfunc')
        for n in vnames:
            fadd(func2subr.var2fixfortran(vars,n))
        if name=='_BLNK_':
            fadd('common %s'%(string.join(vnames,',')))
        else:
            fadd('common /%s/ %s'%(name,string.join(vnames,',')))
        fadd('call setupfunc(%s)'%(string.join(inames,',')))
        fadd('end\n')
        # C side: a FortranDataDef table plus setup/init glue functions.
        cadd('static FortranDataDef f2py_%s_def[] = {'%(name))
        idims=[]
        for n in inames:
            ct = capi_maps.getctype(vars[n])
            at = capi_maps.c2capi_map[ct]
            dm = capi_maps.getarrdims(n,vars[n])
            if dm['dims']: idims.append('(%s)'%(dm['dims']))
            else: idims.append('')
            dms=string.strip(dm['dims'])
            if not dms: dms='-1'
            cadd('\t{\"%s\",%s,{{%s}},%s},'%(n,dm['rank'],dms,at))
        cadd('\t{NULL}\n};')
        inames1 = rmbadname(inames)
        inames1_tps = string.join(map(lambda s:'char *'+s,inames1),',')
        cadd('static void f2py_setup_%s(%s) {'%(name,inames1_tps))
        cadd('\tint i_f2py=0;')
        for n in inames1:
            cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name,n))
        cadd('}')
        # Names containing '_' need the F_FUNC_US mangling variant.
        if '_' in lower_name:
            F_FUNC='F_FUNC_US'
        else:
            F_FUNC='F_FUNC'
        cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\
             %(F_FUNC,lower_name,string.upper(name),
               string.join(['char*']*len(inames1),',')))
        cadd('static void f2py_init_%s(void) {'%name)
        cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\
             %(F_FUNC,lower_name,string.upper(name),name))
        cadd('}\n')
        iadd('\tPyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name,name,name))
        # LaTeX documentation for the block.
        tname = string.replace(name,'_','\\_')
        dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname))
        dadd('\\begin{description}')
        for n in inames:
            dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,vars[n])))
            if hasnote(vars[n]):
                note = vars[n]['note']
                if type(note) is type([]): note=string.join(note,'\n')
                dadd('--- %s'%(note))
        dadd('\\end{description}')
        ret['docs'].append('"\t/%s/ %s\\n"'%(name,string.join(map(lambda v,d:v+d,inames,idims),',')))
    ret['commonhooks']=chooks
    ret['initcommonhooks']=ihooks
    ret['latexdoc']=doc[0]
    if len(ret['docs'])<=1: ret['docs']=''
    return ret,fwrap[0]
+
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
new file mode 100755
index 000000000..2a91709bc
--- /dev/null
+++ b/numpy/f2py/crackfortran.py
@@ -0,0 +1,2659 @@
+#!/usr/bin/env python
+"""
+crackfortran --- read fortran (77,90) code and extract declaration information.
+ Usage is explained in the comment block below.
+
+Copyright 1999-2004 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/09/27 07:13:49 $
+Pearu Peterson
+"""
+__version__ = "$Revision: 1.177 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+"""
+ Usage of crackfortran:
+ ======================
+ Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
+ -m <module name for f77 routines>,--ignore-contains
+ Functions: crackfortran, crack2fortran
+ The following Fortran statements/constructions are supported
+ (or will be if needed):
+ block data,byte,call,character,common,complex,contains,data,
+ dimension,double complex,double precision,end,external,function,
+ implicit,integer,intent,interface,intrinsic,
+ logical,module,optional,parameter,private,public,
+ program,real,(sequence?),subroutine,type,use,virtual,
+ include,pythonmodule
+ Note: 'virtual' is mapped to 'dimension'.
+ Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug).
+ Note: code after 'contains' will be ignored until its scope ends.
+ Note: 'common' statement is extended: dimensions are moved to variable definitions
+ Note: f2py directive: <commentchar>f2py<line> is read as <line>
+ Note: pythonmodule is introduced to represent Python module
+
+ Usage:
+ `postlist=crackfortran(files,funcs)`
+ `postlist` contains declaration information read from the list of files `files`.
+ `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
+
+ `postlist` has the following structure:
+ *** it is a list of dictionaries containing `blocks':
+ B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
+ 'implicit','externals','interfaced','common','sortvars',
+ 'commonvars','note']}
+ B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
+ 'program' | 'block data' | 'type' | 'pythonmodule'
+ B['body'] --- list containing `subblocks' with the same structure as `blocks'
+ B['parent_block'] --- dictionary of a parent block:
+ C['body'][<index>]['parent_block'] is C
+ B['vars'] --- dictionary of variable definitions
+ B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
+ B['name'] --- name of the block (not if B['block']=='interface')
+ B['prefix'] --- prefix string (only if B['block']=='function')
+ B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
+ B['result'] --- name of the return value (only if B['block']=='function')
+ B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
+ B['externals'] --- list of variables being external
+ B['interfaced'] --- list of variables being external and defined
+ B['common'] --- dictionary of common blocks (list of objects)
+ B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
+ B['from'] --- string showing the 'parents' of the current block
+ B['use'] --- dictionary of modules used in current block:
+ {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
+ B['note'] --- list of LaTeX comments on the block
+ B['f2pyenhancements'] --- optional dictionary
+ {'threadsafe':'','fortranname':<name>,
+ 'callstatement':<C-expr>|<multi-line block>,
+ 'callprotoargument':<C-expr-list>,
+ 'usercode':<multi-line block>|<list of multi-line blocks>,
+ 'pymethoddef:<multi-line block>'
+ }
+ B['entry'] --- dictionary {entryname:argslist,..}
+ B['varnames'] --- list of variable names given in the order of reading the
+ Fortran code, useful for derived types.
+ *** Variable definition is a dictionary
+ D = B['vars'][<variable name>] =
+ {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
+ D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
+ 'double precision' | 'integer' | 'logical' | 'real' | 'type'
+ D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
+ 'external','intent(in|out|inout|hide|c|callback|cache)',
+ 'optional','required', etc)
+ K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
+ 'complex' | 'integer' | 'logical' | 'real' )
+ C = D['charselector'] = {['*','len','kind']}
+ (only if D['typespec']=='character')
+ D['='] --- initialization expression string
+ D['typename'] --- name of the type if D['typespec']=='type'
+ D['dimension'] --- list of dimension bounds
+ D['intent'] --- list of intent specifications
+ D['depend'] --- list of variable names on which current variable depends on
+ D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
+ D['note'] --- list of LaTeX comments on the variable
+ *** Meaning of kind/char selectors (few examples):
+     D['typespec']*K['*']
+ D['typespec'](kind=K['kind'])
+ character*C['*']
+ character(len=C['len'],kind=C['kind'])
+ (see also fortran type declaration statement formats below)
+
+ Fortran 90 type declaration statement format (F77 is subset of F90)
+====================================================================
+ (Main source: IBM XL Fortran 5.1 Language Reference Manual)
+ type declaration = <typespec> [[<attrspec>]::] <entitydecl>
+ <typespec> = byte |
+ character[<charselector>] |
+ complex[<kindselector>] |
+ double complex |
+ double precision |
+ integer[<kindselector>] |
+ logical[<kindselector>] |
+ real[<kindselector>] |
+ type(<typename>)
+ <charselector> = * <charlen> |
+ ([len=]<len>[,[kind=]<kind>]) |
+ (kind=<kind>[,len=<len>])
+ <kindselector> = * <intlen> |
+ ([kind=]<kind>)
+ <attrspec> = comma separated list of attributes.
+ Only the following attributes are used in
+ building up the interface:
+ external
+ (parameter --- affects '=' key)
+ optional
+ intent
+ Other attributes are ignored.
+ <intentspec> = in | out | inout
+ <arrayspec> = comma separated list of dimension bounds.
+ <entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
+ [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
+
+ In addition, the following attributes are used: check,depend,note
+
+ TODO:
+ * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
+ -> 'real x(2)')
+ The above may be solved by creating appropriate preprocessor program, for example.
+"""
+#
+import sys,string,fileinput,re,pprint,os,copy
+from auxfuncs import *
+
# Global flags:
strictf77=1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform='fix' # 'fix','free'
quiet=0 # Be verbose if 0 (Obsolete: not used any more)
verbose=1 # Be quiet if 0, extra verbose if > 1.
tabchar=4*' '
pyffilename=''
f77modulename=''
skipemptyends=0 # for old F77 programs without 'program' statement
ignorecontains=1
dolowercase=1
debug=[]
## do_analyze = 1

###### global variables

## use reload(crackfortran) to reset these variables

# Mutable parser state shared by the crack* routines in this module.
groupcounter=0
grouplist={groupcounter:[]}
neededmodule=-1
expectbegin=1
skipblocksuntil=-1
usermodules=[]
f90modulevars={}
gotnextfile=1
filepositiontext=''
currentfilename=''
skipfunctions=[]
skipfuncs=[]
onlyfuncs=[]
include_paths=[]
previous_context = None
+
###### Some helper functions
def show(o,f=0):pprint.pprint(o)
errmess=sys.stderr.write
def outmess(line,flag=1):
    # Write a progress/status message to stdout (prefixed with the
    # current file position when flag is set); silenced unless verbose.
    global filepositiontext
    if not verbose: return
    if not quiet:
        if flag:sys.stdout.write(filepositiontext)
        sys.stdout.write(line)
# Shrink re's pattern cache -- presumably to bound memory use given the
# many patterns this module compiles; confirm before changing.
re._MAXCACHE=50
# Fortran's default implicit typing: i-n are integer, everything else real.
defaultimplicitrules={}
for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'}
for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'}
del c
badnames={}
invbadnames={}
# Fortran identifiers that would collide with C keywords or with names
# used internally by the generated wrappers; they are mangled to <name>_bn.
for n in ['int','double','float','char','short','long','void','case','while',
          'return','signed','unsigned','if','for','typedef','sizeof','union',
          'struct','static','register','new','break','do','goto','switch',
          'continue','else','inline','extern','delete','const','auto',
          'len','rank','shape','index','slen','size','_i',
          'flen','fshape',
          'string','complex_double','float_double','stdin','stderr','stdout',
          'type','default']:
    badnames[n]=n+'_bn'
    invbadnames[n+'_bn']=n
def rmbadname1(name):
    """Return a C-safe variant of `name` (mangled with '_bn') if it would
    collide with a C keyword or an internal name; otherwise return it as is."""
    # `in` instead of the deprecated dict.has_key().
    if name in badnames:
        errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name,badnames[name]))
        return badnames[name]
    return name
def rmbadname(names): return map(rmbadname1,names)
+
def undo_rmbadname1(name):
    """Inverse of rmbadname1: strip the '_bn' mangling if `name` carries it."""
    # `in` instead of the deprecated dict.has_key().
    if name in invbadnames:
        errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\
                %(name,invbadnames[name]))
        return invbadnames[name]
    return name
def undo_rmbadname(names): return map(undo_rmbadname1,names)
+
def getextension(name):
    """Return the filename extension of `name` (without the dot), or ''
    when there is none or the last dot lies inside a directory component."""
    # str.rfind works on both Python 2 and 3; string.rfind is the
    # deprecated (and in Python 3, removed) module-function form.
    i = name.rfind('.')
    if i==-1: return ''
    if '\\' in name[i:]: return ''
    if '/' in name[i:]: return ''
    return name[i+1:]
+
# Heuristics for deciding the source form of a Fortran file.
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match
# Emacs-style mode cookies on the first line override the extension.
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search
# Matches a prefix that cannot occur in fixed form: column 1 is not a
# comment char and columns 1-5 hold a non-blank, non-label character.
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match
def is_free_format(file):
    """Check if file is in free format Fortran."""
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
    result = 0
    f = open(file,'r')
    line = f.readline()
    n = 15 # the number of non-comment lines to scan for hints
    if _has_f_header(line):
        n = 0
    elif _has_f90_header(line):
        n = 0
        result = 1
    while n>0 and line:
        if line[0]!='!':
            n -= 1
            # Free-form hints: a statement starting inside columns 1-5
            # (impossible in fixed form) or a trailing '&' continuation.
            if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&':
                result = 1
                break
        line = f.readline()
    f.close()
    return result
+
+
+####### Read fortran (77,90) code
def readfortrancode(ffile,dowithline=show,istop=1):
    """
    Read fortran codes from files and
     1) Get rid of comments, line continuations, and empty lines; lower cases.
     2) Call dowithline(line) on every line.
     3) Recursively call itself when statement \"include '<filename>'\" is met.

    ffile      --- file name or list of file names (anything fileinput accepts).
    dowithline --- callback applied to every assembled statement; it is also
                   called with ('',-1) before and ('',1) after a top-level read.
    istop      --- nonzero for the top-level call; include-file recursion passes
                   0 so that the module-level reader state is saved/restored.
    """
    global gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
           beginpattern,quiet,verbose,dolowercase,include_paths
    if not istop:
        # Recursive call for an include file: remember the reader state of the
        # including file so it can be restored when this file is finished.
        saveglobals=gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
                    beginpattern,quiet,verbose,dolowercase
    if ffile==[]: return
    localdolowercase = dolowercase
    cont=0
    finalline=''
    ll=''
    # 'line': longest prefix whose quotes are balanced before a '!';
    # 'rest': the comment text following the '!'.
    commentline=re.compile(r'(?P<line>([^"]*"[^"]*"[^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!]*))!{1}(?P<rest>.*)')
    includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")',re.I)
    cont1=re.compile(r'(?P<line>.*)&\s*\Z')   # free-form trailing '&' continuation
    cont2=re.compile(r'(\s*&|)(?P<line>.*)')  # free-form leading '&' on next line
    mline_mark = re.compile(r".*?'''")        # f2py ''' multiline ''' marker
    if istop: dowithline('',-1)
    ll,l1='',''
    spacedigits=[' ']+map(str,range(10))  # chars legal in column 1 of fixed form
    filepositiontext=''
    fin=fileinput.FileInput(ffile)
    while 1:
        l=fin.readline()
        if not l: break
        if fin.isfirstline():
            # A new file begins: detect its source form from name and first line.
            filepositiontext=''
            currentfilename=fin.filename()
            gotnextfile=1
            l1=l
            strictf77=0
            sourcecodeform='fix'
            ext = os.path.splitext(currentfilename)[1]
            if is_f_file(currentfilename) and \
                   not (_has_f90_header(l) or _has_fix_header(l)):
                strictf77=1
            elif is_free_format(currentfilename) and not _has_fix_header(l):
                sourcecodeform='free'
            if strictf77: beginpattern=beginpattern77
            else: beginpattern=beginpattern90
            outmess('\tReading file %s (format:%s%s)\n'\
                    %(repr(currentfilename),sourcecodeform,
                      strictf77 and ',strict' or ''))

        l=string.expandtabs(l).replace('\xa0',' ')
        while not l=='': # Get rid of newline characters
            if l[-1] not in "\n\r\f": break
            l=l[:-1]
        if not strictf77:
            r=commentline.match(l)
            if r:
                l=r.group('line')+' ' # Strip comments starting with `!'
                rl=r.group('rest')
                if string.lower(rl[:4])=='f2py': # f2py directive
                    l = l + 4*' '
                    r=commentline.match(rl[4:])
                    # BUG FIX: original read ``l=l+r('line')`` which *calls*
                    # the match object (TypeError); r.group('line') is meant.
                    if r: l=l+r.group('line')
                    else: l = l + rl[4:]
        if string.strip(l)=='': # Skip empty line
            cont=0
            continue
        if sourcecodeform=='fix':
            if l[0] in ['*','c','!','C','#']:
                if string.lower(l[1:5])=='f2py': # f2py directive
                    l='     '+l[5:]
                else: # Skip comment line
                    cont=0
                    continue
            elif strictf77:
                if len(l)>72: l=l[:72]  # strict F77 ignores columns beyond 72
            if not (l[0] in spacedigits):
                raise 'readfortrancode: Found non-(space,digit) char in the first column.\n\tAre you sure that this code is in fix form?\n\tline=%s'%repr(l)

            if (not cont or strictf77) and (len(l)>5 and not l[5]==' '):
                # Continuation of a previous line
                ll=ll+l[6:]
                finalline=''
                origfinalline=''
            else:
                if not strictf77:
                    # F90 continuation
                    r=cont1.match(l)
                    if r: l=r.group('line') # Continuation follows ..
                    if cont:
                        ll=ll+cont2.match(l).group('line')
                        finalline=''
                        origfinalline=''
                    else:
                        l='     '+l[5:] # clean up line beginning from possible digits.
                        if localdolowercase: finalline=string.lower(ll)
                        else: finalline=ll
                        origfinalline=ll
                        ll=l
                    cont=(r is not None)
                else:
                    l='     '+l[5:] # clean up line beginning from possible digits.
                    if localdolowercase: finalline=string.lower(ll)
                    else: finalline=ll
                    origfinalline =ll
                    ll=l

        elif sourcecodeform=='free':
            if not cont and ext=='.pyf' and mline_mark.match(l):
                # Swallow an f2py ''' multiline ''' block verbatim.
                l = l + '\n'
                while 1:
                    lc = fin.readline()
                    if not lc:
                        errmess('Unexpected end of file when reading multiline\n')
                        break
                    l = l + lc
                    if mline_mark.match(lc):
                        break
                l = l.rstrip()
            r=cont1.match(l)
            if r: l=r.group('line') # Continuation follows ..
            if cont:
                ll=ll+cont2.match(l).group('line')
                finalline=''
                origfinalline=''
            else:
                if localdolowercase: finalline=string.lower(ll)
                else: finalline=ll
                origfinalline =ll
                ll=l
            cont=(r is not None)
        else:
            raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform))
        filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
        m=includeline.match(origfinalline)
        if m:
            fn=m.group('name')
            if os.path.isfile(fn):
                readfortrancode(fn,dowithline=dowithline,istop=0)
            else:
                include_dirs = [os.path.dirname(currentfilename)] + include_paths
                foundfile = 0
                for inc_dir in include_dirs:
                    fn1 = os.path.join(inc_dir,fn)
                    if os.path.isfile(fn1):
                        foundfile = 1
                        readfortrancode(fn1,dowithline=dowithline,istop=0)
                        break
                if not foundfile:
                    outmess('readfortrancode: could not find include file %s. Ignoring.\n'%(repr(fn)))
        else:
            dowithline(finalline)
    # Flush the final assembled statement -- the loop above always emits the
    # statement *preceding* the current input line.
    l1=ll
    if localdolowercase:
        finalline=string.lower(ll)
    else: finalline=ll
    origfinalline = ll
    filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
    m=includeline.match(origfinalline)
    if m:
        fn=m.group('name')
        # NOTE(review): unlike the loop above, this tail path searches only the
        # current file's directory and ignores include_paths -- looks
        # inconsistent; confirm before unifying.
        fn1=os.path.join(os.path.dirname(currentfilename),fn)
        if os.path.isfile(fn):
            readfortrancode(fn,dowithline=dowithline,istop=0)
        elif os.path.isfile(fn1):
            readfortrancode(fn1,dowithline=dowithline,istop=0)
        else:
            outmess('readfortrancode: could not find include file %s. Ignoring.\n'%(repr(fn)))
    else:
        dowithline(finalline)
    filepositiontext=''
    fin.close()
    if istop: dowithline('',1)
    else:
        # Restore the including file's reader state.
        gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
                    beginpattern,quiet,verbose,dolowercase=saveglobals
+
########### Crack line
# Template for all statement-recognizing regexps below.  The %s slots are
# filled with (prefix-pattern, keyword-alternatives, keyword-alternatives,
# tail-pattern); named groups: 'before' (prefix), 'this' (the matched
# keyword), 'after' (rest of the statement).
beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
                r'\s*(?P<this>(\b(%s)\b))'+ \
                r'\s*(?P<after>%s)\s*\Z'
##
# Each "pattern" below is a (compiled_regexp, tag) tuple; crackline() matches
# pattern[0] and dispatches analyzeline() on pattern[1].
fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern=re.compile(beforethisafter%('',fortrantypes,fortrantypes,'.*'),re.I),'type'
typespattern4implicit=re.compile(beforethisafter%('',fortrantypes+'|static|automatic|undefined',fortrantypes+'|static|automatic|undefined','.*'),re.I)
#
functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)','function','function','.*'),re.I),'begin'
subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?','subroutine','subroutine','.*'),re.I),'begin'
#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
# Block-opening statements: F77 set, extended for F90.
groupbegins77=r'program|block\s*data'
beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin'
groupbegins90=groupbegins77+r'|module|python\s*module|interface|type(?!\s*\()'
beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin'
groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end'
#endifs='end\s*(if|do|where|select|while|forall)'
endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern=re.compile(beforethisafter%('[\w]*?',endifs,endifs,'[\w\s]*'),re.I),'endif'
#
# Declaration/attribute statements.
implicitpattern=re.compile(beforethisafter%('','implicit','implicit','.*'),re.I),'implicit'
dimensionpattern=re.compile(beforethisafter%('','dimension|virtual','dimension|virtual','.*'),re.I),'dimension'
externalpattern=re.compile(beforethisafter%('','external','external','.*'),re.I),'external'
optionalpattern=re.compile(beforethisafter%('','optional','optional','.*'),re.I),'optional'
requiredpattern=re.compile(beforethisafter%('','required','required','.*'),re.I),'required'
publicpattern=re.compile(beforethisafter%('','public','public','.*'),re.I),'public'
privatepattern=re.compile(beforethisafter%('','private','private','.*'),re.I),'private'
# NOTE(review): 'intrisic' is a misspelling of 'intrinsic', but the same
# spelling is used consistently by crackline/analyzeline; renaming needs a
# coordinated change.
intrisicpattern=re.compile(beforethisafter%('','intrisic','intrisic','.*'),re.I),'intrisic'
intentpattern=re.compile(beforethisafter%('','intent|depend|note|check','intent|depend|note|check','\s*\(.*?\).*'),re.I),'intent'
parameterpattern=re.compile(beforethisafter%('','parameter','parameter','\s*\(.*'),re.I),'parameter'
datapattern=re.compile(beforethisafter%('','data','data','.*'),re.I),'data'
callpattern=re.compile(beforethisafter%('','call','call','.*'),re.I),'call'
entrypattern=re.compile(beforethisafter%('','entry','entry','.*'),re.I),'entry'
callfunpattern=re.compile(beforethisafter%('','callfun','callfun','.*'),re.I),'callfun'
commonpattern=re.compile(beforethisafter%('','common','common','.*'),re.I),'common'
usepattern=re.compile(beforethisafter%('','use','use','.*'),re.I),'use'
containspattern=re.compile(beforethisafter%('','contains','contains',''),re.I),'contains'
formatpattern=re.compile(beforethisafter%('','format','format','.*'),re.I),'format'
## Non-fortran and f2py-specific statements
f2pyenhancementspattern=re.compile(beforethisafter%('','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','.*'),re.I|re.S),'f2pyenhancements'
multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z",re.S),'multiline'
##
+
def _simplifyargs(argsline):
    """Flatten an argument list: every '(', ')' and ',' *inside* an argument
    becomes '_', and the arguments are re-joined with plain commas."""
    simplified = []
    for token in markoutercomma(argsline).split('@,@'):
        for special in '(),':
            token = token.replace(special,'_')
        simplified.append(token)
    return ','.join(simplified)
+
# Matches '<result> = ...' -- used to pick up the result variable of a
# detected external function call.
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*',re.I)
def crackline(line,reset=0):
    """
    reset=-1 --- initialize
    reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occurred

    Cracked data is saved in grouplist[0].
    """
    global beginpattern,groupcounter,groupname,groupcache,grouplist,gotnextfile,\
           filepositiontext,currentfilename,neededmodule,expectbegin,skipblocksuntil,\
           skipemptyends,previous_context
    # Multiple statements on one line: crack each ';'-separated part
    # (except inside f2py enhancement / multiline blocks, where ';' is data).
    if ';' in line and not (f2pyenhancementspattern[0].match(line) or
                            multilinepattern[0].match(line)):
        for l in line.split(';'):
            assert reset==0,`reset` # XXX: non-zero reset values need testing
            crackline(l,reset)
        return
    if reset<0:
        # Initialize the global group tables with an empty top-level group.
        groupcounter=0
        groupname={groupcounter:''}
        groupcache={groupcounter:{}}
        grouplist={groupcounter:[]}
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['block']=''
        groupcache[groupcounter]['name']=''
        neededmodule=-1
        skipblocksuntil=-1
        return
    if reset>0:
        # Final pass: close any blocks left open (missing "end" statements).
        fl=0
        if f77modulename and neededmodule==groupcounter: fl=2
        while groupcounter>fl:
            outmess('crackline: groupcounter=%s groupname=%s\n'%(`groupcounter`,`groupname`))
            outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1
        if f77modulename and neededmodule==groupcounter:
            # Close the synthetic interface and python module blocks created
            # for a plain F77 file wrapped as f77modulename.
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end interface
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end module
            neededmodule=-1
        return
    if line=='': return
    # Try the statement patterns in order; pat[1] tags the statement kind.
    flag=0
    for pat in [dimensionpattern,externalpattern,intentpattern,optionalpattern,
                requiredpattern,
                parameterpattern,datapattern,publicpattern,privatepattern,
                intrisicpattern,
                endifpattern,endpattern,
                formatpattern,
                beginpattern,functionpattern,subroutinepattern,
                implicitpattern,typespattern,commonpattern,
                callpattern,usepattern,containspattern,
                entrypattern,
                f2pyenhancementspattern,
                multilinepattern
                ]:
        m = pat[0].match(line)
        if m:
            break
        flag=flag+1
    if not m:
        # Unrecognized statement: maybe a call of a declared external --
        # rewrite it as a synthetic 'callfun' statement and analyze that.
        re_1 = crackline_re_1
        if 0<=skipblocksuntil<=groupcounter:return
        if groupcache[groupcounter].has_key('externals'):
            for name in groupcache[groupcounter]['externals']:
                if invbadnames.has_key(name):
                    name=invbadnames[name]
                if groupcache[groupcounter].has_key('interfaced') and name in groupcache[groupcounter]['interfaced']: continue
                m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name,markouterparen(line),re.I)
                if m1:
                    m2 = re_1.match(m1.group('before'))
                    a = _simplifyargs(m1.group('args'))
                    if m2:
                        line='callfun %s(%s) result (%s)'%(name,a,m2.group('result'))
                    else: line='callfun %s(%s)'%(name,a)
                    m = callfunpattern[0].match(line)
                    if not m:
                        outmess('crackline: could not resolve function call for line=%s.\n'%`line`)
                        return
                    analyzeline(m,'callfun',line)
                    return
        if verbose>1:
            previous_context = None
            outmess('crackline:%d: No pattern for line\n'%(groupcounter))
        return
    elif pat[1]=='end':
        if 0<=skipblocksuntil<groupcounter:
            groupcounter=groupcounter-1
            if skipblocksuntil<=groupcounter: return
        if groupcounter<=0:
            raise 'crackline: groupcounter(=%s) is nonpositive. Check the blocks.'\
                  % (groupcounter)
        m1 = beginpattern[0].match((line))
        if (m1) and (not m1.group('this')==groupname[groupcounter]):
            raise 'crackline: End group %s does not match with previous Begin group %s\n\t%s'%(`m1.group('this')`,`groupname[groupcounter]`,filepositiontext)
        if skipblocksuntil==groupcounter:
            skipblocksuntil=-1
        # Fold the finished group into its parent.
        grouplist[groupcounter-1].append(groupcache[groupcounter])
        grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
        del grouplist[groupcounter]
        groupcounter=groupcounter-1
        if not skipemptyends:
            expectbegin=1
    elif pat[1] == 'begin':
        if 0<=skipblocksuntil<=groupcounter:
            groupcounter=groupcounter+1
            return
        gotnextfile=0
        analyzeline(m,pat[1],line)
        expectbegin=0
    elif pat[1]=='endif':
        pass
    elif pat[1]=='contains':
        if ignorecontains: return
        if 0<=skipblocksuntil<=groupcounter: return
        skipblocksuntil=groupcounter
    else:
        if 0<=skipblocksuntil<=groupcounter:return
        analyzeline(m,pat[1],line)
+
def markouterparen(line):
    """Replace only the outermost-level '(' and ')' of *line* by the
    markers '@(@' and '@)@'; nested parentheses are left untouched."""
    pieces = []
    depth = 0
    for ch in line:
        if ch=='(':
            depth = depth + 1
            if depth==1:
                pieces.append('@(@')
                continue
        elif ch==')':
            depth = depth - 1
            if depth==0:
                pieces.append('@)@')
                continue
        pieces.append(ch)
    return ''.join(pieces)
def markoutercomma(line,comma=','):
    """Replace every *comma* that sits at nesting depth zero (outside any
    parentheses and outside quoted strings) by '@'+comma+'@'."""
    marked = ''
    depth = 0
    closer = ''   # '' outside, ')' inside parens, "'" inside a string
    for ch in line:
        if (not closer or closer==')') and ch=='(':
            depth = depth + 1
            closer = ')'
        elif not closer and ch=='\'' and (not marked or marked[-1]!='\\'):
            depth = depth + 1
            closer = '\''
        elif ch==closer:
            depth = depth - 1
            if depth==0:
                closer = ''
        elif ch==comma and depth==0:
            marked = marked+'@'+comma+'@'
            continue
        marked = marked+ch
    assert not depth, repr((depth,line,marked,closer))
    return marked
def unmarkouterparen(line):
    """Inverse of markouterparen: turn the '@(@'/'@)@' markers back into
    plain parentheses."""
    return line.replace('@(@','(').replace('@)@',')')
def appenddecl(decl,decl2,force=1):
    """
    Merge the variable-declaration dictionary ``decl2`` into ``decl`` and
    return the result.  Known keys are combined individually (typespec,
    attrspec, kind/char selectors, '=' initializer, typename); with ``force``
    true, entries from ``decl2`` overwrite existing ones.  Unsupported keys
    report an error; unknown keys raise.
    """
    if not decl: decl={}
    if not decl2: return decl
    if decl is decl2: return decl
    for k in decl2.keys():
        if k=='typespec':
            if force or not decl.has_key(k): decl[k]=decl2[k]
        elif k=='attrspec':
            # Attributes merge one by one through setattrspec.
            for l in decl2[k]:
                decl=setattrspec(decl,l,force)
        elif k=='kindselector':
            decl=setkindselector(decl,decl2[k],force)
        elif k=='charselector':
            decl=setcharselector(decl,decl2[k],force)
        elif k in ['=','typename']:
            if force or not decl.has_key(k): decl[k]=decl2[k]
        elif k=='note':
            pass
        elif k in ['intent','check','dimension','optional','required']:
            errmess('appenddecl: "%s" not implemented.\n'%k)
        else:
            # Python 2 string exception -- kept for backward compatibility.
            raise 'appenddecl: Unknown variable definition key:', k
    return decl
+
# Optional kind/char selector in 'this' ('(...)', '*<len>' or '*(...)'),
# remainder of the statement in 'after'.  Works on markouterparen()-marked
# text, i.e. outer parens appear as '@(@'...'@)@'.
selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z',re.I)
# Subprogram head: 'name (args) result (r)' -- args and result optional.
nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*(result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))*\s*\Z',re.I)
# Call-like form: 'name (args)' with parentheses required.
callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z',re.I)
# Double-precision literal: 'd'/'D' exponent marker (e.g. 1.5d0).
real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
# Single-precision literal: 'e'/'E' exponent or a plain decimal number.
real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
+
+_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b',re.I)
+def _is_intent_callback(vdecl):
+ for a in vdecl.get('attrspec',[]):
+ if _intentcallbackpattern.match(a):
+ return 1
+ return 0
+
def _resolvenameargspattern(line):
    """Split a subprogram head into (name, args, result).

    Lines that match neither the definition nor the call form yield
    (None, [], None)."""
    line = markouterparen(line)
    found = nameargspattern.match(line)
    if found:
        return found.group('name'),found.group('args'),found.group('result')
    found = callnameargspattern.match(line)
    if found:
        return found.group('name'),found.group('args'),None
    return None,[],None
+
def analyzeline(m,case,line):
    """
    Analyze one statement already classified by crackline(): ``m`` is the
    regexp match, ``case`` the pattern tag ('begin','end','dimension',...),
    ``line`` the full statement text.  Updates the global group tables
    (groupcache/grouplist/groupname) in place.
    """
    global groupcounter,groupname,groupcache,grouplist,filepositiontext,\
           currentfilename,f77modulename,neededinterface,neededmodule,expectbegin,\
           gotnextfile,previous_context
    block=m.group('this')
    if case != 'multiline':
        previous_context = None
    # No enclosing block yet: wrap loose statements in a synthetic 'program'.
    if expectbegin and case not in ['begin','call','callfun','type'] \
       and not skipemptyends and groupcounter<1:
        newname=string.split(os.path.basename(currentfilename),'.')[0]
        outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname)
        gotnextfile=0
        groupcounter=groupcounter+1
        groupname[groupcounter]='program'
        groupcache[groupcounter]={}
        grouplist[groupcounter]=[]
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['block']='program'
        groupcache[groupcounter]['name']=newname
        groupcache[groupcounter]['from']='fromsky'
        expectbegin=0
    if case in ['begin','call','callfun']:
        # Crack line => block,name,args,result
        block = block.lower()
        if re.match(r'block\s*data',block,re.I): block='block data'
        if re.match(r'python\s*module',block,re.I): block='python module'
        name,args,result = _resolvenameargspattern(m.group('after'))
        if name is None:
            if block=='block data':
                name = '_BLOCK_DATA_'
            else:
                name = ''
            if block not in ['interface','block data']:
                outmess('analyzeline: No name/args pattern found for line.\n')

        previous_context = (block,name,groupcounter)
        if args: args=rmbadname(map(string.strip,string.split(markoutercomma(args),'@,@')))
        else: args=[]
        if '' in args:
            while '' in args:
                args.remove('')
            outmess('analyzeline: argument list is malformed (missing argument).\n')

        # end of crack line => block,name,args,result
        needmodule=0
        needinterface=0

        if case in ['call','callfun']:
            # A call of an external: synthesize an interface entry for it.
            needinterface=1
            if not groupcache[groupcounter].has_key('args'): return
            if name not in groupcache[groupcounter]['args']:
                return
            for it in grouplist[groupcounter]:
                if it['name']==name: return
            if name in groupcache[groupcounter]['interfaced']: return
            block={'call':'subroutine','callfun':'function'}[case]
        if f77modulename and neededmodule==-1 and groupcounter<=1:
            # First block of a wrapped F77 file: create module + interface.
            neededmodule=groupcounter+2
            needmodule=1
            needinterface=1
        # Create new block(s)
        groupcounter=groupcounter+1
        groupcache[groupcounter]={}
        grouplist[groupcounter]=[]
        if needmodule:
            if verbose>1:
                outmess('analyzeline: Creating module block %s\n'%`f77modulename`,0)
            groupname[groupcounter]='module'
            groupcache[groupcounter]['block']='python module'
            groupcache[groupcounter]['name']=f77modulename
            groupcache[groupcounter]['from']=''
            groupcache[groupcounter]['body']=[]
            groupcache[groupcounter]['externals']=[]
            groupcache[groupcounter]['interfaced']=[]
            groupcache[groupcounter]['vars']={}
            groupcounter=groupcounter+1
            groupcache[groupcounter]={}
            grouplist[groupcounter]=[]
        if needinterface:
            if verbose>1:
                outmess('analyzeline: Creating additional interface block.\n',0)
            groupname[groupcounter]='interface'
            groupcache[groupcounter]['block']='interface'
            groupcache[groupcounter]['name']='unknown_interface'
            groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
            groupcache[groupcounter]['body']=[]
            groupcache[groupcounter]['externals']=[]
            groupcache[groupcounter]['interfaced']=[]
            groupcache[groupcounter]['vars']={}
            groupcounter=groupcounter+1
            groupcache[groupcounter]={}
            grouplist[groupcounter]=[]
        groupname[groupcounter]=block
        groupcache[groupcounter]['block']=block
        if not name: name='unknown_'+block
        groupcache[groupcounter]['prefix']=m.group('before')
        groupcache[groupcounter]['name']=rmbadname1(name)
        groupcache[groupcounter]['result']=result
        if groupcounter==1:
            groupcache[groupcounter]['from']=currentfilename
        else:
            if f77modulename and groupcounter==3:
                groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename)
            else:
                groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
        for k in groupcache[groupcounter].keys():
            if not groupcache[groupcounter][k]: del groupcache[groupcounter][k]
        groupcache[groupcounter]['args']=args
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['externals']=[]
        groupcache[groupcounter]['interfaced']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['entry']={}
        # end of creation
        if block=='type':
            groupcache[groupcounter]['varnames'] = []

        if case in ['call','callfun']: # set parents variables
            if name not in groupcache[groupcounter-2]['externals']:
                groupcache[groupcounter-2]['externals'].append(name)
            groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars'])
            #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']]
            #except: pass
            try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
            except: pass
        if block in ['function','subroutine']: # set global attributes
            try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter-2]['vars'][''])
            except: pass
            if case=='callfun': # return type
                if result and groupcache[groupcounter]['vars'].has_key(result):
                    if not name==result:
                        groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter]['vars'][result])
            #if groupcounter>1: # name is interfaced
            try: groupcache[groupcounter-2]['interfaced'].append(name)
            except: pass
        if block=='function':
            # A typed function head carries the result type before 'function'.
            t=typespattern[0].match(m.group('before')+' '+name)
            if t:
                typespec,selector,attr,edecl=cracktypespec0(t.group('this'),t.group('after'))
                updatevars(typespec,selector,attr,edecl)
        if case in ['call','callfun']:
            # Synthetic block complete: close the routine and its interface.
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end routine
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end interface
    elif case=='entry':
        name,args,result=_resolvenameargspattern(m.group('after'))
        if name is not None:
            if args:
                args=rmbadname(map(string.strip,string.split(markoutercomma(args),'@,@')))
            else: args=[]
            assert result is None,`result`
            groupcache[groupcounter]['entry'][name] = args
            previous_context = ('entry',name,groupcounter)
    elif case=='type':
        typespec,selector,attr,edecl=cracktypespec0(block,m.group('after'))
        last_name = updatevars(typespec,selector,attr,edecl)
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case in ['dimension','intent','optional','required','external','public','private','intrisic']:
        # Attribute statements: attach 'ap' to each listed entity.
        edecl=groupcache[groupcounter]['vars']
        ll=m.group('after')
        i=string.find(ll,'::')
        if i<0 and case=='intent':
            # intent without '::' -- insert one after the closing paren.
            i=string.find(markouterparen(ll),'@)@')-2
            ll=ll[:i+1]+'::'+ll[i+1:]
            i=string.find(ll,'::')
            if ll[i:]=='::' and groupcache[groupcounter].has_key('args'):
                outmess('All arguments will have attribute %s%s\n'%(m.group('this'),ll[:i]))
                ll = ll + string.join(groupcache[groupcounter]['args'],',')
        if i<0:i=0;pl=''
        else: pl=string.strip(ll[:i]);ll=ll[i+2:]
        ch = string.split(markoutercomma(pl),'@,@')
        if len(ch)>1:
            pl = ch[0]
            outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:])))
        last_name = None
        for e in map(string.strip,string.split(markoutercomma(ll),'@,@')):
            m1=namepattern.match(e)
            if not m1:
                if case in ['public','private']: k=''
                else:
                    print m.groupdict()
                    outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,`e`))
                    continue
            else:
                k=rmbadname1(m1.group('name'))
            if not edecl.has_key(k): edecl[k]={}
            if case=='dimension': ap=case+m1.group('after')
            if case=='intent':
                ap=m.group('this')+pl
                if _intentcallbackpattern.match(ap):
                    # intent(callback) names become extra (hidden) arguments.
                    if k not in groupcache[groupcounter]['args']:
                        if groupcounter>1 and \
                           string.find(groupcache[groupcounter-2]['name'],
                                       '__user__')==-1:
                            outmess('analyzeline: appending intent(callback) %s'\
                                    ' to %s arguments\n' % (k,groupcache[groupcounter]['name']))
                            groupcache[groupcounter]['args'].append(k)
                    else:
                        errmess('analyzeline: intent(callback) %s is already'\
                                ' in argument list' % (k))
            if case in ['optional','required','public','external','private','intrisic']: ap=case
            if edecl[k].has_key('attrspec'): edecl[k]['attrspec'].append(ap)
            else: edecl[k]['attrspec']=[ap]
            if case=='external':
                if groupcache[groupcounter]['block']=='program':
                    outmess('analyzeline: ignoring program arguments\n')
                    continue
                if k not in groupcache[groupcounter]['args']:
                    #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`))
                    continue
                if not groupcache[groupcounter].has_key('externals'):
                    groupcache[groupcounter]['externals']=[]
                groupcache[groupcounter]['externals'].append(k)
            last_name = k
        groupcache[groupcounter]['vars']=edecl
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case=='parameter':
        edecl=groupcache[groupcounter]['vars']
        ll=string.strip(m.group('after'))[1:-1]
        last_name = None
        for e in string.split(markoutercomma(ll),'@,@'):
            try:
                k,initexpr=map(string.strip,string.split(e,'='))
            except:
                outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e,ll));continue
            params = get_parameters(edecl)
            k=rmbadname1(k)
            if not edecl.has_key(k): edecl[k]={}
            if edecl[k].has_key('=') and (not edecl[k]['=']==initexpr):
                outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k,edecl[k]['='],initexpr))
            t = determineexprtype(initexpr,params)
            if t:
                # Rewrite Fortran literals into Python-evaluable form.
                if t.get('typespec')=='real':
                    tt = list(initexpr)
                    for m in real16pattern.finditer(initexpr):
                        tt[m.start():m.end()] = list(\
                            initexpr[m.start():m.end()].lower().replace('d', 'e'))
                    initexpr = "".join(tt)
                elif t.get('typespec')=='complex':
                    initexpr = initexpr[1:].lower().replace('d','e').\
                               replace(',','+1j*(')
            try:
                # NOTE(review): parameter expressions are eval()'ed; input is
                # trusted Fortran source, but be aware when feeding f2py
                # untrusted files.
                v = eval(initexpr,{},params)
            except (SyntaxError,NameError),msg:
                errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\
                        % (initexpr, msg))
                continue
            edecl[k]['='] = repr(v)
            if edecl[k].has_key('attrspec'):
                edecl[k]['attrspec'].append('parameter')
            else: edecl[k]['attrspec']=['parameter']
            last_name = k
        groupcache[groupcounter]['vars']=edecl
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case=='implicit':
        if string.lower(string.strip(m.group('after')))=='none':
            groupcache[groupcounter]['implicit']=None
        elif m.group('after'):
            if groupcache[groupcounter].has_key('implicit'):
                impl=groupcache[groupcounter]['implicit']
            else: impl={}
            if impl is None:
                outmess('analyzeline: Overwriting earlier "implicit none" statement.\n')
                impl={}
            for e in string.split(markoutercomma(m.group('after')),'@,@'):
                decl={}
                m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z',e,re.I)
                if not m1:
                    outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue
                m2=typespattern4implicit.match(m1.group('this'))
                if not m2:
                    outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue
                typespec,selector,attr,edecl=cracktypespec0(m2.group('this'),m2.group('after'))
                kindselect,charselect,typename=cracktypespec(typespec,selector)
                decl['typespec']=typespec
                decl['kindselector']=kindselect
                decl['charselector']=charselect
                decl['typename']=typename
                for k in decl.keys():
                    if not decl[k]: del decl[k]
                # Map each letter (or letter range 'a-z') to this declaration.
                for r in string.split(markoutercomma(m1.group('after')),'@,@'):
                    if '-' in r:
                        try: begc,endc=map(string.strip,string.split(r,'-'))
                        except:
                            outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n'%r);continue
                    else: begc=endc=string.strip(r)
                    if not len(begc)==len(endc)==1:
                        outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n'%r);continue
                    for o in range(ord(begc),ord(endc)+1):
                        impl[chr(o)]=decl
            groupcache[groupcounter]['implicit']=impl
    elif case=='data':
        # Split the statement into [varlist, valuelist] pairs delimited by
        # '/' characters (quote-aware via the fc flag).
        ll=[]
        dl='';il='';f=0;fc=1
        for c in m.group('after'):
            if c=="'": fc=not fc
            if c=='/' and fc: f=f+1;continue
            if f==0: dl=dl+c
            elif f==1: il=il+c
            elif f==2:
                dl = dl.strip()
                if dl.startswith(','):
                    dl = dl[1:].strip()
                ll.append([dl,il])
                dl=c;il='';f=0
        if f==2:
            dl = dl.strip()
            if dl.startswith(','):
                dl = dl[1:].strip()
            ll.append([dl,il])
        vars={}
        if groupcache[groupcounter].has_key('vars'):
            vars=groupcache[groupcounter]['vars']
        last_name = None
        for l in ll:
            l=map(string.strip,l)
            if l[0][0]==',':l[0]=l[0][1:]
            if l[0][0]=='(':
                outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0])
                continue
            #if '(' in l[0]:
            #    #outmess('analyzeline: ignoring this data statement.\n')
            #    continue
            i=0;j=0;llen=len(l[1])
            for v in rmbadname(map(string.strip,string.split(markoutercomma(l[0]),'@,@'))):
                # Walk the value list in lockstep with the variable list,
                # honoring commas inside quoted strings.
                fc=0
                while (i<llen) and (fc or not l[1][i]==','):
                    if l[1][i]=="'": fc=not fc
                    i=i+1
                i=i+1
                #v,l[1][j:i-1]=name,initvalue
                if not vars.has_key(v):
                    vars[v]={}
                if vars[v].has_key('=') and not vars[v]['=']==l[1][j:i-1]:
                    outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v,vars[v]['='],l[1][j:i-1]))
                vars[v]['=']=l[1][j:i-1]
                j=i
                last_name = v
        groupcache[groupcounter]['vars']=vars
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case=='common':
        line=string.strip(m.group('after'))
        if not line[0]=='/':line='//'+line
        # Collect [blockname, objectlist] pairs; '/name/' introduces a block.
        cl=[]
        f=0;bn='';ol=''
        for c in line:
            if c=='/':f=f+1;continue
            if f>=3:
                bn = string.strip(bn)
                if not bn: bn='_BLNK_'
                cl.append([bn,ol])
                f=f-2;bn='';ol=''
            if f%2: bn=bn+c
            else: ol=ol+c
        bn = string.strip(bn)
        if not bn: bn='_BLNK_'
        cl.append([bn,ol])
        commonkey={}
        if groupcache[groupcounter].has_key('common'):
            commonkey=groupcache[groupcounter]['common']
        for c in cl:
            if commonkey.has_key(c[0]):
                outmess('analyzeline: previously defined common block encountered. Skipping.\n')
                continue
            commonkey[c[0]]=[]
            for i in map(string.strip,string.split(markoutercomma(c[1]),'@,@')):
                if i: commonkey[c[0]].append(i)
        groupcache[groupcounter]['common']=commonkey
        previous_context = ('common',bn,groupcounter)
    elif case=='use':
        m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z',m.group('after'),re.I)
        if m1:
            mm=m1.groupdict()
            if not groupcache[groupcounter].has_key('use'): groupcache[groupcounter]['use']={}
            name=m1.group('name')
            groupcache[groupcounter]['use'][name]={}
            isonly=0
            if mm.has_key('list') and mm['list'] is not None:
                if mm.has_key('notonly') and mm['notonly'] is None:isonly=1
                groupcache[groupcounter]['use'][name]['only']=isonly
                ll=map(string.strip,string.split(mm['list'],','))
                rl={}
                for l in ll:
                    if '=' in l:
                        # 'local => use' renaming.
                        m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z',l,re.I)
                        if m2: rl[string.strip(m2.group('local'))]=string.strip(m2.group('use'))
                        else:
                            outmess('analyzeline: Not local=>use pattern found in %s\n'%`l`)
                    else:
                        rl[l]=l
                    groupcache[groupcounter]['use'][name]['map']=rl
            else:
                pass

        else:
            print m.groupdict()
            outmess('analyzeline: Could not crack the use statement.\n')
    elif case in ['f2pyenhancements']:
        if not groupcache[groupcounter].has_key ('f2pyenhancements'):
            groupcache[groupcounter]['f2pyenhancements'] = {}
        d = groupcache[groupcounter]['f2pyenhancements']
        if m.group('this')=='usercode' and d.has_key('usercode'):
            # Multiple usercode sections accumulate into a list.
            if type(d['usercode']) is type(''):
                d['usercode'] = [d['usercode']]
            d['usercode'].append(m.group('after'))
        else:
            d[m.group('this')] = m.group('after')
    elif case=='multiline':
        if previous_context is None:
            if verbose:
                outmess('analyzeline: No context for multiline block.\n')
            return
        gc = groupcounter
        #gc = previous_context[2]
        appendmultiline(groupcache[gc],
                        previous_context[:2],
                        m.group('this'))
    else:
        if verbose>1:
            print m.groupdict()
            outmess('analyzeline: No code implemented for line.\n')
+
def appendmultiline(group, context_name,ml):
    """Record multiline block ``ml`` under ``context_name`` in the group's
    'f2pymultilines' mapping, creating the containers on first use."""
    mldict = group.setdefault('f2pymultilines', {})
    mldict.setdefault(context_name, []).append(ml)
    return
+
def cracktypespec0(typespec,ll):
    """Split a type declaration into (typespec, selector, attr, rest).

    ``typespec`` is normalized ('double complex'/'double precision'/lower
    case); the optional kind/char selector goes to ``selector``, attributes
    before '::' to ``attr``, and the entity list remainder to ``ll``.
    Returns None (after a message) when no selector pattern matches."""
    selector=None
    attr=None
    if re.match(r'double\s*complex',typespec,re.I):
        typespec='double complex'
    elif re.match(r'double\s*precision',typespec,re.I):
        typespec='double precision'
    else:
        typespec=typespec.strip().lower()
    m1=selectpattern.match(markouterparen(ll))
    if not m1:
        outmess('cracktypespec0: no kind/char_selector pattern found for line.\n')
        return
    d=m1.groupdict()
    for k in d.keys():
        d[k]=unmarkouterparen(d[k])
    if typespec in ['complex','integer','logical','real','character','type']:
        selector=d['this']
        ll=d['after']
    i=ll.find('::')
    if i>=0:
        attr=ll[:i].strip()
        ll=ll[i+2:]
    return typespec,selector,attr,ll
#####
# Patterns used while cracking entity declarations.  Outer commas and
# parentheses in the input are pre-masked by markoutercomma /
# markouterparen as '@,@' and '@(@'/'@)@' before matching.
# namepattern: leading identifier plus the remainder of the declaration.
namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z',re.I)
# kindselector: '(kind=...)'/'(...)' or '*<kind>' spec of numeric types;
# the star form is captured as 'kind2'.
kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z',re.I)
# charselector: '(<lenkind>)' or '*<charlen>' spec of character types.
charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z',re.I)
# lenkindpattern: 'len=...,kind=...' content inside a charselector, in
# either order; positional forms land in 'len2'/'kind2'.
lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z',re.I)
# lenarraypattern: trailing '<(dims)>*<len>' of an entity plus optional
# '=<init>' or '/.../ ' data-style initializer.
lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z',re.I)
def removespaces(expr):
    """Remove spaces adjacent to any of the characters ``()[]{}= `` in *expr*.

    Interior spaces between ordinary tokens are kept; leading/trailing
    whitespace is always stripped.
    """
    # str.strip replaces the deprecated string.strip module function
    # (removed in Python 3); behavior is identical.
    expr = expr.strip()
    if len(expr) <= 1:
        return expr
    expr2 = expr[0]
    for i in range(1, len(expr) - 1):
        # Drop a space whose neighbour is a delimiter (or another space).
        if expr[i] == ' ' and \
           ((expr[i + 1] in "()[]{}= ") or (expr[i - 1] in "()[]{}= ")):
            continue
        expr2 = expr2 + expr[i]
    expr2 = expr2 + expr[-1]
    return expr2
def markinnerspaces(line):
    """Replace spaces inside quoted strings in *line* with '@_@'.

    Bug fix: the original had ``if c==cc: f=f+1`` followed by an
    identical, unreachable ``elif c==cc: f=f-1``, so the in-string
    counter only ever grew and spaces were marked solely inside the
    first quoted string on a line.  The counter is replaced with a
    toggled inside-string flag, so every quoted string is handled.
    """
    fragment = ''
    inside = False          # currently inside a quoted string?
    current_quote = None    # the quote character that opened it
    previous = ''           # previous character, for escape handling
    for c in line:
        # Pass backslash-escaped quotes/backslashes through untouched.
        if previous == '\\' and c in ['\\', '\'', '"']:
            fragment += c
            previous = c
            continue
        if not inside and c in ['\'', '"']:
            current_quote = c
        if c == current_quote:
            # Opening or closing quote: flip the state.
            inside = not inside
        elif c == ' ' and inside:
            fragment += '@_@'
            previous = c
            continue
        fragment += c
        previous = c
    return fragment
def updatevars(typespec,selector,attrspec,entitydecl):
    """Update groupcache[groupcounter]['vars'] from one declaration line.

    typespec/selector/attrspec describe the declared type and attributes;
    entitydecl is the comma-separated list of declared entities.  Returns
    the name of the last entity processed (or None).
    """
    global groupcache,groupcounter
    last_name = None
    kindselect,charselect,typename=cracktypespec(typespec,selector)
    # Normalize attribute keywords to lower case, keeping their arguments.
    if attrspec:
        attrspec=map(string.strip,string.split(markoutercomma(attrspec),'@,@'))
        l = []
        c = re.compile(r'(?P<start>[a-zA-Z]+)')
        for a in attrspec:
            m = c.match(a)
            if m:
                s = string.lower(m.group('start'))
                a = s + a[len(s):]
            l.append(a)
        attrspec = l
    # Split the entity list on outer commas; spaces inside strings were
    # protected as '@_@' by markinnerspaces and are restored afterwards.
    el=map(string.strip,string.split(markoutercomma(entitydecl),'@,@'))
    el1=[]
    for e in el:
        for e1 in map(string.strip,string.split(markoutercomma(removespaces(markinnerspaces(e)),comma=' '),'@ @')):
            if e1: el1.append(string.replace(e1,'@_@',' '))
    for e in el1:
        m=namepattern.match(e)
        if not m:
            outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(`e`))
            continue
        ename=rmbadname1(m.group('name'))
        edecl={}
        if groupcache[groupcounter]['vars'].has_key(ename):
            # Merge with an existing declaration; on conflicts the stored
            # value wins and a message is emitted.
            edecl=groupcache[groupcounter]['vars'][ename].copy()
            has_typespec = edecl.has_key('typespec')
            if not has_typespec:
                edecl['typespec']=typespec
            elif typespec and (not typespec==edecl['typespec']):
                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typespec'],typespec))
            if not edecl.has_key('kindselector'):
                edecl['kindselector']=copy.copy(kindselect)
            elif kindselect:
                for k in kindselect.keys():
                    if edecl['kindselector'].has_key(k) and (not kindselect[k]==edecl['kindselector'][k]):
                        outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k]))
                    else: edecl['kindselector'][k]=copy.copy(kindselect[k])
            if not edecl.has_key('charselector') and charselect:
                if not has_typespec:
                    edecl['charselector']=charselect
                else:
                    errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \
                            %(ename,charselect))
            elif charselect:
                for k in charselect.keys():
                    if edecl['charselector'].has_key(k) and (not charselect[k]==edecl['charselector'][k]):
                        outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k]))
                    else: edecl['charselector'][k]=copy.copy(charselect[k])
            if not edecl.has_key('typename'):
                edecl['typename']=typename
            elif typename and (not edecl['typename']==typename):
                outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typename'],typename))
            if not edecl.has_key('attrspec'):
                edecl['attrspec']=copy.copy(attrspec)
            elif attrspec:
                for a in attrspec:
                    if a not in edecl['attrspec']:
                        edecl['attrspec'].append(a)
        else:
            # First declaration of this entity.
            edecl['typespec']=copy.copy(typespec)
            edecl['kindselector']=copy.copy(kindselect)
            edecl['charselector']=copy.copy(charselect)
            edecl['typename']=typename
            edecl['attrspec']=copy.copy(attrspec)
        if m.group('after'):
            # Crack the trailing '(dims)', '*len' and initializer parts.
            m1=lenarraypattern.match(markouterparen(m.group('after')))
            if m1:
                d1=m1.groupdict()
                # Fold the alternate capture groups ('len2' etc.) into the
                # primary keys and drop unmatched entries.
                for lk in ['len','array','init']:
                    if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2']
                for k in d1.keys():
                    if d1[k] is not None: d1[k]=unmarkouterparen(d1[k])
                    else: del d1[k]
                if d1.has_key('len') and d1.has_key('array'):
                    if d1['len']=='':
                        d1['len']=d1['array']
                        del d1['array']
                    else:
                        # 'name(dims)*len' is folded into extra dimensions.
                        d1['array']=d1['array']+','+d1['len']
                        del d1['len']
                        errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec,e,typespec,ename,d1['array']))
                if d1.has_key('array'):
                    dm = 'dimension(%s)'%d1['array']
                    if not edecl.has_key('attrspec') or (not edecl['attrspec']):
                        edecl['attrspec']=[dm]
                    else:
                        edecl['attrspec'].append(dm)
                        # A conflicting dimension(...) already stored wins;
                        # drop the one just appended.
                        for dm1 in edecl['attrspec']:
                            if dm1[:9]=='dimension' and dm1!=dm:
                                del edecl['attrspec'][-1]
                                errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \
                                        % (ename,dm1,dm))
                                break

                if d1.has_key('len'):
                    if typespec in ['complex','integer','logical','real']:
                        # Numeric '*len' acts as a kind selector.
                        if (not edecl.has_key('kindselector')) or (not edecl['kindselector']):
                            edecl['kindselector']={}
                        edecl['kindselector']['*']=d1['len']
                    elif typespec == 'character':
                        if (not edecl.has_key('charselector')) or (not edecl['charselector']): edecl['charselector']={}
                        if edecl['charselector'].has_key('len'): del edecl['charselector']['len']
                        edecl['charselector']['*']=d1['len']
                if d1.has_key('init'):
                    if edecl.has_key('=') and (not edecl['=']==d1['init']):
                        outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['='],d1['init']))
                    else:
                        edecl['=']=d1['init']
            else:
                outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after')))
        # Drop empty slots before storing.
        for k in edecl.keys():
            if not edecl[k]: del edecl[k]
        groupcache[groupcounter]['vars'][ename]=edecl
        if groupcache[groupcounter].has_key('varnames'):
            groupcache[groupcounter]['varnames'].append(ename)
        last_name = ename
    return last_name
+
def cracktypespec(typespec,selector):
    """Crack the *selector* of a type declaration into its parts.

    Returns (kindselect, charselect, typename): a kind-selector dict for
    numeric types, a char-selector dict for 'character', or the derived
    type name for 'type(...)'.  Unused slots are None.
    """
    kindselect=None
    charselect=None
    typename=None
    if selector:
        if typespec in ['complex','integer','logical','real']:
            kindselect=kindselector.match(selector)
            if not kindselect:
                outmess('cracktypespec: no kindselector pattern found for %s\n'%(`selector`))
                return
            kindselect=kindselect.groupdict()
            # The '*<kind>' form is reported under the '*' key.
            kindselect['*']=kindselect['kind2']
            del kindselect['kind2']
            for k in kindselect.keys():
                if not kindselect[k]: del kindselect[k]
            for k,i in kindselect.items():
                kindselect[k] = rmbadname1(i)
        elif typespec=='character':
            charselect=charselector.match(selector)
            if not charselect:
                outmess('cracktypespec: no charselector pattern found for %s\n'%(`selector`))
                return
            charselect=charselect.groupdict()
            charselect['*']=charselect['charlen']
            del charselect['charlen']
            if charselect['lenkind']:
                # '(len=...,kind=...)' form; commas are masked as '@,@'.
                lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind']))
                lenkind=lenkind.groupdict()
                for lk in ['len','kind']:
                    if lenkind[lk+'2']:
                        lenkind[lk]=lenkind[lk+'2']
                    charselect[lk]=lenkind[lk]
                    del lenkind[lk+'2']
            del charselect['lenkind']
            for k in charselect.keys():
                if not charselect[k]: del charselect[k]
            for k,i in charselect.items():
                charselect[k] = rmbadname1(i)
        elif typespec=='type':
            typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)',selector,re.I)
            if typename: typename=typename.group('name')
            else: outmess('cracktypespec: no typename found in %s\n'%(`typespec+selector`))
        else:
            outmess('cracktypespec: no selector used for %s\n'%(`selector`))
    return kindselect,charselect,typename
+######
def setattrspec(decl, attr, force=0):
    """Append attribute *attr* to decl['attrspec'] and return *decl*.

    With force the attribute is appended unconditionally (possibly
    duplicating an existing entry, matching historical behavior).  The
    static/automatic and public/private pairs are checked against each
    other, but note the trailing else still appends — the net effect is
    that a not-yet-present attribute is always added.
    """
    if not decl:
        decl = {}
    if not attr:
        return decl
    # 'in' replaces has_key (removed in Python 3; valid in Python 2 too).
    if 'attrspec' not in decl:
        decl['attrspec'] = [attr]
        return decl
    if force:
        decl['attrspec'].append(attr)
    if attr in decl['attrspec']:
        return decl
    if attr == 'static' and 'automatic' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    elif attr == 'automatic' and 'static' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    elif attr == 'public' and 'private' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    elif attr == 'private' and 'public' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    else:
        decl['attrspec'].append(attr)
    return decl
def setkindselector(decl, sel, force=0):
    """Merge kind-selector dict *sel* into *decl* and return *decl*.

    Existing keys win unless *force* is true.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    # 'in' replaces has_key (removed in Python 3; valid in Python 2 too).
    if 'kindselector' not in decl:
        decl['kindselector'] = sel
        return decl
    for k in sel.keys():
        if force or k not in decl['kindselector']:
            decl['kindselector'][k] = sel[k]
    return decl
def setcharselector(decl, sel, force=0):
    """Merge char-selector dict *sel* into *decl* and return *decl*.

    Existing keys win unless *force* is true.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    # 'in' replaces has_key (removed in Python 3; valid in Python 2 too).
    if 'charselector' not in decl:
        decl['charselector'] = sel
        return decl
    for k in sel.keys():
        if force or k not in decl['charselector']:
            decl['charselector'][k] = sel[k]
    return decl
def getblockname(block, unknown='unknown'):
    """Return block's 'name' entry, or *unknown* if it has none."""
    # dict.get replaces the has_key check (removed in Python 3).
    return block.get('name', unknown)
+###### post processing
def setmesstext(block):
    """Set the global file-position text used to prefix messages."""
    global filepositiontext
    try:
        filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
    except Exception:
        # Missing 'from'/'name' (or a non-dict block) leaves the previous
        # text in place.  Narrowed from a bare except so KeyboardInterrupt
        # and SystemExit are no longer swallowed.
        pass
+
def get_usedict(block):
    """Collect use-statement info for *block*, parents first.

    Entries from the block itself override same-named entries inherited
    from its parent blocks.
    """
    usedict = {}
    # 'in' replaces has_key (removed in Python 3; valid in Python 2 too).
    if 'parent_block' in block:
        usedict = get_usedict(block['parent_block'])
    if 'use' in block:
        usedict.update(block['use'])
    return usedict
+
def get_useparameters(block, param_map=None):
    """Return parameters made visible to *block* through use statements.

    Each used module is looked up in the global f90modulevars registry and
    its parameters are merged into *param_map* (later modules override,
    with a message).  Use-rename mappings are not implemented.
    """
    global f90modulevars
    if param_map is None:
        param_map = {}
    usedict = get_usedict(block)
    if not usedict:
        return param_map
    for usename, mapping in usedict.items():
        # str.lower replaces the deprecated string.lower module function.
        usename = usename.lower()
        if usename not in f90modulevars:
            continue
        mvars = f90modulevars[usename]
        params = get_parameters(mvars)
        if not params:
            continue
        # XXX: apply mapping
        if mapping:
            errmess('get_useparameters: mapping for %s not impl.' % (mapping))
        for k, v in params.items():
            if k in param_map:
                # repr() replaces the Python-2-only backtick syntax.
                outmess('get_useparameters: overriding parameter %s with'
                        ' value from module %s' % (repr(k), repr(usename)))
            param_map[k] = v
    return param_map
+
def postcrack2(block, tab='', param_map=None):
    """Second crack pass: substitute use-d parameter values into kind
    selectors of the block tree.  No-op when no f90 module vars exist.
    """
    global f90modulevars
    if not f90modulevars:
        return block
    # isinstance replaces the Python-2-only types.ListType comparison.
    if isinstance(block, list):
        return [postcrack2(g, tab=tab + '\t', param_map=param_map)
                for g in block]
    setmesstext(block)
    outmess('%sBlock: %s\n' % (tab, block['name']), 0)

    if param_map is None:
        param_map = get_useparameters(block)

    if param_map is not None and 'vars' in block:
        vars = block['vars']
        for n in vars.keys():
            var = vars[n]
            if 'kindselector' in var:
                kind = var['kindselector']
                if 'kind' in kind:
                    val = kind['kind']
                    if val in param_map:
                        # Replace a symbolic kind with its parameter value.
                        kind['kind'] = param_map[val]
    block['body'] = [postcrack2(b, tab=tab + '\t', param_map=param_map)
                     for b in block['body']]

    return block
+
def postcrack(block,args=None,tab=''):
    """
    Analyze a cracked block (or list of blocks): resolve args, common
    blocks and variables, and synthesize a __user__ callback module for
    any external (callback) arguments.

    TODO:
          function return values
          determine expression types if in argument list
    """
    global usermodules,onlyfunctions
    if type(block)==types.ListType:
        gret=[]
        uret=[]
        for g in block:
            setmesstext(g)
            g=postcrack(g,tab=tab+'\t')
            if g.has_key('name') and string.find(g['name'],'__user__')>=0: # sort user routines to appear first
                uret.append(g)
            else:
                gret.append(g)
        return uret+gret
    setmesstext(block)
    # NOTE(review): the string exception below is invalid on Python >= 2.6,
    # and the 'and' looks suspect (a non-dict block would raise on has_key
    # before this fires) -- kept byte-identical; confirm intent.
    if (not type(block)==types.DictType) and not block.has_key('block'):
        raise 'postcrack: Expected block dictionary instead of ',block
    if block.has_key('name') and not block['name']=='unknown_interface':
        outmess('%sBlock: %s\n'%(tab,block['name']),0)
    blocktype=block['block']
    block=analyzeargs(block)
    block=analyzecommon(block)
    block['vars']=analyzevars(block)
    block['sortvars']=sortvarnames(block['vars'])
    if block.has_key('args') and block['args']:
        args=block['args']
    block['body']=analyzebody(block,args,tab=tab)

    userisdefined=[]
##     fromuser = []
    if block.has_key('use'):
        useblock=block['use']
        for k in useblock.keys():
            if string.find(k,'__user__')>=0:
                userisdefined.append(k)
##             if useblock[k].has_key('map'):
##                 for n in useblock[k]['map'].values():
##                     if n not in fromuser: fromuser.append(n)
    else: useblock={}
    name=''
    if block.has_key('name'):name=block['name']
    if block.has_key('externals') and block['externals']:# and not userisdefined: # Build a __user__ module
        interfaced=[]
        if block.has_key('interfaced'): interfaced=block['interfaced']
        mvars=copy.copy(block['vars'])
        # Pick a unique name for the synthesized __user__ module.
        if name: mname=name+'__user__routines'
        else: mname='unknown__user__routines'
        if mname in userisdefined:
            i=1
            while '%s_%i'%(mname,i) in userisdefined: i=i+1
            mname='%s_%i'%(mname,i)
        interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'}
        for e in block['externals']:
##             if e in fromuser:
##                 outmess('  Skipping %s that is defined explicitly in another use statement\n'%(`e`))
##                 continue
            if e in interfaced:
                # Move the explicit interface body of e into the user module.
                edef=[]
                j=-1
                for b in block['body']:
                    j=j+1
                    if b['block']=='interface':
                        i=-1
                        for bb in b['body']:
                            i=i+1
                            if bb.has_key('name') and bb['name']==e:
                                edef=copy.copy(bb)
                                del b['body'][i]
                                break
                        if edef:
                            if not b['body']: del block['body'][j]
                            del interfaced[interfaced.index(e)]
                            break
                interface['body'].append(edef)
            else:
                if mvars.has_key(e) and not isexternal(mvars[e]):
                    interface['vars'][e]=mvars[e]
        if interface['vars'] or interface['body']:
            block['interfaced']=interfaced
            mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']}
            useblock[mname]={}
            usermodules.append(mblock)
    if useblock:
        block['use']=useblock
    return block
+
def sortvarnames(vars):
    """Return the names in *vars* ordered so dependencies come first.

    Names without a 'depend' list come first (in dict order); dependent
    names are rotated to the back until their prerequisites are placed.
    On a dependency cycle an error is reported and the remaining names
    are appended unordered.
    """
    indep = []
    dep = []
    for v in vars.keys():
        # .get covers both "no key" and "empty list", like the original
        # has_key-and-truthiness test.
        if vars[v].get('depend'):
            dep.append(v)
            # print '%s depends on %s'%(v,vars[v]['depend'])
        else:
            indep.append(v)
    n = len(dep)
    i = 0
    while dep:  # XXX: How to catch dependence cycles correctly?
        v = dep[0]
        fl = 0
        for w in dep[1:]:
            if w in vars[v]['depend']:
                fl = 1
                break
        if fl:
            # v still waits on another pending name: rotate it to the back.
            dep = dep[1:] + [v]
            i = i + 1
            if i > n:
                errmess('sortvarnames: failed to compute dependencies because'
                        ' of cyclic dependencies between '
                        + ', '.join(dep) + '\n')
                indep = indep + dep
                break
        else:
            indep.append(v)
            dep = dep[1:]
            n = len(dep)
            i = 0
    # print indep
    return indep
+
def analyzecommon(block):
    """Attach dimension info from COMMON declarations to block['vars'].

    Each "name(dims)" entry in a common block is reduced to its bare
    name; the dims become a dimension(...) attrspec of the corresponding
    variable (created if necessary).  Also records the names in
    block['commonvars'].
    """
    if not hascommon(block):
        return block
    commonvars = []
    for k in block['common'].keys():
        comvars = []
        for e in block['common'][k]:
            m = re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
            if m:
                dims = []
                if m.group('dims'):
                    # Comprehension replaces map(string.strip, string.split(...)).
                    dims = [d.strip() for d in markoutercomma(m.group('dims')).split('@,@')]
                n = m.group('name').strip()
                if n in block['vars']:
                    if 'attrspec' in block['vars'][n]:
                        block['vars'][n]['attrspec'].append('dimension(%s)' % (','.join(dims)))
                    else:
                        block['vars'][n]['attrspec'] = ['dimension(%s)' % (','.join(dims))]
                else:
                    if dims:
                        block['vars'][n] = {'attrspec': ['dimension(%s)' % (','.join(dims))]}
                    else:
                        block['vars'][n] = {}
                if n not in commonvars:
                    commonvars.append(n)
            else:
                n = e
                errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
            comvars.append(n)
        block['common'][k] = comvars
    if 'commonvars' not in block:
        block['commonvars'] = commonvars
    else:
        block['commonvars'] = block['commonvars'] + commonvars
    return block
def analyzebody(block, args, tab=''):
    """Recursively postcrack the sub-blocks of *block*.

    Applies the skipfuncs/onlyfuncs filters to functions/subroutines,
    collects 'python module' blocks into usermodules, and registers
    module variables in f90modulevars.  Returns the processed body list.
    """
    global usermodules, skipfuncs, onlyfuncs, f90modulevars
    setmesstext(block)
    body = []
    for b in block['body']:
        b['parent_block'] = block
        if b['block'] in ['function', 'subroutine']:
            if args is not None and b['name'] not in args:
                continue
            else:
                # Renamed from 'as': that identifier is a reserved word on
                # Python >= 2.6 and a hard syntax error on Python 3.
                as_ = b['args']
            if b['name'] in skipfuncs:
                continue
            if onlyfuncs and b['name'] not in onlyfuncs:
                continue
        else:
            as_ = args
        b = postcrack(b, as_, tab=tab + '\t')
        if b['block'] == 'interface' and not b['body']:
            if 'f2pyenhancements' not in b:
                continue
        if b['block'].replace(' ', '') == 'pythonmodule':
            usermodules.append(b)
        else:
            if b['block'] == 'module':
                f90modulevars[b['name']] = b['vars']
            body.append(b)
    return body
def buildimplicitrules(block):
    """Return (implicitrules, attrrules) for *block*.

    implicitrules maps a variable's first letter to its implicit type
    declaration (None for 'implicit none'); attrrules maps letters whose
    'typespec' is static/automatic to that attribute instead.
    """
    setmesstext(block)
    # NOTE(review): this binds the shared module-level defaultimplicitrules
    # and the loop below assigns into it, mutating the global default for
    # later calls -- confirm this cross-call leakage is intended.
    implicitrules=defaultimplicitrules
    attrrules={}
    if block.has_key('implicit'):
        if block['implicit'] is None:
            # 'implicit none': no implicit typing at all.
            implicitrules=None
            if verbose>1:
                outmess('buildimplicitrules: no implicit rules for routine %s.\n'%`block['name']`)
        else:
            for k in block['implicit'].keys():
                if block['implicit'][k].get('typespec') not in ['static','automatic']:
                    implicitrules[k]=block['implicit'][k]
                else:
                    attrrules[k]=block['implicit'][k]['typespec']
    return implicitrules,attrrules
+
def myeval(e, g=None, l=None):
    """Eval expression *e* and return it only if it is a plain int or float.

    Raises ValueError for any other result type.  Note: eval of parsed
    Fortran text is an inherited design decision here.
    """
    r = eval(e, g, l)
    # type() (not isinstance) deliberately rejects bool and int/float
    # subclasses, matching the original [type(0), type(0.0)] check.
    if type(r) in (int, float):
        return r
    # Call form replaces the Python-2-only "raise E, msg" syntax.
    raise ValueError('r=%r' % (r,))
+
# Matches a bare identifier (no operators around it).
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I)
def getlincoef(e,xset): # e = a*x+b ; x in xset
    """Try to interpret *e* as a linear expression a*x+b in one symbol x
    drawn from *xset*; return (a, b, x), or (None, None, None) on failure.

    Coefficients are recovered numerically: every occurrence of x is
    substituted by 0, 1 and 0.5, and the 0.5 evaluation is used as a
    cross-check that the expression really is linear in x.
    """
    try:
        # Constant expression: a=0, b=value.
        c = int(myeval(e,{},{}))
        return 0,c,None
    except: pass
    if getlincoef_re_1.match(e):
        # Bare symbol: a=1, b=0.
        return 1,0,e
    len_e = len(e)
    for x in xset:
        if len(x)>len_e: continue
        re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)',re.I)
        m = re_1.match(e)
        if m:
            try:
                # Substitute 0 for every occurrence of x to obtain b.
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'),0,m1.group('after'))
                    m1 = re_1.match(ee)
                b = myeval(ee,{},{})
                # Substitute 1 to obtain a+b, hence a.
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'),1,m1.group('after'))
                    m1 = re_1.match(ee)
                a = myeval(ee,{},{}) - b
                # Substitute 0.5 as a linearity cross-check.
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'),0.5,m1.group('after'))
                    m1 = re_1.match(ee)
                c = myeval(ee,{},{})
                if (a*0.5+b==c):
                    return a,b,x
            except: pass
            # Only the first matching symbol is tried.
            break
    return None,None,None
+
# Matches a plain lower-case identifier (used to warn about undefined names).
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl,args,star='*'):
    """Build the length expression of a dimension range dl=[lower,upper].

    Returns (length_expr, dependent_symbol, inverse_suffix); the latter
    two support solving the length back for the symbol and are None when
    the length is constant or not linear in a single argument symbol.
    """
    edl = []
    # Evaluate each bound when it is a constant expression; otherwise keep
    # its source text.
    try: edl.append(myeval(dl[0],{},{}))
    except: edl.append(dl[0])
    try: edl.append(myeval(dl[1],{},{}))
    except: edl.append(dl[1])
    # Form upper-lower+1 textually, folding in whichever bound is constant.
    if type(edl[0]) is type(0):
        p1 = 1-edl[0]
        if p1==0: d = str(dl[1])
        elif p1<0: d = '%s-%s'%(dl[1],-p1)
        else: d = '%s+%s'%(dl[1],p1)
    elif type(edl[1]) is type(0):
        p1 = 1+edl[1]
        if p1==0: d='-(%s)' % (dl[0])
        else: d='%s-(%s)' % (p1,dl[0])
    else: d = '%s-(%s)+1'%(dl[1],dl[0])
    # Fully constant length.
    try: return `myeval(d,{},{})`,None,None
    except: pass
    # Treat both bounds as linear forms a*x+b in one argument symbol and
    # combine them into a single linear length expression.
    d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args)
    if None not in [d1[0],d2[0]]:
        if (d1[0],d2[0])==(0,0):
            return `d2[1]-d1[1]+1`,None,None
        b = d2[1] - d1[1] + 1
        d1 = (d1[0],0,d1[2])
        d2 = (d2[0],b,d2[2])
        if d1[0]==0 and d2[2] in args:
            if b<0: return '%s * %s - %s'%(d2[0],d2[2],-b),d2[2],'+%s)/(%s)'%(-b,d2[0])
            elif b: return '%s * %s + %s'%(d2[0],d2[2],b),d2[2],'-%s)/(%s)'%(b,d2[0])
            else: return '%s * %s'%(d2[0],d2[2]),d2[2],')/(%s)'%(d2[0])
        if d2[0]==0 and d1[2] in args:

            if b<0: return '%s * %s - %s'%(-d1[0],d1[2],-b),d1[2],'+%s)/(%s)'%(-b,-d1[0])
            elif b: return '%s * %s + %s'%(-d1[0],d1[2],b),d1[2],'-%s)/(%s)'%(b,-d1[0])
            else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0])
        if d1[2]==d2[2] and d1[2] in args:
            a = d2[0] - d1[0]
            if not a: return `b`,None,None
            if b<0: return '%s * %s - %s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a)
            elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a)
            else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a)
        # Remaining cases build only the textual length (no inverse).
        if d1[0]==d2[0]==1:
            c = str(d1[2])
            if c not in args:
                if _varname_match(c):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
                c = '(%s)'%c
            if b==0: d='%s-%s' % (d2[2],c)
            elif b<0: d='%s-%s-%s' % (d2[2],c,-b)
            else: d='%s-%s+%s' % (d2[2],c,b)
        elif d1[0]==0:
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)'%c2
            if d2[0]==1: pass
            elif d2[0]==-1: c2='-%s' %c2
            else: c2='%s*%s'%(d2[0],c2)

            if b==0: d=c2
            elif b<0: d='%s-%s' % (c2,-b)
            else: d='%s+%s' % (c2,b)
        elif d2[0]==0:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)'%c1
            if d1[0]==1: c1='-%s'%c1
            elif d1[0]==-1: c1='+%s'%c1
            elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1)
            else: c1 = '-%s*%s' % (d1[0],c1)

            if b==0: d=c1
            elif b<0: d='%s-%s' % (c1,-b)
            else: d='%s+%s' % (c1,b)
        else:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)'%c1
            if d1[0]==1: c1='-%s'%c1
            elif d1[0]==-1: c1='+%s'%c1
            elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1)
            else: c1 = '-%s*%s' % (d1[0],c1)

            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)'%c2
            if d2[0]==1: pass
            elif d2[0]==-1: c2='-%s' %c2
            else: c2='%s*%s'%(d2[0],c2)

            if b==0: d='%s%s' % (c2,c1)
            elif b<0: d='%s%s-%s' % (c2,c1,-b)
            else: d='%s%s+%s' % (c2,c1,b)
    return d,None,None
+
# Matches identifier-like words (used to scan '=' initialization expressions).
word_pattern = re.compile(r'\b[a-z][\w$]*\b',re.I)

def _get_depend_dict(name, vars, deps):
    """Compute (and cache in *deps*) the transitive dependency list of *name*.

    Dependencies come from the variable's explicit 'depend' list plus any
    variable names appearing in its '=' initialization expression.
    """
    if vars.has_key(name):
        # NOTE(review): when 'depend' exists this list is the stored list
        # itself (no copy), so the appends below grow vars[name]['depend']
        # in place -- confirm that side effect is intended.
        words = vars[name].get('depend',[])

        if vars[name].has_key('=') and not isstring(vars[name]):
            for word in word_pattern.findall(vars[name]['=']):
                if word not in words and vars.has_key(word):
                    words.append(word)
        # Expand transitively, preferring already-cached results.
        for word in words[:]:
            for w in deps.get(word,[]) \
                    or _get_depend_dict(word, vars, deps):
                if w not in words:
                    words.append(w)
    else:
        outmess('_get_depend_dict: no dependence info for %s\n' % (`name`))
        words = []
    deps[name] = words
    return words
+
def _calc_depend_dict(vars):
    """Map every variable name in *vars* to its full dependency list."""
    depend_dict = {}
    for name in vars.keys():
        _get_depend_dict(name, vars, depend_dict)
    return depend_dict
+
def get_sorted_names(vars):
    """Return the names in *vars* topologically sorted by dependencies.

    Names with no (remaining) dependencies are emitted first; the rest are
    repeatedly pruned until done.  NOTE: like the original, a dependency
    cycle makes the while loop spin forever.
    """
    depend_dict = _calc_depend_dict(vars)
    names = []
    # list() wrappers fix deletion-while-iterating: dict views raise
    # RuntimeError on Python 3 (the Python 2 code iterated list copies).
    for name in list(depend_dict.keys()):
        if not depend_dict[name]:
            names.append(name)
            del depend_dict[name]
    while depend_dict:
        for name, lst in list(depend_dict.items()):
            new_lst = [n for n in lst if n in depend_dict]
            if not new_lst:
                names.append(name)
                del depend_dict[name]
            else:
                depend_dict[name] = new_lst
    return [name for name in names if name in vars]
+
def _kind_func(string):
    """Crude stand-in for Fortran's kind() intrinsic used during eval."""
    # XXX: return something sensible.
    quoted = string[0] in "'\""
    if quoted:
        string = string[1:-1]
    if real16pattern.match(string):
        return 16
    if real8pattern.match(string):
        return 8
    return 'kind(' + string + ')'
+
+def _selected_int_kind_func(r):
+ #XXX: This should be processor dependent
+ m = 10**r
+ if m<=2**8: return 1
+ if m<=2**16: return 2
+ if m<=2**32: return 4
+ if m<=2**64: return 8
+ if m<=2**128: return 16
+ return -1
+
def get_parameters(vars, global_params={}):
    """Evaluate PARAMETER values found in *vars*.

    Returns a dict mapping parameter names (plus lower-case aliases) to
    evaluated Python values; unevaluable entries keep their source text.
    The mutable default is only copied, never mutated, so it is safe.
    """
    params = copy.copy(global_params)
    g_params = copy.copy(global_params)
    # Make the Fortran kind intrinsics callable during eval below.
    for name,func in [('kind',_kind_func),
                      ('selected_int_kind',_selected_int_kind_func),
                      ]:
        if not g_params.has_key(name):
            g_params[name] = func
    param_names = []
    for n in get_sorted_names(vars):
        if vars[n].has_key('attrspec') and 'parameter' in vars[n]['attrspec']:
            param_names.append(n)
    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)',re.I)
    selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)',re.I)
    for n in param_names:
        if vars[n].has_key('='):
            v = vars[n]['=']
            if islogical(vars[n]):
                v = v.lower()
                for repl in [
                    ('.false.','False'),
                    ('.true.','True'),
                    #TODO: test .eq., .neq., etc replacements.
                    ]:
                    v = v.replace(*repl)
            # Rewrite Fortran kind syntax to the Python helpers above.
            v = kind_re.sub(r'kind("\1")',v)
            v = selected_int_kind_re.sub(r'selected_int_kind(\1)',v)
            if isinteger(vars[n]) and not selected_int_kind_re.match(v):
                # Drop a trailing '_<kind>' suffix from integer literals.
                v = v.split('_')[0]
            if isdouble(vars[n]):
                # Rewrite d/D exponents to e so Python can eval the literal.
                tt = list(v)
                for m in real16pattern.finditer(v):
                    tt[m.start():m.end()] = list(\
                            v[m.start():m.end()].lower().replace('d', 'e'))
                v = string.join(tt,'')
            if iscomplex(vars[n]):
                if v[0]=='(' and v[-1]==')':
                    l = markoutercomma(v[1:-1]).split('@,@')
            # NOTE(review): the debug print below and the unused 'l' above
            # look like unintentional leftovers; kept byte-identical.
            print n,params
            try:
                params[n] = eval(v,g_params,params)
            except Exception,msg:
                params[n] = v
                #print params
                outmess('get_parameters: got "%s" on %s\n' % (msg,`v`))
            if isstring(vars[n]) and type(params[n]) is type(0):
                # An integer value for a character parameter is a char code.
                params[n] = chr(params[n])
            nl = string.lower(n)
            if nl!=n:
                params[nl] = params[n]
        else:
            print vars[n]
            outmess('get_parameters:parameter %s does not have value?!\n'%(`n`))
    return params
+
def _eval_length(length, params):
    """Evaluate a character-length spec; assumed forms collapse to '(*)'."""
    if length in ('(:)', '(*)', '*'):
        return '(*)'
    return _eval_scalar(length, params)
+
+_is_kind_number = re.compile('\d+_').match
+
+def _eval_scalar(value,params):
+ if _is_kind_number(value):
+ value = value.split('_')[0]
+ try:
+ value = str(eval(value,{},params))
+ except (NameError, SyntaxError):
+ return value
+ except Exception,msg:
+ errmess('"%s" in evaluating %r '\
+ '(available names: %s)\n' \
+ % (msg,value,params.keys()))
+ return value
+
+def analyzevars(block):
+ global f90modulevars
+ setmesstext(block)
+ implicitrules,attrrules=buildimplicitrules(block)
+ vars=copy.copy(block['vars'])
+ if block['block']=='function' and not vars.has_key(block['name']):
+ vars[block['name']]={}
+ if block['vars'].has_key(''):
+ del vars['']
+ if block['vars'][''].has_key('attrspec'):
+ gen=block['vars']['']['attrspec']
+ for n in vars.keys():
+ for k in ['public','private']:
+ if k in gen:
+ vars[n]=setattrspec(vars[n],k)
+ svars=[]
+ args = block['args']
+ for a in args:
+ try:
+ vars[a]
+ svars.append(a)
+ except KeyError:
+ pass
+ for n in vars.keys():
+ if n not in args: svars.append(n)
+
+ params = get_parameters(vars, get_useparameters(block))
+
+ dep_matches = {}
+ name_match = re.compile(r'\w[\w\d_$]*').match
+ for v in vars.keys():
+ m = name_match(v)
+ if m:
+ n = v[m.start():m.end()]
+ try:
+ dep_matches[n]
+ except KeyError:
+ dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match
+ for n in svars:
+ if n[0] in attrrules.keys():
+ vars[n]=setattrspec(vars[n],attrrules[n[0]])
+ if not vars[n].has_key('typespec'):
+ if not(vars[n].has_key('attrspec') and 'external' in vars[n]['attrspec']):
+ if implicitrules:
+ ln0 = string.lower(n[0])
+ for k in implicitrules[ln0].keys():
+ if k=='typespec' and implicitrules[ln0][k]=='undefined':
+ continue
+ if not vars[n].has_key(k):
+ vars[n][k]=implicitrules[ln0][k]
+ elif k=='attrspec':
+ for l in implicitrules[ln0][k]:
+ vars[n]=setattrspec(vars[n],l)
+ elif n in block['args']:
+ outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(`n`,block['name']))
+
+ if vars[n].has_key('charselector'):
+ if vars[n]['charselector'].has_key('len'):
+ l = vars[n]['charselector']['len']
+ try:
+ l = str(eval(l,{},params))
+ except:
+ pass
+ vars[n]['charselector']['len'] = l
+
+ if vars[n].has_key('kindselector'):
+ if vars[n]['kindselector'].has_key('kind'):
+ l = vars[n]['kindselector']['kind']
+ try:
+ l = str(eval(l,{},params))
+ except:
+ pass
+ vars[n]['kindselector']['kind'] = l
+
+ savelindims = {}
+ if vars[n].has_key('attrspec'):
+ attr=vars[n]['attrspec']
+ attr.reverse()
+ vars[n]['attrspec']=[]
+ dim,intent,depend,check,note=None,None,None,None,None
+ for a in attr:
+ if a[:9]=='dimension': dim=(string.strip(a[9:]))[1:-1]
+ elif a[:6]=='intent': intent=(string.strip(a[6:]))[1:-1]
+ elif a[:6]=='depend': depend=(string.strip(a[6:]))[1:-1]
+ elif a[:5]=='check': check=(string.strip(a[5:]))[1:-1]
+ elif a[:4]=='note': note=(string.strip(a[4:]))[1:-1]
+ else: vars[n]=setattrspec(vars[n],a)
+ if intent:
+ if not vars[n].has_key('intent'): vars[n]['intent']=[]
+ for c in map(string.strip,string.split(markoutercomma(intent),'@,@')):
+ if not c in vars[n]['intent']:
+ vars[n]['intent'].append(c)
+ intent=None
+ if note:
+ note=string.replace(note,'\\n\\n','\n\n')
+ note=string.replace(note,'\\n ','\n')
+ if not vars[n].has_key('note'): vars[n]['note']=[note]
+ else: vars[n]['note'].append(note)
+ note=None
+ if depend is not None:
+ if not vars[n].has_key('depend'): vars[n]['depend']=[]
+ for c in rmbadname(map(string.strip,string.split(markoutercomma(depend),'@,@'))):
+ if c not in vars[n]['depend']:
+ vars[n]['depend'].append(c)
+ depend=None
+ if check is not None:
+ if not vars[n].has_key('check'): vars[n]['check']=[]
+ for c in map(string.strip,string.split(markoutercomma(check),'@,@')):
+ if not c in vars[n]['check']:
+ vars[n]['check'].append(c)
+ check=None
+ if dim and not vars[n].has_key('dimension'):
+ vars[n]['dimension']=[]
+ for d in rmbadname(map(string.strip,string.split(markoutercomma(dim),'@,@'))):
+ star = '*'
+ if d==':': star=':'
+ if params.has_key(d):
+ d = str(params[d])
+ for p in params.keys():
+ m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)',d,re.I)
+ if m:
+ #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`))
+ d = m.group('before')+str(params[p])+m.group('after')
+ if d==star:
+ dl = [star]
+ else:
+ dl=string.split(markoutercomma(d,':'),'@:@')
+ if len(dl)==2 and '*' in dl: # e.g. dimension(5:*)
+ dl = ['*']
+ d = '*'
+ if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]]
+ if len(dl)==2:
+ d,v,di = getarrlen(dl,block['vars'].keys())
+ if d[:4] == '1 * ': d = d[4:]
+ if di and di[-4:] == '/(1)': di = di[:-4]
+ if v: savelindims[d] = v,di
+ vars[n]['dimension'].append(d)
+ if vars[n].has_key('dimension'):
+ if isintent_c(vars[n]):
+ shape_macro = 'shape'
+ else:
+ shape_macro = 'shape'#'fshape'
+ if isstringarray(vars[n]):
+ if vars[n].has_key('charselector'):
+ d = vars[n]['charselector']
+ if d.has_key('*'):
+ d = d['*']
+ errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\
+ %(d,n,
+ ','.join(vars[n]['dimension']),
+ n,','.join(vars[n]['dimension']+[d])))
+ vars[n]['dimension'].append(d)
+ del vars[n]['charselector']
+ if not vars[n].has_key('intent'):
+ vars[n]['intent'] = []
+ if 'c' not in vars[n]['intent']:
+ vars[n]['intent'].append('c')
+ else:
+ errmess("analyzevars: charselector=%r unhandled." % (d))
+ if not vars[n].has_key('check') and block.has_key('args') and n in block['args']:
+ flag=not vars[n].has_key('depend')
+ if flag: vars[n]['depend']=[]
+ vars[n]['check']=[]
+ if vars[n].has_key('dimension'):
+ #/----< no check
+ #vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension'])))
+ i=-1; ni=len(vars[n]['dimension'])
+ for d in vars[n]['dimension']:
+ ddeps=[] # dependecies of 'd'
+ ad=''
+ pd=''
+ #origd = d
+ if not vars.has_key(d):
+ if savelindims.has_key(d):
+ pd,ad='(',savelindims[d][1]
+ d = savelindims[d][0]
+ else:
+ for r in block['args']:
+ #for r in block['vars'].keys():
+ if not vars.has_key(r): continue
+ if re.match(r'.*?\b'+r+r'\b',d,re.I):
+ ddeps.append(r)
+ if vars.has_key(d):
+ if vars[d].has_key('attrspec'):
+ for aa in vars[d]['attrspec']:
+ if aa[:6]=='depend':
+ ddeps=ddeps+string.split((string.strip(aa[6:]))[1:-1],',')
+ if vars[d].has_key('depend'):
+ ddeps=ddeps+vars[d]['depend']
+ i=i+1
+ if vars.has_key(d) and (not vars[d].has_key('depend')) \
+ and (not vars[d].has_key('=')) and (d not in vars[n]['depend']) \
+ and l_or(isintent_in,isintent_inout,isintent_inplace)(vars[n]):
+ vars[d]['depend']=[n]
+ if ni>1:
+ vars[d]['=']='%s%s(%s,%s)%s'% (pd,shape_macro,n,i,ad)
+ else:
+ vars[d]['=']='%slen(%s)%s'% (pd,n,ad)
+ # /---< no check
+ if 1 and not vars[d].has_key('check'):
+ if ni>1:
+ vars[d]['check']=['%s%s(%s,%i)%s==%s'\
+ %(pd,shape_macro,n,i,ad,d)]
+ else:
+ vars[d]['check']=['%slen(%s)%s>=%s'%(pd,n,ad,d)]
+ if not vars[d].has_key('attrspec'): vars[d]['attrspec']=['optional']
+ if ('optional' not in vars[d]['attrspec']) and\
+ ('required' not in vars[d]['attrspec']):
+ vars[d]['attrspec'].append('optional')
+ elif d not in ['*',':']:
+ #/----< no check
+ #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d))
+ #else: vars[n]['check'].append('len(%s)>=%s'%(n,d))
+ if flag:
+ if vars.has_key(d):
+ if n not in ddeps:
+ vars[n]['depend'].append(d)
+ else:
+ vars[n]['depend'] = vars[n]['depend'] + ddeps
+ elif isstring(vars[n]):
+ length='1'
+ if vars[n].has_key('charselector'):
+ if vars[n]['charselector'].has_key('*'):
+ length = _eval_length(vars[n]['charselector']['*'],
+ params)
+ vars[n]['charselector']['*']=length
+ elif vars[n]['charselector'].has_key('len'):
+ length = _eval_length(vars[n]['charselector']['len'],
+ params)
+ del vars[n]['charselector']['len']
+ vars[n]['charselector']['*']=length
+
+ if not vars[n]['check']: del vars[n]['check']
+ if flag and not vars[n]['depend']: del vars[n]['depend']
+ if vars[n].has_key('='):
+ if not vars[n].has_key('attrspec'): vars[n]['attrspec']=[]
+ if ('optional' not in vars[n]['attrspec']) and \
+ ('required' not in vars[n]['attrspec']):
+ vars[n]['attrspec'].append('optional')
+ if not vars[n].has_key('depend'):
+ vars[n]['depend']=[]
+ for v,m in dep_matches.items():
+ if m(vars[n]['=']): vars[n]['depend'].append(v)
+ if not vars[n]['depend']: del vars[n]['depend']
+ if isscalar(vars[n]):
+ vars[n]['='] = _eval_scalar(vars[n]['='],params)
+
+ for n in vars.keys():
+ if n==block['name']: # n is block name
+ if vars[n].has_key('note'):
+ block['note']=vars[n]['note']
+ if block['block']=='function':
+ if block.has_key('result') and vars.has_key(block['result']):
+ vars[n]=appenddecl(vars[n],vars[block['result']])
+ if block.has_key('prefix'):
+ pr=block['prefix']; ispure=0; isrec=1
+ pr1=string.replace(pr,'pure','')
+ ispure=(not pr==pr1)
+ pr=string.replace(pr1,'recursive','')
+ isrec=(not pr==pr1)
+ m=typespattern[0].match(pr)
+ if m:
+ typespec,selector,attr,edecl=cracktypespec0(m.group('this'),m.group('after'))
+ kindselect,charselect,typename=cracktypespec(typespec,selector)
+ vars[n]['typespec']=typespec
+ if kindselect:
+ if kindselect.has_key('kind'):
+ try:
+ kindselect['kind'] = eval(kindselect['kind'],{},params)
+ except:
+ pass
+ vars[n]['kindselector']=kindselect
+ if charselect: vars[n]['charselector']=charselect
+ if typename: vars[n]['typename']=typename
+ if ispure: vars[n]=setattrspec(vars[n],'pure')
+ if isrec: vars[n]=setattrspec(vars[n],'recursive')
+ else:
+ outmess('analyzevars: prefix (%s) were not used\n'%`block['prefix']`)
+ if not block['block'] in ['module','pythonmodule','python module','block data']:
+ if block.has_key('commonvars'):
+ neededvars=copy.copy(block['args']+block['commonvars'])
+ else:
+ neededvars=copy.copy(block['args'])
+ for n in vars.keys():
+ if l_or(isintent_callback,isintent_aux)(vars[n]):
+ neededvars.append(n)
+ if block.has_key('entry'):
+ neededvars.extend(block['entry'].keys())
+ for k in block['entry'].keys():
+ for n in block['entry'][k]:
+ if n not in neededvars:
+ neededvars.append(n)
+ if block['block']=='function':
+ if block.has_key('result'):
+ neededvars.append(block['result'])
+ else:
+ neededvars.append(block['name'])
+ if block['block'] in ['subroutine','function']:
+ name = block['name']
+ if vars.has_key(name) and vars[name].has_key('intent'):
+ block['intent'] = vars[name]['intent']
+ if block['block'] == 'type':
+ neededvars.extend(vars.keys())
+ for n in vars.keys():
+ if n not in neededvars:
+ del vars[n]
+ return vars
+analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z',re.I)
+def analyzeargs(block):
+ setmesstext(block)
+ implicitrules,attrrules=buildimplicitrules(block)
+ if not block.has_key('args'): block['args']=[]
+ args=[]
+ re_1 = analyzeargs_re_1
+ for a in block['args']:
+ if not re_1.match(a): # `a` is an expression
+ at=determineexprtype(a,block['vars'],implicitrules)
+ na='e_'
+ for c in a:
+ if c not in string.lowercase+string.digits: c='_'
+ na=na+c
+ if na[-1]=='_': na=na+'e'
+ else: na=na+'_e'
+ a=na
+ while block['vars'].has_key(a) or a in block['args']: a=a+'r'
+ block['vars'][a]=at
+ args.append(a)
+ if not block['vars'].has_key(a):
+ block['vars'][a]={}
+ if block.has_key('externals') and a in block['externals']+block['interfaced']:
+ block['vars'][a]=setattrspec(block['vars'][a],'external')
+ block['args']=args
+
+ if block.has_key('entry'):
+ for k,args1 in block['entry'].items():
+ for a in args1:
+ if not block['vars'].has_key(a):
+ block['vars'][a]={}
+
+ for b in block['body']:
+ if b['name'] in args:
+ if not block.has_key('externals'): block['externals']=[]
+ if b['name'] not in block['externals']:
+ block['externals'].append(b['name'])
+ if block.has_key('result') and not block['vars'].has_key(block['result']):
+ block['vars'][block['result']]={}
+ return block
+determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z',re.I)
+determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P<name>[\w]+)|)\Z',re.I)
+determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P<name>[\w]+)|)\Z',re.I)
+determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I)
+determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z',re.I)
+def _ensure_exprdict(r):
+ if type(r) is type(0):
+ return {'typespec':'integer'}
+ if type(r) is type(0.0):
+ return {'typespec':'real'}
+ if type(r) is type(0j):
+ return {'typespec':'complex'}
+ assert type(r) is type({}),`r`
+ return r
+
+def determineexprtype(expr,vars,rules={}):
+ if vars.has_key(expr):
+ return _ensure_exprdict(vars[expr])
+ expr=string.strip(expr)
+ if determineexprtype_re_1.match(expr):
+ return {'typespec':'complex'}
+ m=determineexprtype_re_2.match(expr)
+ if m:
+ if m.groupdict().has_key('name') and m.group('name'):
+ outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`)
+ return {'typespec':'integer'}
+ m = determineexprtype_re_3.match(expr)
+ if m:
+ if m.groupdict().has_key('name') and m.group('name'):
+ outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`)
+ return {'typespec':'real'}
+ for op in ['+','-','*','/']:
+ for e in map(string.strip,string.split(markoutercomma(expr,comma=op),'@'+op+'@')):
+ if vars.has_key(e):
+ return _ensure_exprdict(vars[e])
+ t={}
+ if determineexprtype_re_4.match(expr): # in parenthesis
+ t=determineexprtype(expr[1:-1],vars,rules)
+ else:
+ m = determineexprtype_re_5.match(expr)
+ if m:
+ rn=m.group('name')
+ t=determineexprtype(m.group('name'),vars,rules)
+ if t and t.has_key('attrspec'): del t['attrspec']
+ if not t:
+ if rules.has_key(rn[0]):
+ return _ensure_exprdict(rules[rn[0]])
+ if expr[0] in '\'"':
+ return {'typespec':'character','charselector':{'*':'*'}}
+ if not t:
+ outmess('determineexprtype: could not determine expressions (%s) type.\n'%(`expr`))
+ return t
+######
+def crack2fortrangen(block,tab='\n'):
+ setmesstext(block)
+ ret=''
+ if type(block) is type([]):
+ for g in block:
+ ret=ret+crack2fortrangen(g,tab)
+ return ret
+ prefix=''
+ name=''
+ args=''
+ blocktype=block['block']
+ if blocktype=='program': return ''
+ al=[]
+ if block.has_key('name'): name=block['name']
+ if block.has_key('args'):
+ vars = block['vars']
+ al = [a for a in block['args'] if not isintent_callback(vars[a])]
+ if block['block']=='function' or al:
+ args='(%s)'%string.join(al,',')
+ f2pyenhancements = ''
+ if block.has_key('f2pyenhancements'):
+ for k in block['f2pyenhancements'].keys():
+ f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k])
+ intent_lst = block.get('intent',[])[:]
+ if blocktype=='function' and 'callback' in intent_lst:
+ intent_lst.remove('callback')
+ if intent_lst:
+ f2pyenhancements = '%s%sintent(%s) %s'%\
+ (f2pyenhancements,tab+tabchar,
+ string.join(intent_lst,','),name)
+ use=''
+ if block.has_key('use'):
+ use=use2fortran(block['use'],tab+tabchar)
+ common=''
+ if block.has_key('common'):
+ common=common2fortran(block['common'],tab+tabchar)
+ if name=='unknown_interface': name=''
+ result=''
+ if block.has_key('result'):
+ result=' result (%s)'%block['result']
+ if block['result'] not in al:
+ al.append(block['result'])
+ #if block.has_key('prefix'): prefix=block['prefix']+' '
+ body=crack2fortrangen(block['body'],tab+tabchar)
+ vars=vars2fortran(block,block['vars'],al,tab+tabchar)
+ mess=''
+ if block.has_key('from'):
+ mess='! in %s'%block['from']
+ if block.has_key('entry'):
+ entry_stmts = ''
+ for k,i in block['entry'].items():
+ entry_stmts = '%s%sentry %s(%s)' \
+ % (entry_stmts,tab+tabchar,k,string.join(i,','))
+ body = body + entry_stmts
+ if blocktype=='block data' and name=='_BLOCK_DATA_':
+ name = ''
+ ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab,prefix,blocktype,name,args,result,mess,f2pyenhancements,use,vars,common,body,tab,blocktype,name)
+ return ret
+def common2fortran(common,tab=''):
+ ret=''
+ for k in common.keys():
+ if k=='_BLNK_':
+ ret='%s%scommon %s'%(ret,tab,string.join(common[k],','))
+ else:
+ ret='%s%scommon /%s/ %s'%(ret,tab,k,string.join(common[k],','))
+ return ret
+def use2fortran(use,tab=''):
+ ret=''
+ for m in use.keys():
+ ret='%s%suse %s,'%(ret,tab,m)
+ if use[m]=={}:
+ if ret and ret[-1]==',': ret=ret[:-1]
+ continue
+ if use[m].has_key('only') and use[m]['only']:
+ ret='%s,only:'%(ret)
+ if use[m].has_key('map') and use[m]['map']:
+ c=' '
+ for k in use[m]['map'].keys():
+ if k==use[m]['map'][k]:
+ ret='%s%s%s'%(ret,c,k); c=','
+ else:
+ ret='%s%s%s=>%s'%(ret,c,k,use[m]['map'][k]); c=','
+ if ret and ret[-1]==',': ret=ret[:-1]
+ return ret
+def true_intent_list(var):
+ lst = var['intent']
+ ret = []
+ for intent in lst:
+ try:
+ exec('c = isintent_%s(var)' % intent)
+ except NameError:
+ c = 0
+ if c:
+ ret.append(intent)
+ return ret
+def vars2fortran(block,vars,args,tab=''):
+ """
+ TODO:
+ public sub
+ ...
+ """
+ setmesstext(block)
+ ret=''
+ nout=[]
+ for a in args:
+ if block['vars'].has_key(a): nout.append(a)
+ if block.has_key('commonvars'):
+ for a in block['commonvars']:
+ if vars.has_key(a):
+ if a not in nout: nout.append(a)
+ else: errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a)
+ if block.has_key('varnames'):
+ nout.extend(block['varnames'])
+ for a in vars.keys():
+ if a not in nout: nout.append(a)
+ for a in nout:
+ if vars[a].has_key('depend'):
+ for d in vars[a]['depend']:
+ if vars.has_key(d) and vars[d].has_key('depend') and a in vars[d]['depend']:
+ errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a,d))
+ if block.has_key('externals') and a in block['externals']:
+ if isintent_callback(vars[a]):
+ ret='%s%sintent(callback) %s'%(ret,tab,a)
+ ret='%s%sexternal %s'%(ret,tab,a)
+ if isoptional(vars[a]):
+ ret='%s%soptional %s'%(ret,tab,a)
+ if vars.has_key(a) and not vars[a].has_key('typespec'):
+ continue
+ cont=1
+ for b in block['body']:
+ if a==b['name'] and b['block']=='function': cont=0;break
+ if cont: continue
+ if not vars.has_key(a):
+ show(vars)
+ outmess('vars2fortran: No definition for argument "%s".\n'%a)
+ continue
+ if a==block['name'] and not block['block']=='function':
+ continue
+ if not vars[a].has_key('typespec'):
+ if vars[a].has_key('attrspec') and 'external' in vars[a]['attrspec']:
+ if a in args:
+ ret='%s%sexternal %s'%(ret,tab,a)
+ continue
+ show(vars[a])
+ outmess('vars2fortran: No typespec for argument "%s".\n'%a)
+ continue
+ vardef=vars[a]['typespec']
+ if vardef=='type' and vars[a].has_key('typename'):
+ vardef='%s(%s)'%(vardef,vars[a]['typename'])
+ selector={}
+ if vars[a].has_key('kindselector'): selector=vars[a]['kindselector']
+ elif vars[a].has_key('charselector'): selector=vars[a]['charselector']
+ if selector.has_key('*'):
+ if selector['*'] in ['*',':']:
+ vardef='%s*(%s)'%(vardef,selector['*'])
+ else:
+ vardef='%s*%s'%(vardef,selector['*'])
+ else:
+ if selector.has_key('len'):
+ vardef='%s(len=%s'%(vardef,selector['len'])
+ if selector.has_key('kind'):
+ vardef='%s,kind=%s)'%(vardef,selector['kind'])
+ else:
+ vardef='%s)'%(vardef)
+ elif selector.has_key('kind'):
+ vardef='%s(kind=%s)'%(vardef,selector['kind'])
+ c=' '
+ if vars[a].has_key('attrspec'):
+ attr=[]
+ for l in vars[a]['attrspec']:
+ if l not in ['external']:
+ attr.append(l)
+ if attr:
+ vardef='%s %s'%(vardef,string.join(attr,','))
+ c=','
+ if vars[a].has_key('dimension'):
+# if not isintent_c(vars[a]):
+# vars[a]['dimension'].reverse()
+ vardef='%s%sdimension(%s)'%(vardef,c,string.join(vars[a]['dimension'],','))
+ c=','
+ if vars[a].has_key('intent'):
+ lst = true_intent_list(vars[a])
+ if lst:
+ vardef='%s%sintent(%s)'%(vardef,c,string.join(lst,','))
+ c=','
+ if vars[a].has_key('check'):
+ vardef='%s%scheck(%s)'%(vardef,c,string.join(vars[a]['check'],','))
+ c=','
+ if vars[a].has_key('depend'):
+ vardef='%s%sdepend(%s)'%(vardef,c,string.join(vars[a]['depend'],','))
+ c=','
+ if vars[a].has_key('='):
+ v = vars[a]['=']
+ if vars[a]['typespec'] in ['complex','double complex']:
+ try:
+ v = eval(v)
+ v = '(%s,%s)' % (v.real,v.imag)
+ except:
+ pass
+ vardef='%s :: %s=%s'%(vardef,a,v)
+ else:
+ vardef='%s :: %s'%(vardef,a)
+ ret='%s%s%s'%(ret,tab,vardef)
+ return ret
+######
+
+def crackfortran(files):
+ global usermodules
+ outmess('Reading fortran codes...\n',0)
+ readfortrancode(files,crackline)
+ outmess('Post-processing...\n',0)
+ usermodules=[]
+ postlist=postcrack(grouplist[0])
+ outmess('Post-processing (stage 2)...\n',0)
+ postlist=postcrack2(postlist)
+ return usermodules+postlist
+def crack2fortran(block):
+ global f2py_version
+ pyf=crack2fortrangen(block)+'\n'
+ header="""! -*- f90 -*-
+! Note: the context of this file is case sensitive.
+"""
+ footer="""
+! This file was auto-generated with f2py (version:%s).
+! See http://cens.ioc.ee/projects/f2py2e/
+"""%(f2py_version)
+ return header+pyf+footer
+
+if __name__ == "__main__":
+ files=[]
+ funcs=[]
+ f=1;f2=0;f3=0
+ showblocklist=0
+ for l in sys.argv[1:]:
+ if l=='': pass
+ elif l[0]==':':
+ f=0
+ elif l=='-quiet':
+ quiet=1
+ verbose=0
+ elif l=='-verbose':
+ verbose=2
+ quiet=0
+ elif l=='-fix':
+ if strictf77:
+ outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n',0)
+ skipemptyends=1
+ sourcecodeform='fix'
+ elif l=='-skipemptyends':
+ skipemptyends=1
+ elif l=='--ignore-contains':
+ ignorecontains=1
+ elif l=='-f77':
+ strictf77=1
+ sourcecodeform='fix'
+ elif l=='-f90':
+ strictf77=0
+ sourcecodeform='free'
+ skipemptyends=1
+ elif l=='-h':
+ f2=1
+ elif l=='-show':
+ showblocklist=1
+ elif l=='-m':
+ f3=1
+ elif l[0]=='-':
+ errmess('Unknown option %s\n'%`l`)
+ elif f2:
+ f2=0
+ pyffilename=l
+ elif f3:
+ f3=0
+ f77modulename=l
+ elif f:
+ try:
+ open(l).close()
+ files.append(l)
+ except IOError,detail:
+ errmess('IOError: %s\n'%str(detail))
+ else:
+ funcs.append(l)
+ if not strictf77 and f77modulename and not skipemptyends:
+ outmess("""\
+ Warning: You have specifyied module name for non Fortran 77 code
+ that should not need one (expect if you are scanning F90 code
+ for non module blocks but then you should use flag -skipemptyends
+ and also be sure that the files do not contain programs without program statement).
+""",0)
+
+ postlist=crackfortran(files,funcs)
+ if pyffilename:
+ outmess('Writing fortran code to file %s\n'%`pyffilename`,0)
+ pyf=crack2fortran(postlist)
+ f=open(pyffilename,'w')
+ f.write(pyf)
+ f.close()
+ if showblocklist:
+ show(postlist)
diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py
new file mode 100644
index 000000000..34784b39c
--- /dev/null
+++ b/numpy/f2py/diagnose.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+
+import os,sys,tempfile
+
+def run_command(cmd):
+ print 'Running %r:' % (cmd)
+ s = os.system(cmd)
+ print '------'
+def run():
+ _path = os.getcwd()
+ os.chdir(tempfile.gettempdir())
+ print '------'
+ print 'os.name=%r' % (os.name)
+ print '------'
+ print 'sys.platform=%r' % (sys.platform)
+ print '------'
+ print 'sys.version:'
+ print sys.version
+ print '------'
+ print 'sys.prefix:'
+ print sys.prefix
+ print '------'
+ print 'sys.path=%r' % (':'.join(sys.path))
+ print '------'
+ try:
+ import Numeric
+ has_Numeric = 1
+ except ImportError:
+ print 'Failed to import Numeric:',sys.exc_value
+ has_Numeric = 0
+ try:
+ import numarray
+ has_numarray = 1
+ except ImportError:
+ print 'Failed to import numarray:',sys.exc_value
+ has_numarray = 0
+ try:
+ import scipy.base
+ has_newscipy = 1
+ except ImportError:
+ print 'Failed to import new scipy:', sys.exc_value
+ has_newscipy = 0
+ try:
+ import f2py2e
+ has_f2py2e = 1
+ except ImportError:
+ print 'Failed to import f2py2e:',sys.exc_value
+ has_f2py2e = 0
+ try:
+ import scipy.distutils
+ has_scipy_distutils = 2
+ except ImportError:
+ try:
+ import scipy_distutils
+ has_scipy_distutils = 1
+ except ImportError:
+ print 'Failed to import scipy_distutils:',sys.exc_value
+ has_scipy_distutils = 0
+ if has_Numeric:
+ try:
+ print 'Found Numeric version %r in %s' % \
+ (Numeric.__version__,Numeric.__file__)
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ if has_numarray:
+ try:
+ print 'Found numarray version %r in %s' % \
+ (numarray.__version__,numarray.__file__)
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ if has_newscipy:
+ try:
+ print 'Found new scipy version %r in %s' % \
+ (scipy.__version__, scipy.__file__)
+ except Exception,msg:
+ print 'error:', msg
+ print '------'
+ if has_f2py2e:
+ try:
+ print 'Found f2py2e version %r in %s' % \
+ (f2py2e.__version__.version,f2py2e.__file__)
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ if has_scipy_distutils:
+ try:
+ if has_scipy_distutils==2:
+ print 'Found scipy.distutils version %r in %r' % (\
+ scipy.distutils.__version__,
+ scipy.distutils.__file__)
+ else:
+ print 'Found scipy_distutils version %r in %r' % (\
+ scipy_distutils.scipy_distutils_version.scipy_distutils_version,
+ scipy_distutils.__file__)
+ print '------'
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ try:
+ if has_scipy_distutils==1:
+ print 'Importing scipy_distutils.command.build_flib ...',
+ import scipy_distutils.command.build_flib as build_flib
+ print 'ok'
+ print '------'
+ try:
+ print 'Checking availability of supported Fortran compilers:'
+ for compiler_class in build_flib.all_compilers:
+ compiler_class(verbose=1).is_available()
+ print '------'
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ except Exception,msg:
+ print 'error:',msg,'(ignore it, build_flib is obsolute for scipy.distutils 0.2.2 and up)'
+ print '------'
+ try:
+ if has_scipy_distutils==2:
+ print 'Importing scipy.distutils.fcompiler ...',
+ import scipy.distutils.fcompiler as fcompiler
+ else:
+ print 'Importing scipy_distutils.fcompiler ...',
+ import scipy_distutils.fcompiler as fcompiler
+ print 'ok'
+ print '------'
+ try:
+ print 'Checking availability of supported Fortran compilers:'
+ fcompiler.show_fcompilers()
+ print '------'
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ try:
+ if has_scipy_distutils==2:
+ print 'Importing scipy.distutils.cpuinfo ...',
+ from scipy.distutils.cpuinfo import cpuinfo
+ print 'ok'
+ print '------'
+ else:
+ try:
+ print 'Importing scipy_distutils.command.cpuinfo ...',
+ from scipy_distutils.command.cpuinfo import cpuinfo
+ print 'ok'
+ print '------'
+ except Exception,msg:
+ print 'error:',msg,'(ignore it)'
+ print 'Importing scipy_distutils.cpuinfo ...',
+ from scipy_distutils.cpuinfo import cpuinfo
+ print 'ok'
+ print '------'
+ cpu = cpuinfo()
+ print 'CPU information:',
+ for name in dir(cpuinfo):
+ if name[0]=='_' and name[1]!='_' and getattr(cpu,name[1:])():
+ print name[1:],
+ print '------'
+ except Exception,msg:
+ print 'error:',msg
+ print '------'
+ os.chdir(_path)
+if __name__ == "__main__":
+ run()
diff --git a/numpy/f2py/doc/Makefile b/numpy/f2py/doc/Makefile
new file mode 100644
index 000000000..2f241da0a
--- /dev/null
+++ b/numpy/f2py/doc/Makefile
@@ -0,0 +1,76 @@
+# Makefile for compiling f2py2e documentation (dvi, ps, html)
+# Pearu Peterson <pearu@ioc.ee>
+
+REL=4
+TOP = usersguide
+LATEXSRC = bugs.tex commands.tex f2py2e.tex intro.tex notes.tex signaturefile.tex
+MAINLATEX = f2py2e
+
+LATEX = latex
+PDFLATEX = pdflatex
+
+COLLECTINPUT = ./collectinput.py
+INSTALLDATA = install -m 644 -c
+
+TTH = tth
+TTHFILTER = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -L$(MAINLATEX) -i
+TTHFILTER2 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lpython9 -i
+TTHFILTER3 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lfortranobject -i
+TTHMISSING = "\
+***************************************************************\n\
+Warning: Could not find tth (a TeX to HTML translator) \n\
+ or an error arised was by tth\n\
+You can download tth from http://hutchinson.belmont.ma.us/tth/ \n\
+or\n\
+use your favorite LaTeX to HTML translator on file tmp_main.tex\n\
+***************************************************************\
+"
+
+all: dvi ps html clean
+$(MAINLATEX).dvi: $(LATEXSRC)
+ $(LATEX) $(MAINLATEX).tex
+ $(LATEX) $(MAINLATEX).tex
+ $(LATEX) $(MAINLATEX).tex
+ $(PDFLATEX) $(MAINLATEX).tex
+$(TOP).dvi: $(MAINLATEX).dvi
+ cp -f $(MAINLATEX).dvi $(TOP).dvi
+ mv -f $(MAINLATEX).pdf $(TOP).pdf
+$(TOP).ps: $(TOP).dvi
+ dvips $(TOP).dvi -o
+$(TOP).html: $(LATEXSRC)
+ $(COLLECTINPUT) < $(MAINLATEX).tex > tmp_$(MAINLATEX).tex
+ @test `which $(TTH)` && cat tmp_$(MAINLATEX).tex | $(TTHFILTER) > $(TOP).html\
+ || echo -e $(TTHMISSING)
+dvi: $(TOP).dvi
+ps: $(TOP).ps
+ gzip -f $(TOP).ps
+html: $(TOP).html
+
+python9:
+ cp -f python9.tex f2python9-final/src/
+ cd f2python9-final && mk_html.sh
+ cd f2python9-final && mk_ps.sh
+ cd f2python9-final && mk_pdf.sh
+pyfobj:
+ $(LATEX) fortranobject.tex
+ $(LATEX) fortranobject.tex
+ $(LATEX) fortranobject.tex
+ @test `which $(TTH)` && cat fortranobject.tex | $(TTHFILTER3) > pyfobj.html\
+ || echo -e $(TTHMISSING)
+ dvips fortranobject.dvi -o pyfobj.ps
+ gzip -f pyfobj.ps
+ pdflatex fortranobject.tex
+ mv fortranobject.pdf pyfobj.pdf
+
+WWWDIR=/net/cens/home/www/unsecure/projects/f2py2e/
+wwwpage: all
+ $(INSTALLDATA) index.html $(TOP).html $(TOP).ps.gz $(TOP).dvi $(TOP).pdf \
+ Release-$(REL).x.txt ../NEWS.txt win32_notes.txt $(WWWDIR)
+ $(INSTALLDATA) pyfobj.{ps.gz,pdf,html} $(WWWDIR)
+ $(INSTALLDATA) f2python9-final/f2python9.{ps.gz,pdf,html} f2python9-final/{flow,structure,aerostructure}.jpg $(WWWDIR)
+clean:
+ rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc}
+distclean:
+ rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc}
+ rm -f $(TOP).{ps,dvi,html,pdf,ps.gz}
+ rm -f *~
diff --git a/numpy/f2py/doc/Release-1.x.txt b/numpy/f2py/doc/Release-1.x.txt
new file mode 100644
index 000000000..46d6fbf09
--- /dev/null
+++ b/numpy/f2py/doc/Release-1.x.txt
@@ -0,0 +1,27 @@
+
+I am pleased to announce the first public release of f2py 1.116:
+
+Writing Python C/API wrappers for Fortran routines can be a very
+tedious task, especially if a Fortran routine takes more than 20
+arguments but only few of them are relevant for the problems that they
+solve.
+
+The Fortran to Python Interface Generator, or FPIG for short, is a
+command line tool (f2py) for generating Python C/API modules for
+wrapping Fortran 77 routines, accessing common blocks from Python, and
+calling Python functions from Fortran (call-backs).
+
+The tool can be downloaded from
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+where you can also find information about f2py features and its User's
+Guide.
+
+f2py is released under the LGPL license.
+
+With regards,
+ Pearu Peterson <pearu@ioc.ee>
+
+<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 1.116</A> - The
+Fortran to Python Interface Generator (25-Jan-00)
diff --git a/numpy/f2py/doc/Release-2.x.txt b/numpy/f2py/doc/Release-2.x.txt
new file mode 100644
index 000000000..807eb0ca8
--- /dev/null
+++ b/numpy/f2py/doc/Release-2.x.txt
@@ -0,0 +1,77 @@
+
+FPIG - Fortran to Python Interface Generator
+
+I am pleased to announce the second public release of f2py
+(version 2.264):
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+f2py is a command line tool for binding Python and Fortran codes. It
+scans Fortran 77/90/95 codes and generates a Python C/API module that
+makes it possible to call Fortran routines from Python. No Fortran or
+C expertise is required for using this tool.
+
+Features include:
+
+ *** All basic Fortran types are supported:
+ integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ],
+ character[ | *(*) | *1 | *2 | *3 | ... ]
+ real[ | *4 | *8 | *16 ], double precision,
+ complex[ | *8 | *16 | *32 ]
+
+ *** Multi-dimensional arrays of (almost) all basic types.
+ Dimension specifications:
+ <dim> | <start>:<end> | * | :
+
+ *** Supported attributes:
+ intent([ in | inout | out | hide | in,out | inout,out ])
+ dimension(<dimspec>)
+ depend([<names>])
+ check([<C-booleanexpr>])
+ note(<LaTeX text>)
+ optional, required, external
+
+ *** Calling Fortran 77/90/95 subroutines and functions. Also
+ Fortran 90/95 module routines. Internal initialization of
+ optional arguments.
+
+ *** Accessing COMMON blocks from Python. Accessing Fortran 90/95
+ module data coming soon.
+
+ *** Call-back functions: calling Python functions from Fortran with
+ very flexible hooks.
+
+ *** In Python, arguments of the interfaced functions may be of
+     different type - necessary type conversions are done
+ internally in C level.
+
+ *** Automatically generates documentation (__doc__,LaTeX) for
+ interface functions.
+
+ *** Automatically generates signature files --- user has full
+ control over the interface constructions. Automatically
+ detects the signatures of call-back functions, solves argument
+ dependencies, etc.
+
+ *** Automatically generates Makefile for compiling Fortran and C
+ codes and linking them to a shared module. Many compilers are
+ supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft
+ F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha
+ Linux, HP-UX, IRIX64.
+
+ *** Complete User's Guide in various formats (html,ps,pdf,dvi).
+
+ *** f2py users list is available for support, feedback, etc.
+
+More information about f2py, see
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+f2py is released under the LGPL license.
+
+Sincerely,
+ Pearu Peterson <pearu@ioc.ee>
+ September 12, 2000
+
+<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 2.264</A> - The
+Fortran to Python Interface Generator (12-Sep-00)
diff --git a/numpy/f2py/doc/Release-3.x.txt b/numpy/f2py/doc/Release-3.x.txt
new file mode 100644
index 000000000..940771015
--- /dev/null
+++ b/numpy/f2py/doc/Release-3.x.txt
@@ -0,0 +1,87 @@
+
+F2PY - Fortran to Python Interface Generator
+
+I am pleased to announce the third public release of f2py
+(version 2.3.321):
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+f2py is a command line tool for binding Python and Fortran codes. It
+scans Fortran 77/90/95 codes and generates a Python C/API module that
+makes it possible to call Fortran subroutines from Python. No Fortran or
+C expertise is required for using this tool.
+
+Features include:
+
+ *** All basic Fortran types are supported:
+ integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ],
+ character[ | *(*) | *1 | *2 | *3 | ... ]
+ real[ | *4 | *8 | *16 ], double precision,
+ complex[ | *8 | *16 | *32 ]
+
+ *** Multi-dimensional arrays of (almost) all basic types.
+ Dimension specifications:
+ <dim> | <start>:<end> | * | :
+
+ *** Supported attributes and statements:
+ intent([ in | inout | out | hide | in,out | inout,out ])
+ dimension(<dimspec>)
+ depend([<names>])
+ check([<C-booleanexpr>])
+ note(<LaTeX text>)
+ optional, required, external
+NEW: intent(c), threadsafe, fortranname
+
+ *** Calling Fortran 77/90/95 subroutines and functions. Also
+ Fortran 90/95 module subroutines are supported. Internal
+ initialization of optional arguments.
+
+ *** Accessing COMMON blocks from Python.
+NEW: Accessing Fortran 90/95 module data.
+
+ *** Call-back functions: calling Python functions from Fortran with
+ very flexible hooks.
+
+ *** In Python, arguments of the interfaced functions may be of
+     different type - necessary type conversions are done
+ internally in C level.
+
+ *** Automatically generates documentation (__doc__,LaTeX) for
+ interfaced functions.
+
+ *** Automatically generates signature files --- user has full
+ control over the interface constructions. Automatically
+ detects the signatures of call-back functions, solves argument
+ dependencies, etc.
+
+NEW: * Automatically generates setup_<modulename>.py for building
+ extension modules using tools from distutils and
+ fortran_support module (SciPy).
+
+ *** Automatically generates Makefile for compiling Fortran and C
+ codes and linking them to a shared module. Many compilers are
+ supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft
+ F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha
+ Linux, HP-UX, IRIX64.
+
+ *** Complete User's Guide in various formats (html,ps,pdf,dvi).
+
+ *** f2py users list is available for support, feedback, etc.
+
+NEW: * Installation with distutils.
+
+ *** And finally, many bugs are fixed.
+
+More information about f2py, see
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+LICENSE:
+ f2py is released under the LGPL.
+
+Sincerely,
+ Pearu Peterson <pearu@cens.ioc.ee>
+ December 4, 2001
+
+<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 2.3.321</A> - The
+Fortran to Python Interface Generator (04-Dec-01)
diff --git a/numpy/f2py/doc/Release-4.x.txt b/numpy/f2py/doc/Release-4.x.txt
new file mode 100644
index 000000000..ed071a0cb
--- /dev/null
+++ b/numpy/f2py/doc/Release-4.x.txt
@@ -0,0 +1,91 @@
+
+F2PY - Fortran to Python Interface Generator
+
+I am pleased to announce the fourth public release of f2py
+(version 2.4.366):
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+f2py is a command line tool for binding Python and Fortran codes. It
+scans Fortran 77/90/95 codes and generates a Python C/API module that
+makes it possible to call Fortran subroutines from Python. No Fortran or
+C expertise is required for using this tool.
+
+New features:
+ *** Win32 support.
+ *** Better Python C/API generated code (-Wall is much less verbose).
+
+Features include:
+
+ *** All basic Fortran types are supported:
+ integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ],
+ character[ | *(*) | *1 | *2 | *3 | ... ]
+ real[ | *4 | *8 | *16 ], double precision,
+ complex[ | *8 | *16 | *32 ]
+
+ *** Multi-dimensional arrays of (almost) all basic types.
+ Dimension specifications:
+ <dim> | <start>:<end> | * | :
+
+ *** Supported attributes and statements:
+ intent([ in | inout | out | hide | in,out | inout,out ])
+ dimension(<dimspec>)
+ depend([<names>])
+ check([<C-booleanexpr>])
+ note(<LaTeX text>)
+ optional, required, external
+ intent(c), threadsafe, fortranname
+
+ *** Calling Fortran 77/90/95 subroutines and functions. Also
+ Fortran 90/95 module subroutines are supported. Internal
+ initialization of optional arguments.
+
+ *** Accessing COMMON blocks from Python.
+ Accessing Fortran 90/95 module data.
+
+ *** Call-back functions: calling Python functions from Fortran with
+ very flexible hooks.
+
+ *** In Python, arguments of the interfaced functions may be of
+     different type - necessary type conversions are done
+ internally in C level.
+
+ *** Automatically generates documentation (__doc__,LaTeX) for
+ interfaced functions.
+
+ *** Automatically generates signature files --- user has full
+ control over the interface constructions. Automatically
+ detects the signatures of call-back functions, solves argument
+ dependencies, etc.
+
+ *** Automatically generates setup_<modulename>.py for building
+ extension modules using tools from distutils and
+ fortran_support module (SciPy).
+
+ *** Automatically generates Makefile for compiling Fortran and C
+ codes and linking them to a shared module. Many compilers are
+ supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft
+ F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha
+ Linux, HP-UX, IRIX64.
+
+ *** Complete User's Guide in various formats (html,ps,pdf,dvi).
+
+ *** f2py users list is available for support, feedback, etc.
+
+ *** Installation with distutils.
+
+ *** And finally, many bugs are fixed.
+
+More information about f2py, see
+
+ http://cens.ioc.ee/projects/f2py2e/
+
+LICENSE:
+ f2py is released under the LGPL.
+
+Sincerely,
+ Pearu Peterson <pearu@cens.ioc.ee>
+ December 17, 2001
+
+<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 2.4.366</A> - The
+Fortran to Python Interface Generator (17-Dec-01)
diff --git a/numpy/f2py/doc/apps.tex b/numpy/f2py/doc/apps.tex
new file mode 100644
index 000000000..513c048bd
--- /dev/null
+++ b/numpy/f2py/doc/apps.tex
@@ -0,0 +1,71 @@
+
+\section{Applications}
+\label{sec:apps}
+
+
+\subsection{Example: wrapping C library \texttt{fftw}}
+\label{sec:wrapfftw}
+
+Here follows a simple example how to use \fpy to generate a wrapper
+for C functions. Let us create a FFT code using the functions in FFTW
+library. I'll assume that the library \texttt{fftw} is configured with
+\texttt{-{}-enable-shared} option.
+
+Here is the wrapper for the typical usage of FFTW:
+\begin{verbatim}
+/* File: wrap_dfftw.c */
+#include <dfftw.h>
+
+extern void dfftw_one(fftw_complex *in,fftw_complex *out,int *n) {
+ fftw_plan p;
+ p = fftw_create_plan(*n,FFTW_FORWARD,FFTW_ESTIMATE);
+ fftw_one(p,in,out);
+ fftw_destroy_plan(p);
+}
+\end{verbatim}
+and here follows the corresponding signature file (created manually):
+\begin{verbatim}
+!%f90
+! File: fftw.f90
+module fftw
+ interface
+ subroutine dfftw_one(in,out,n)
+ integer n
+ complex*16 in(n),out(n)
+ intent(out) out
+ intent(hide) n
+ end subroutine dfftw_one
+ end interface
+end module fftw
+\end{verbatim}
+
+Now let us generate the Python C/API module with \fpy:
+\begin{verbatim}
+f2py fftw.f90
+\end{verbatim}
+and compile it
+\begin{verbatim}
+gcc -shared -I/numeric/include -I`f2py -I` -L/numeric/lib -ldfftw \
+ -o fftwmodule.so -DNO_APPEND_FORTRAN fftwmodule.c wrap_dfftw.c
+\end{verbatim}
+
+In Python:
+\begin{verbatim}
+>>> from Numeric import *
+>>> from fftw import *
+>>> print dfftw_one.__doc__
+Function signature:
+ out = dfftw_one(in)
+Required arguments:
+ in : input rank-1 array('D') with bounds (n)
+Return objects:
+ out : rank-1 array('D') with bounds (n)
+>>> print dfftw_one([1,2,3,4])
+[ 10.+0.j -2.+2.j -2.+0.j -2.-2.j]
+>>>
+\end{verbatim}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
diff --git a/numpy/f2py/doc/bugs.tex b/numpy/f2py/doc/bugs.tex
new file mode 100644
index 000000000..699ecf530
--- /dev/null
+++ b/numpy/f2py/doc/bugs.tex
@@ -0,0 +1,109 @@
+
+\section{Bugs, Plans, and Feedback}
+\label{sec:bugs}
+
+Currently no bugs have been found that I was not able to fix. I will be
+happy to receive bug reports from you (so that I could fix them and
+keep the first sentence of this paragraph as true as possible ;-).
+Note that \fpy is developed to work properly with gcc/g77
+compilers.
+\begin{description}
+\item[NOTE:] Wrapping callback functions returning \texttt{COMPLEX}
+ may fail on some systems. Workaround: avoid it by using callback
+ subroutines.
+\end{description}
+
+Here follows a list of things that I plan to implement in (near) future:
+\begin{enumerate}
+\item recognize file types by their extension (signatures:
+ \texttt{*.pyf}, Fortran 77, Fortran 90 fixed: \texttt{*.f, *.for, *.F, *.FOR},
+ Fortran 90 free: \texttt{*.F90, *.f90, *.m, *.f95, *.F95}); [DONE]
+\item installation using \texttt{distutils} (when it will be stable);
+\item put out to the web examples of \fpy usages in real situations:
+ wrapping \texttt{vode}, for example;
+\item implement support for \texttt{PARAMETER} statement; [DONE]
+\item rewrite test-site;
+\item ...
+\end{enumerate}
+and here are things that I plan to do in future:
+\begin{enumerate}
+\item implement \texttt{intent(cache)} attribute for an optional work
+ arrays with a feature of allocating additional memory if needed;
+\item use \fpy for wrapping Fortran 90/95 codes. \fpy should scan
+ Fortran 90/95 codes with no problems, what needs to be done is find
+ out how to call a Fortran 90/95 function (from a module) from
+ C. Anybody there willing to test \fpy with Fortran 90/95 modules? [DONE]
+\item implement support for Fortran 90/95 module data; [DONE]
+\item implement support for \texttt{BLOCK DATA} blocks (if needed);
+\item test/document \fpy for \texttt{CHARACTER} arrays;
+\item decide whether internal transposition of multi-dimensional
+ arrays is reasonable (need efficient code then), even if this is
+  controlled by the user through some additional keyword; need
+ consistent and safe policy here;
+\item use \fpy for generating wrapper functions also for C programs (a
+ kind of SWIG, only between Python and C). For that \fpy needs a
+ command line switch to inform itself that C scalars are passed in by
+ their value, not by their reference, for instance;
+\item introduce a counter that counts the number of inefficient usages
+ of wrapper functions (copying caused by type-casting, non-contiguous
+ arrays);
+\item if needed, make \texttt{DATA} statement to work properly for
+ arrays;
+\item rewrite \texttt{COMMON} wrapper; [DONE]
+\item ...
+\end{enumerate}
+I'll appreciate any feedback that will improve \fpy (bug reports,
+suggestions, etc). If you find a correct Fortran code that fails with
+\fpy, try to send me a minimal version of it so that I could track
+down the cause of the failure. Note also that there is no sense to
+send me files that are auto-generated with \fpy (I can generate them
+myself); the version of \fpy that you are using (run \texttt{\fpy\
+ -v}), and the relevant fortran codes or modified signature files
+should be enough information to fix the bugs. Also add some
+information on compilers and linkers that you use to the bug report.
+
+
+\section{History of \fpy}
+\label{sec:history}
+
+\begin{enumerate}
+\item I was driven to start developing a tool such as \fpy after I had
+  written several Python C/API modules for interfacing various Fortran
+ routines from the Netlib. This work was tedious (some of functions
+ had more than 20 arguments, only few of them made sense for the
+ problems that they solved). I realized that most of the writing
+ could be done automatically.
+\item On 9th of July, 1999, the first lines of the tool was written. A
+ prototype of the tool was ready to use in only three weeks. During
+  this time Travis Oliphant joined the project and shared his
+ valuable knowledge and experience; the call-back mechanism is his
+ major contribution. Then I gave the tool to public under the name
+ FPIG --- \emph{Fortran to Python Interface Generator}. The tool contained
+ only one file \texttt{f2py.py}.
+\item By autumn, it was clear that a better implementation was needed
+ as the debugging process became very tedious. So, I reserved some
+ time and rewrote the tool from scratch. The most important result of
+ this rewriting was the code that reads real Fortran codes and
+ determines the signatures of the Fortran routines. The main
+  attention was paid in particular to this part so that the tool
+ could read arbitrary Fortran~77/90/95 codes. As a result, the other
+ side of the tools task, that is, generating Python C/API functions,
+ was not so great. In public, this version of the tool was called
+ \texttt{f2py2e} --- \emph{Fortran to Python C/API generator, the
+ Second Edition}.
+\item So, a month before The New Year 2000, I started the third
+ iteration of the \fpy development. Now the main attention was to
+ have a good C/API module constructing code. By 21st of January,
+ 2000, the tool of generating wrapper functions for Fortran routines
+ was ready. It had many new features and was more robust than ever.
+\item In 25th of January, 2000, the first public release of \fpy was
+ announced (version 1.116).
+\item In 12th of September, 2000, the second public release of \fpy was
+ announced (version 2.264). It now has among other changes a support
+ for Fortran 90/95 module routines.
+\end{enumerate}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
diff --git a/numpy/f2py/doc/collectinput.py b/numpy/f2py/doc/collectinput.py
new file mode 100755
index 000000000..c2ce2bf89
--- /dev/null
+++ b/numpy/f2py/doc/collectinput.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+collectinput - Collects all files that are included to a main Latex document
+ with \input or \include commands. These commands must be
+ in separate lines.
+
+Copyright 1999 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+Pearu Peterson
+
+Usage:
+ collectinput <infile> <outfile>
+ collectinput <infile> # <outfile>=inputless_<infile>
+ collectinput # in and out are stdin and stdout
+"""
+
+__version__ = "0.0"
+
+stdoutflag=0
+import sys,os,string,fileinput,re,commands
+
+try: fn=sys.argv[2]
+except:
+ try: fn='inputless_'+sys.argv[1]
+ except: stdoutflag=1
+try: fi=sys.argv[1]
+except: fi=()
+if not stdoutflag:
+ sys.stdout=open(fn,'w')
+
+nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)'
+input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?')
+comment=re.compile(r'[^%]*%')
+
+for l in fileinput.input(fi):
+ l=l[:-1]
+ l1=''
+ if comment.match(l):
+ m=comment.match(l)
+ l1=l[m.end()-1:]
+ l=l[:m.end()-1]
+ m=input.match(l)
+ if m:
+ l=string.strip(l)
+ if l[-1]=='}': l=l[:-1]
+ i=m.end()-2
+ sys.stderr.write('>>>>>>')
+ while i>-1 and (l[i] not in [' ','{']): i=i-1
+ if i>-1:
+ fn=l[i+1:]
+ try: f=open(fn,'r'); flag=1; f.close()
+ except:
+ try: f=open(fn+'.tex','r'); flag=1;fn=fn+'.tex'; f.close()
+ except: flag=0
+ if flag==0:
+ sys.stderr.write('Could not open a file: '+fn+'\n')
+ print l+l1
+ continue
+ elif flag==1:
+ sys.stderr.write(fn+'\n')
+ print '%%%%% Begin of '+fn
+ print commands.getoutput(sys.argv[0]+' < '+fn)
+ print '%%%%% End of '+fn
+ else:
+ sys.stderr.write('Could not extract a file name from: '+l)
+ print l+l1
+ else:
+ print l+l1
+sys.stdout.close()
+
+
+
diff --git a/numpy/f2py/doc/commands.tex b/numpy/f2py/doc/commands.tex
new file mode 100644
index 000000000..5101a9ff5
--- /dev/null
+++ b/numpy/f2py/doc/commands.tex
@@ -0,0 +1,20 @@
+\usepackage{xspace}
+\usepackage{verbatim}
+
+%%tth:\newcommand{\xspace}{ }
+
+\newcommand{\fpy}{\texttt{f2py}\xspace}
+
+\newcommand{\bs}{\symbol{`\\}}
+% need bs here:
+%%tth:\newcommand{\bs}{\texttt{<backslash>}}
+
+\newcommand{\shell}[1]{\hspace*{1em}\texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
+
+
diff --git a/numpy/f2py/doc/ex1/arr.f b/numpy/f2py/doc/ex1/arr.f
new file mode 100644
index 000000000..c4e49988f
--- /dev/null
+++ b/numpy/f2py/doc/ex1/arr.f
@@ -0,0 +1,4 @@
+ subroutine arr(l,m,n,a)
+ integer l,m,n
+ real*8 a(l,m,n)
+ end
diff --git a/numpy/f2py/doc/ex1/bar.f b/numpy/f2py/doc/ex1/bar.f
new file mode 100644
index 000000000..c723b5af1
--- /dev/null
+++ b/numpy/f2py/doc/ex1/bar.f
@@ -0,0 +1,4 @@
+ function bar(a,b)
+ integer a,b,bar
+ bar = a + b
+ end
diff --git a/numpy/f2py/doc/ex1/foo.f b/numpy/f2py/doc/ex1/foo.f
new file mode 100644
index 000000000..cdcac4103
--- /dev/null
+++ b/numpy/f2py/doc/ex1/foo.f
@@ -0,0 +1,5 @@
+ subroutine foo(a)
+ integer a
+cf2py intent(in,out) :: a
+ a = a + 5
+ end
diff --git a/numpy/f2py/doc/ex1/foobar-smart.f90 b/numpy/f2py/doc/ex1/foobar-smart.f90
new file mode 100644
index 000000000..61385a685
--- /dev/null
+++ b/numpy/f2py/doc/ex1/foobar-smart.f90
@@ -0,0 +1,24 @@
+!%f90
+module foobar ! in
+ note(This module contains two examples that are used in &
+ \texttt{f2py} documentation.) foobar
+ interface ! in :foobar
+ subroutine foo(a) ! in :foobar:foo.f
+ note(Example of a wrapper function of a Fortran subroutine.) foo
+ integer intent(inout),&
+ note(5 is added to the variable {{}\verb@a@{}} ``in place''.) :: a
+ end subroutine foo
+ function bar(a,b) result (ab) ! in :foobar:bar.f
+ integer :: a
+ integer :: b
+ integer :: ab
+ note(The first value.) a
+ note(The second value.) b
+ note(Add two values.) bar
+ note(The result.) ab
+ end function bar
+ end interface
+end module foobar
+
+! This file was auto-generated with f2py (version:0.95).
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/doc/ex1/foobar.f90 b/numpy/f2py/doc/ex1/foobar.f90
new file mode 100644
index 000000000..53ac5b506
--- /dev/null
+++ b/numpy/f2py/doc/ex1/foobar.f90
@@ -0,0 +1,16 @@
+!%f90
+module foobar ! in
+ interface ! in :foobar
+ subroutine foo(a) ! in :foobar:foo.f
+ integer intent(inout) :: a
+ end subroutine foo
+ function bar(a,b) ! in :foobar:bar.f
+ integer :: a
+ integer :: b
+ integer :: bar
+ end function bar
+ end interface
+end module foobar
+
+! This file was auto-generated with f2py (version:0.95).
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/doc/ex1/foobarmodule.tex b/numpy/f2py/doc/ex1/foobarmodule.tex
new file mode 100644
index 000000000..32411ec03
--- /dev/null
+++ b/numpy/f2py/doc/ex1/foobarmodule.tex
@@ -0,0 +1,36 @@
+% This file is auto-generated with f2py (version:2.266)
+\section{Module \texttt{foobar}}
+
+This module contains two examples that are used in \texttt{f2py} documentation.
+
+\subsection{Wrapper function \texttt{foo}}
+
+
+\noindent{{}\verb@foo@{}}\texttt{(a)}
+--- Example of a wrapper function of a Fortran subroutine.
+
+\noindent Required arguments:
+\begin{description}
+\item[]{{}\verb@a : in/output rank-0 array(int,'i')@{}}
+--- 5 is added to the variable {{}\verb@a@{}} ``in place''.
+\end{description}
+
+\subsection{Wrapper function \texttt{bar}}
+
+
+\noindent{{}\verb@bar = bar@{}}\texttt{(a, b)}
+--- Add two values.
+
+\noindent Required arguments:
+\begin{description}
+\item[]{{}\verb@a : input int@{}}
+--- The first value.
+\item[]{{}\verb@b : input int@{}}
+--- The second value.
+\end{description}
+\noindent Return objects:
+\begin{description}
+\item[]{{}\verb@bar : int@{}}
+--- See elsewhere.
+\end{description}
+
diff --git a/numpy/f2py/doc/ex1/runme b/numpy/f2py/doc/ex1/runme
new file mode 100755
index 000000000..2aac6158e
--- /dev/null
+++ b/numpy/f2py/doc/ex1/runme
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+f2py2e='python ../../f2py2e.py'
+PYINC=`$f2py2e -pyinc`
+$f2py2e foobar-smart.pyf --short-latex --overwrite-makefile -makefile foo.f bar.f
+gmake -f Makefile-foobar
+#gcc -O3 -I$PYINC -I$PYINC/Numeric -shared -o foobarmodule.so foobarmodule.c foo.f bar.f
+python -c '
+import foobar
+print foobar.__doc__
+print foobar.bar(2,3)
+from Numeric import *
+a=array(3)
+print a,foobar.foo(a),a
+print foobar.foo.__doc__
+print foobar.bar.__doc__
+print "ok"
+'
diff --git a/numpy/f2py/doc/f2py2e.tex b/numpy/f2py/doc/f2py2e.tex
new file mode 100644
index 000000000..6e3e9d68c
--- /dev/null
+++ b/numpy/f2py/doc/f2py2e.tex
@@ -0,0 +1,50 @@
+\documentclass{article}
+\usepackage{a4wide}
+
+\input commands
+
+\title{\fpy\\Fortran to Python Interface Generator\\{\large Second Edition}}
+\author{Pearu Peterson \texttt{<pearu@ioc.ee>}}
+\date{$Revision: 1.16 $\\\today}
+\begin{document}
+\special{html: <font size=-1>If equations does not show Greek letters or large
+ brackets correctly, then your browser configuration needs some
+ adjustment. Read the notes for <A
+ href=http://hutchinson.belmont.ma.us/tth/Xfonts.html>Enabling Symbol
+ Fonts in Netscape under X </A>. In addition, the browser must be set
+ to use document fonts. </font>
+}
+
+\maketitle
+\begin{abstract}
+ \fpy is a Python program that generates Python C/API modules for
+ wrapping Fortran~77/90/95 codes to Python. The user can influence the
+ process by modifying the signature files that \fpy generates when
+ scanning the Fortran codes. This document describes the syntax of
+ the signature files and the ways how the user can dictate the tool
+ to produce wrapper functions with desired Python signatures. Also
+ how to call the wrapper functions from Python is discussed.
+
+ See \texttt{http://cens.ioc.ee/projects/f2py2e/} for updates of this
+ document and the tool.
+\end{abstract}
+
+\tableofcontents
+
+\input intro
+\input signaturefile
+\input notes
+\input options
+\input bugs
+
+\appendix
+\input ex1/foobarmodule
+\input apps
+\end{document}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: t
+%%% End:
+
+
diff --git a/numpy/f2py/doc/f2python9-final/README.txt b/numpy/f2py/doc/f2python9-final/README.txt
new file mode 100644
index 000000000..b907216b6
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/README.txt
@@ -0,0 +1,38 @@
+
+This directory contains the source of the paper
+
+ "Fortran to Python Interface Generator with an Application
+ to Aerospace Engineering"
+
+by
+ Pearu Peterson <pearu@cens.ioc.ee> (the corresponding author)
+ Joaquim R. R. A. Martins <joaquim.martins@stanford.edu>
+ Juan J. Alonso <jjalonso@stanford.edu>
+
+for The 9th International Python Conference, March 5-8, 2001, Long Beach, California.
+
+The paper provided here is in the HTML format:
+
+ f2python9.html (size=48151 bytes)
+
+Note that this file includes the following JPG images
+
+ flow.jpg (size=13266)
+ structure.jpg (size=17860)
+ aerostructure.jpg (size=72247)
+
+PS:
+The HTML file f2python9.html is generated using TTH (http://hutchinson.belmont.ma.us/tth/)
+from the LaTeX source file `python9.tex'. The source can be found in the
+ src/
+directory. This directory contains also the following EPS files
+ flow.eps
+ structure.eps
+ aerostructure.eps
+and the text files
+ examples/{exp1.f,exp1mess.txt,exp1session.txt,foo.pyf,foom.pyf}
+that are used by the LaTeX source python9.tex.
+
+Regards,
+ Pearu
+January 15, 2001
diff --git a/numpy/f2py/doc/f2python9-final/aerostructure.jpg b/numpy/f2py/doc/f2python9-final/aerostructure.jpg
new file mode 100644
index 000000000..896ad6e12
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/aerostructure.jpg
Binary files differ
diff --git a/numpy/f2py/doc/f2python9-final/flow.jpg b/numpy/f2py/doc/f2python9-final/flow.jpg
new file mode 100644
index 000000000..cfe0f85f3
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/flow.jpg
Binary files differ
diff --git a/numpy/f2py/doc/f2python9-final/mk_html.sh b/numpy/f2py/doc/f2python9-final/mk_html.sh
new file mode 100755
index 000000000..944110e93
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/mk_html.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+cd src
+
+test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps
+test -f flow.eps || convert ../flow.jpg flow.eps
+test -f structure.eps || convert ../structure.jpg structure.eps
+
+latex python9.tex
+latex python9.tex
+latex python9.tex
+
+test `which tth` && cat python9.tex | sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | tth -Lpython9 -i > ../f2python9.html
+cd ..
diff --git a/numpy/f2py/doc/f2python9-final/mk_pdf.sh b/numpy/f2py/doc/f2python9-final/mk_pdf.sh
new file mode 100755
index 000000000..b773028b7
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/mk_pdf.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+cd src
+
+test -f aerostructure.pdf || convert ../aerostructure.jpg aerostructure.pdf
+test -f flow.pdf || convert ../flow.jpg flow.pdf
+test -f structure.pdf || convert ../structure.jpg structure.pdf
+
+cat python9.tex | sed -e "s/eps,/pdf,/g" > python9pdf.tex
+pdflatex python9pdf.tex
+pdflatex python9pdf.tex
+pdflatex python9pdf.tex
+
+mv python9pdf.pdf ../f2python9.pdf \ No newline at end of file
diff --git a/numpy/f2py/doc/f2python9-final/mk_ps.sh b/numpy/f2py/doc/f2python9-final/mk_ps.sh
new file mode 100755
index 000000000..4b0863fcd
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/mk_ps.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+cd src
+
+test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps
+test -f flow.eps || convert ../flow.jpg flow.eps
+test -f structure.eps || convert ../structure.jpg structure.eps
+
+latex python9.tex
+latex python9.tex
+latex python9.tex
+
+dvips python9.dvi -o ../f2python9.ps
+cd ..
+gzip -f f2python9.ps
diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1.f b/numpy/f2py/doc/f2python9-final/src/examples/exp1.f
new file mode 100644
index 000000000..36bee50b0
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/src/examples/exp1.f
@@ -0,0 +1,26 @@
+ subroutine exp1(l,u,n)
+C Input: n is number of iterations
+C Output: l,u are such that
+C l(1)/l(2) < exp(1) < u(1)/u(2)
+C
+Cf2py integer*4 :: n = 1
+Cf2py intent(out) l,u
+ integer*4 n,i
+ real*8 l(2),u(2),t,t1,t2,t3,t4
+ l(2) = 1
+ l(1) = 0
+ u(2) = 0
+ u(1) = 1
+ do 10 i=0,n
+ t1 = 4 + 32*(1+i)*i
+ t2 = 11 + (40+32*i)*i
+ t3 = 3 + (24+32*i)*i
+ t4 = 8 + 32*(1+i)*i
+ t = u(1)
+ u(1) = l(1)*t1 + t*t2
+ l(1) = l(1)*t3 + t*t4
+ t = u(2)
+ u(2) = l(2)*t1 + t*t2
+ l(2) = l(2)*t3 + t*t4
+ 10 continue
+ end
diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt b/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt
new file mode 100644
index 000000000..ae1545718
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt
@@ -0,0 +1,17 @@
+Reading fortran codes...
+ Reading file 'exp1.f'
+Post-processing...
+ Block: foo
+ Block: exp1
+Creating 'Makefile-foo'...
+ Linker: ld ('GNU ld' 2.9.5)
+ Fortran compiler: f77 ('g77 2.x.x' 2.95.2)
+ C compiler: cc ('gcc 2.x.x' 2.95.2)
+Building modules...
+ Building module "foo"...
+ Constructing wrapper function "exp1"...
+ l,u = exp1([n])
+ Wrote C/API module "foo" to file "foomodule.c"
+ Documentation is saved to file "foomodule.tex"
+Run GNU make to build shared modules:
+ gmake -f Makefile-<modulename> [test]
diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt b/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt
new file mode 100644
index 000000000..9bec9198e
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt
@@ -0,0 +1,20 @@
+>>> import foo,Numeric
+>>> print foo.exp1.__doc__
+exp1 - Function signature:
+ l,u = exp1([n])
+Optional arguments:
+ n := 1 input int
+Return objects:
+ l : rank-1 array('d') with bounds (2)
+ u : rank-1 array('d') with bounds (2)
+
+>>> l,u = foo.exp1()
+>>> print l,u
+[ 1264. 465.] [ 1457. 536.]
+>>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1]
+2.71827956989 2.25856657199e-06
+>>> l,u = foo.exp1(2)
+>>> print l,u
+[ 517656. 190435.] [ 566827. 208524.]
+>>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1]
+2.71828182845 1.36437527942e-11 \ No newline at end of file
diff --git a/numpy/f2py/doc/f2python9-final/src/examples/foo.pyf b/numpy/f2py/doc/f2python9-final/src/examples/foo.pyf
new file mode 100644
index 000000000..516bb292f
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/src/examples/foo.pyf
@@ -0,0 +1,13 @@
+!%f90 -*- f90 -*-
+python module foo
+ interface
+ subroutine exp1(l,u,n)
+ real*8 dimension(2) :: l
+ real*8 dimension(2) :: u
+ integer*4 :: n
+ end subroutine exp1
+ end interface
+end python module foo
+! This file was auto-generated with f2py
+! (version:2.298).
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/doc/f2python9-final/src/examples/foom.pyf b/numpy/f2py/doc/f2python9-final/src/examples/foom.pyf
new file mode 100644
index 000000000..6392ebc95
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/src/examples/foom.pyf
@@ -0,0 +1,14 @@
+!%f90 -*- f90 -*-
+python module foo
+ interface
+ subroutine exp1(l,u,n)
+ real*8 dimension(2) :: l
+ real*8 dimension(2) :: u
+ intent(out) l,u
+ integer*4 optional :: n = 1
+ end subroutine exp1
+ end interface
+end python module foo
+! This file was auto-generated with f2py
+! (version:2.298) and modified by pearu.
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/doc/f2python9-final/structure.jpg b/numpy/f2py/doc/f2python9-final/structure.jpg
new file mode 100644
index 000000000..9aa691339
--- /dev/null
+++ b/numpy/f2py/doc/f2python9-final/structure.jpg
Binary files differ
diff --git a/numpy/f2py/doc/fortranobject.tex b/numpy/f2py/doc/fortranobject.tex
new file mode 100644
index 000000000..dbb244cdd
--- /dev/null
+++ b/numpy/f2py/doc/fortranobject.tex
@@ -0,0 +1,574 @@
+\documentclass{article}
+
+\headsep=0pt
+\topmargin=0pt
+\headheight=0pt
+\oddsidemargin=0pt
+\textwidth=6.5in
+\textheight=9in
+
+\usepackage{xspace}
+\usepackage{verbatim}
+\newcommand{\fpy}{\texttt{f2py}\xspace}
+\newcommand{\bs}{\symbol{`\\}}
+\newcommand{\email}[1]{\special{html:<A href="mailto:#1">}\texttt{<#1>}\special{html:</A>}}
+\title{\texttt{PyFortranObject} --- example usages}
+\author{
+\large Pearu Peterson\\
+\small \email{pearu@cens.ioc.ee}
+}
+
+\begin{document}
+
+\maketitle
+
+\special{html: Other formats of this document:
+<A href=pyfobj.ps.gz>Gzipped PS</A>,
+<A href=pyfobj.pdf>PDF</A>
+}
+
+\tableofcontents
+
+\section{Introduction}
+\label{sec:intro}
+
+Fortran language defines the following concepts that we would like to
+access from Python: functions, subroutines, data in \texttt{COMMON} blocks,
+F90 module functions and subroutines, F90 module data (both static and
+allocatable arrays).
+
+In the following we shall assume that we know the signatures (full
+specifications of routine arguments and variables) of these concepts
+from their Fortran source codes. Now, in order to call or use them
+from C, one needs to have pointers to the corresponding objects. The
+pointers to Fortran 77 objects (routines, data in \texttt{COMMON}
+blocks) are readily available to C codes (there are various sources
+available about mixing Fortran 77 and C codes). On the other hand, F90
+module specifications are highly compiler dependent and sometimes it
+is not even possible to access F90 module objects from C (at least,
+not directly, see remark about MIPSPro 7 Compilers). But using some
+tricks (described below), the pointers to F90 module objects can be
+determined in runtime providing a compiler independent solution.
+
+To use Fortran objects from Python in unified manner, \fpy introduces
+\texttt{PyFortranObject} to hold pointers of the Fortran objects and
+the corresponding wrapper functions. In fact, \texttt{PyFortranObject}
+does much more: it generates documentation strings in run-time (for
+items in \texttt{COMMON} blocks and data in F90 modules), provides
+methods for accessing Fortran data and for calling Fortran routines,
+etc.
+
+\section{\texttt{PyFortranObject}}
+\label{sec:pyfortobj}
+
+\texttt{PyFortranObject} is defined as follows
+\begin{verbatim}
+typedef struct {
+ PyObject_HEAD
+ int len; /* Number of attributes */
+ FortranDataDef *defs; /* An array of FortranDataDef's */
+ PyObject *dict; /* Fortran object attribute dictionary */
+} PyFortranObject;
+\end{verbatim}
+where \texttt{FortranDataDef} is
+\begin{verbatim}
+typedef struct {
+ char *name; /* attribute (array||routine) name */
+ int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS,
+ || rank=-1 for Fortran routine */
+ struct {int d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */
+ int type; /* PyArray_<type> || not used */
+ char *data; /* pointer to array || Fortran routine */
+ void (*func)(); /* initialization function for
+ allocatable arrays:
+ func(&rank,dims,set_ptr_func,name,len(name))
+ || C/API wrapper for Fortran routine */
+ char *doc; /* documentation string; only recommended
+ for routines. */
+} FortranDataDef;
+\end{verbatim}
+In the following we demonstrate typical usages of
+\texttt{PyFortranObject}. Just relevant code fragments will be given.
+
+
+\section{Fortran 77 subroutine}
+\label{sec:f77subrout}
+
+Consider Fortran 77 subroutine
+\begin{verbatim}
+subroutine bar()
+end
+\end{verbatim}
+The corresponding \texttt{PyFortranObject} is defined in C as follows:
+\begin{verbatim}
+static char doc_bar[] = "bar()";
+static PyObject *c_bar(PyObject *self, PyObject *args,
+ PyObject *keywds, void (*f2py_func)()) {
+ static char *capi_kwlist[] = {NULL};
+ if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist))
+ return NULL;
+ (*f2py_func)();
+ return Py_BuildValue("");
+}
+extern void F_FUNC(bar,BAR)();
+static FortranDataDef f2py_routines_def[] = {
+ {"bar",-1, {-1}, 0, (char *)F_FUNC(bar,BAR),(void*)c_bar,doc_bar},
+ {NULL}
+};
+void initfoo() {
+ <snip>
+ d = PyModule_GetDict(m);
+ PyDict_SetItemString(d, f2py_routines_def[0].name,
+ PyFortranObject_NewAsAttr(&f2py_routines_def[0]));
+}
+\end{verbatim}
+where CPP macro \texttt{F\_FUNC} defines how Fortran 77 routines are
+seen in C.
+In Python, Fortran subroutine \texttt{bar} is called as follows
+\begin{verbatim}
+>>> import foo
+>>> foo.bar()
+\end{verbatim}
+
+\section{Fortran 77 function}
+\label{sec:f77func}
+Consider Fortran 77 function
+\begin{verbatim}
+function bar()
+complex bar
+end
+\end{verbatim}
+The corresponding \texttt{PyFortranObject} is defined in C as in
+previous example but with the following changes:
+\begin{verbatim}
+static char doc_bar[] = "bar = bar()";
+static PyObject *c_bar(PyObject *self, PyObject *args,
+ PyObject *keywds, void (*f2py_func)()) {
+ complex_float bar;
+ static char *capi_kwlist[] = {NULL};
+ if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist))
+ return NULL;
+ (*f2py_func)(&bar);
+ return Py_BuildValue("O",pyobj_from_complex_float1(bar));
+}
+extern void F_WRAPPEDFUNC(bar,BAR)();
+static FortranDataDef f2py_routines_def[] = {
+ {"bar",-1,{-1},0,(char *)F_WRAPPEDFUNC(bar,BAR),(void *)c_bar,doc_bar},
+ {NULL}
+};
+\end{verbatim}
+where CPP macro \texttt{F\_WRAPPEDFUNC} gives the pointer to the following
+Fortran 77 subroutine:
+\begin{verbatim}
+subroutine f2pywrapbar (barf2pywrap)
+external bar
+complex bar, barf2pywrap
+barf2pywrap = bar()
+end
+\end{verbatim}
+With these hooks, calling Fortran functions returning composed types
+becomes platform/compiler independent.
+
+
+\section{\texttt{COMMON} block data}
+\label{sec:commondata}
+
+Consider Fortran 77 \texttt{COMMON} block
+\begin{verbatim}
+integer i
+COMMON /bar/ i
+\end{verbatim}
+In order to access the variable \texttt{i} from Python,
+\texttt{PyFortranObject} is defined as follows:
+\begin{verbatim}
+static FortranDataDef f2py_bar_def[] = {
+ {"i",0,{-1},PyArray_INT},
+ {NULL}
+};
+static void f2py_setup_bar(char *i) {
+ f2py_bar_def[0].data = i;
+}
+extern void F_FUNC(f2pyinitbar,F2PYINITBAR)();
+static void f2py_init_bar() {
+ F_FUNC(f2pyinitbar,F2PYINITBAR)(f2py_setup_bar);
+}
+void initfoo() {
+ <snip>
+ PyDict_SetItemString(d, "bar", PyFortranObject_New(f2py_bar_def,f2py_init_bar));
+}
+\end{verbatim}
+where auxiliary Fortran function \texttt{f2pyinitbar} is defined as follows
+\begin{verbatim}
+subroutine f2pyinitbar(setupfunc)
+external setupfunc
+integer i
+common /bar/ i
+call setupfunc(i)
+end
+\end{verbatim}
+and it is called in \texttt{PyFortranObject\_New}.
+
+
+\section{Fortran 90 module subroutine}
+\label{sec:f90modsubrout}
+
+Consider
+\begin{verbatim}
+module fun
+ subroutine bar()
+ end subroutine bar
+end module fun
+\end{verbatim}
+\texttt{PyFortranObject} is defined as follows
+\begin{verbatim}
+static char doc_fun_bar[] = "fun.bar()";
+static PyObject *c_fun_bar(PyObject *self, PyObject *args,
+ PyObject *keywds, void (*f2py_func)()) {
+ static char *kwlist[] = {NULL};
+ if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist))
+ return NULL;
+ (*f2py_func)();
+ return Py_BuildValue("");
+}
+static FortranDataDef f2py_fun_def[] = {
+ {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar},
+ {NULL}
+};
+static void f2py_setup_fun(char *bar) {
+ f2py_fun_def[0].data = bar;
+}
+extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
+static void f2py_init_fun() {
+ F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
+}
+void initfoo () {
+ <snip>
+ PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun));
+}
+\end{verbatim}
+where auxiliary Fortran function \texttt{f2pyinitfun} is defined as
+follows
+\begin{verbatim}
+subroutine f2pyinitfun(f2pysetupfunc)
+use fun
+external f2pysetupfunc
+call f2pysetupfunc(bar)
+end subroutine f2pyinitfun
+\end{verbatim}
+The following Python session demonstrates how to call Fortran 90
+module function \texttt{bar}:
+\begin{verbatim}
+>>> import foo
+>>> foo.fun.bar()
+\end{verbatim}
+
+\section{Fortran 90 module function}
+\label{sec:f90modfunc}
+
+Consider
+\begin{verbatim}
+module fun
+ function bar()
+ complex bar
+  end function bar
+end module fun
+\end{verbatim}
+\texttt{PyFortranObject} is defined as follows
+\begin{verbatim}
+static char doc_fun_bar[] = "bar = fun.bar()";
+static PyObject *c_fun_bar(PyObject *self, PyObject *args,
+ PyObject *keywds, void (*f2py_func)()) {
+ complex_float bar;
+ static char *kwlist[] = {NULL};
+ if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist))
+ return NULL;
+ (*f2py_func)(&bar);
+ return Py_BuildValue("O",pyobj_from_complex_float1(bar));
+}
+static FortranDataDef f2py_fun_def[] = {
+ {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar},
+ {NULL}
+};
+static void f2py_setup_fun(char *bar) {
+ f2py_fun_def[0].data = bar;
+}
+extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
+static void f2py_init_fun() {
+ F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
+}
+void initfoo() {
+ <snip>
+ PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun));
+}
+\end{verbatim}
+where
+\begin{verbatim}
+subroutine f2pywrap_fun_bar (barf2pywrap)
+use fun
+complex barf2pywrap
+barf2pywrap = bar()
+end
+
+subroutine f2pyinitfun(f2pysetupfunc)
+external f2pysetupfunc,f2pywrap_fun_bar
+call f2pysetupfunc(f2pywrap_fun_bar)
+end
+\end{verbatim}
+
+
+\section{Fortran 90 module data}
+\label{sec:f90moddata}
+
+Consider
+\begin{verbatim}
+module fun
+ integer i
+end module fun
+\end{verbatim}
+Then
+\begin{verbatim}
+static FortranDataDef f2py_fun_def[] = {
+ {"i",0,{-1},PyArray_INT},
+ {NULL}
+};
+static void f2py_setup_fun(char *i) {
+ f2py_fun_def[0].data = i;
+}
+extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
+static void f2py_init_fun() {
+ F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
+}
+void initfoo () {
+ <snip>
+ PyDict_SetItemString(d, "fun",
+ PyFortranObject_New(f2py_fun_def,f2py_init_fun));
+}
+\end{verbatim}
+where
+\begin{verbatim}
+subroutine f2pyinitfun(f2pysetupfunc)
+use fun
+external f2pysetupfunc
+call f2pysetupfunc(i)
+end subroutine f2pyinitfun
+\end{verbatim}
+Example usage in Python:
+\begin{verbatim}
+>>> import foo
+>>> foo.fun.i = 4
+\end{verbatim}
+
+\section{Fortran 90 module allocatable array}
+\label{sec:f90modallocarr}
+
+Consider
+\begin{verbatim}
+module fun
+ real, allocatable :: r(:)
+end module fun
+\end{verbatim}
+Then
+\begin{verbatim}
+static FortranDataDef f2py_fun_def[] = {
+ {"r",1,{-1},PyArray_FLOAT},
+ {NULL}
+};
+static void f2py_setup_fun(void (*r)()) {
+ f2py_fun_def[0].func = r;
+}
+extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
+static void f2py_init_fun() {
+ F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
+}
+void initfoo () {
+ <snip>
+ PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun));
+}
+\end{verbatim}
+where
+\begin{verbatim}
+subroutine f2py_fun_getdims_r(r,s,f2pysetdata)
+use fun, only: d => r
+external f2pysetdata
+logical ns
+integer s(*),r,i,j
+ns = .FALSE.
+if (allocated(d)) then
+ do i=1,r
+ if ((size(d,r-i+1).ne.s(i)).and.(s(i).ge.0)) then
+ ns = .TRUE.
+ end if
+ end do
+ if (ns) then
+ deallocate(d)
+ end if
+end if
+if ((.not.allocated(d)).and.(s(1).ge.1)) then
+ allocate(d(s(1)))
+end if
+if (allocated(d)) then
+ do i=1,r
+ s(i) = size(d,r-i+1)
+ end do
+end if
+call f2pysetdata(d,allocated(d))
+end subroutine f2py_fun_getdims_r
+
+subroutine f2pyinitfun(f2pysetupfunc)
+use fun
+external f2pysetupfunc,f2py_fun_getdims_r
+call f2pysetupfunc(f2py_fun_getdims_r)
+end subroutine f2pyinitfun
+\end{verbatim}
+Usage in Python:
+\begin{verbatim}
+>>> import foo
+>>> foo.fun.r = [1,2,3,4]
+\end{verbatim}
+
+\section{Callback subroutine}
+\label{sec:cbsubr}
+
+Thanks to Travis Oliphant for working out the basic idea of the
+following callback mechanism.
+
+Consider
+\begin{verbatim}
+subroutine fun(bar)
+external bar
+call bar(1)
+end
+\end{verbatim}
+Then
+\begin{verbatim}
+static char doc_foo8_fun[] = "
+Function signature:
+ fun(bar,[bar_extra_args])
+Required arguments:
+ bar : call-back function
+Optional arguments:
+ bar_extra_args := () input tuple
+Call-back functions:
+ def bar(e_1_e): return
+ Required arguments:
+ e_1_e : input int";
+static PyObject *foo8_fun(PyObject *capi_self, PyObject *capi_args,
+ PyObject *capi_keywds, void (*f2py_func)()) {
+ PyObject *capi_buildvalue = NULL;
+ PyObject *bar_capi = Py_None;
+ PyTupleObject *bar_xa_capi = NULL;
+ PyTupleObject *bar_args_capi = NULL;
+ jmp_buf bar_jmpbuf;
+ int bar_jmpbuf_flag = 0;
+ int bar_nofargs_capi = 0;
+ static char *capi_kwlist[] = {"bar","bar_extra_args",NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\
+ "O!|O!:foo8.fun",\
+ capi_kwlist,&PyFunction_Type,&bar_capi,&PyTuple_Type,&bar_xa_capi))
+ goto capi_fail;
+
+ bar_nofargs_capi = cb_bar_in_fun__user__routines_nofargs;
+ if (create_cb_arglist(bar_capi,bar_xa_capi,1,0,
+ &cb_bar_in_fun__user__routines_nofargs,&bar_args_capi)) {
+ if ((PyErr_Occurred())==NULL)
+ PyErr_SetString(foo8_error,"failed in processing argument list for call-back bar." );
+ goto capi_fail;
+ }
+
+ SWAP(bar_capi,cb_bar_in_fun__user__routines_capi,PyObject);
+ SWAP(bar_args_capi,cb_bar_in_fun__user__routines_args_capi,PyTupleObject);
+ memcpy(&bar_jmpbuf,&cb_bar_in_fun__user__routines_jmpbuf,sizeof(jmp_buf));
+ bar_jmpbuf_flag = 1;
+
+ if ((setjmp(cb_bar_in_fun__user__routines_jmpbuf))) {
+ if ((PyErr_Occurred())==NULL)
+ PyErr_SetString(foo8_error,"Failure of a callback function");
+ goto capi_fail;
+ } else
+ (*f2py_func)(cb_bar_in_fun__user__routines);
+
+ capi_buildvalue = Py_BuildValue("");
+capi_fail:
+
+ if (bar_jmpbuf_flag) {
+ cb_bar_in_fun__user__routines_capi = bar_capi;
+ Py_DECREF(cb_bar_in_fun__user__routines_args_capi);
+ cb_bar_in_fun__user__routines_args_capi = bar_args_capi;
+ cb_bar_in_fun__user__routines_nofargs = bar_nofargs_capi;
+ memcpy(&cb_bar_in_fun__user__routines_jmpbuf,&bar_jmpbuf,sizeof(jmp_buf));
+ bar_jmpbuf_flag = 0;
+ }
+ return capi_buildvalue;
+}
+extern void F_FUNC(fun,FUN)();
+static FortranDataDef f2py_routine_defs[] = {
+ {"fun",-1,{-1},0,(char *)F_FUNC(fun,FUN),(void *)foo8_fun,doc_foo8_fun},
+ {NULL}
+};
+void initfoo8 () {
+ <snip>
+ PyDict_SetItemString(d, f2py_routine_defs[0].name,
+ PyFortranObject_NewAsAttr(&f2py_routine_defs[0]));
+}
+\end{verbatim}
+where
+\begin{verbatim}
+PyObject *cb_bar_in_fun__user__routines_capi = Py_None;
+PyTupleObject *cb_bar_in_fun__user__routines_args_capi = NULL;
+int cb_bar_in_fun__user__routines_nofargs = 0;
+jmp_buf cb_bar_in_fun__user__routines_jmpbuf;
+static void cb_bar_in_fun__user__routines (int *e_1_e_cb_capi) {
+ PyTupleObject *capi_arglist = cb_bar_in_fun__user__routines_args_capi;
+ PyObject *capi_return = NULL;
+ PyObject *capi_tmp = NULL;
+ int capi_j,capi_i = 0;
+
+ int e_1_e=(*e_1_e_cb_capi);
+ if (capi_arglist == NULL)
+ goto capi_fail;
+ if (cb_bar_in_fun__user__routines_nofargs>capi_i)
+ if (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_int1(e_1_e)))
+ goto capi_fail;
+
+ capi_return = PyEval_CallObject(cb_bar_in_fun__user__routines_capi,
+ (PyObject *)capi_arglist);
+
+ if (capi_return == NULL)
+ goto capi_fail;
+ if (capi_return == Py_None) {
+ Py_DECREF(capi_return);
+ capi_return = Py_BuildValue("()");
+ }
+ else if (!PyTuple_Check(capi_return)) {
+ capi_tmp = capi_return;
+ capi_return = Py_BuildValue("(O)",capi_tmp);
+ Py_DECREF(capi_tmp);
+ }
+ capi_j = PyTuple_Size(capi_return);
+ capi_i = 0;
+ goto capi_return_pt;
+capi_fail:
+ fprintf(stderr,"Call-back cb_bar_in_fun__user__routines failed.\n");
+ Py_XDECREF(capi_return);
+ longjmp(cb_bar_in_fun__user__routines_jmpbuf,-1);
+capi_return_pt:
+ ;
+}
+\end{verbatim}
+Usage in Python:
+\begin{verbatim}
+>>> import foo8 as foo
+>>> def bar(i): print 'In bar i=',i
+...
+>>> foo.fun(bar)
+In bar i= 1
+\end{verbatim}
+
+\end{document}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: t
+%%% End:
diff --git a/numpy/f2py/doc/index.html b/numpy/f2py/doc/index.html
new file mode 100644
index 000000000..abddd7d43
--- /dev/null
+++ b/numpy/f2py/doc/index.html
@@ -0,0 +1,265 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
+<HTML>
+<HEAD>
+<META name="Author" content="Pearu Peterson">
+<!-- You may add here some keywords (comma separated list) -->
+<META name="Keywords" content="fortran,python,interface,f2py,f2py2e,wrapper,fpig">
+<TITLE>F2PY - Fortran to Python Interface Generator</TITLE>
+<LINK rel="stylesheet" type="text/css" href="/styles/userstyle.css">
+</HEAD>
+
+<BODY>
+<!-- Begin of user text -->
+<H1>F2PY &shy; Fortran to Python Interface Generator</H1>
+by <em>Pearu Peterson</em>
+
+<h2>What's new?</h2>
+
+See <a href="NEWS.txt">NEWS.txt</a> for the latest changes in <code>f2py</code>.
+<dl>
+ <dt> July ??, 2002
+ <dd> Implemented prototype calculator, complete tests for scalar F77
+ functions, --help-compiler option. Fixed a number of bugs and
+ removed obsolete features.
+ <dt> April 4, 2002
+ <dd> Fixed a nasty bug of copying one-dimensional non-contiguous arrays.
+ (Thanks to Travis O. for pointing this out).
+ <dt> March 26, 2002
+ <dd> Bug fixes, turned off F2PY_REPORT_ATEXIT by default.
+ <dt> March 13, 2002
+ <dd> MAC support, fixed incomplete dependency calculator, minor bug fixes.
+ <dt> March 3, 2002
+ <dd> Fixed memory leak and copying of multi-dimensional complex arrays.
+ <dt> <a href="oldnews.html">Old news</a>.
+</dl>
+
+<h2>Introduction</h2>
+
+Writing Python C/API wrappers for Fortran routines can be a very
+tedious task, especially if a Fortran routine takes more than 20
+arguments but only a few of them are relevant for the problems that they
+solve. So, I have developed a tool that generates the C/API modules
+containing wrapper functions of Fortran routines. I call this
+tool <em>F2PY &shy; Fortran to Python Interface Generator</em>.
+It is completely written in <a href="http://www.python.org">Python</a>
+language and can be called from the command line as <code>f2py</code>.
+<em>F2PY</em> is released under the terms of <a
+href="http://www.fsf.org/copyleft/lesser.html">GNU LGPL</a>.
+
+
+<h2><code>f2py</code>, Second Edition</h2>
+
+The development of <code>f2py</code> started in summer of 1999.
+For now (January, 2000) it has reached the stage of being a
+complete tool: it scans real Fortran code, creates signature file
+that the user can modify, constructs C/API module that can be
+complied and imported to Python, and it creates LaTeX documentation
+for wrapper functions. Below is a bit longer list of
+<code>f2py</code> features:
+<ol>
+ <li> <code>f2py</code> scans real Fortran codes and produces the signature files.
+ The syntax of the signature files is borrowed from the Fortran 90/95
+ language specification with some extensions.
+ <li> <code>f2py</code> generates a GNU Makefile that can be used
+ for building shared modules (see below for a list of supported
+ platforms/compilers). Starting from the third release,
+ <code>f2py</code> generates <code>setup_modulename.py</code> for
+ building extension modules using <code>distutils</code> tools.
+ <li> <code>f2py</code> uses the signature files to produce the wrappers for
+ Fortran 77 routines and their <code>COMMON</code> blocks.
+ <li> For <code>external</code> arguments <code>f2py</code> constructs a very flexible
+ call-back mechanism so that Python functions can be called from
+ Fortran.
+ <li> You can pass in almost arbitrary Python objects to wrapper
+ functions. If needed, <code>f2py</code> takes care of type-casting and
+ non-contiguous arrays.
+ <li> You can modify the signature files so that <code>f2py</code> will generate
+ wrapper functions with desired signatures. <code>depend()</code>
+ attribute is introduced to control the initialization order of the
+ variables. <code>f2py</code> introduces <code>intent(hide)</code>
+ attribute to remove
+ the particular argument from the argument list of the wrapper
+ function and <code>intent(c)</code> that is useful for wrapping C
+libraries. In addition, <code>optional</code> and
+<code>required</code>
+ attributes are introduced and employed.
+ <li> <code>f2py</code> supports almost all standard Fortran 77/90/95 constructs
+ and understands all basic Fortran types, including
+ (multi-dimensional, complex) arrays and character strings with
+ adjustable and assumed sizes/lengths.
+ <li> <code>f2py</code> generates a LaTeX document containing the
+ documentations of the wrapped functions (argument types, dimensions,
+ etc). The user can easily add some human readable text to the
+ documentation by inserting <code>note(&lt;LaTeX text&gt;)</code> attribute to
+ the definition of routine signatures.
+ <li> With <code>f2py</code> one can access also Fortran 90/95
+ module subroutines from Python.
+</ol>
+
+For more information, see the <a href="usersguide.html">User's
+Guide</a> of the tool. Windows users should also take a look at
+<a href="win32_notes.txt">f2py HOWTO for Win32</a> (its latest version
+can be found <a
+href="http://www.scipy.org/Members/eric/f2py_win32">here</a>).
+
+<h3>Requirements</h3>
+<ol>
+ <li> You'll need <a
+ href="http://www.python.org/download/">Python</a>
+ (1.5.2 or later, 2.2 is recommended) to run <code>f2py</code>
+ (because it uses the module <code>re</code>).
+ To build generated extension modules with distutils setup script,
+ you'll need Python 2.x.
+ <li> You'll need <a
+ href="http://sourceforge.net/project/?group_id=1369">Numerical
+ Python</a>
+ (version 13 or later, 20.3 is recommended) to compile
+ C/API modules (because they use function
+ <code>PyArray_FromDimsAndDataAndDescr</code>)
+</ol>
+
+<h3>Download</h3>
+
+<dl>
+ <dt> User's Guide:
+ <dd> <a href="usersguide.html">usersguide.html</a>,
+ <a href="usersguide.pdf">usersguide.pdf</a>,
+ <a href="usersguide.ps.gz">usersguide.ps.gz</a>,
+ <a href="usersguide.dvi">usersguide.dvi</a>.
+ <dt> Snapshots of the fifth public release:
+ <dd> <a href="2.x">2.x</a>/<a href="2.x/F2PY-2-latest.tar.gz">F2PY-2-latest.tar.gz</a>
+ <dt> Snapshots of earlier releases:
+ <dd> <a href="rel-5.x">rel-5.x</a>, <a href="rel-4.x">rel-4.x</a>,
+ <a href="rel-3.x">rel-3.x</a>,
+ <a href="rel-2.x">rel-2.x</a>,<a href="rel-1.x">rel-1.x</a>,
+ <a href="rel-0.x">rel-0.x</a>
+</dl>
+
+<h3>Installation</h3>
+
+Unpack the source file, change to directory <code>f2py-?-???</code>
+and run <code>python setup.py install</code>. That's it!
+
+<h3>Platform/Compiler Related Notes</h3>
+
+<code>f2py</code> has been successfully tested on
+<ul>
+ <li> Intel Linux (MD7.0,RH6.1,RH4.2,Debian woody), Athlon Linux (RH6.1), Alpha Linux (RH5.2,RH6.1) with <a
+href="http://gcc.gnu.org/">gcc</a> (versions egcs-2.91.60,egcs-2.91.66, and 2.95.2).
+ <li> Intel Linux (MD7.0) with <a
+ href="http://www.psrv.com/index.html">Pacific-Sierra
+ Research</a> <a href="http://www.psrv.com/lnxf90.html">Personal
+ Linux VAST/f90 Fortran 90 compiler</a> (version V3.4N5).
+ <li> Intel Linux (RH6.1) with <a href="http://www.absoft.com/">Absoft F77/F90</a> compilers for Linux.
+ <li> IRIX64 with <a href="http://gcc.gnu.org/">gcc</a> (2.95.2) and <a
+href="http://www.sgi.com/developers/devtools/languages/mipspro.html">MIPSpro
+7 Compilers</a> (f77,f90,cc versions 7.30).
+ <li> Alpha Linux (RH5.2,RH6.1) with <a href="http://www.digital.com/fortran/linux/">Compaq Fortran </a> compiler (version V1.0-920).
+ <li> Linux with <a href="http://www.nag.co.uk/">NAGWare</a> Fortran
+ 95 compiler.
+ <li> <a href="http://developer.intel.com/software/products/compilers/f50/linux/">
+ Intel(R) Fortran Compiler for Linux</a>
+ <li> Windows 2000 with <a href="http://www.mingw.org">mingw32</a>.
+</ul>
+<code>f2py</code> will probably run on other UN*X systems as
+well. Additions to the list of platforms/compilers where
+<code>f2py</code> has been successfully used are most welcome.
+<P>
+<em>Note:</em>
+Using Compaq Fortran
+compiler on Alpha Linux is successful except when
+wrapping Fortran callback functions returning
+<code>COMPLEX</code>. This applies also for IRIX64.
+<P>
+<em>Note:</em>
+Fortran 90/95 module support is currently tested with Absoft F90, VAST/f90, Intel F90 compilers on Linux (MD7.0,Debian woody).
+
+
+<h3><a name="f2py-users">Mailing list</a></h3>
+
+There is a mailing list <a
+href="http://cens.ioc.ee/pipermail/f2py-users/">f2py-users</a>
+available for the users of the <code>f2py</code>
+program and it is open for discussion, questions, and answers. You can subscribe
+the list <a href="http://cens.ioc.ee/mailman/listinfo/f2py-users">here</a>.
+
+<h3><a href="http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/">CVS Repository</a></h3>
+
+<code>f2py</code> is being developed under <a href="http://www.sourcegear.com/CVS">CVS</a> and those who are
+interested in the really latest version of <code>f2py</code> (possibly
+unstable) can get it from the repository as follows:
+<ol>
+ <li> First you need to login (the password is <code>guest</code>):
+<pre>
+> cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login
+</pre>
+ <li> and then do the checkout:
+<pre>
+> cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e
+</pre>
+ <li> In the directory <code>f2py2e</code> you can get the updates by hitting
+<pre>
+> cvs -z6 update -P -d
+</pre>
+</ol>
+You can browse <code>f2py</code> CVS repository <a href="http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/">here</a>.
+
+<h2>Related sites</h2>
+
+<ol>
+ <li> <a href="http://pfdubois.com/numpy/" target="_top">Numerical Python</a>.
+ <li> <a href="http://pyfortran.sourceforge.net/" target="_top">Pyfort</a> -- The Python-Fortran connection tool.
+ <li> <a href="http://starship.python.net/crew/hinsen/scientific.html" target="_top">Scientific Python</a>.
+ <li> <a href="http://scipy.org/" target="_top">SciPy</a> -- Scientific tools for Python (includes Multipack).
+ <li> <a href="http://www.fortran.com/fortran/" target="_top">The Fortran Company</a>.
+ <li> <a href="http://www.j3-fortran.org/" target="_top">Fortran Standards</a>.
+
+ <li> <a href="http://www.fortran.com/fortran/F77_std/rjcnf.html">American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978</a>
+ <li> <a href="http://www.mathtools.net" target="_top">Mathtools.net</a> -- A technical computing portal for all scientific and engineering needs.
+
+</ol>
+
+<!-- End of user text -->
+<HR>
+<ADDRESS>
+<A href="http://validator.w3.org/"><IMG border=0 align=right src="/icons/vh40.gif" alt="Valid HTML 4.0!" height=31 width=88></A>
+<A href="http://cens.ioc.ee/~pearu/" target="_top">Pearu Peterson</A>
+<A href="mailto:pearu(at)ioc.ee">&lt;pearu(at)ioc.ee&gt;</A><BR>
+<!-- hhmts start -->
+Last modified: Mon Dec 3 19:40:26 EET 2001
+<!-- hhmts end -->
+</ADDRESS>
+<!-- You may want to comment the following line out when the document is final-->
+<!-- Check that the reference is right -->
+<!--A href="http://validator.w3.org/check?uri=http://cens.ioc.ee/projects/f2py2e/index.html;ss"> Submit this page for validation</A-->
+
+<p>
+<center>
+This <a href="http://www.ctv.es/USERS/irmina/pythonring.html">Python
+ring</a> site owned by <a href="mailto:pearu(at)ioc.ee">Pearu Peterson</a>.
+<br>
+[
+ <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;prev5">Previous 5 Sites</a>
+|
+ <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;prev">Previous</a>
+|
+ <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;next">Next</a>
+|
+ <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;next5">Next 5 Sites</a>
+|
+ <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;random">Random Site</a>
+|
+ <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;list">List Sites</a>
+]
+</center>
+<p>
+
+
+
+</BODY>
+
+
+</HTML>
+
+
+
diff --git a/numpy/f2py/doc/intro.tex b/numpy/f2py/doc/intro.tex
new file mode 100644
index 000000000..d9625b09c
--- /dev/null
+++ b/numpy/f2py/doc/intro.tex
@@ -0,0 +1,158 @@
+
+\section{Introduction}
+\label{sec:intro}
+
+\fpy is a command line tool that generates Python C/API modules for
+interfacing Fortran~77/90/95 codes and Fortran~90/95 modules from
+Python. In general, using \fpy an
+interface is produced in three steps:
+\begin{itemize}
+\item[(i)] \fpy scans Fortran sources and creates the so-called
+ \emph{signature} file; the signature file contains the signatures of
+ Fortran routines; the signatures are given in the free format of the
+ Fortran~90/95 language specification. Latest version of \fpy
+ generates also a make file for building shared module.
+ About currently supported compilers see the \fpy home page
+\item[(ii)] Optionally, the signature files can be modified manually
+ in order to dictate how the Fortran routines should be called or
+  seen from the Python environment.
+\item[(iii)] \fpy reads the signature files and generates Python C/API
+ modules that can be compiled and imported to Python code. In
+ addition, a LaTeX document is generated that contains the
+ documentation of wrapped functions.
+\end{itemize}
+(Note that if you are satisfied with the default signature that \fpy
+generates in step (i), all three steps can be covered with just
+one call to \fpy --- by not specifying `\texttt{-h}' flag).
+Latest versions of \fpy support so-called \fpy directive that allows
+inserting various information about wrapping directly to Fortran
+source code as comments (\texttt{<comment char>f2py <signature statement>}).
+
+The following diagram illustrates the usage of the tool:
+\begin{verbatim}
+! Fortran file foo.f:
+ subroutine foo(a)
+ integer a
+ a = a + 5
+ end
+\end{verbatim}
+\begin{verbatim}
+! Fortran file bar.f:
+ function bar(a,b)
+ integer a,b,bar
+ bar = a + b
+ end
+\end{verbatim}
+\begin{itemize}
+\item[(i)] \shell{\fpy foo.f bar.f -m foobar -h foobar.pyf}
+\end{itemize}
+\begin{verbatim}
+!%f90
+! Signature file: foobar.pyf
+python module foobar ! in
+ interface ! in :foobar
+ subroutine foo(a) ! in :foobar:foo.f
+ integer intent(inout) :: a
+ end subroutine foo
+ function bar(a,b) ! in :foobar:bar.f
+ integer :: a
+ integer :: b
+ integer :: bar
+ end function bar
+ end interface
+end python module foobar
+\end{verbatim}
+\begin{itemize}
+\item[(ii)] Edit the signature file (here I made \texttt{foo}'s
+ argument \texttt{a} to be \texttt{intent(inout)}, see
+ Sec.~\ref{sec:attributes}).
+\item[(iii)] \shell{\fpy foobar.pyf}
+\end{itemize}
+\begin{verbatim}
+/* Python C/API module: foobarmodule.c */
+...
+\end{verbatim}
+\begin{itemize}
+\item[(iv)] \shell{make -f Makefile-foobar}
+%\shell{gcc -shared -I/usr/include/python1.5/ foobarmodule.c\bs\\
+%foo.f bar.f -o foobarmodule.so}
+\end{itemize}
+\begin{verbatim}
+Python shared module: foobarmodule.so
+\end{verbatim}
+\begin{itemize}
+\item[(v)] Usage in Python:
+\end{itemize}
+\vspace*{-4ex}
+\begin{verbatim}
+>>> import foobar
+>>> print foobar.__doc__
+This module 'foobar' is auto-generated with f2py (version:1.174).
+The following functions are available:
+ foo(a)
+ bar = bar(a,b)
+.
+>>> print foobar.bar(2,3)
+5
+>>> from Numeric import *
+>>> a = array(3)
+>>> print a,foobar.foo(a),a
+3 None 8
+\end{verbatim}
+Information about how to call \fpy (steps (i) and (iii)) can be
+obtained by executing\\
+\shell{\fpy}\\
+This will print the usage instructions.
+ Step (iv) is system dependent
+(compiler and the locations of the header files \texttt{Python.h} and
+\texttt{arrayobject.h}), and so you must know how to compile a shared
+module for Python in your system.
+
+The next Section describes the step (ii) in more detail in order to
+explain how you can influence the process of interface generation
+so that users can enjoy writing Python programs that use your
+wrappers to call Fortran routines. Step (v) is covered in
+Sec.~\ref{sec:notes}.
+
+
+\subsection{Features}
+\label{sec:features}
+
+\fpy has the following features:
+\begin{enumerate}
+\item \fpy scans real Fortran codes and produces the signature files.
+ The syntax of the signature files is borrowed from the Fortran~90/95
+ language specification with some extensions.
+\item \fpy uses the signature files to produce the wrappers for
+ Fortran~77 routines and their \texttt{COMMON} blocks.
+\item For \texttt{external} arguments \fpy constructs a very flexible
+ call-back mechanism so that Python functions can be called from
+ Fortran.
+\item You can pass in almost arbitrary Python objects to wrapper
+ functions. If needed, \fpy takes care of type-casting and
+ non-contiguous arrays.
+\item You can modify the signature files so that \fpy will generate
+ wrapper functions with desired signatures. \texttt{depend()}
+ attribute is introduced to control the initialization order of the
+ variables. \fpy introduces \texttt{intent(hide)} attribute to remove
+ the particular argument from the argument list of the wrapper
+ function. In addition, \texttt{optional} and \texttt{required}
+ attributes are introduced and employed.
+\item \fpy supports almost all standard Fortran~77/90/95 constructs
+ and understands all basic Fortran types, including
+ (multi-dimensional, complex) arrays and character strings with
+ adjustable and assumed sizes/lengths.
+\item \fpy generates a LaTeX document containing the
+ documentations of the wrapped functions (argument types, dimensions,
+ etc). The user can easily add some human readable text to the
+ documentation by inserting \texttt{note(<LaTeX text>)} attribute to
+ the definition of routine signatures.
+\item \fpy generates a GNU make file that can be used for building
+ shared modules calling Fortran functions.
+\item \fpy supports wrapping Fortran 90/95 module routines.
+\end{enumerate}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
diff --git a/numpy/f2py/doc/multiarray/array_from_pyobj.c b/numpy/f2py/doc/multiarray/array_from_pyobj.c
new file mode 100644
index 000000000..7e0de9a74
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/array_from_pyobj.c
@@ -0,0 +1,323 @@
+/*
+ * File: array_from_pyobj.c
+ *
+ * Description:
+ * ------------
+ * Provides array_from_pyobj function that returns a contigious array
+ * object with the given dimensions and required storage order, either
+ * in row-major (C) or column-major (Fortran) order. The function
+ * array_from_pyobj is very flexible about its Python object argument
+ * that can be any number, list, tuple, or array.
+ *
+ * array_from_pyobj is used in f2py generated Python extension
+ * modules.
+ *
+ * Author: Pearu Peterson <pearu@cens.ioc.ee>
+ * Created: 13-16 January 2002
+ * $Id: array_from_pyobj.c,v 1.1 2002/01/16 18:57:33 pearu Exp $
+ */
+
+
+#define ARR_IS_NULL(arr,mess) \
+if (arr==NULL) { \
+ fprintf(stderr,"array_from_pyobj:" mess); \
+ return NULL; \
+}
+
+#define CHECK_DIMS_DEFINED(rank,dims,mess) \
+if (count_nonpos(rank,dims)) { \
+ fprintf(stderr,"array_from_pyobj:" mess); \
+ return NULL; \
+}
+
+#define HAS_PROPER_ELSIZE(arr,type_num) \
+ ((PyArray_DescrFromType(type_num)->elsize) == (arr)->descr->elsize)
+
+/* static */
+/* void f2py_show_args(const int type_num, */
+/* const int *dims, */
+/* const int rank, */
+/* const int intent) { */
+/* int i; */
+/* fprintf(stderr,"array_from_pyobj:\n\ttype_num=%d\n\trank=%d\n\tintent=%d\n",\ */
+/* type_num,rank,intent); */
+/* for (i=0;i<rank;++i) */
+/* fprintf(stderr,"\tdims[%d]=%d\n",i,dims[i]); */
+/* } */
+
+static
+int count_nonpos(const int rank,
+ const int *dims) {
+ int i=0,r=0;
+ while (i<rank) {
+ if (dims[i] <= 0) ++r;
+ ++i;
+ }
+ return r;
+}
+
+static void lazy_transpose(PyArrayObject* arr);
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+ const int rank,
+ int *dims);
+static
+int array_has_column_major_storage(const PyArrayObject *ap);
+
+static
+PyArrayObject* array_from_pyobj(const int type_num,
+ int *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj) {
+ /* Note about reference counting
+ -----------------------------
+ If the caller returns the array to Python, it must be done with
+ Py_BuildValue("N",arr).
+ Otherwise, if obj!=arr then the caller must call Py_DECREF(arr).
+ */
+
+/* f2py_show_args(type_num,dims,rank,intent); */
+
+ if (intent & F2PY_INTENT_CACHE) {
+ /* Don't expect correct storage order or anything reasonable when
+ returning cache array. */
+ if ((intent & F2PY_INTENT_HIDE)
+ || (obj==Py_None)) {
+ PyArrayObject *arr = NULL;
+ CHECK_DIMS_DEFINED(rank,dims,"optional,intent(cache) must"
+ " have defined dimensions.\n");
+ arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
+ ARR_IS_NULL(arr,"FromDims failed: optional,intent(cache)\n");
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(arr);
+ return arr;
+ }
+ if (PyArray_Check(obj)
+ && ISCONTIGUOUS((PyArrayObject *)obj)
+ && HAS_PROPER_ELSIZE((PyArrayObject *)obj,type_num)
+ ) {
+ if (check_and_fix_dimensions((PyArrayObject *)obj,rank,dims))
+ return NULL; /*XXX: set exception */
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(obj);
+ return (PyArrayObject *)obj;
+ }
+ ARR_IS_NULL(NULL,"intent(cache) must be contiguous array with a proper elsize.\n");
+ }
+
+ if (intent & F2PY_INTENT_HIDE) {
+ PyArrayObject *arr = NULL;
+ CHECK_DIMS_DEFINED(rank,dims,"intent(hide) must have defined dimensions.\n");
+ arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
+ ARR_IS_NULL(arr,"FromDims failed: intent(hide)\n");
+ if (intent & F2PY_INTENT_OUT) {
+ if ((!(intent & F2PY_INTENT_C)) && (rank>1)) {
+ lazy_transpose(arr);
+ arr->flags &= ~CONTIGUOUS;
+ }
+ Py_INCREF(arr);
+ }
+ return arr;
+ }
+
+ if (PyArray_Check(obj)) { /* here we have always intent(in) or
+ intent(inout) */
+
+ PyArrayObject *arr = (PyArrayObject *)obj;
+ int is_cont = (intent & F2PY_INTENT_C) ?
+ (ISCONTIGUOUS(arr)) : (array_has_column_major_storage(arr));
+
+ if (check_and_fix_dimensions(arr,rank,dims))
+ return NULL; /*XXX: set exception */
+
+ if ((intent & F2PY_INTENT_COPY)
+ || (! (is_cont
+ && HAS_PROPER_ELSIZE(arr,type_num)
+ && PyArray_CanCastSafely(arr->descr->type_num,type_num)))) {
+ PyArrayObject *tmp_arr = NULL;
+ if (intent & F2PY_INTENT_INOUT) {
+ ARR_IS_NULL(NULL,"intent(inout) array must be contiguous and"
+ " with a proper type and size.\n")
+ }
+ if ((rank>1) && (! (intent & F2PY_INTENT_C)))
+ lazy_transpose(arr);
+ if (PyArray_CanCastSafely(arr->descr->type_num,type_num)) {
+ tmp_arr = (PyArrayObject *)PyArray_CopyFromObject(obj,type_num,0,0);
+ ARR_IS_NULL(arr,"CopyFromObject failed: array.\n");
+ } else {
+ tmp_arr = (PyArrayObject *)PyArray_FromDims(arr->nd,
+ arr->dimensions,
+ type_num);
+ ARR_IS_NULL(tmp_arr,"FromDims failed: array with unsafe cast.\n");
+ if (copy_ND_array(arr,tmp_arr))
+ ARR_IS_NULL(NULL,"copy_ND_array failed: array with unsafe cast.\n");
+ }
+ if ((rank>1) && (! (intent & F2PY_INTENT_C))) {
+ lazy_transpose(arr);
+ lazy_transpose(tmp_arr);
+ tmp_arr->flags &= ~CONTIGUOUS;
+ }
+ arr = tmp_arr;
+ }
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(arr);
+ return arr;
+ }
+
+ if ((obj==Py_None) && (intent & F2PY_OPTIONAL)) {
+ PyArrayObject *arr = NULL;
+ CHECK_DIMS_DEFINED(rank,dims,"optional must have defined dimensions.\n");
+ arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
+ ARR_IS_NULL(arr,"FromDims failed: optional.\n");
+ if (intent & F2PY_INTENT_OUT) {
+ if ((!(intent & F2PY_INTENT_C)) && (rank>1)) {
+ lazy_transpose(arr);
+ arr->flags &= ~CONTIGUOUS;
+ }
+ Py_INCREF(arr);
+ }
+ return arr;
+ }
+
+ if (intent & F2PY_INTENT_INOUT) {
+ ARR_IS_NULL(NULL,"intent(inout) argument must be an array.\n");
+ }
+
+ {
+ PyArrayObject *arr = (PyArrayObject *) \
+ PyArray_ContiguousFromObject(obj,type_num,0,0);
+ ARR_IS_NULL(arr,"ContiguousFromObject failed: not a sequence.\n");
+ if (check_and_fix_dimensions(arr,rank,dims))
+ return NULL; /*XXX: set exception */
+ if ((rank>1) && (! (intent & F2PY_INTENT_C))) {
+ PyArrayObject *tmp_arr = NULL;
+ lazy_transpose(arr);
+ arr->flags &= ~CONTIGUOUS;
+ tmp_arr = (PyArrayObject *) PyArray_CopyFromObject((PyObject *)arr,type_num,0,0);
+ Py_DECREF(arr);
+ arr = tmp_arr;
+ ARR_IS_NULL(arr,"CopyFromObject(Array) failed: intent(fortran)\n");
+ lazy_transpose(arr);
+ arr->flags &= ~CONTIGUOUS;
+ }
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(arr);
+ return arr;
+ }
+
+}
+
+ /*****************************************/
+ /* Helper functions for array_from_pyobj */
+ /*****************************************/
+
+static
+int array_has_column_major_storage(const PyArrayObject *ap) {
+ /* array_has_column_major_storage(a) is equivalent to
+ transpose(a).iscontiguous() but more efficient.
+
+ This function can be used in order to decide whether to use a
+ Fortran or C version of a wrapped function. This is relevant, for
+ example, in choosing a clapack or flapack function depending on
+ the storage order of array arguments.
+ */
+ int sd;
+ int i;
+ sd = ap->descr->elsize;
+ for (i=0;i<ap->nd;++i) {
+ if (ap->dimensions[i] == 0) return 1;
+ if (ap->strides[i] != sd) return 0;
+ sd *= ap->dimensions[i];
+ }
+ return 1;
+}
+
+static
+void lazy_transpose(PyArrayObject* arr) {
+ /*
+ Changes the order of array strides and dimensions. This
+ corresponds to the lazy transpose of a Numeric array in-situ.
+ Note that this function is assumed to be used even times for a
+ given array. Otherwise, the caller should set flags &= ~CONTIGUOUS.
+ */
+ int rank,i,s,j;
+ rank = arr->nd;
+ if (rank < 2) return;
+
+ for(i=0,j=rank-1;i<rank/2;++i,--j) {
+ s = arr->strides[i];
+ arr->strides[i] = arr->strides[j];
+ arr->strides[j] = s;
+ s = arr->dimensions[i];
+ arr->dimensions[i] = arr->dimensions[j];
+ arr->dimensions[j] = s;
+ }
+}
+
+static
+int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims) {
+ /*
+ This function fills in blanks (that are -1's) in dims list using
+ the dimensions from arr. It also checks that non-blank dims will
+ match with the corresponding values in arr dimensions.
+ */
+ const int arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1;
+
+ if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
+ int new_size = 1;
+ int free_axe = -1;
+ int i;
+ /* Fill dims where -1 or 0; check dimensions; calc new_size; */
+ for(i=0;i<arr->nd;++i) {
+ if (dims[i] >= 0) {
+ if (dims[i]!=arr->dimensions[i]) {
+ fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n",
+ i,dims[i],arr->dimensions[i]);
+ return 1;
+ }
+ if (!dims[i]) dims[i] = 1;
+ } else {
+ dims[i] = arr->dimensions[i] ? arr->dimensions[i] : 1;
+ }
+ new_size *= dims[i];
+ }
+ for(i=arr->nd;i<rank;++i)
+ if (dims[i]>1) {
+ fprintf(stderr,"%d-th dimension must be %d but got 0 (not defined).\n",
+ i,dims[i]);
+ return 1;
+ } else if (free_axe<0)
+ free_axe = i;
+ else
+ dims[i] = 1;
+ if (free_axe>=0) {
+ dims[free_axe] = arr_size/new_size;
+ new_size *= dims[free_axe];
+ }
+ if (new_size != arr_size) {
+ fprintf(stderr,"confused: new_size=%d, arr_size=%d (maybe too many free"
+ " indices)\n",new_size,arr_size);
+ return 1;
+ }
+ } else {
+ int i;
+ for (i=rank;i<arr->nd;++i)
+ if (arr->dimensions[i]>1) {
+ fprintf(stderr,"too many axes: %d, expected rank=%d\n",arr->nd,rank);
+ return 1;
+ }
+ for (i=0;i<rank;++i)
+ if (dims[i]>=0) {
+ if (arr->dimensions[i]!=dims[i]) {
+ fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n",
+ i,dims[i],arr->dimensions[i]);
+ return 1;
+ }
+ if (!dims[i]) dims[i] = 1;
+ } else
+ dims[i] = arr->dimensions[i];
+ }
+ return 0;
+}
+
+/* End of file: array_from_pyobj.c */
diff --git a/numpy/f2py/doc/multiarray/bar.c b/numpy/f2py/doc/multiarray/bar.c
new file mode 100644
index 000000000..350636ea6
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/bar.c
@@ -0,0 +1,15 @@
+
+#include <stdio.h>
+
+void bar(int *a,int m,int n) {
+  /* Print the m-by-n integer matrix a, stored in C row-major order
+     (element (i,j) lives at a[n*i+j]), then overwrite a[0] with 7777
+     when the matrix is non-empty so the f2py caller can observe the
+     write-back.  The exact output text is pinned by the doctest
+     transcript in fun.pyf -- do not change the printf formats. */
+  int i,j;
+  printf("C:");
+  printf("m=%d, n=%d\n",m,n);
+  for (i=0;i<m;++i) {
+    printf("Row %d:\n",i+1);
+    for (j=0;j<n;++j)
+      printf("a(i=%d,j=%d)=%d\n",i,j,a[n*i+j]);
+  }
+  if (m*n)  /* true iff both dimensions are non-zero */
+    a[0] = 7777;
+}
diff --git a/numpy/f2py/doc/multiarray/foo.f b/numpy/f2py/doc/multiarray/foo.f
new file mode 100644
index 000000000..f8c39c4d1
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/foo.f
@@ -0,0 +1,13 @@
+      subroutine foo(a,m,n)
+C     Print the m-by-n integer matrix a (Fortran column-major order)
+C     and, when the matrix is non-empty, overwrite a(1,1) with 77777
+C     so the f2py caller can observe the write-back.  The output text
+C     is pinned by the doctest transcript in fun.pyf.
+      integer a(m,n), m,n,i,j
+      print*, "F77:"
+      print*, "m=",m,", n=",n
+      do 100,i=1,m
+         print*, "Row ",i,":"
+         do 50,j=1,n
+            print*, "a(i=",i,",j=",j,") = ",a(i,j)
+ 50      continue
+ 100  continue
+      if (m*n.gt.0) a(1,1) = 77777
+      end
+
diff --git a/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt b/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt
new file mode 100644
index 000000000..c7b945c84
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt
@@ -0,0 +1,284 @@
+
+ _____________________________________________________________
+ / Proposed internal structure for f2py generated extension \
+ < modules regarding the issues with different storage-orders >
+ \ of multi-dimensional matrices in Fortran and C. /
+ =============================================================
+
+Author: Pearu Peterson
+Date: 14 January, 2001
+
+Definitions:
+============
+
+In the following I will use the following definitions:
+
+1) A matrix is a mathematical object that represents a collection of
+ objects (elements), usually visualized in a table form, and one can
+ define a set of various (algebraic,etc) operations for matrices.
+   One can think of a matrix as a definition of a certain mapping:
+ (i) |--> A(i)
+ where i belongs to the set of indices (an index itself can be a
+ sequence of objects, for example, a sequence of integers) and A(i)
+ is an element from a specified set, for example, a set of fruits.
+ Symbol A then denotes a matrix of fruits.
+
+2) An array is a storage object that represents a collection of
+ objects stored in a certain systematic way, for example, as an
+ ordered sequence in computer memory.
+
+In order to manipulate matrices using computers, one must store matrix
+elements in computer memory. In the following, I will assume that the
+elements of a matrix are stored as an array. There is no unique way in
+which order one should save matrix elements in the array. However, in
+C and Fortran programming languages, two, unfortunately different,
+conventions are used.
+
+Aim:
+====
+
+The purpose of this writing is to work out an interface for Python
+language so that C and Fortran routines can be called without
+bothering about how multi-dimensional matrices are stored in memory.
+For example, accessing a matrix element A[i,j] in Python will be
+equivalent to accessing the same matrix in C, using A[i][j], or in
+Fortran, using A(i,j).
+
+External conditions:
+====================
+
+In C programming language, it is custom to think that matrices are
+stored in the so-called row-major order, that is, a matrix is stored
+row by row, each row is as a contiguous array in computer memory.
+
+In Fortran programming language, matrices are stored in the
+column-major order: each column is a contiguous array in computer
+memory.
+
+In Python programming language, matrices can be stored using Python
+Numeric array() function that uses internally C approach, that is,
+elements of matrices are stored in row-major order. For example,
+A = array([[1,2,3],[4,5,6]]) represents a 2-by-3 matrix
+
+ / 1 2 3 \
+ | |
+ \ 4 5 6 /
+
+and its elements are stored in computer memory as the following array:
+
+ 1 2 3 4 5 6
+
+The same matrix, if used in Fortran, would be stored in computer
+memory as the following array:
+
+ 1 4 2 5 3 6
+
+Problem and solution:
+=====================
+
+A problem arises if one wants to use the same matrix both in C and in
+Fortran functions. Then the difference in storage order of a matrix
+elements must be taken into account. This technical detail can be very
+confusing even for an experienced programmer. This is because when
+passing a matrix to a Fortran subroutine, you must (mentally or
+programmatically) transpose the matrix and when the subroutine returns,
+you must transpose it back.
+
+As will be discussed below, there is a way to overcome these
+difficulties in Python by creating an interface between Python and
+Fortran code layers that takes care of this transition internally. So
+that if you will read the manual pages of the Fortran codes, then you
+need not to think about how matrices are actually stored, the storage
+order will be the same, seemingly.
+
+Python / C / Fortran interface:
+===============================
+
+The interface between Python and Fortran codes will use the following
+Python Numeric feature: transposing a Numeric array does not involve
+copying of its data but just permuting the dimensions and strides of
+the array (the so-called lazy transpose).
+
+However, when passing a Numeric array data pointer to Fortran or C
+function, the data must be contiguous in memory. If it is not, then
+data is rearranged inplace. I don't think that it can be avoided.
+This is certainly a penalty hit to performance. However, one can
+easily avoid it by creating a Numeric array with the right storage
+order, so that after transposing, the array data will be contiguous in
+memory and the data pointer can safely passed on to the Fortran
+subroutine. This lazy-transpose operation will be done within the
+interface and users need not to bother about this detail anymore (that
+is, after they initialize Numeric array with matrix elements using the
+proper order. Of course, the proper order depends on the target
+function: C or Fortran). The interface should be smart enough to
+minimize the need of real-transpose operations and the need to
+additional memory storage as well.
+
+Statement of the problem:
+=========================
+
+Consider a M-by-N matrix A of integers, where M and N are the number A
+rows and columns, respectively.
+
+In Fortran language, the storing array of this matrix can be defined
+as follows:
+
+ integer A(M,N)
+
+in C:
+
+ int A[M][N];
+
+and in Python:
+
+ A = Numeric.zeros((M,N),'i')
+
+Consider also the corresponding Fortran and C functions
+that use matrix arguments:
+
+Fortran:
+ subroutine FUN(A,M,N)
+ integer A(M,N)
+ ...
+ end
+C:
+ void cun(int *a,int m,int n) {
+ ...
+ }
+
+and the corresponding Python interface signatures:
+
+ def py_fun(a):
+ ...
+ def py_cun(a):
+ ...
+
+Main goal:
+==========
+
+Our goal is to generate Python C/API functions py_fun and py_cun such
+that their usage in Python would be identical. The crucial part of
+their implementation are in functions that take a PyObject and
+return a PyArrayObject such that it is contiguous and its data pointer
+is suitable for passing on to the arguments of C or Fortran functions.
+The prototypes of these functions are:
+
+PyArrayObject* fortran_array_from_pyobj(
+ int typecode,
+ int *dims,
+ int rank,
+ int intent,
+ PyObject *obj);
+
+and
+
+PyArrayObject* c_array_from_pyobj(
+ int typecode,
+ int *dims,
+ int rank,
+ int intent,
+ PyObject *obj);
+
+for wrapping Fortran and C functions, respectively.
+
+Pseudo-code for fortran_array_from_pyobj:
+=========================================
+
+if type(obj) is ArrayType:
+ #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
+ if obj.typecode is typecode:
+ if is_contiguous(obj):
+ transpose_data_inplace(obj) # real-transpose
+ set_transpose_strides(obj) # lazy-transpose
+ Py_INCREF(obj);
+ return obj
+ set_transpose_strides(obj)
+ if is_contiguous(obj):
+ set_transpose_strides(obj)
+ Py_INCREF(obj);
+ return obj
+ else:
+ tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
+ swap_datapointer_and_typeinfo(obj,tmp_obj)
+ Py_DECREF(tmp_obj);
+ set_transpose_strides(obj)
+ Py_INCREF(obj);
+ return obj
+ else:
+ tmp_obj = PyArray_FromDims(rank,dims,typecode)
+ set_transpose_strides(tmp_obj)
+ if intent in [in,inout]:
+ copy_ND_array(obj,tmp_obj)
+ swap_datapointer_and_typeinfo(obj,tmp_obj)
+ Py_DECREF(tmp_obj);
+ Py_INCREF(obj);
+ return obj
+elif obj is None: # happens when only intent is 'hide'
+ tmp_obj = PyArray_FromDims(rank,dims,typecode)
+ if intent is out:
+ set_transpose_strides(tmp_obj)
+ # otherwise tmp_obj->data is used as a work array
+ Py_INCREF(tmp_obj)
+ return tmp_obj
+else:
+ tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
+ #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
+ set_transpose_strides(tmp_obj)
+ transpose_data_inplace(tmp_obj)
+ Py_INCREF(tmp_obj)
+ return tmp_obj
+
+Notes:
+ 1) CPU expensive tasks are in transpose_data_inplace and
+ copy_ND_array, PyArray_ContiguousFromObject.
+ 2) Memory expensive tasks are in PyArray_FromDims,
+ PyArray_ContiguousFromObject
+ 3) Side-effects are expected when set_transpose_strides and
+ transpose_data_inplace are used. For example:
+ >>> a = Numeric([[1,2,3],[4,5,6]],'d')
+ >>> a.is_contiguous()
+ 1
+ >>> py_fun(a)
+ >>> a.typecode()
+ 'i'
+ >>> a.is_contiguous()
+ 0
+ >>> transpose(a).is_contiguous()
+ 1
+
+Pseudo-code for c_array_from_pyobj:
+===================================
+
+if type(obj) is ArrayType:
+ #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
+ if obj.typecode is typecode:
+ if is_contiguous(obj):
+ Py_INCREF(obj);
+ return obj
+ else:
+ tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
+ swap_datapointer_and_typeinfo(obj,tmp_obj)
+ Py_DECREF(tmp_obj);
+ Py_INCREF(obj);
+ return obj
+ else:
+ tmp_obj = PyArray_FromDims(rank,dims,typecode)
+ if intent in [in,inout]:
+ copy_ND_array(obj,tmp_obj)
+ swap_datapointer_and_typeinfo(obj,tmp_obj)
+ Py_DECREF(tmp_obj);
+ Py_INCREF(obj);
+ return obj
+elif obj is None: # happens when only intent is 'hide'
+ tmp_obj = PyArray_FromDims(rank,dims,typecode)
+ Py_INCREF(tmp_obj)
+ return tmp_obj
+else:
+ tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
+ #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
+ Py_INCREF(tmp_obj)
+ return tmp_obj
+
+
+14 January, 2002
+Pearu Peterson <pearu@cens.ioc.ee> \ No newline at end of file
diff --git a/numpy/f2py/doc/multiarray/fun.pyf b/numpy/f2py/doc/multiarray/fun.pyf
new file mode 100644
index 000000000..ed5d1923f
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/fun.pyf
@@ -0,0 +1,89 @@
+!%f90 -*- f90 -*-
+
+! Example:
+! Using f2py for wrapping multi-dimensional Fortran and C arrays
+! [NEW APPROACH, use it with f2py higher than 2.8.x]
+! $Id: fun.pyf,v 1.3 2002/01/18 10:06:50 pearu Exp $
+
+! Usage (with gcc compiler):
+! f2py -c fun.pyf foo.f bar.c
+
+python module fun ! in
+ interface ! in :fun
+
+! >>> from Numeric import *
+! >>> import fun
+! >>> a=array([[1,2,3],[4,5,6]])
+
+ subroutine foo(a,m,n) ! in :fun:foo.f
+ integer dimension(m,n) :: a
+ intent(in,out,copy) :: a
+ integer optional,check(shape(a,0)==m),depend(a) :: m=shape(a,0)
+ integer optional,check(shape(a,1)==n),depend(a) :: n=shape(a,1)
+ end subroutine foo
+
+! >>> print fun.foo.__doc__
+! foo - Function signature:
+! a = foo(a,[m,n])
+! Required arguments:
+! a : input rank-2 array('i') with bounds (m,n)
+! Optional arguments:
+! m := shape(a,0) input int
+! n := shape(a,1) input int
+! Return objects:
+! a : rank-2 array('i') with bounds (m,n)
+
+! >>> print fun.foo(a)
+! F77:
+! m= 2, n= 3
+! Row 1:
+! a(i= 1,j= 1) = 1
+! a(i= 1,j= 2) = 2
+! a(i= 1,j= 3) = 3
+! Row 2:
+! a(i= 2,j= 1) = 4
+! a(i= 2,j= 2) = 5
+! a(i= 2,j= 3) = 6
+! [[77777 2 3]
+! [ 4 5 6]]
+
+
+ subroutine bar(a,m,n)
+ intent(c)
+ intent(c) bar
+ integer dimension(m,n) :: a
+ intent(in,out) :: a
+ integer optional,check(shape(a,0)==m),depend(a) :: m=shape(a,0)
+ integer optional,check(shape(a,1)==n),depend(a) :: n=shape(a,1)
+ intent(in) m,n
+ end subroutine bar
+
+! >>> print fun.bar.__doc__
+! bar - Function signature:
+! a = bar(a,[m,n])
+! Required arguments:
+! a : input rank-2 array('i') with bounds (m,n)
+! Optional arguments:
+! m := shape(a,0) input int
+! n := shape(a,1) input int
+! Return objects:
+! a : rank-2 array('i') with bounds (m,n)
+
+! >>> print fun.bar(a)
+! C:m=2, n=3
+! Row 1:
+! a(i=0,j=0)=1
+! a(i=0,j=1)=2
+! a(i=0,j=2)=3
+! Row 2:
+! a(i=1,j=0)=4
+! a(i=1,j=1)=5
+! a(i=1,j=2)=6
+! [[7777 2 3]
+! [ 4 5 6]]
+
+ end interface
+end python module fun
+
+! This file was auto-generated with f2py (version:2.9.166).
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/doc/multiarray/run.pyf b/numpy/f2py/doc/multiarray/run.pyf
new file mode 100644
index 000000000..bb12a439b
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/run.pyf
@@ -0,0 +1,91 @@
+!%f90 -*- f90 -*-
+
+! Example:
+! Using f2py for wrapping multi-dimensional Fortran and C arrays
+! [OLD APPROACH, do not use it with f2py higher than 2.8.x]
+! $Id: run.pyf,v 1.1 2002/01/14 15:49:46 pearu Exp $
+
+! Usage (with gcc compiler):
+! f2py -c run.pyf foo.f bar.c -DNO_APPEND_FORTRAN
+
+python module run ! in
+ interface ! in :run
+
+! >>> from Numeric import *
+! >>> import run
+! >>> a=array([[1,2,3],[4,5,6]],'i')
+
+ subroutine foo(a,m,n)
+ fortranname foo_
+ integer dimension(m,n) :: a
+ integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1)
+ integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0)
+ end subroutine foo
+
+! >>> print run.foo.__doc__
+! foo - Function signature:
+! foo(a,[m,n])
+! Required arguments:
+! a : input rank-2 array('i') with bounds (n,m)
+! Optional arguments:
+! m := shape(a,1) input int
+! n := shape(a,0) input int
+
+! >>> run.foo(a)
+! F77:
+! m= 3, n= 2
+! Row 1:
+! a(i= 1,j= 1) = 1
+! a(i= 1,j= 2) = 4
+! Row 2:
+! a(i= 2,j= 1) = 2
+! a(i= 2,j= 2) = 5
+! Row 3:
+! a(i= 3,j= 1) = 3
+! a(i= 3,j= 2) = 6
+
+! >>> run.foo(transpose(a))
+! F77:
+! m= 2, n= 3
+! Row 1:
+! a(i= 1,j= 1) = 1
+! a(i= 1,j= 2) = 2
+! a(i= 1,j= 3) = 3
+! Row 2:
+! a(i= 2,j= 1) = 4
+! a(i= 2,j= 2) = 5
+! a(i= 2,j= 3) = 6
+
+ subroutine bar(a,m,n)
+ intent(c)
+ integer dimension(m,n) :: a
+ integer optional,check(shape(a,0)==m),depend(a) :: m=shape(a,0)
+ integer optional,check(shape(a,1)==n),depend(a) :: n=shape(a,1)
+ end subroutine bar
+
+! >>> print run.bar.__doc__
+! bar - Function signature:
+! bar(a,[m,n])
+! Required arguments:
+! a : rank-2 array('i') with bounds (m,n)
+! Optional arguments:
+! m := shape(a,0) int
+! n := shape(a,1) int
+
+! >>> run.bar(a)
+! C:m=2, n=3
+! Row 1:
+! a(i=0,j=0)=1
+! a(i=0,j=1)=2
+! a(i=0,j=2)=3
+! Row 2:
+! a(i=1,j=0)=4
+! a(i=1,j=1)=5
+! a(i=1,j=2)=6
+
+
+ end interface
+end python module run
+
+! This file was auto-generated with f2py (version:2.8.172).
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/doc/multiarray/transpose.txt b/numpy/f2py/doc/multiarray/transpose.txt
new file mode 100644
index 000000000..a8d41e6df
--- /dev/null
+++ b/numpy/f2py/doc/multiarray/transpose.txt
@@ -0,0 +1,1127 @@
+From: Phil Garner (garner@signal.dra.hmg.gb)
+ Subject: In place matrix transpose
+ Newsgroups: sci.math.num-analysis
+ Date: 1993-08-05 06:35:06 PST
+
+
+Someone was talking about matrix transposes earlier on. It's a
+curious subject. I found that an in-place transpose is about 12 times
+slower than the trivial copying method.
+
+Here's something I nicked from netlib and translated into C to do the
+in-place one for those that are interested: (matrix must be in one
+block)
+
+
+typedef float scalar; /* float -> double for double precision */
+
+/*
+ * In Place Matrix Transpose
+ * From: Algorithm 380 collected algorithms from ACM.
+ * Converted to C by Phil Garner
+ *
+ * Algorithm appeared in comm. ACM, vol. 13, no. 05,
+ * p. 324.
+ */
+int trans(scalar *a, unsigned m, unsigned n, int *move, int iwrk)
+{
+ scalar b;
+ int i, j, k, i1, i2, ia, ib, ncount, kmi, Max, mn;
+
+ /*
+ * a is a one-dimensional array of length mn=m*n, which
+ * contains the m by n matrix to be transposed.
+ * move is a one-dimensional array of length iwrk
+ * used to store information to speed up the process. the
+ * value iwrk=(m+n)/2 is recommended. Return val indicates the
+ * success or failure of the routine.
+ * normal return = 0
+ * errors
+ * -2, iwrk negative or zero.
+ * ret > 0, (should never occur). in this case
+ * we set ret equal to the final value of i when the search
+ * is completed but some loops have not been moved.
+ * check arguments and initialise
+ */
+
+ /* Function Body */
+ if (n < 2 || m < 2)
+ return 0;
+ if (iwrk < 1)
+ return -2;
+
+ /* If matrix is square, exchange elements a(i,j) and a(j,i). */
+ if (n == m)
+ {
+ for (i = 0; i < m - 1; ++i)
+ for (j = i + 1; j < m; ++j)
+ {
+ i1 = i + j * m;
+ i2 = j + i * m;
+ b = a[i1];
+ a[i1] = a[i2];
+ a[i2] = b;
+ } return 0;
+ }
+
+ /* Non square matrix */
+ ncount = 2;
+ for (i = 0; i < iwrk; ++i)
+ move[i] = 0;
+
+ if (n > 2)
+ /* Count number,ncount, of single points. */
+ for (ia = 1; ia < n - 1; ++ia)
+ {
+ ib = ia * (m - 1) / (n - 1);
+ if (ia * (m - 1) != ib * (n - 1))
+ continue;
+ ++ncount;
+ i = ia * m + ib;
+ if (i > iwrk)
+ continue;
+ move[i] = 1;
+ }
+
+ /* Set initial values for search. */
+ mn = m * n;
+ k = mn - 1;
+ kmi = k - 1;
+ Max = mn;
+ i = 1;
+
+ while (1)
+ {
+ /* Rearrange elements of a loop. */
+ /* At least one loop must be re-arranged. */
+ i1 = i;
+ while (1)
+ {
+ b = a[i1];
+ while (1)
+ {
+ i2 = n * i1 - k * (i1 / m);
+ if (i1 <= iwrk)
+ move[i1 - 1] = 2;
+ ++ncount;
+ if (i2 == i || i2 >= kmi)
+ {
+ if (Max == kmi || i2 == i)
+ break;
+ Max = kmi;
+ }
+ a[i1] = a[i2];
+ i1 = i2;
+ }
+
+ /* Test for symmetric pair of loops. */
+ a[i1] = b;
+ if (ncount >= mn)
+ return 0;
+ if (i2 == Max || Max == kmi)
+ break;
+ Max = kmi;
+ i1 = Max;
+ }
+
+ /* Search for loops to be rearranged. */
+ while (1)
+ {
+ Max = k - i;
+ ++i;
+ kmi = k - i;
+ if (i > Max)
+ return i;
+ if (i <= iwrk)
+ {
+ if (move[i-1] < 1)
+ break;
+ continue;
+ }
+ if (i == n * i - k * (i / m))
+ continue;
+ i1 = i;
+ while (1)
+ {
+ i2 = n * i1 - k * (i1 / m);
+ if (i2 <= i || i2 >= Max)
+ break;
+ i1 = i2;
+ }
+ if (i2 == i)
+ break;
+ }
+ } /* End never reached */
+}
+
+--
+ ,----------------------------- ______
+ ____ | Phil Garner. \___| |/ \ \ ____
+/__/ `--, _L__L\_ | garner@signal.dra.hmg.gb | _|`---' \_/__/ `--,
+`-0---0-' `-0--0-' `--OO-------------------O-----' `---0---' `-0---0-'
+
+ From: Murray Dow (mld900@anusf.anu.edu.au)
+ Subject: Re: In place matrix transpose
+ Newsgroups: sci.math.num-analysis
+ Date: 1993-08-09 19:45:57 PST
+
+
+In article <23qmp3INN3gl@mentor.dra.hmg.gb>, garner@signal.dra.hmg.gb (Phil Garner) writes:
+|> Someone was talking about matrix transposes earlier on. It's a
+|> curious subject. I found that an in-place transpose is about 12 times
+|> slower than the trivial copying method.
+|>
+
+Algorithm 380 from CACM is slower than ALG 467. Here are my times
+from a VP2200 vector computer. Note that the CACM algorithms are scalar.
+Times are in seconds, for a 900*904 matrix:
+
+380 NAG 467 disc copy
+1.03 1.14 .391 .177
+
+Compare two vector algorithms, one I wrote and the second a matrix
+copy:
+
+My Alg Matrix copy
+.0095 .0097
+
+Conclusions: don't use Alg 380 from Netlib. If you have the available memory,
+do a matrix copy. If you don't have the memory, I will send you my algorithm
+when I have published it.
+--
+Murray Dow GPO Box 4 Canberra ACT 2601 Australia
+Supercomputer Facility Phone: +61 6 2495028
+Australian National University Fax: +61 6 2473425
+mld900@anusf.anu.edu.au
+
+=============================================================================
+
+From: Mark Smotherman (mark@hubcap.clemson.edu)
+ Subject: Matrix transpose benchmark [was Re: MIPS R8000 == TFP?]
+ Newsgroups: comp.arch, comp.benchmarks, comp.sys.super
+ Date: 1994-07-01 06:35:51 PST
+
+
+mccalpin@perelandra.cms.udel.edu (John D. McCalpin) writes:
+
+>
+>Of course, these results are all for the naive algorithm. I would be
+>interested to see what an efficient blocked algorithm looks like.
+>Anyone care to offer one? There is clearly a lot of performance
+>to be gained by the effort....
+
+Here is a matrix transpose benchmark generator. Enter something like
+
+ 10d10eij;
+
+and you get a benchmark program with tiles of size 10 for the i and j
+inner loops. Please email code improvements and flames.
+
+Enjoy!
+
+
+/*---------------------------------------------------------------------------
+
+ Matrix Transpose Generator
+
+ Copyright 1993, Dept. of Computer Science, Clemson University
+
+ Permission to use, copy, modify, and distribute this software and
+ its documentation for any purpose and without fee is hereby granted,
+ provided that the above copyright notice appears in all copies.
+
+ Clemson University and its Dept. of Computer Science make no
+ representations about the suitability of this software for any
+ purpose. It is provided "as is" without express or implied warranty.
+
+ Original author: Mark Smotherman
+
+ -------------------------------------------------------------------------*/
+
+
+/* tpgen.c version 1.0
+ *
+ * generate a matrix transpose loop nest, with tiling and unrolling
+ * (timing code using getrusage is included in the generated program)
+ *
+ * mark smotherman
+ * mark@cs.clemson.edu
+ * clemson university
+ * 9 july 1993
+ *
+ * a loop nest can be described by the order of its loop indices, so
+ * this program takes as input a simple language describing these indices:
+ * <number>d ==> generate tiling loop for index i with step size of <number>
+ * <number>e ==> generate tiling loop for index j with step size of <number>
+ * <number>i ==> generate loop for index i with unrolling factor of <number>
+ * <number>j ==> generate loop for index j with unrolling factor of <number>
+ * ; ==> input terminator (required)
+ * rules are:
+ * i,j tokens must appear
+ * if d appears, it must appear before i
+ * if e appears, it must appear before j
+ * ; must appear
+ * matrix size is controlled by #define N in this program.
+ *
+ * this code was adapted from mmgen.c v1.2 and extended to generate pre-
+ * condition loops for unrolling factors that do not evenly divide the
+ * matrix size (or the tiling step size for loop nests with a tiling loop).
+ * note that this program only provides a preconditioning loop for the
+ * innermost loop. unrolling factors for non-innermost loops that do not
+ * evenly divide the matrix size (or step size) are not supported.
+ *
+ * my interest in this program generator is to hook it to a sentence
+ * generator and a minimum execution time finder, that is
+ * while((sentence=sgen())!=NULL){
+ * genprogram=tpgen(sentence);
+ * system("cc -O4 genprogram.c");
+ * system("a.out >> tpresults");
+ * }
+ * findmintime(tpresults);
+ * this will find the optimum algorithm for the host system via an
+ * exhaustive search.
+ *
+ * please report bugs and suggestions for enhancements to me.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#define N 500
+
+#define ALLOC1 temp1=(struct line *)malloc(sizeof(struct line));\
+temp1->indentcnt=indentcnt;
+
+#define LINK1 temp1->next=insertbefore;\
+insertafter->next=temp1;\
+insertafter=temp1;
+
+#define INSERT1 temp1->next=start;\
+start=temp1;
+
+#define ALLOC2 temp1=(struct line *)malloc(sizeof(struct line));\
+temp2=(struct line *)malloc(sizeof(struct line));\
+temp1->indentcnt=indentcnt;\
+temp2->indentcnt=indentcnt++;
+
+#define LINK2 temp1->next=temp2;\
+temp2->next=insertbefore;\
+insertafter->next=temp1;\
+insertafter=temp1;\
+insertbefore=temp2;
+
+struct line{ int indentcnt; char line[256]; struct line *next; };
+
+int indentcnt;
+int iflag,jflag;
+int ijflag,jiflag;
+int dflag,eflag;
+int counter;
+int iistep,jjstep;
+int iunroll,junroll;
+int precond;
+
+char c;
+int i,ttp,nt;
+char *p0;
+char tptype[80];
+char number[10];
+
+struct line *start,*head,*insertafter,*insertbefore,*temp1,*temp2;
+
+void processloop();
+void processstmt();
+
+main(){
+
+ indentcnt=0;
+ iflag=jflag=0;
+ ijflag=jiflag=0;
+ dflag=eflag=0;
+ iunroll=junroll=0;
+ counter=1;
+ precond=0;
+ ttp=0;
+
+ start=NULL;
+ ALLOC2
+ sprintf(temp1->line,"/* begin */\nt_start=second();\n");
+ sprintf(temp2->line,"/* end */\nt_end = second();\n");
+ head=temp1; temp1->next=temp2; temp2->next=NULL;
+ insertafter=temp1; insertbefore=temp2;
+
+ while((c=getchar())!=';'){
+ tptype[ttp++]=c;
+ if(isdigit(c)){
+ nt=0;
+ while(isdigit(c)){
+ number[nt++]=c;
+ c=getchar();
+ if(c==';'){ fprintf(stderr,"unexpected ;!\n"); exit(1); }
+ tptype[ttp++]=c;
+ }
+ number[nt]='\0';
+ sscanf(number,"%d",&counter);
+ }
+ switch(c){
+ case 'd':
+ if(iflag){ fprintf(stderr,"d cannot appear after i!\n"); exit(1); }
+ dflag++;
+ ALLOC1
+ sprintf(temp1->line,"#define IISTEP %d\n",counter);
+ INSERT1
+ iistep=counter;
+ counter=1;
+ ALLOC2
+ sprintf(temp1->line,"for(ii=0;ii<%d;ii+=IISTEP){\n",N);
+ sprintf(temp2->line,"}\n",N);
+ LINK2
+ ALLOC1
+ sprintf(temp1->line,"it=min(ii+IISTEP,%d);\n",N);
+ LINK1
+ break;
+ case 'e':
+ if(jflag){ fprintf(stderr,"e cannot appear after j!\n"); exit(1); }
+ eflag++;
+ ALLOC1
+ sprintf(temp1->line,"#define JJSTEP %d\n",counter);
+ INSERT1
+ jjstep=counter;
+ counter=1;
+ ALLOC2
+ sprintf(temp1->line,"for(jj=0;jj<%d;jj+=JJSTEP){\n",N);
+ sprintf(temp2->line,"}\n",N);
+ LINK2
+ ALLOC1
+ sprintf(temp1->line,"jt=min(jj+JJSTEP,%d);\n",N);
+ LINK1
+ break;
+ case 'i':
+ iunroll=counter;
+ counter=1;
+ iflag++; if(jflag) jiflag++;
+ if(dflag) precond=iistep%iunroll; else precond=N%iunroll;
+ if(precond&&(jiflag==0)){
+ fprintf(stderr,"unrolling factor for outer loop i\n");
+ fprintf(stderr," does not evenly divide matrix/step size!\n");
+ exit(1);
+ }
+ if(dflag&&(iunroll>1)&&(N%iistep)){
+ fprintf(stderr,"with unrolling of i, step size for tiled loop ii\n");
+ fprintf(stderr," does not evenly divide matrix size!\n");
+ exit(1);
+ }
+ processloop('i',dflag,iunroll,precond,junroll);
+ break;
+ case 'j':
+ junroll=counter;
+ counter=1;
+ jflag++; if(iflag) ijflag++;
+ if(eflag) precond=jjstep%junroll; else precond=N%junroll;
+ if(precond&&(ijflag==0)){
+ fprintf(stderr,"unrolling factor for outer loop j\n");
+ fprintf(stderr," does not evenly divide matrix/step size!\n");
+ exit(1);
+ }
+ if(eflag&&(junroll>1)&&(N%jjstep)){
+ fprintf(stderr,"with unrolling of j, step size for tiled loop jj\n");
+ fprintf(stderr," does not evenly divide matrix size!\n");
+ exit(1);
+ }
+ processloop('j',eflag,junroll,precond,iunroll);
+ break;
+ default: break;
+ }
+ }
+ processstmt();
+
+ tptype[ttp++]=c;
+
+ if((iflag==0)||(jflag==0)){
+ fprintf(stderr,
+ "one of the loops (i,j) was not specified!\n");
+ exit(1);
+ }
+
+ temp1=start;
+ while(temp1!=NULL){
+ printf("%s",temp1->line);
+ temp1=temp1->next;
+ }
+ printf("#include <stdio.h>\n");
+ printf("#include <sys/time.h>\n");
+ printf("#include <sys/resource.h>\n");
+ if(dflag|eflag) printf("#define min(a,b) ((a)<=(b)?(a):(b))\n");
+ printf("double second();\n");
+ printf("double t_start,t_end,t_total;\n");
+ printf("int times;\n");
+ printf("\ndouble b[%d][%d],dummy[10000],bt[%d][%d];\n\nmain(){\n"
+ ,N,N,N,N);
+ if(precond) printf(" int i,j,n;\n"); else printf(" int i,j;\n");
+ if(dflag) printf(" int ii,it;\n");
+ if(eflag) printf(" int jj,jt;\n");
+ printf("/* set coefficients so that result matrix should have \n");
+ printf(" * column entries equal to column index\n");
+ printf(" */\n");
+ printf(" for (i=0;i<%d;i++){\n",N);
+ printf(" for (j=0;j<%d;j++){\n",N);
+ printf(" b[i][j] = (double) i;\n");
+ printf(" }\n");
+ printf(" }\n");
+ printf("\n t_total=0.0;\n for(times=0;times<10;times++){\n\n",N);
+ printf("/* try to flush cache */\n");
+ printf(" for(i=0;i<10000;i++){\n",N);
+ printf(" dummy[i] = 0.0;\n");
+ printf(" }\n");
+ printf("%s",head->line);
+ temp1=head->next;
+ while(temp1!=NULL){
+ for(i=0;i<temp1->indentcnt;i++) printf(" ");
+ while((p0=strstr(temp1->line,"+0"))!=NULL){
+ *p0++=' '; *p0=' ';
+ }
+ printf("%s",temp1->line);
+ temp1=temp1->next;
+ }
+ printf("\n t_total+=t_end-t_start;\n }\n");
+ printf("/* check result */\n");
+ printf(" for (j=0;j<%d;j++){\n",N);
+ printf(" for (i=0;i<%d;i++){\n",N);
+ printf(" if (bt[i][j]!=((double)j)){\n");
+ printf(" fprintf(stderr,\"error in bt[%cd][%cd]",'%','%');
+ printf("\\n\",i,j);\n");
+ printf(" fprintf(stderr,\" for %s\\n\");\n",tptype);
+ printf(" exit(1);\n");
+ printf(" }\n");
+ printf(" }\n");
+ printf(" }\n");
+ tptype[ttp]='\0';
+ printf(" printf(\"%c10.2f secs\",t_total);\n",'%');
+ printf(" printf(\" for 10 runs of %s\\n\");\n",tptype);
+ printf("}\n");
+ printf("double second(){\n");
+ printf(" void getrusage();\n");
+ printf(" struct rusage ru;\n");
+ printf(" double t;\n");
+ printf(" getrusage(RUSAGE_SELF,&ru);\n");
+ printf(" t = ((double)ru.ru_utime.tv_sec) +\n");
+ printf(" ((double)ru.ru_utime.tv_usec)/1.0e6;\n");
+ printf(" return t;\n");
+ printf("}\n");
+
+}
+
+void processloop(index,flag,unroll,precond,unroll2)
+char index;
+int flag,unroll,precond,unroll2;
+{
+  /* Emit the loop(s) for one transpose index.
+   *   index   - 'i' or 'j', the loop variable to generate
+   *   flag    - nonzero when a tiling loop (d/e) encloses this index,
+   *             so bounds are ii..it / jj..jt instead of 0..N
+   *   unroll  - unrolling factor for this index
+   *   precond - leftover iteration count when unroll does not divide
+   *             the matrix/step size; emitted as a preconditioning loop
+   *   unroll2 - unrolling factor of the other index, used to emit the
+   *             partially-unrolled bodies inside the precondition loop
+   *
+   * BUG FIX (review): the first precondition branch tested
+   * `if(index='i')` -- an assignment, which always evaluates true and
+   * clobbers `index` to 'i' -- so a j-loop with unroll2==1 generated
+   * IISTEP preconditioning instead of JJSTEP.  Changed to `==`,
+   * matching the correct comparison used further down.  Also dropped
+   * the two excess `,N` arguments to sprintf(...,"}\n") (harmless but
+   * misleading: extra variadic arguments are evaluated and ignored).
+   */
+  char build[80],temp[40];
+  int n;
+  if(precond){
+    ALLOC1
+    sprintf(temp1->line,"/* preconditioning loop for unrolling factor */\n");
+    LINK1
+    if(unroll2==1){
+      build[0]='\0';
+      if(flag){
+        if(index=='i')
+          sprintf(temp,"n=IISTEP%c%d; ",'%',unroll);
+        else
+          sprintf(temp,"n=JJSTEP%c%d; ",'%',unroll);
+        strcat(build,temp);
+        sprintf(temp,"for(%c=%c%c;%c<%c%c+n;%c++) ",index,index,index,
+                index,index,index,index);
+        strcat(build,temp);
+      }else{
+        sprintf(temp,"n=%d%c%d; ",N,'%',unroll);
+        strcat(build,temp);
+        sprintf(temp,"for(%c=0;%c<n;%c++) ",index,index,index);
+        strcat(build,temp);
+      }
+      sprintf(temp,"bt[i][j]=b[j][i];\n");
+      strcat(build,temp);
+      ALLOC1
+      sprintf(temp1->line,"%s\n",build);
+      LINK1
+    }else{
+      if(flag){
+        ALLOC1
+        if(index=='i')
+          sprintf(temp1->line,"n=IISTEP%c%d;\n",'%',unroll);
+        else
+          sprintf(temp1->line,"n=JJSTEP%c%d;\n",'%',unroll);
+        LINK1
+        ALLOC1
+        sprintf(temp1->line,"for(%c=%c%c;%c<%c%c+n;%c++){\n",index,index,index,
+                index,index,index,index);
+        LINK1
+      }else{
+        ALLOC1
+        sprintf(temp1->line,"n=%d%c%d;\n",N,'%',unroll);
+        LINK1
+        ALLOC1
+        sprintf(temp1->line,"for(%c=0;%c<n;%c++){\n",index,index,index);
+        LINK1
+      }
+      /* partially-unrolled precondition bodies over the other index */
+      if(index=='i'){
+        for(n=0;n<unroll2;n++){
+          ALLOC1
+          sprintf(temp1->line," bt[i][j+%d]=b[j+%d][i];\n",n,n);
+          LINK1
+        }
+      }else{
+        for(n=0;n<unroll2;n++){
+          ALLOC1
+          sprintf(temp1->line," bt[i+%d][j]=b[j][i+%d];\n",n,n);
+          LINK1
+        }
+      }
+      ALLOC1
+      sprintf(temp1->line,"}\n");
+      LINK1
+    }
+    /* main loop resumes after the preconditioned remainder */
+    ALLOC2
+    if(flag){
+      sprintf(temp1->line,"for(%c=%c%c+n;%c<%ct;%c+=%d){\n",index,index,index,
+              index,index,index,unroll);
+    }else{
+      sprintf(temp1->line,"for(%c=n;%c<%d;%c+=%d){\n",index,index,N,index,
+              unroll);
+    }
+    sprintf(temp2->line,"}\n");
+    LINK2
+  }else{
+    ALLOC2
+    if(unroll==1){
+      if(flag){
+        sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c++){\n",index,index,index,
+                index,index,index);
+      }else{
+        sprintf(temp1->line,"for(%c=0;%c<%d;%c++){\n",index,index,N,index);
+      }
+    }else{
+      if(flag){
+        sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c+=%d){\n",index,index,index,
+                index,index,index,unroll);
+      }else{
+        sprintf(temp1->line,"for(%c=0;%c<%d;%c+=%d){\n",index,index,N,index,
+                unroll);
+      }
+    }
+    sprintf(temp2->line,"}\n");
+    LINK2
+  }
+}
+
+void processstmt()
+{
+  /* Emit the fully unrolled innermost transpose statements
+     bt[i+di][j+dj]=b[j+dj][i+di] for the iunroll x junroll tile
+     selected on the command line (the "+0" offsets are stripped
+     later when the generated program is printed). */
+  int i,j;
+  for(i=0;i<iunroll;i++){
+    for(j=0;j<junroll;j++){
+      ALLOC1
+      sprintf(temp1->line,"bt[i+%d][j+%d]=b[j+%d][i+%d];\n",i,j,j,i);
+      LINK1
+    }
+  }
+}
+--
+Mark Smotherman, Computer Science Dept., Clemson University, Clemson, SC
+
+=======================================================================
+From: has (h.genceli@bre.com)
+ Subject: transpose of a nxm matrix stored in a vector !!!
+ Newsgroups: sci.math.num-analysis
+ Date: 2000/07/25
+
+
+If I have a matrix nrows x ncols, I can store it in a vector.
+so A(i,j) is really a[i*ncols+j]. So really TRANS of A
+(say B) is really is also a vector B where
+
+0<=i b[j*nrows+i] <nrows, 0<=j<ncols
+b[j*nrows+i] = a[i*ncols+j].
+
+Fine but I want to use only one array a to do this transformation.
+
+i.e a[j*nrows+i] = a[i*ncols+j]. this will itself
+erase some elements so each time a swap is necessary in a loop.
+
+temp = a[j*nrows+i]
+a[j*nrows+i] = a[i*ncols+j]
+a[i*ncols+j] = temp
+
+but still this will lose some info as it is, so indexing
+should have more intelligence in it ???? anybody
+can give me a lead here, thanks.
+
+Has
+
+ From: wei-choon ng (wng@ux8.cso.uiuc.edu)
+ Subject: Re: transpose of a nxm matrix stored in a vector !!!
+ Newsgroups: sci.math.num-analysis
+ Date: 2000/07/25
+
+
+has <h.genceli@bre.com> wrote:
+> If I have a matrix nrows x ncols, I can store it in a vector.
+> so A(i,j) is really a[i*ncols+j]. So really TRANS of A
+> (say B) is really is also a vector B where
+
+[snip]
+
+Hey, if you just want to do a transpose-matrix vector multiply, there is
+no need to explicitly store the transpose matrix in another array and
+doubling the storage!
+
+W.C.
+--
+
+ From: Robin Becker (robin@jessikat.fsnet.co.uk)
+ Subject: Re: transpose of a nxm matrix stored in a vector !!!
+ Newsgroups: sci.math.num-analysis
+ Date: 2000/07/25
+
+
+In article <snr532fo3j1180@corp.supernews.com>, has <h.genceli@bre.com>
+writes
+>If I have a matrix nrows x ncols, I can store it in a vector.
+>so A(i,j) is really a[i*ncols+j]. So really TRANS of A
+>(say B) is really is also a vector B where
+>
+>0<=i b[j*nrows+i] <nrows, 0<=j<ncols
+>b[j*nrows+i] = a[i*ncols+j].
+>
+>Fine but I want to use only one array a to do this transformation.
+>
+>i.e a[j*nrows+i] = a[i*ncols+j]. this will itself
+>erase some elements so each time a swap is necessary in a loop.
+>
+>temp = a[j*nrows+i]
+>a[j*nrows+i] = a[i*ncols+j]
+>a[i*ncols+j] = temp
+>
+>but still this will lose some info as it is, so indexing
+>should have more intelligence in it ???? anybody
+>can give me a lead here, thanks.
+>
+>Has
+>
+>
+>
+
+void dmx_transpose(unsigned n, unsigned m, double* a, double* b)
+{
+ unsigned size = m*n;
+ if(b!=a){
+ real *bmn, *aij, *anm;
+ bmn = b + size; /*b+n*m*/
+ anm = a + size;
+ while(b<bmn) for(aij=a++;aij<anm; aij+=n ) *b++ = *aij;
+ }
+ else if(size>3){
+ unsigned i,row,column,current;
+ for(i=1, size -= 2;i<size;i++){
+ current = i;
+ do {
+ /*current = row+n*column*/
+ column = current/m;
+ row = current%m;
+ current = n*row + column;
+ } while(current < i);
+
+ if (current >i) {
+ real temp = a[i];
+ a[i] = a[current];
+ a[current] = temp;
+ }
+ }
+ }
+}
+--
+Robin Becker
+
+ From: E. Robert Tisdale (edwin@netwood.net)
+ Subject: Re: transpose of a nxm matrix stored in a vector !!!
+ Newsgroups: sci.math.num-analysis
+ Date: 2000/07/25
+
+
+Take a look at
+The C++ Scalar, Vector, Matrix and Tensor class library
+
+ http://www.netwood.net/~edwin/svmt/
+
+<Type><System>SubVector&
+ <Type><System>SubVector::transpose(Extent p, Extent q) {
+ <Type><System>SubVector&
+ v = *this;
+ if (1 < p && 1 < q) {
+ // A vector v of extent n = qp is viewed as a q by p matrix U and
+ // a p by q matrix V where U_{ij} = v_{p*i+j} and V_{ij} = v_{q*i+j}.
+ // The vector v is modified in-place so that V is the transpose of U.
+ // The algorithm searches for every sequence k_s of S indices
+ // such that a circular shift of elements v_{k_s} <-- v_{k_{s+1}}
+ // and v_{k_{S-1}} <-- v_{k_0} effects an in-place transpose.
+ Extent n = q*p;
+ Extent m = 0; // count up to n-2
+ Offset l = 0; // 1 <= l <= n-2
+ while (++l < n-1 && m < n-2) {
+ Offset k = l;
+ Offset j = k;
+ while (l < (k = (j%p)*q + j/p)) { // Search backward for k < l.
+ j = k;
+ }
+ // If a sequence of indices beginning with l has any index k < l,
+ // it has already been transposed. The sequence length S = 1
+ // and diagonal element v_k is its own transpose if k = j.
+ // Skip every index sequence that has already been transposed.
+ if (k == l) { // a new sequence
+ if (k < j) { // with 1 < S
+ TYPE x = v[k]; // save v_{k_0}
+ do {
+ v[k] = v[j]; // v_{k_{s}} <-- v_{k_{s+1}}
+ k = j;
+ ++m;
+ } while (l < (j = (k%q)*p + k/q));
+ v[k] = x; // v_{k_{S-1}} <-- v_{k_0}
+ }
+ ++m;
+ }
+ }
+ } return v;
+ }
+
+
+
+<Type><System>SubVector&
+
+Read the rest of this message... (50 more lines)
+
+ From: Victor Eijkhout (eijkhout@disco.cs.utk.edu)
+ Subject: Re: transpose of a nxm matrix stored in a vector !!!
+ Newsgroups: sci.math.num-analysis
+ Date: 2000/07/25
+
+
+"Alan Miller" <amiller @ vic.bigpond.net.au> writes:
+
+> The attached routine does an in situ transpose.
+> begin 666 Dtip.f90
+> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM
+
+Hm. F90? You're not silently allocating a temporary I hope?
+
+(Why did you have to encode this? Now I have to save, this decode, ...
+and all for plain ascii?)
+
+--
+Victor Eijkhout
+"When I was coming up, [..] we knew exactly who the they were. It was us
+versus them, and it was clear who the them was were. Today, we are not
+so sure who the they are, but we know they're there." [G.W. Bush]
+
+ From: Alan Miller (amiller_@_vic.bigpond.net.au)
+ Subject: Re: transpose of a nxm matrix stored in a vector !!!
+ Newsgroups: sci.math.num-analysis
+ Date: 2000/07/25
+
+
+Victor Eijkhout wrote in message ...
+>"Alan Miller" <amiller @ vic.bigpond.net.au> writes:
+>
+>> The attached routine does an in situ transpose.
+>> begin 666 Dtip.f90
+>> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM
+>
+>Hm. F90? You're not silently allocating a temporary I hope?
+>
+>(Why did you have to encode this? Now I have to save, this decode, ...
+>and all for plain ascii?)
+>
+
+I know the problem.
+I sometimes use a Unix system, and have to use decode64 to read
+attachments. On the other hand, Windows wraps lines around,
+formats then and generally makes the code unreadable.
+
+The straight code for dtip (double transpose in place) is attached
+this time.
+
+>--
+>Victor Eijkhout
+
+
+--
+Alan Miller, Retired Scientist (Statistician)
+CSIRO Mathematical & Information Sciences
+Alan.Miller -at- vic.cmis.csiro.au
+http://www.ozemail.com.au/~milleraj
+http://users.bigpond.net.au/amiller/
+
+
+=================================================================
+
+From: Darran Edmundson (dedmunds@sfu.ca)
+ Subject: array reordering algorithm?
+ Newsgroups: sci.math.num-analysis
+ Date: 1995/04/30
+
+
+A code I've written refers to a complex array as two separate real arrays.
+However, I have a canned subroutine which expects a single array where the
+real and imaginary values alternate. Essentially I have a case of mismatched
+data structures, yet for reasons that I'd rather not go into, I'm stuck with them.
+
+Assuming that the two real arrays A and B are sequential in memory, and
+that the single array of alternating real/imaginary values C shares the same
+space, what I need is a porting subroutine that remaps the data from one format
+to the other - using as little space as possible.
+
+I think of the problem as follows. Imagine an array of dimension 10 containing
+the values 1,3,5,7,9,2,4,6,8,10 in this order.
+
+ A(1) / 1 \ C(1)
+ A(2) | 3 | C(2)
+ A(3) | 5 | C(3)
+ A(4) | 7 | C(4)
+ A(5) \ 9 | C(5)
+ |
+ B(1) / 2 | C(6)
+ B(2) | 4 | C(7)
+ B(3) | 6 | C(8)
+ B(4) | 8 | C(9)
+ B(5) \ 10 / C(10)
+
+Given that I know this initial pattern, I want to sort the array C in-place *without
+making comparisons*. That is, the algorithm can only depend on the initial
+knowledge of the pattern. Do you see what a sort is going to do? It will
+make the A and B arrays alternate, i.e. C(1)=A(1), C(2)=B(1), C(3)=A(2),
+C(4)=B(2), etc. It's not a real sort though because I can't actually refer to the
+values above (i.e. no comparisons) because A and B will be holding real data,
+not this contrived pattern. The pattern above exists though - it's the
+natural ordering in memory of A and B.
+
+Either pair swapping only or a small amount of workspace can be used. The
+in-place is important - imagine scaling this problem up to an
+array of 32 or 64 million double precision values and you can easily see how
+duplicating the array is not a feasible solution.
+
+Any ideas? I've been stumped on this for a day and a half now.
+
+Darran Edmundson
+dedmunds@sfu.ca
+
+ From: Roger Critchlow (rec@elf115.elf.org)
+ Subject: Re: array reordering algorithm?
+ Newsgroups: sci.math.num-analysis
+ Date: 1995/04/30
+
+
+ Any ideas? I've been stumped on this for a day and a half now.
+
+Here's some code for in situ permutations of arrays that I wrote
+a few years ago. It all started from the in situ transposition
+algorithms in the Collected Algorithms of the ACM, the references
+for which always get lost during the decryption from fortran.
+
+This is the minimum space algorithm. All you need to supply is
+a function which computes the new order array index from the old
+order array index.
+
+If you can spare n*m bits to record the indexes of elements which
+have been permuted, then you can speed things up.
+
+-- rec --
+
+------------------------------------------------------------------------
+/*
+** Arbitrary in situ permutations of an m by n array of base type TYPE.
+** Copyright 1995 by Roger E Critchlow Jr, rec@elf.org, San Francisco, CA.
+** Fair use permitted, caveat emptor.
+*/
+typedef int TYPE;
+
+int transposition(int ij, int m, int n) /* transposition about diagonal from upper left to lower right */
+{ return ((ij%m)*n+ (ij/m)); }
+
+int countertrans(int ij, int m, int n) /* transposition about diagonal from upper right to lower left */
+{ return ((m-1-(ij%m))*n+ (n-1-(ij/m))); }
+
+int rotate90cw(int ij, int m, int n) /* 90 degree clockwise rotation */
+{ return ((m-1-(ij%m))*n+ (ij/m)); }
+
+int rotate90ccw(int ij, int m, int n) /* 90 degree counter clockwise rotation */
+{ return ((ij%m)*n+ (n-1-(ij/m))); }
+
+int rotate180(int ij, int m, int n) /* 180 degree rotation */
+{ return ((m-1-(ij/n))*n+ (n-1-(ij%n))); }
+
+int reflecth(int ij, int m, int n) /* reflection across horizontal plane */
+{ return ((m-1-(ij/n))*n+ (ij%n)); }
+
+int reflectv(int ij, int m, int n) /* reflection across vertical plane */
+{ return ((ij/n)*n+ (n-1-(ij%n))); }
+
+int in_situ_permutation(TYPE a[], int m, int n, int (*origination)(int ij, int m, int n))
+{
+ int ij, oij, dij, n_to_do;
+ TYPE b;
+ n_to_do = m*n;
+ for (ij = 0; ij < m*n && n_to_do > 0; ij += 1) {
+ /* Test for previously permuted */
+ for (oij = origination(ij,m,n); oij > ij; oij = origination(oij,m,n))
+ ;
+ if (oij < ij)
+ continue;
+ /* Chase the cycle */
+ dij = ij;
+ b = a[ij];
+ for (oij = origination(dij,m,n); oij != ij; oij = origination(dij,m,n)) {
+ a[dij] = a[oij];
+ dij = oij;
+ n_to_do -= 1;
+ }
+ a[dij] = b;
+ n_to_do -= 1;
+ } return 0;
+}
+
+#define TESTING 1
+#if TESTING
+
+/* fill a matrix with sequential numbers, row major ordering */
+void fill_matrix_rows(a, m, n) TYPE *a; int m, n;
+{
+ int i, j;
+ for (i = 0; i < m; i += 1)
+ for (j = 0; j < n; j += 1)
+ a[i*n+j] = i*n+j;
+}
+
+/* fill a matrix with sequential numbers, column major ordering */
+void fill_matrix_cols(a, m, n) TYPE *a; int m, n;
+{
+ int i, j;
+ for (i = 0; i < m; i += 1)
+ for (j = 0; j < n; j += 1)
+ a[i*n+j] = j*m+i;
+}
+
+/* test a matrix for sequential numbers, row major ordering */
+int test_matrix_rows(a, m, n) TYPE *a; int m, n;
+{
+ int i, j, o;
+ for (o = i = 0; i < m; i += 1)
+ for (j = 0; j < n; j += 1)
+ o += a[i*n+j] != i*n+j;
+ return o;
+}
+
+/* test a matrix for sequential numbers, column major ordering */
+int test_matrix_cols(a, m, n) TYPE *a; int m, n;
+{
+ int i, j, o;
+ for (o = i = 0; i < m; i += 1)
+ for (j = 0; j < n; j += 1)
+ o += a[i*n+j] != j*m+i;
+ return o;
+}
+
+/* print a matrix */
+void print_matrix(a, m, n) TYPE *a; int m, n;
+{
+ char *format;
+ int i, j;
+ if (m*n < 10) format = "%2d";
+ if (m*n < 100) format = "%3d";
+ if (m*n < 1000) format = "%4d";
+ if (m*n < 10000) format = "%5d";
+ for (i = 0; i < m; i += 1) {
+ for (j = 0; j < n; j += 1)
+ printf(format, a[i*n+j]);
+ printf("\n");
+ }
+}
+
+#if TEST_TRANSPOSE
+#define MAXSIZE 1000
+
+main()
+{
+ int i, j, m, n, o;
+ TYPE a[MAXSIZE];
+ for (m = 1; m < sizeof(a)/sizeof(a[0]); m += 1)
+ for (n = 1; m*n < sizeof(a)/sizeof(a[0]); n += 1) {
+ fill_matrix_rows(a, m, n); /* {0 1} {2 3} */
+ if (o = transpose(a, m, n))
+ printf(">> transpose returned %d for a[%d][%d], row major\n", o, m, n);
+ if ((o = test_matrix_cols(a, n, m)) != 0) /* {0 2} {1 3} */
+ printf(">> transpose made %d mistakes for a[%d][%d], row major\n", o, m, n);
+ /* column major */
+ fill_matrix_rows(a, m, n);
+ if (o = transpose(a, m, n))
+ printf(">> transpose returned %d for a[%d][%d], column major\n", o, m, n);
+ if ((o = test_matrix_cols(a, n, m)) != 0)
+ printf(">> transpose made %d mistakes for a[%d][%d], column major\n", o, m, n);
+ } return 0;
+}
+#endif /* TEST_TRANSPOSE */
+
+
+#define TEST_DISPLAY 1
+#if TEST_DISPLAY
+main(argc, argv) int argc; char *argv[];
+{
+ TYPE *a;
+ int m = 5, n = 5;
+ extern void *malloc();
+ if (argc > 1) {
+ m = atoi(argv[1]);
+ if (argc > 2)
+ n = atoi(argv[2]);
+ }
+ a = malloc(m*n*sizeof(TYPE));
+
+ printf("matrix\n");
+ fill_matrix_rows(a, m, n);
+ print_matrix(a, m, n);
+ printf("transposition\n");
+ in_situ_permutation(a, m, n, transposition);
+ print_matrix(a, n, m);
+
+ printf("counter transposition\n");
+ fill_matrix_rows(a, m, n);
+ in_situ_permutation(a, m, n, countertrans);
+ print_matrix(a, n, m);
+
+ printf("rotate 90 degrees clockwise\n");
+ fill_matrix_rows(a, m, n);
+ in_situ_permutation(a, m, n, rotate90cw);
+ print_matrix(a, n, m);
+
+ printf("rotate 90 degrees counterclockwise\n");
+ fill_matrix_rows(a, m, n);
+ in_situ_permutation(a, m, n, rotate90ccw);
+ print_matrix(a, n, m);
+
+ printf("rotate 180 degrees\n");
+ fill_matrix_rows(a, m, n);
+ in_situ_permutation(a, m, n, rotate180);
+ print_matrix(a, m, n);
+
+ printf("reflect across horizontal\n");
+ fill_matrix_rows(a, m, n);
+ in_situ_permutation(a, m, n, reflecth);
+ print_matrix(a, m, n);
+
+ printf("reflect across vertical\n");
+ fill_matrix_rows(a, m, n);
+ in_situ_permutation(a, m, n, reflectv);
+ print_matrix(a, m, n);
+
+ return 0;
+}
+
+#endif
+#endif
+
diff --git a/numpy/f2py/doc/multiarrays.txt b/numpy/f2py/doc/multiarrays.txt
new file mode 100644
index 000000000..704208976
--- /dev/null
+++ b/numpy/f2py/doc/multiarrays.txt
@@ -0,0 +1,120 @@
+From pearu@ioc.ee Thu Dec 30 09:58:01 1999
+Date: Fri, 26 Nov 1999 12:02:42 +0200 (EET)
+From: Pearu Peterson <pearu@ioc.ee>
+To: Users of f2py2e -- Curtis Jensen <cjensen@be-research.ucsd.edu>,
+ Vladimir Janku <vjanku@kvet.sk>,
+ Travis Oliphant <Oliphant.Travis@mayo.edu>
+Subject: Multidimensional arrays in f2py2e
+
+
+Hi!
+
+Below I will describe how f2py2e wraps Fortran multidimensional arrays as
+it constantly causes confusion. As for example, consider Fortran code
+
+ subroutine foo(l,m,n,a)
+ integer l,m,n
+ real*8 a(l,m,n)
+ ..
+ end
+Running f2py2e with -h flag, it generates the following signature
+
+subroutine foo(l,m,n,a)
+ integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2)
+ integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1)
+ integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0)
+ real*8 dimension(l,m,n),check(rank(a)==3) :: a
+end subroutine foo
+
+where parameters l,m,n are considered optional and they are initialized in
+Python C/API code using the array a. Note that a can be also a proper
+list, that is, asarray(a) should result in a rank-3 array. But then there
+is an automatic restriction that elements of a (in Python) are not
+changeable (in place) even if Fortran subroutine changes the array a (in
+C,Fortran).
+
+Hint: you can attribute the array a with 'intent(out)' which causes foo to
+return the array a (in Python) if you are too lazy to define a=asarray(a)
+before the call to foo (in Python).
+
+Calling f2py2e without the switch -h, a Python C/API module will be
+generated. After compiling it and importing it to Python
+>>> print foo.__doc__
+shows
+None = foo(a,l=shape(a,2),m=shape(a,1),n=shape(a,0))
+
+You will notice that f2py2e has changed the order of arguments putting the
+optional ones at the end of the argument list.
+Now, you have to be careful when specifying the parameters l,m,n (though
+situations where you need this should be rare). A proper definition
+of the array a should be, say
+
+ a = zeros(n,m,l)
+
+Note that the dimensions l,m,n are in reverse, that is, the array a should
+be transposed when feeding it to the wrapper.
+
+Hint (and a performance hit): To be always consistent with fortran
+arrays, you can define, for example
+ a = zeros(l,m,n)
+and call from Python
+ foo(transpose(a),l,m,n)
+which is equivalent with the given Fortran call
+ call foo(l,m,n,a)
+
+Another hint (not recommended, though): If you don't like optional
+arguments feature at all and want to be strictly consistent with Fortran
+signature, that is, you want to call foo from Python as
+ foo(l,m,n,a)
+then you should edit the signature to
+subroutine foo(l,m,n,a)
+ integer :: l
+ integer :: m
+ integer :: n
+ real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), &
+ check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a
+end
+Important! Note that now the array a should depend on l,m,n
+so that the checks can be performed in the proper order.
+(you cannot check, say, shape(a,2)==l before initializing a or l)
+(There are other ways to edit the signature in order to get the same
+effect but they are not so safe and I will not discuss about them here).
+
+Hint: If the array a should be a work array (as used frequently in
+Fortran) and you are too lazy (it's good laziness;) to provide it (in Python)
+then you can define it as optional by editing the signature:
+subroutine foo(l,m,n,a)
+ integer :: l
+ integer :: m
+ integer :: n
+ real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), &
+ check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a
+ optional a
+end
+Note again that the array a must depend on l,m,n. Then the array a will be
+allocated in the Python C/API module. Note also that
+>>> print foo.__doc__
+shows then
+None = foo(l,m,n,a=)
+Performance hint: If you call the given foo lots of times from Python then
+you don't want to allocate/deallocate the memory in each call. So, it is
+then recommended to define a temporary array in Python, for instance
+>>> tmp = zeros(n,m,l)
+>>> for i in ...:
+>>> foo(l,m,n,a=tmp)
+
+Important! It is not good at all to define
+ >>> tmp = transpose(zeros(l,m,n))
+because tmp will be then a noncontiguous array and there will be a
+huge performance hit as in Python C/API a new array will be allocated and
+also a copying of arrays will be performed elementwise!
+But
+ >>> tmp = asarray(transpose(zeros(l,m,n)))
+is still ok.
+
+I hope that the above answers lots of your (possible) questions about
+wrapping Fortran multidimensional arrays with f2py2e.
+
+Regards,
+ Pearu
+
diff --git a/numpy/f2py/doc/notes.tex b/numpy/f2py/doc/notes.tex
new file mode 100644
index 000000000..2746b049d
--- /dev/null
+++ b/numpy/f2py/doc/notes.tex
@@ -0,0 +1,310 @@
+
+\section{Calling wrapper functions from Python}
+\label{sec:notes}
+
+\subsection{Scalar arguments}
+\label{sec:scalars}
+
+In general, for scalar argument you can pass in in
+addition to ordinary Python scalars (like integers, floats, complex
+values) also arbitrary sequence objects (lists, arrays, strings) ---
+then the first element of a sequence is passed in to the Fortran routine.
+
+It is recommended that you always pass in scalars of required type. This
+ensures the correctness as no type-casting is needed.
+However, no exception is raised if type-casting would produce
+inaccurate or incorrect results! For example, in place of an expected
+complex value you can give an integer, or vice-versa (in the latter case only
+a rounded real part of the complex value will be used).
+
+If the argument is \texttt{intent(inout)} then Fortran routine can change the
+value ``in place'' only if you pass in a sequence object, for
+instance, rank-0 array. Also make sure that the type of an array is of
+correct type. Otherwise type-casting will be performed and you may
+get inaccurate or incorrect results. The following example illustrates this
+\begin{verbatim}
+>>> a = array(0)
+>>> calculate_pi(a)
+>>> print a
+3
+\end{verbatim}
+
+If you pass in an ordinary Python scalar in place of
+\texttt{intent(inout)} variable, it will be used as an input argument
+since
+Python
+scalars cannot be changed ``in place'' (all Python scalars
+are immutable objects).
+
+\subsection{String arguments}
+\label{sec:strings}
+
+You can pass in strings of arbitrary length. If the length is greater than
+required, only a required part of the string is used. If the length
+is smaller than required, additional memory is allocated and filled
+with `\texttt{\bs0}'s.
+
+Because Python strings are immutable, \texttt{intent(inout)} argument
+expects an array version of a string --- an array of chars:
+\texttt{array("<string>")}.
+Otherwise, the change ``in place'' has no effect.
+
+
+\subsection{Array arguments}
+\label{sec:arrays}
+
+If the size of an array is relatively large, it is \emph{highly
+ recommended} that you pass in arrays of required type. Otherwise,
+type-casting will be performed which includes the creation of new
+arrays and their copying. If the argument is also
+\texttt{intent(inout)}, the wasted time is doubled. So, pass in arrays
+of required type!
+
+On the other hand, there are situations where it is perfectly all
+right to ignore this recommendation: if the size of an array is
+relatively small or the actual time spent in Fortran routine takes
+much longer than copying an array. Anyway, if you want to optimize
+your Python code, start using arrays of required types.
+
+Another source of performance hit is when you use non-contiguous
+arrays. The performance hit will be exactly the same as when using
+incorrect array types. This is because a contiguous copy is created
+to be passed in to the Fortran routine.
+
+\fpy provides a feature such that the ranks of array arguments need
+not match --- only the correct total size matters. For example, if
+the wrapper function expects a rank-1 array \texttt{array([...])},
+then it is correct to pass in rank-2 (or higher) arrays
+\texttt{array([[...],...,[...]])} assuming that the sizes will match.
+This is especially useful when the arrays should contain only one
+element (size is 1). Then you can pass in arrays \texttt{array(0)},
+\texttt{array([0])}, \texttt{array([[0]])}, etc and all cases are
+handled correctly. In this case it is correct to pass in a Python
+scalar in place of an array (but then ``change in place'' is ignored,
+of course).
+
+\subsubsection{Multidimensional arrays}
+
+If you are using rank-2 or higher rank arrays, you must always
+remember that indexing in Fortran starts from the lowest dimension
+while in Python (and in C) the indexing starts from the highest
+dimension (though some compilers have switches to change this). As a
+result, if you pass in a 2-dimensional array then the Fortran routine
+sees it as the transposed version of the array (in multi-dimensional
+case the indexes are reversed).
+
+You must take this matter into account also when modifying the
+signature file and interpreting the generated Python signatures:
+
+\begin{itemize}
+\item First, when initializing an array using \texttt{init\_expr}, the index
+vector \texttt{\_i[]} changes accordingly to Fortran convention.
+\item Second, the result of CPP-macro \texttt{shape(<array>,0)}
+ corresponds to the last dimension of the Fortran array, etc.
+\end{itemize}
+Let me illustrate this with the following example:\\
+\begin{verbatim}
+! Fortran file: arr.f
+ subroutine arr(l,m,n,a)
+ integer l,m,n
+ real*8 a(l,m,n)
+ ...
+ end
+\end{verbatim}
+\fpy will generate the following signature file:\\
+\begin{verbatim}
+!%f90
+! Signature file: arr.f90
+python module arr ! in
+ interface ! in :arr
+ subroutine arr(l,m,n,a) ! in :arr:arr.f
+ integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2)
+ integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1)
+ integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0)
+ real*8 dimension(l,m,n) :: a
+ end subroutine arr
+ end interface
+end python module arr
+\end{verbatim}
+and the following wrapper function will be produced
+\begin{verbatim}
+None = arr(a,l=shape(a,2),m=shape(a,1),n=shape(a,0))
+\end{verbatim}
+
+In general, I would suggest not to specify the given optional
+variables \texttt{l,m,n} when calling the wrapper function --- let the
+interface find the values of the variables \texttt{l,m,n}. But there
+are occasions when you need to specify the dimensions in Python.
+
+So, in Python a proper way to create an array from the given
+dimensions is
+\begin{verbatim}
+>>> a = zeros(n,m,l,'d')
+\end{verbatim}
+(note that the dimensions are reversed and correct type is specified),
+and then a complete call to \texttt{arr} is
+\begin{verbatim}
+>>> arr(a,l,m,n)
+\end{verbatim}
+
+From the performance point of view, always be consistent with Fortran
+indexing convention, that is, use transposed arrays. But if you do the
+following
+\begin{verbatim}
+>>> a = transpose(zeros(l,m,n,'d'))
+>>> arr(a)
+\end{verbatim}
+then you will get a performance hit! The reason is that here the
+transposition is not actually performed. Instead, the array \texttt{a}
+will be non-contiguous which means that before calling a Fortran
+routine, internally a contiguous array is created which
+includes memory allocation and copying. In addition, if
+the argument array is also \texttt{intent(inout)}, the results are
+copied back to the initial array which doubles the
+performance hit!
+
+So, to improve the performance: always pass in
+arrays that are contiguous.
+
+\subsubsection{Work arrays}
+
+Often Fortran routines use the so-called work arrays. The
+corresponding arguments can be declared as optional arguments, but be
+sure that all dimensions are specified (bounded) and defined before
+the initialization (dependence relations).
+
+On the other hand, if you call the Fortran routine many times then you
+don't want to allocate/deallocate the memory of the work arrays on
+every call. In this case it is recommended that you create temporary
+arrays with proper sizes in Python and use them as work arrays. But be
+careful when specifying the required type and be sure that the
+temporary arrays are contiguous. Otherwise the performance hit would
+be even harder than the hit when not using the temporary arrays from
+Python!
+
+
+
+\subsection{Call-back arguments}
+\label{sec:cbargs}
+
+\fpy builds a very flexible call-back mechanisms for call-back
+arguments. If the wrapper function expects a call-back function \texttt{fun}
+with the following Python signature to be passed in
+\begin{verbatim}
+def fun(a_1,...,a_n):
+ ...
+ return x_1,...,x_k
+\end{verbatim}
+but the user passes in a function \texttt{gun} with the signature
+\begin{verbatim}
+def gun(b_1,...,b_m):
+ ...
+ return y_1,...,y_l
+\end{verbatim}
+and the following extra arguments (specified as additional optional
+argument for the wrapper function):
+\begin{verbatim}
+fun_extra_args = (e_1,...,e_p)
+\end{verbatim}
+then the actual call-back is constructed accordingly to the following rules:
+\begin{itemize}
+\item if \texttt{p==0} then \texttt{gun(a\_1,...,a\_q)}, where
+ \texttt{q=min(m,n)};
+\item if \texttt{n+p<=m} then \texttt{gun(a\_1,...,a\_n,e\_1,...,e\_p)};
+\item if \texttt{p<=m<n+p} then \texttt{gun(a\_1,...,a\_q,e\_1,...,e\_p)},
+ where \texttt{q=m-p};
+\item if \texttt{p>m} then \texttt{gun(e\_1,...,e\_m)};
+\item if \texttt{n+p} is less than the number of required arguments
+ of the function \texttt{gun}, an exception is raised.
+\end{itemize}
+
+A call-back function \texttt{gun} may return any number of objects as a tuple:
+if \texttt{k<l}, then objects \texttt{y\_k+1,...,y\_l} are ignored;
+if \texttt{k>l}, then only objects \texttt{x\_1,...,x\_l} are set.
+
+
+\subsection{Obtaining information on wrapper functions}
+\label{sec:info}
+
+From the previous sections we learned that it is useful for the
+performance to pass in arguments of expected type, if possible. To
+know what are the expected types, \fpy generates a complete
+documentation strings for all wrapper functions. You can read them
+from Python by printing out \texttt{\_\_doc\_\_} attributes of the
+wrapper functions. For the example in Sec.~\ref{sec:intro}:
+\begin{verbatim}
+>>> print foobar.foo.__doc__
+Function signature:
+ foo(a)
+Required arguments:
+ a : in/output rank-0 array(int,'i')
+>>> print foobar.bar.__doc__
+Function signature:
+ bar = bar(a,b)
+Required arguments:
+ a : input int
+ b : input int
+Return objects:
+ bar : int
+\end{verbatim}
+
+In addition, \fpy generates a LaTeX document
+(\texttt{<modulename>module.tex}) containing a bit more information on
+the wrapper functions. See for example Appendix that contains a result
+of the documentation generation for the example module
+\texttt{foobar}. Here the file \texttt{foobar-smart.f90} (modified
+version of \texttt{foobar.f90}) is used --- it contains
+\texttt{note(<LaTeX text>)} attributes for specifying some additional
+information.
+
+\subsection{Wrappers for common blocks}
+\label{sec:wrapcomblock}
+
+[See examples \texttt{test-site/e/runme*}]
+
+What follows is obsolete for \fpy versions higher than 2.264.
+
+\fpy generates wrapper functions for common blocks. For every common
+block with a name \texttt{<commonname>} a function
+\texttt{get\_<commonname>()} is constructed that takes no arguments
+and returns a dictionary. The dictionary represents maps between the
+names of common block fields and the arrays containing the common
+block fields (multi-dimensional arrays are transposed). So, in order
+to access to the common block fields, you must first obtain the
+references
+\begin{verbatim}
+commonblock = get_<commonname>()
+\end{verbatim}
+and then the fields are available through the arrays
+\texttt{commonblock["<fieldname>"]}.
+To change the values of common block fields, you can use for scalars
+\begin{verbatim}
+commonblock["<fieldname>"][0] = <new value>
+\end{verbatim}
+and for arrays
+\begin{verbatim}
+commonblock["<fieldname>"][:] = <new array>
+\end{verbatim}
+for example.
+
+For more information on the particular common block wrapping, see
+\texttt{get\_<commonname>.\_\_doc\_\_}.
+
+\subsection{Wrappers for F90/95 module data and routines}
+\label{sec:wrapf90modules}
+
+[See example \texttt{test-site/mod/runme\_mod}]
+
+\subsection{Examples}
+\label{sec:examples}
+
+Examples on various aspects of wrapping Fortran routines to Python can
+be found in directories \texttt{test-site/d/} and
+\texttt{test-site/e/}: study the shell scripts \texttt{runme\_*}. See
+also files in \texttt{doc/ex1/}.
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
diff --git a/numpy/f2py/doc/oldnews.html b/numpy/f2py/doc/oldnews.html
new file mode 100644
index 000000000..914265bbe
--- /dev/null
+++ b/numpy/f2py/doc/oldnews.html
@@ -0,0 +1,121 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
+<HTML>
+<HEAD>
+<META name="Author" content="Pearu Peterson">
+<!-- You may add here some keywords (comma separated list) -->
+<META name="Keywords" content="fortran,python,interface,f2py,f2py2e,wrapper,fpig">
+<TITLE>F2PY - Fortran to Python Interface Generator</TITLE>
+<LINK rel="stylesheet" type="text/css" href="/styles/userstyle.css">
+</HEAD>
+
+<body>
+<h2><a href="http://cens.ioc.ee/projects/f2py2e">F2PY</a> old news.</h2>
+
+<dl>
+ <dt> February 23, 2002
+ <dd> Fixed a bug of incorrect shapes of multi-dimensional arrays
+ when returning from Fortran routine (thanks to Eric for pointing
+ this out).
+ <code>F2PY_REPORT_ATEXIT</code> is disabled by default under Win32.
+ <dt> February 14, 2002
+ <dd> Introduced <code>callprotoargument</code> statement so that
+ proper prototypes can be specified (this fixes SEGFAULTs when
+ wrapping C functions with <code>f2py</code>, see <a
+ href="NEWS.txt">NEWS.txt</a> for more details). Updated for the
+    latest <code>scipy_distutils</code>. Fixed a few bugs.
+ <dt> February 3, 2002
+ <dd> Introduced <code>intent(overwrite),intent(out=name)</code>
+ attributes, <code>callstatement C-expr;</code> statement, and
+ reviewed reference counting in callback mechanism. Fixed bugs.
+ <dt> January 18, 2002
+ <dd> Introduced extra keyword argument <code>copy_#varname#=1</code>
+ for <code>intent(copy)</code> variables,
+ <code>-DF2PY_REPORT_ATEXIT</code> for reporting <code>f2py</code>
+ performance,
+ <code>has_column_major_storage</code> member function for generated
+ modules, and <a href="http://dmalloc.com/">dmalloc</a> support.
+ <dt> January 16, 2002
+  <dd> BREAKING NEWS! Solved the long-standing dilemma of wrapping
+ multi-dimensional arrays where different
+ storage orders in C and Fortran come into account. From now on
+    this difference is dealt with automatically by the f2py generated
+ module and in a very efficient way. For example, the corresponding
+ element A(i,j) of a Fortran array can be accessed in Python as
+ A[i,j].
+ <dt> January 13, 2002
+ <dd> Fifth Public Release is coming soon..., a snapshot is available
+ for download, now with updates.
+ <dt> December 17, 2001
+ <dd> <a href="Release-4.x.txt">Fourth Public Release</a>: Win32 support.
+ <dd> Making <code>f2py2e</code> a module. Currently it has only one
+ member function <code>run_main(comline_list)</code>.
+ <dd> Removed command line arguments <code>-fix,-f90,-f77</code>
+ and introduced many new ones. See <a href="NEWS.txt">NEWS.txt</a>.
+ <dd> <code>intent(..)</code> statement with empty name list defines
+ default <code>intent(..)</code> attribute for all routine arguments.
+ <dd> Refinements in Win32 support. Eric Jones has provided a f2py
+ HOWTO for Windows users. See <a href="win32_notes.txt">win32_notes.txt</a>.
+  <dd> Major rewrite of the code generator to achieve
+ a higher quality of generated C/API modules (-Wall messages are
+ considerably reduced, especially for callback functions).
+ <dd> Many bugs were fixed.
+ <dt> December 12, 2001
+ <dd> Win32 support (thanks to Eric Jones and Tiffany Kamm). Minor
+ cleanups and fixes.
+ <dt> December 4, 2001
+ <dd> <a href="Release-3.x.txt">Third Public Release</a>: <code>f2py</code> supports <code>distutils</code>. It can be
+    installed with one command and it generates <code>setup_modulename.py</code>
+ to be used for building Python extension modules.
+ <dd> Introduced <code>threadsafe</code>, <code>fortranname</code>,
+ and <code>intent(c)</code> statements.
+ <dt> August 13, 2001
+ <dd> Changed the name FPIG to F2PY for avoiding confusion with project names.
+ <dd> Updated <code>f2py</code> for use with Numeric version 20.x.
+ <dt> January 12, 2001
+ <dd> Example usages of <a href="pyfobj.html"><code>PyFortranObject</code></a>.
+ Fixed bugs. Updated the
+ <a href="f2python9.html">Python 9 Conference paper</a> (F2PY paper).
+ <dt> December 9, 2000
+ <dd> Implemented support for <code>PARAMETER</code> statement.
+ <dt> November 6, 2000
+ <dd> Submitted a paper for 9th Python Conference (accepted). It is available in <a
+ href="f2python9.html">html</a>, <a href="f2python9.pdf">PDF</a>,
+ and <a href="f2python9.ps.gz">Gzipped PS</a> formats.
+ <dt> September 17, 2000
+ <dd> Support for F90/95 module data and routines. COMMON block
+ wrapping is rewritten. New signature file syntax:
+ <code>pythonmodule</code>. Signature files generated with
+    f2py-2.264 or earlier are incompatible (replace
+ <code>module</code> with
+ <code>pythonmodule</code>).
+ <dt> September 12, 2000
+ <dd> The second public release of <code>f2py</code> is out. See <a
+ href="Release-2.x.txt">Release notes</a>.
+ <dt> September 11, 2000
+ <dd> Now <code>f2py</code> supports wrapping Fortran 90/95 module routines
+ (support for F90/95 module data coming soon)
+ <dt> June 12, 2000
+ <dd> Now <code>f2py</code> has a mailing list <a
+href="#f2py-users">f2py-users</a> open for discussion.
+
+</dl>
+
+
+<!-- End of user text -->
+<HR>
+<ADDRESS>
+<A href="http://validator.w3.org/"><IMG border=0 align=right src="/icons/vh40.gif" alt="Valid HTML 4.0!" height=31 width=88></A>
+<A href="http://cens.ioc.ee/~pearu/" target="_top">Pearu Peterson</A>
+<A href="mailto:pearu (at) ioc.ee">&lt;pearu(at)ioc.ee&gt;</A><BR>
+<!-- hhmts start -->
+Last modified: Mon Dec 3 19:40:26 EET 2001
+<!-- hhmts end -->
+</ADDRESS>
+<!-- You may want to comment the following line out when the document is final-->
+<!-- Check that the reference is right -->
+<!--A href="http://validator.w3.org/check?uri=http://cens.ioc.ee/projects/f2py2e/index.html;ss"> Submit this page for validation</A-->
+
+</BODY>
+
+
+</HTML>
diff --git a/numpy/f2py/doc/options.tex b/numpy/f2py/doc/options.tex
new file mode 100644
index 000000000..84d9410f8
--- /dev/null
+++ b/numpy/f2py/doc/options.tex
@@ -0,0 +1,63 @@
+
+\section{\fpy command line options}
+\label{sec:opts}
+
+\fpy has the following command line syntax (run \fpy without arguments
+to get up to date options!!!):
+\begin{verbatim}
+f2py [<options>] <fortran files> [[[only:]||[skip:]] <fortran functions> ]\
+ [: <fortran files> ...]
+\end{verbatim}
+where
+\begin{description}
+\item[\texttt{<options>}] --- the following options are available:
+ \begin{description}
+ \item[\texttt{-f77}] --- \texttt{<fortran files>} are in Fortran~77
+ fixed format (default).
+ \item[\texttt{-f90}] --- \texttt{<fortran files>} are in
+ Fortran~90/95 free format (default for signature files).
+ \item[\texttt{-fix}] --- \texttt{<fortran files>} are in
+ Fortran~90/95 fixed format.
+ \item[\texttt{-h <filename>}] --- after scanning the
+ \texttt{<fortran files>} write the signatures of Fortran routines
+ to file \texttt{<filename>} and exit. If \texttt{<filename>}
+ exists, \fpy quits without overwriting the file. Use
+ \texttt{-{}-overwrite-signature} to overwrite.
+ \item[\texttt{-m <modulename>}] --- specify the name of the module
+ when scanning Fortran~77 codes for the first time. \fpy will
+ generate Python C/API module source \texttt{<modulename>module.c}.
+ \item[\texttt{-{}-lower/-{}-no-lower}] --- lower/do not lower the cases
+ when scanning the \texttt{<fortran files>}. Default when
+ \texttt{-h} flag is specified/unspecified (that is for Fortran~77
+ codes/signature files).
+ \item[\texttt{-{}-short-latex}] --- use this flag when you want to
+ include the generated LaTeX document to another LaTeX document.
+ \item[\texttt{-{}-debug-capi}] --- create a very verbose C/API
+    code.  Useful for debugging.
+% \item[\texttt{-{}-h-force}] --- if \texttt{-h <filename>} is used then
+% overwrite the file \texttt{<filename>} (if it exists) and continue
+% with constructing the C/API module source.
+ \item[\texttt{-makefile <options>}] --- run \fpy without arguments
+ for more information.
+ \item[\texttt{-{}-use-libs}] --- see \texttt{-makefile}.
+ \item[\texttt{-{}-overwrite-makefile}] --- overwrite existing
+ \texttt{Makefile-<modulename>}.
+ \item[\texttt{-v}] --- print \fpy version number and exit.
+ \item[\texttt{-pyinc}] --- print Python include path and exit.
+ \end{description}
+\item[\texttt{<fortran files>}] --- are the paths to Fortran files or
+ to signature files that will be scanned for \texttt{<fortran
+ functions>} in order to determine their signatures.
+\item[\texttt{<fortran functions>}] --- are the names of Fortran
+ routines for which Python C/API wrapper functions will be generated.
+ Default is all that are found in \texttt{<fortran files>}.
+\item[\texttt{only:}/\texttt{skip:}] --- are flags for filtering
+ in/out the names of fortran routines to be wrapped. Run \fpy without
+ arguments for more information about the usage of these flags.
+\end{description}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
diff --git a/numpy/f2py/doc/python9.tex b/numpy/f2py/doc/python9.tex
new file mode 100644
index 000000000..cda3cd18b
--- /dev/null
+++ b/numpy/f2py/doc/python9.tex
@@ -0,0 +1,1046 @@
+\documentclass[twocolumn]{article}
+\usepackage{epsfig}
+\usepackage{xspace}
+\usepackage{verbatim}
+
+
+\headsep=0pt
+\topmargin=0pt
+\headheight=0pt
+\oddsidemargin=0pt
+\textwidth=6.5in
+\textheight=9in
+%%tth:\newcommand{\xspace}{ }
+\newcommand{\fpy}{\texttt{f2py}\xspace}
+\newcommand{\bs}{\symbol{`\\}}
+% need bs here:
+%%tth:\newcommand{\bs}{\texttt{<backslash>}}
+
+\newcommand{\tthhide}[1]{#1}
+\newcommand{\latexhide}[1]{}
+%%tth:\newcommand{\tthhide}[1]{}
+%%tth:\newcommand{\latexhide}[1]{#1}
+
+\newcommand{\shell}[1]{
+\latexhide{
+ \special{html:
+<BLOCKQUOTE>
+<pre>
+sh> #1
+</pre>
+</BLOCKQUOTE>}
+}
+\tthhide{
+ \\[1ex]
+ \hspace*{1em}
+ \texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}\\[1ex]
+}
+}
+
+\newcommand{\email}[1]{\special{html:<A href="mailto:#1">}\texttt{<#1>}\special{html:</A>}}
+\newcommand{\wwwsite}[1]{\special{html:<A href="#1">}{#1}\special{html:</A>}}
+\title{Fortran to Python Interface Generator with
+an Application to Aerospace Engineering}
+\author{
+\large Pearu Peterson\\
+\small \email{pearu@cens.ioc.ee}\\
+\small Center of Nonlinear Studies\\
+\small Institute of Cybernetics at TTU\\
+\small Akadeemia Rd 21, 12618 Tallinn, ESTONIA\\[2ex]
+\large Joaquim R. R. A. Martins and Juan J. Alonso\\
+\small \email{joaquim.martins@stanford.edu}, \email{jjalonso@stanford.edu}\\
+\small Department of Aeronautics and Astronautics\\
+\small Stanford University, CA
+}
+\date{$Revision: 1.17 $\\\today}
+\begin{document}
+
+\maketitle
+
+\special{html: Other formats of this document:
+<A href=f2python9.ps.gz>Gzipped PS</A>,
+<A href=f2python9.pdf>PDF</A>
+}
+
+\begin{abstract}
+ FPIG --- Fortran to Python Interface Generator --- is a tool for
+ generating Python C/API extension modules that interface
+ Fortran~77/90/95 codes with Python. This tool automates the process
+ of interface generation by scanning the Fortran source code to
+ determine the signatures of Fortran routines and creating a
+ Python C/API module that contains the corresponding interface
+ functions. FPIG also attempts to find dependence relations between
+ the arguments of a Fortran routine call (e.g. an array and its
+ dimensions) and constructs interface functions with potentially
+ fewer arguments. The tool is extremely flexible since the user has
+ control over the generation process of the interface by specifying the
+ desired function signatures. The home page for FPIG can be found at
+ \wwwsite{http://cens.ioc.ee/projects/f2py2e/}.
+
+ FPIG has been used successfully to wrap a large number of Fortran
+ programs and libraries. Advances in computational science have led
+ to large improvements in the modeling of physical systems which are
+ often a result of the coupling of a variety of physical models that
+ were typically run in isolation. Since a majority of the available
+ physical models have been previously written in Fortran, the
+ importance of FPIG in accomplishing these couplings cannot be
+ understated. In this paper, we present an application of FPIG to
+ create an object-oriented framework for aero-structural analysis and
+ design of aircraft.
+\end{abstract}
+
+%%tth:
+\tableofcontents
+
+\section{Preface}
+\label{sec:preface}
+
+The use of high-performance computing has made it possible to tackle
+many important problems and discover new physical phenomena in science
+and engineering. These accomplishments would not have been achieved
+without the computer's ability to process large amounts of data in a
+reasonably short time. It can safely be said that the computer has
+become an essential tool for scientists and engineers. However, the
+diversity of problems in science and engineering has left its mark as
+computer programs have been developed in different programming
+languages, including languages developed to describe certain specific
+classes of problems.
+
+In interdisciplinary fields it is not uncommon for scientists and
+engineers to face problems that have already been solved in a
+different programming environment from the one they are familiar with.
+Unfortunately, researchers may not have the time or willingness to
+learn a new programming language and typically end up developing the
+corresponding tools in the language that they normally use. This
+approach to the development of new software can substantially impact
+the time to develop and the quality of the resulting product: firstly,
+it usually takes longer to develop and test a new tool than to learn a
+new programming environment, and secondly it is very unlikely that a
+non-specialist in a given field can produce a program that is more
+efficient than more established tools.
+
+To avoid situations such as the one described above, one alternative
+would be to provide automatic or semi-automatic interfaces between programming
+languages. Another possibility would be to provide language
+translators, but these obviously require more work than interface
+generators --- a translator must understand all language constructs
+while an interface generator only needs to understand a subset of these
+constructs. With an automatic interface between two languages, scientists or
+engineers can effectively use programs written in other programming
+languages without ever having to learn them.
+
+Although it is clear that it is impossible to interface arbitrary programming
+languages with each other, there is also no need to do so. Low-level languages such as C and Fortran are well known for
+their speed and are therefore suitable for applications where
+performance is critical. High-level scripting languages, on the other
+hand, are generally slower but much easier to learn and use,
+especially when performing interactive analysis. Therefore, it makes
+sense to create interfaces only in one direction: from lower-level
+languages to higher-level languages.
+
+In an ideal world, scientists and engineers would use higher-level
+languages for the manipulation of the mathematical formulas in a problem
+rather than having to struggle with tedious programming details. For tasks
+that are computationally demanding, they would use interfaces to
+high-performance routines that are written in a lower-level language
+optimized for execution speed.
+
+
+\section{Introduction}
+\label{sec:intro}
+
+This paper presents a tool that has been developed for the creation of
+interfaces between Fortran and Python.
+
+
+The Fortran language is popular in
+scientific computing, and is used mostly in applications that use
+extensive matrix manipulations (e.g. linear algebra). Since Fortran
+ has been the standard language among scientists and engineers for
+ at least three decades, there is a large number of legacy codes available that
+ perform a variety of tasks using very sophisticated algorithms (see
+e.g. \cite{netlib}).
+
+The Python language \cite{python}, on the other hand, is a relatively
+new programming language. It is a very high-level scripting language
+that supports object-oriented programming. What makes Python
+especially appealing is its very clear and natural syntax, which makes it
+easy to learn and use. With Python one can implement relatively
+complicated algorithms and tasks in a short time with very compact
+source code.
+
+Although there are ongoing projects for extending Python's usage in
+scientific computation, it lacks reliable tools that are common in
+scientific and engineering work, such as ODE integrators, equation solvers,
+tools for FEM, etc. The implementation of all of these tools in Python
+would be not only too time-consuming but also inefficient. On the
+other hand, these tools are already developed in other,
+computationally more efficient languages such as Fortran or C.
+Therefore, the perfect role for Python in the context of scientific
+computing would be that of a ``gluing'' language. That is, the role
+of providing high-level interfaces to C, C++ and Fortran libraries.
+
+There are a number of widely-used tools that can be used for interfacing
+software libraries to Python. For binding C libraries with various
+scripting languages, including Python, the tool most often used is
+SWIG \cite{swig}. Wrapping Fortran routines with Python is less
+popular, mainly because there are many platform and compiler-specific
+issues that need to be addressed. Nevertheless, there is great
+interest in interfacing Fortran libraries because they provide
+invaluable tools for scientific computing. At LLNL, for example, a tool
+called PyFort has been developed for connecting Fortran and
+Python~\cite{pyfort}.
+
+The tools mentioned above require an input file describing signatures
+of functions to be interfaced. To create these input files, one needs
+to have a good knowledge of either C or Fortran. In addition,
+binding libraries that have thousands of routines can certainly constitute a
+very tedious task, even with these tools.
+
+The tool that is introduced in this paper, FPIG (Fortran to Python
+Interface Generator)~\cite{fpig}, automatically generates interfaces
+between Fortran and Python. It is different from the tools mentioned
+above in that FPIG can create signature files automatically by
+scanning the source code of the libraries and then construct Python
+C/API extension modules. Note that the user need not be experienced
+in C or even Fortran. In addition, FPIG is designed to wrap large
+Fortran libraries containing many routines with only one or two
+commands. This process is very flexible since one can always modify
+the generated signature files to insert additional attributes in order
+to achieve more sophisticated interface functions such as taking care
+of optional arguments, predicting the sizes of array arguments and
+performing various checks on the correctness of the input arguments.
+
+The organization of this paper is as follows. First, a simple example
+of FPIG usage is given. Then FPIG's basic features are described and
+solutions to platform and compiler specific issues are discussed.
+Unsolved problems and future work on FPIG's development are also
+addressed. Finally, an application to a large aero-structural solver
+is presented as a real-world example of FPIG's usage.
+
+\section{Getting Started}
+\label{sec:getstart}
+
+To get acquainted with FPIG, let us consider the simple Fortran~77
+subroutine shown in Fig. \ref{fig:exp1.f}.
+\begin{figure}[htb]
+ \latexhide{\label{fig:exp1.f}}
+ \special{html:<BLOCKQUOTE>}
+ \verbatiminput{examples/exp1.f}
+ \special{html:</BLOCKQUOTE>}
+ \caption{Example Fortran code \texttt{exp1.f}. This routine calculates
+ the simplest rational lower and upper approximations to $e$ (for
+ details of
+ the algorithm see \cite{graham-etal}, p.122)}
+ \tthhide{\label{fig:exp1.f}}
+\end{figure}
+In the sections that follow, two ways of creating interfaces to this
+Fortran subroutine are described. The first and simplest way is
+suitable for Fortran codes that are developed in connection with \fpy.
+The second and not much more difficult method, is suitable for
+interfacing existing Fortran libraries which might have been developed
+by other programmers.
+
+Numerical Python~\cite{numpy} is needed in order to compile extension
+modules generated by FPIG.
+
+\subsection{Interfacing Simple Routines}
+\label{sec:example1}
+
+In order to call the Fortran routine \texttt{exp1} from Python, let us
+create an interface to it by using \fpy (FPIG's front-end program). In
+order to do this, we issue the following command, \shell{f2py -m foo
+exp1.f} where the option \texttt{-m foo} sets the name of the Python
+C/API extension module that \fpy will create to
+\texttt{foo}. To learn more about the \fpy command line options, run \fpy
+without arguments.
+
+The output messages in Fig. \ref{fig:f2pyoutmess}
+illustrate the procedure followed by \fpy:
+ (i) it scans the Fortran source code specified in the command line,
+ (ii) it analyses and determines the routine signatures,
+ (iii) it constructs the corresponding Python C/API extension modules,
+ (iv) it writes documentation to a LaTeX file, and
+ (v) it creates a GNU Makefile for building the shared modules.
+\begin{figure}[htb]
+ \latexhide{\label{fig:f2pyoutmess}}
+ \special{html:<BLOCKQUOTE>}
+ {\tthhide{\small}
+ \verbatiminput{examples/exp1mess.txt}
+ }
+ \special{html:</BLOCKQUOTE>}
+ \caption{Output messages of \texttt{f2py -m foo exp1.f}.}
+ \tthhide{\label{fig:f2pyoutmess}}
+\end{figure}
+
+Now we can build the \texttt{foo} module:
+\shell{make -f Makefile-foo}
+
+Figure \ref{fig:exp1session} illustrates a sample session for
+ calling the Fortran routine \texttt{exp1} from Python.
+\begin{figure}[htb]
+ \latexhide{\label{fig:exp1session}}
+ \special{html:<BLOCKQUOTE>}
+ \verbatiminput{examples/exp1session.txt}
+ \special{html:</BLOCKQUOTE>}
+ \caption{Calling Fortran routine \texttt{exp1} from Python. Here
+ \texttt{l[0]/l[1]} gives an estimate to $e$ with absolute error
+ less than \texttt{u[0]/u[1]-l[0]/l[1]} (this value may depend on
+ the platform and compiler used).}
+ \tthhide{\label{fig:exp1session}}
+\end{figure}
+
+Note the difference between the signatures of the Fortran routine
+\texttt{exp1(l,u,n)} and the corresponding wrapper function
+\texttt{l,u=exp1([n])}. Clearly, the latter is more informative to
+the user: \texttt{exp1} takes one optional argument \texttt{n} and it
+returns \texttt{l}, \texttt{u}. This exchange of signatures is
+achieved by special comment lines (starting with \texttt{Cf2py}) in
+the Fortran source code --- these lines are interpreted by \fpy as
+normal Fortran code. Therefore, in the given example the line \texttt{Cf2py
+ integer*4 :: n = 1} informs \fpy that the variable \texttt{n} is
+optional with a default value equal to one. The line \texttt{Cf2py
+ intent(out) l,u} informs \fpy that the variables \texttt{l,u} are to be
+returned to Python after calling Fortran function \texttt{exp1}.
+
+\subsection{Interfacing Libraries}
+\label{sec:example2}
+
+In our example the Fortran source \texttt{exp1.f} contains \fpy
+specific information, though only as comments. When interfacing
+libraries from other parties, it is not recommended to modify their
+source. Instead, one should use a special auxiliary file to collect
+the signatures of all Fortran routines and insert \fpy specific
+declaration and attribute statements in that file. This auxiliary file
+is called a \emph{signature file} and is identified by the extension
+\texttt{.pyf}.
+
+We can use \fpy to generate these signature files by using the
+\texttt{-h <filename>.pyf} option.
+In our example, \fpy could have been called as follows,
+\shell{f2py -m foo -h foo.pyf exp1.f}
+where the option \texttt{-h foo.pyf} requests \fpy to read the
+routine signatures, save them to the file \texttt{foo.pyf}, and then
+exit.
+If \texttt{exp1.f} in Fig.~\ref{fig:exp1.f} were to
+contain no lines starting with \texttt{Cf2py}, the corresponding
+signature file \texttt{foo.pyf} would be as shown in Fig.~\ref{fig:foo.pyf}.
+In order to obtain the exchanged and more convenient signature
+\texttt{l,u=foo.exp1([n])}, we would edit \texttt{foo.pyf} as shown in
+Fig.~\ref{fig:foom.pyf}.
+The Python C/API extension module \texttt{foo} can be constructed by
+applying \fpy to the signature file with the following command:
+\shell{f2py foo.pyf}
+The procedure for building the corresponding shared module and using
+it in Python is identical to the one described in the previous section.
+
+\begin{figure}[htb]
+ \latexhide{\label{fig:foo.pyf}}
+ \special{html:<BLOCKQUOTE>}
+ \verbatiminput{examples/foo.pyf}
+ \special{html:</BLOCKQUOTE>}
+ \caption{Raw signature file \texttt{foo.pyf} generated with
+ \texttt{f2py -m foo -h foo.pyf exp1.f}}
+ \tthhide{\label{fig:foo.pyf}}
+\end{figure}
+\begin{figure}[htb]
+ \latexhide{\label{fig:foom.pyf}}
+ \special{html:<BLOCKQUOTE>}
+ \verbatiminput{examples/foom.pyf}
+ \special{html:</BLOCKQUOTE>}
+ \caption{Modified signature file \texttt{foo.pyf}}
+ \tthhide{\label{fig:foom.pyf}}
+\end{figure}
+
+As we can see, the syntax of the signature file is an
+extension of the Fortran~90/95 syntax. This means that only a few new
+constructs are introduced for \fpy in addition to all standard Fortran
+constructs; signature files can even be written in fixed form. A
+complete set of constructs that are used when creating interfaces, is
+described in the \fpy User's Guide \cite{f2py-ug}.
+
+
+\section{Basic Features}
+\label{sec:features}
+
+In this section a short overview of \fpy features is given.
+\begin{enumerate}
+\item All basic Fortran types are supported. They include
+the following type specifications:
+\begin{verbatim}
+integer[ | *1 | *2 | *4 | *8 ]
+logical[ | *1 | *2 | *4 | *8 ]
+real[ | *4 | *8 | *16 ]
+complex[ | *8 | *16 | *32 ]
+double precision, double complex
+character[ |*(*)|*1|*2|*3|...]
+\end{verbatim}
+In addition, they can all be in the kind-selector form
+(e.g. \texttt{real(kind=8)}) or char-selector form
+(e.g. \texttt{character(len=5)}).
+\item Arrays of all basic types are supported. Dimension
+ specifications can be of form \texttt{<dimension>} or
+ \texttt{<start>:<end>}. In addition, \texttt{*} and \texttt{:}
+ dimension specifications can be used for input arrays.
+ Dimension specifications may contain also \texttt{PARAMETER}'s.
+\item The following attributes are supported:
+ \begin{itemize}
+ \item
+ \texttt{intent(in)}: used for input-only arguments.
+ \item
+ \texttt{intent(inout)}: used for arguments that are changed in
+ place.
+ \item
+ \texttt{intent(out)}: used for return arguments.
+ \item
+ \texttt{intent(hide)}: used for arguments to be removed from
+ the signature of the Python function.
+ \item
+ \texttt{intent(in,out)}, \texttt{intent(inout,out)}: used for
+ arguments with combined behavior.
+ \item
+ \texttt{dimension(<dimspec>)}
+ \item
+ \texttt{depend([<names>])}: used
+ for arguments that depend on other arguments in \texttt{<names>}.
+ \item
+ \texttt{check([<C booleanexpr>])}: used for checking the
+ correctness of input arguments.
+ \item
+ \texttt{note(<LaTeX text>)}: used for
+ adding notes to the module documentation.
+ \item
+ \texttt{optional}, \texttt{required}
+ \item
+ \texttt{external}: used for call-back arguments.
+ \item
+ \texttt{allocatable}: used for Fortran 90/95 allocatable arrays.
+ \end{itemize}
+\item Using \fpy one can call arbitrary Fortran~77/90/95 subroutines
+ and functions from Python, including Fortran 90/95 module routines.
+\item Using \fpy one can access data in Fortran~77 COMMON blocks and
+ variables in Fortran 90/95 modules, including allocatable arrays.
+\item Using \fpy one can call Python functions from Fortran (call-back
+ functions). \fpy supports very flexible hooks for call-back functions.
+\item Wrapper functions perform the necessary type conversions for their
+ arguments resulting in contiguous Numeric arrays that are suitable for
+ passing to Fortran routines.
+\item \fpy generates documentation strings
+for \texttt{\_\_doc\_\_} attributes of the wrapper functions automatically.
+\item \fpy scans Fortran codes and creates the signature
+ files. It automatically detects the signatures of call-back functions,
+ solves argument dependencies, decides the order of initialization of
+ optional arguments, etc.
+\item \fpy automatically generates GNU Makefiles for compiling Fortran
+ and C codes, and linking them to a shared module.
+ \fpy detects available Fortran and C compilers. The
+ supported compilers include the GNU project C Compiler (gcc), Compaq
+ Fortran, VAST/f90 Fortran, Absoft F77/F90, and MIPSpro 7 Compilers, etc.
+ \fpy has been tested to work on the following platforms: Intel/Alpha
+ Linux, HP-UX, IRIX64.
+\item Finally, the complete \fpy User's Guide is available in various
+ formats (ps, pdf, html, dvi). A mailing list,
+ \email{f2py-users@cens.ioc.ee}, is open for support and feedback. See
+ the FPIG's home page for more information \cite{fpig}.
+\end{enumerate}
+
+
+\section{Implementation Issues}
+\label{sec:impl}
+
+The Fortran to Python interface can be thought of as a three layer
+``sandwich'' of different languages: Python, C, and Fortran. This
+arrangement has two interfaces: Python-C and C-Fortran. Since Python
+itself is written in C, there are no basic difficulties in
+implementing the Python-C interface~\cite{python-doc:ext}. The C-Fortran
+interface, on the other hand, results in many platform and compiler specific
+issues that have to be dealt with. We will now discuss these issues
+in some detail and describe how they are solved in FPIG.
+
+\subsection{Mapping Fortran Types to C Types}
+\label{sec:mapF2Ctypes}
+
+Table \ref{tab:mapf2c} defines how Fortran types are mapped to C types
+in \fpy.
+\begin{table}[htb]
+ \begin{center}
+ \begin{tabular}[c]{l|l}
+ Fortran type & C type \\\hline
+ \texttt{integer *1} & \texttt{char}\\
+ \texttt{byte} & \texttt{char}\\
+ \texttt{integer *2} & \texttt{short}\\
+ \texttt{integer[ | *4]} & \texttt{int}\\
+ \texttt{integer *8} & \texttt{long long}\\
+ \texttt{logical *1} & \texttt{char}\\
+ \texttt{logical *2} & \texttt{short}\\
+ \texttt{logical[ | *4]} & \texttt{int}\\
+ \texttt{logical *8} & \texttt{int}\\
+ \texttt{real[ | *4]} & \texttt{float}\\
+ \texttt{real *8} & \texttt{double}\\
+ \texttt{real *16} & \texttt{long double}\\
+ \texttt{complex[ | *8]} & \texttt{struct \{float r,i;\}}\\
+ \texttt{complex *16} & \texttt{struct \{double r,i;\}}\\
+ \texttt{complex *32} & \texttt{struct \{long double r,i;\}}\\
+ \texttt{character[*...]} & \texttt{char *}\\
+ \end{tabular}
+ \caption{Mapping Fortran types to C types.}
+ \label{tab:mapf2c}
+ \end{center}
+\end{table}
+Users may redefine these mappings by creating a \texttt{.f2py\_f2cmap}
+file in the working directory. This file should contain a Python
+dictionary of dictionaries, e.g. \texttt{\{'real':\{'low':'float'\}\}},
+that informs \fpy to map Fortran type \texttt{real(low)}
+to C type \texttt{float} (here \texttt{PARAMETER low = ...}).
+
+
+\subsection{Calling Fortran (Module) Routines}
+\label{sec:callrout}
+
+When mixing Fortran and C codes, one has to know how function names
+are mapped to low-level symbols in their object files. Different
+compilers may use different conventions for this purpose. For example, gcc
+appends the underscore \texttt{\_} to a Fortran routine name. Other
+compilers may use upper case names, prepend or append different
+symbols to Fortran routine names or both. In any case, if the
+low-level symbols corresponding to Fortran routines are valid for the
+C language specification, compiler specific issues can be solved by
+using CPP macro features.
+
+Unfortunately, there are Fortran compilers that use symbols in
+constructing low-level routine names that are not valid for C. For
+example, the (IRIX64) MIPSpro 7 Compilers use the `\$' character in the
+low-level names of module routines which makes it impossible (at
+least directly) to call such routines from C when using the MIPSpro 7
+C Compiler.
+
+In order to overcome this difficulty, FPIG introduces a unique
+solution: instead of using low-level symbols for calling Fortran
+module routines from C, the references to such routines are determined
+at run-time by using special wrappers. These wrappers are called once
+during the initialization of an extension module. They are simple
+Fortran subroutines that use a Fortran module and call another C
+function with Fortran module routines as arguments in order to save
+their references to C global variables that are later used for calling
+the corresponding Fortran module routines. This arrangement is
+set up as follows. Consider the following Fortran 90 module with the
+subroutine \texttt{bar}:
+\special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+module fun
+ subroutine bar()
+ end
+end
+\end{verbatim}
+\special{html:</BLOCKQUOTE>}
+Figure \ref{fig:capi-sketch} illustrates a Python C/API extension
+module for accessing the F90 module subroutine \texttt{bar} from Python.
+When the Python module \texttt{foo} is loaded, \texttt{finitbar} is
+called. \texttt{finitbar} calls \texttt{init\_bar} by passing the
+reference of the Fortran 90 module subroutine \texttt{bar} to C where it is
+saved to the variable \texttt{bar\_ptr}. Now, when one executes \texttt{foo.bar()}
+from Python, \texttt{bar\_ptr} is used in \texttt{bar\_capi} to call
+the F90 module subroutine \texttt{bar}.
+\begin{figure}[htb]
+ \latexhide{\label{fig:capi-sketch}}
+ \special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+#include "Python.h"
+...
+char *bar_ptr;
+void init_bar(char *bar) {
+ bar_ptr = bar;
+}
+static PyObject *
+bar_capi(PyObject *self,PyObject *args) {
+ ...
+  (*(void (*)(void))bar_ptr)();
+ ...
+}
+static PyMethodDef
+foo_module_methods[] = {
+ {"bar",bar_capi,METH_VARARGS},
+ {NULL,NULL}
+};
+extern void finitbar_; /* GCC convention */
+void initfoo() {
+ ...
+ finitbar_(init_bar);
+ Py_InitModule("foo",foo_module_methods);
+ ...
+}
+\end{verbatim}
+ \special{html:</BLOCKQUOTE>}
+ \caption{Sketch of Python C/API for accessing F90 module subroutine
+ \texttt{bar}. The Fortran function \texttt{finitbar} is defined in
+ Fig.~\ref{fig:wrapbar}.}
+ \tthhide{\label{fig:capi-sketch}}
+\end{figure}
+\begin{figure}[ht]
+ \latexhide{\label{fig:wrapbar}}
+\special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+ subroutine finitbar(cinit)
+ use fun
+      external cinit
+ call cinit(bar)
+ end
+\end{verbatim}
+\special{html:</BLOCKQUOTE>}
+ \caption{Wrapper for passing the reference of \texttt{bar} to C code.}
+ \tthhide{\label{fig:wrapbar}}
+\end{figure}
+
+Surprisingly, mixing C code and Fortran modules in this way is as
+portable and compiler independent as mixing C and ordinary Fortran~77
+code.
+
+Note that extension modules generated by \fpy actually use
+\texttt{PyFortranObject} that implements the above-described scheme with
+exchanged functionalities (see Section \ref{sec:PFO}).
+
+
+\subsection{Wrapping Fortran Functions}
+\label{sec:wrapfunc}
+
+The Fortran language has two types of routines: subroutines and
+functions. When a Fortran function returns a composed type such as
+\texttt{COMPLEX} or \texttt{CHARACTER}-array then calling this
+function directly from C may not work for all compilers, as C
+functions are not supposed to return such references. In order to
+avoid this, FPIG constructs an additional Fortran wrapper subroutine
+for each such Fortran function. These wrappers call just the
+corresponding functions in the Fortran layer and return the result to
+C through its first argument.
+
+
+\subsection{Accessing Fortran Data}
+\label{sec:accsdata}
+
+In Fortran one can use \texttt{COMMON} blocks and Fortran module
+variables to save data that is accessible from other routines. Using
+FPIG, one can also access these data containers from Python. To achieve
+this, FPIG uses special wrapper functions (similar to the ones used
+for wrapping Fortran module routines) to save the references to these
+data containers so that they can later be used from C.
+
+FPIG can also handle \texttt{allocatable} arrays. For example, if a
+Fortran array is not yet allocated, then by assigning it in Python,
+the Fortran to Python interface will allocate and initialize the
+array. For example, the F90 module allocatable array \texttt{bar}
+defined in
+\special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+module fun
+ integer, allocatable :: bar(:)
+end module
+\end{verbatim}
+\special{html:</BLOCKQUOTE>}
+can be allocated from Python as follows
+\special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+>>> import foo
+>>> foo.fun.bar = [1,2,3,4]
+\end{verbatim}
+\special{html:</BLOCKQUOTE>}
+
+\subsection{\texttt{PyFortranObject}}
+\label{sec:PFO}
+
+In general, we would like to access from Python the following Fortran
+objects:
+\begin{itemize}
+\item subroutines and functions,
+\item F90 module subroutines and functions,
+\item items in COMMON blocks,
+\item F90 module data.
+\end{itemize}
+Assuming that the Fortran source is available, we can determine the signatures
+of these objects (the full specification of routine arguments, the
+layout of Fortran data, etc.). In fact, \fpy gets this information
+while scanning the Fortran source.
+
+In order to access these Fortran objects from C, we need to determine
+their references. Note that the direct access of F90 module objects is
+extremely compiler dependent and in some cases even impossible.
+Therefore, FPIG uses various wrapper functions for obtaining the
+references to Fortran objects. These wrapper functions are ordinary
+F77 subroutines that can easily access objects from F90 modules and
+that pass the references to Fortran objects as C variables.
+
+
+\fpy generated Python C/API extension modules use
+\texttt{PyFortranObject} to store the references of Fortran objects.
+In addition to the storing functionality, the \texttt{PyFortranObject}
+also provides methods for accessing/calling Fortran objects from
+Python in a user-friendly manner. For example, the item \texttt{a} in
+\texttt{COMMON /bar/ a(2)} can be accessed from Python as
+\texttt{foo.bar.a}.
+
+Detailed examples of \texttt{PyFortranObject} usage can be found in
+\cite{PFO}.
+
+\subsection{Callback Functions}
+\label{sec:callback}
+
+Fortran routines may have arguments specified as \texttt{external}.
+These arguments are the names of functions or subroutines that the receiving Fortran routine
+will call from its body. For such arguments FPIG
+constructs a call-back mechanism (originally contributed by Travis
+Oliphant) that allows Fortran routines to call Python functions. This
+is actually realized using a C layer between Python and
+Fortran. Currently, the call-back mechanism is compiler independent
+unless a call-back function needs to return a composed type
+(e.g. \texttt{COMPLEX}).
+
+The signatures of call-back functions are determined when \fpy scans
+the Fortran source code. To illustrate this, consider the following
+example:
+\special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+ subroutine foo(bar, fun, boo)
+ integer i
+ real r
+ external bar,fun,boo
+ call bar(i, 1.2)
+ r = fun()
+ call sun(boo)
+ end
+\end{verbatim}
+\special{html:</BLOCKQUOTE>}
+\fpy recognizes the signatures of the user routines \texttt{bar} and
+\texttt{fun} using the information contained in the lines \texttt{call
+ bar(i, 1.2)} and \texttt{r = fun()}:
+\special{html:<BLOCKQUOTE>}
+\begin{verbatim}
+subroutine bar(a,b)
+ integer a
+ real b
+end
+function fun()
+ real fun
+end
+\end{verbatim}
+\special{html:</BLOCKQUOTE>}
+But \fpy cannot determine the signature of the user routine
+\texttt{boo} because the source contains no information at all about
+the \texttt{boo} specification. Here the user needs to provide the
+signature of \texttt{boo} manually.
+
+\section{Future Work}
+\label{sec:future}
+
+FPIG can be used to wrap almost any Fortran code. However, there are
+still issues that need to be resolved. Some of them are listed below:
+\begin{enumerate}
+\item One of the FPIG's goals is to become as platform and compiler
+ independent as possible. Currently FPIG can be used on
+  any UN*X platform that has gcc installed. In the future, FPIG
+ should be also tested on Windows systems.
+\item Another goal of FPIG is to become as simple to use as
+ possible. To achieve that, FPIG should start using the facilities of
+ \texttt{distutils}, the new Python standard to distribute and build
+ Python modules. Therefore, a contribution to \texttt{distutils}
+ that can handle Fortran extensions should be developed.
+\item Currently users must be aware of
+ the fact that multi-dimensional arrays are stored differently in C
+ and Fortran (they must provide transposed multi-dimensional arrays
+ to wrapper functions). In the future a solution should be found such
+ that users do not need to worry about this rather
+ confusing and technical detail.
+\item Finally, a repository of signature files for widely-used Fortran
+ libraries (e.g. BLAS, LAPACK, MINPACK, ODEPACK, EISPACK, LINPACK) should be
+ provided.
+\end{enumerate}
+
+
+\section{Application to a Large Aero-Structural Analysis Framework}
+\label{sec:app}
+
+
+\subsection{The Need for Python and FPIG}
+\label{sec:appsub1}
+
+As a demonstration of the power and usefulness of FPIG, we will
+present work that has been done at the Aerospace Computing Laboratory
+at Stanford University. The focus of the research is on aircraft
+design optimization using high-fidelity analysis tools such as
+Computational Fluid Dynamics (CFD) and Computational Structural
+Mechanics (CSM)~\cite{reno99}.
+
+The group's analysis programs are written mainly in Fortran and are the result
+of many years of development. Until now, any researcher that needed
+to use these tools would have to learn a less than user-friendly
+interface and become relatively familiar with the inner workings of
+the codes before starting the research itself. The need to
+couple analyses of different disciplines revealed the additional
+inconvenience of gluing and scripting the different codes with
+Fortran.
+
+It was therefore decided that the existing tools should be wrapped
+using an object-oriented language in order to improve their ease of
+use and versatility. The use of several different languages such as
+C++, Java and Perl was investigated but Python seemed to provide the
+best solution. The fact that it combines scripting capability
+with a fully-featured object-oriented programming language, and that
+it has a clean syntax were factors that determined our choice. The
+introduction of tools that greatly facilitate the task of wrapping
+Fortran with Python provided the final piece needed to realize our
+objective.
+
+\subsection{Wrapping the Fortran Programs}
+
+In theory, it would have been possible to wrap our Fortran programs
+with C and then with Python by hand. However, this would have been a
+labor intensive task that would detract from our research. The use of
+tools that automate the task of wrapping has been extremely useful.
+
+The first such tool that we used was PyFort. This tool created the C
+wrappers and Python modules automatically, based on signature files
+(\texttt{.pyf}) provided by the user. Although it made the task of
+wrapping considerably easier, PyFort was limited by the fact that any
+Fortran data that was needed at the Python level had to be passed in
+the argument list of the Fortran subroutine. Since the bulk of the
+data in our programs is shared by using Fortran~77 common blocks and
+Fortran~90 modules, this required adding many more arguments to the
+subroutine headers. Furthermore, since Fortran does not allow common
+block variables or module data to be specified in a subroutine
+argument list, a dummy pointer for each desired variable had to be
+created and initialized.
+
+The search for a better solution to this problem led us to \fpy.
+Since \fpy provides a solution for accessing common block and module
+variables, there was no need to change the Fortran source anymore,
+making the wrapping process even easier. With \fpy we also
+experienced an increased level of automation since it produces the
+signature files automatically, as well as a Makefile for the joint
+compilation of the original Fortran and C wrapper codes. This increased
+automation did not detract from its flexibility since it was always
+possible to edit the signature files to provide different functionality.
+
+Once Python interfaces were created for each Fortran application
+by running \fpy, it was just a matter of using Python to achieve the
+final objective of developing an object-oriented framework for our
+multidisciplinary solvers. The Python modules that we designed are
+discussed in the following section.
+
+
+\subsection{Module Design}
+\label{ssec:module}
+
+The first objective of this effort was to design the classes for each
+type of analysis, each representing an independent Python module. In
+our case, we are interested in performing aero-structural analysis and
+optimization of aircraft wings. We therefore needed an analysis tool
+for the flow (CFD), another for analyzing the structure (CSM), as well
+as a geometry database. In addition, we needed to interface these two
+tools in order to analyze the coupled system. The object design for
+each of these modules should be general enough that the underlying
+analysis code in Fortran can be changed without changing the Python
+interface. Another requirement was that the modules be usable on
+their own for single discipline analysis.
+
+\subsubsection{Geometry}
+
+The \emph{Geometry} class provides a database for the outer mold
+geometry of the aircraft. This database needs to be accessed by both
+the flow and structural solvers. It contains a parametric description
+of the aircraft's surface as well as methods that extract and update
+this information.
+
+
+\subsubsection{Flow}
+
+The flow solver was wrapped in a class called \emph{Flow}. The class
+was designed so that it can wrap any type of CFD solver. It contains
+two main objects: the computational mesh and a solver object. A graph
+showing the hierarchy of the objects in \emph{Flow} is shown in
+Fig.~\ref{fig:flow}.
+\tthhide{
+\begin{figure}[h]
+ \centering
+ \epsfig{file=./flow.eps, angle=0, width=.7\linewidth}
+ \caption{The \emph{Flow} container class.}
+ \label{fig:flow}
+\end{figure}
+}
+\latexhide{
+\begin{figure}[h]
+ \label{fig:flow}
+\special{html:
+<CENTER>
+ <IMG SRC="flow.jpg" WIDTH="400">
+</CENTER>
+}
+ \caption{The \emph{Flow} container class.}
+\end{figure}
+}
+Methods in the flow class include those used for the initialization of
+all the class components as well as methods that write the current
+solution to a file.
+
+
+\subsubsection{Structure}
+
+The \emph{Structure} class wraps a structural analysis code. The class
+stores the information about the structure itself in an object called
+\emph{Model} which also provides methods for changing and exporting
+its information. A list of the objects contained in this class can be
+seen in Fig.~\ref{fig:structure}.
+\tthhide{
+\begin{figure}[h]
+ \centering
+ \epsfig{file=./structure.eps, angle=0, width=.7\linewidth}
+ \caption{The \emph{Structure} container class.}
+ \label{fig:structure}
+\end{figure}
+}
+\latexhide{
+\begin{figure}[h]
+ \label{fig:structure}
+\special{html:
+<CENTER>
+ <IMG SRC="structure.jpg" WIDTH="400">
+</CENTER>
+}
+ \caption{The \emph{Structure} container class.}
+\end{figure}
+}
+Since the \emph{Structure} class contains a
+dictionary of \emph{LoadCase} objects, it is able to store and solve
+multiple load cases, a capability that the original Fortran code
+does not have.
+
+
+\subsubsection{Aerostructure}
+
+The \emph{Aerostructure} class is the main class in the
+aero-structural analysis module and contains a \emph{Geometry}, a
+\emph{Flow} and a \emph{Structure}. In addition, the class defines
+all the functions that are necessary to translate aerodynamic
+loads to structural loads and structural displacements to
+geometry surface deformations.
+
+One of the main methods of this class is the one that solves the
+aeroelastic system. This method is printed below:
+\begin{verbatim}
+def Iterate(self, load_case):
+ """Iterates the aero-structural solution."""
+ self.flow.Iterate()
+ self._UpdateStructuralLoads()
+ self.structure.CalcDisplacements(load_case)
+ self.structure.CalcStresses(load_case)
+ self._UpdateFlowMesh()
+ return
+\end{verbatim}
+This is indeed a very readable script, thanks to Python, and any
+high-level changes to the solution procedure can be easily
+implemented.
+The \emph{Aerostructure} class also contains methods that export all
+the information on the current solution for visualization, an example
+of which is shown in the next section.
+
+
+\subsection{Results}
+
+In order to visualize results, and because we needed to view results
+from multiple disciplines simultaneously, we selected OpenDX. Output
+files in DX format are written at the Python level and the result can
+be seen in Fig.~\ref{fig:aerostructure} for the case of a transonic
+airliner configuration.
+\tthhide{
+\begin{figure*}[t]
+ \centering
+ \epsfig{file=./aerostructure.eps, angle=-90, width=\linewidth}
+ \caption{Aero-structural model and results.}
+ \label{fig:aerostructure}
+\end{figure*}
+}
+\latexhide{
+\begin{figure}[h]
+ \label{fig:aerostructure}
+\special{html:
+<CENTER>
+ <IMG SRC="aerostructure.jpg" WIDTH="600">
+</CENTER>
+}
+ \caption{Aero-structural model and results.}
+\end{figure}
+}
+
+
+The figure illustrates the multidisciplinary nature of the
+problem. The grid pictured in the background is the mesh used by the
+flow solver and is colored by the pressure values computed at the
+cell centers. The wing in the foreground and its outer surface is
+clipped to show the internal structural components which are colored
+by their stress value.
+
+In conclusion, \fpy and Python have been extremely useful tools in our
+pursuit for increasing the usability and flexibility of existing Fortran
+tools.
+
+
+\begin{thebibliography}{99}
+\bibitem{netlib}
+\newblock Netlib repository at UTK and ORNL.
+\newblock \\\wwwsite{http://www.netlib.org/}
+\bibitem{python}
+Python language.
+\newblock \\\wwwsite{http://www.python.org/}
+\bibitem{swig}
+SWIG --- Simplified Wrapper and Interface Generator.
+\newblock \\\wwwsite{http://www.swig.org/}
+\bibitem{pyfort}
+PyFort --- The Python-Fortran connection tool.
+\newblock \\\wwwsite{http://pyfortran.sourceforge.net/}
+\bibitem{fpig}
+FPIG --- Fortran to Python Interface Generator.
+\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/}
+\bibitem{numpy}
+Numerical Extension to Python.
+\newblock \\\wwwsite{http://numpy.sourceforge.net/}
+\bibitem{graham-etal}
+R. L. Graham, D. E. Knuth, and O. Patashnik.
+\newblock {\em {C}oncrete {M}athematics: a foundation for computer science.}
+\newblock Addison-Wesley, 1988
+\bibitem{f2py-ug}
+P. Peterson.
+\newblock {\em {\tt f2py} - Fortran to Python Interface Generator. Second Edition.}
+\newblock 2000
+\newblock
+\\\wwwsite{http://cens.ioc.ee/projects/f2py2e/usersguide.html}
+\bibitem{python-doc:ext}
+Python Documentation: Extending and Embedding.
+\newblock \\\wwwsite{http://www.python.org/doc/ext/}
+\bibitem{PFO}
+P. Peterson. {\em {\tt PyFortranObject} example usages.}
+\newblock 2001
+\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/pyfobj.html}
+\bibitem{reno99}
+Reuther, J., J. J. Alonso, J. R. R. A. Martins, and
+S. C. Smith.
+\newblock ``A Coupled Aero-Structural Optimization Method for
+ Complete Aircraft Configurations'',
+\newblock {\em Proceedings of the 37th Aerospace Sciences Meeting},
+\newblock AIAA Paper 1999-0187. Reno, NV, January, 1999
+\end{thebibliography}
+
+%\end{multicols}
+
+%\begin{figure}[htbp]
+% \begin{center}
+% \epsfig{file=aerostructure2b.ps,width=0.75\textwidth}
+% \end{center}
+%\end{figure}
+
+
+
+\end{document}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: t
+%%% End:
+
+
diff --git a/numpy/f2py/doc/signaturefile.tex b/numpy/f2py/doc/signaturefile.tex
new file mode 100644
index 000000000..3cd16d890
--- /dev/null
+++ b/numpy/f2py/doc/signaturefile.tex
@@ -0,0 +1,368 @@
+
+\section{Signature file}
+\label{sec:signaturefile}
+
+The syntax of a signature file is borrowed from the Fortran~90/95
+language specification. Almost all Fortran~90/95 standard constructs
+are understood. Recall that Fortran~77 is a subset of Fortran~90/95.
+This tool introduces also some new attributes that are used for
+controlling the process of Fortran to Python interface construction.
+In the following, a short overview of the constructs
+used in signature files will be given.
+
+
+\subsection{Module block}
+\label{sec:moduleblock}
+
+A signature file contains one or more \texttt{pythonmodule} blocks. A
+\texttt{pythonmodule} block has the following structure:
+\begin{verbatim}
+python module <modulename>
+ interface
+ <routine signatures>
+ end [interface]
+ interface
+ module <F90/95 modulename>
+ <F90 module data type declarations>
+ <F90 module routine signatures>
+ end [module [<F90/95 modulename>]]
+ end [interface]
+end [pythonmodule [<modulename>]]
+\end{verbatim}
+For each \texttt{pythonmodule} block \fpy will generate a C-file
+\texttt{<modulename>module.c} (see step (iii)). (This is not true if
+\texttt{<modulename>} contains substring \texttt{\_\_user\_\_}, see
+Sec.~\ref{sec:cbmodule} and \texttt{external} attribute).
+
+\subsection{Signatures of Fortran routines and Python functions}
+\label{sec:routineblock}
+
+
+The signature of a Fortran routine has the following structure:
+\begin{verbatim}
+[<typespec>] function|subroutine <routine name> [([<arguments>])] \
+ [result (<entityname>)]
+ [<argument type declarations>]
+ [<argument attribute statements>]
+ [<use statements>]
+ [<common block statements>]
+ [<other statements>]
+end [function|subroutine [<routine name>]]
+\end{verbatim}
+
+Let us introduce also the signature of the corresponding wrapper
+function:
+\begin{verbatim}
+def <routine name>(<required arguments>[,<optional arguments>]):
+ ...
+ return <return variables>
+\end{verbatim}
+
+Before you edit the signature file, you should first decide what is the
+desired signature of the corresponding Python function. \fpy offers
+many possibilities to control the interface construction process: you
+may want to insert/change/remove various attributes in the
+declarations of the arguments in order to change the appearance
+of the arguments in the Python wrapper function.
+
+\begin{itemize}
+\item
+The definition of the \texttt{<argument type declaration>} is
+\begin{verbatim}
+<typespec> [[<attrspec>]::] <entitydecl>
+\end{verbatim}
+where
+\begin{verbatim}
+<typespec> := byte | character[<charselector>]
+ | complex[<kindselector>] | real[<kindselector>]
+ | double complex | double precision
+ | integer[<kindselector>] | logical[<kindselector>]
+\end{verbatim}
+\begin{verbatim}
+<charselector> := *<charlen> | ([len=]<len>[,[kind]<kind>])
+ | (kind=<kind>[,len=<len>])
+<kindselector> := *<intlen> | ([kind=]<kind>)
+\end{verbatim}
+(it makes no sense to modify the \texttt{<typespec>}s generated by \fpy).
+\texttt{<attrspec>} is a comma separated list of attributes (see
+Sec.~\ref{sec:attributes});
+\begin{verbatim}
+<entitydecl> := <name> [[*<charlen>][(<arrayspec>)]
+ | [(<arrayspec>)]*<charlen>]
+ | [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
+\end{verbatim}
+where \texttt{<arrayspec>} is a comma separated list of dimension
+bounds; \texttt{<init\_expr>} is a C-expression (see
+Sec.~\ref{sec:C-expr}). If an argument is not defined with
+\texttt{<argument type declaration>}, its type is determined by
+applying \texttt{implicit} rules (if it is not specified, then
+standard rules are applied).
+
+\item The definition of the \texttt{<argument attribute statement>} is
+a short form of the \texttt{<argument type declaration>}:
+\begin{verbatim}
+<attrspec> <entitydecl>
+\end{verbatim}
+
+\item \texttt{<use statement>} is defined as follows
+\begin{verbatim}
+use <modulename> [,<rename_list> | ,ONLY:<only_list>]
+<rename_list> := local_name=>use_name [,<rename_list>]
+\end{verbatim}
+ Currently the \texttt{use} statement is used to link call-back
+ modules (Sec.~\ref{sec:cbmodule}) and the \texttt{external}
+ arguments (call-back functions).
+
+\item \texttt{<common block statement>} is defined as follows
+\begin{verbatim}
+common /<commonname>/ <shortentitydecl>
+\end{verbatim}
+where
+\begin{verbatim}
+<shortentitydecl> := <name> [(<arrayspec>)] [,<shortentitydecl>]
+\end{verbatim}
+One \texttt{module} block should not contain two or more
+\texttt{common} blocks with the same name. Otherwise, the later ones
+are ignored. The types of variables in \texttt{<shortentitydecl>} can
+be defined in \texttt{<argument type declarations>}. Note that there
+you can specify also the array specifications; then you don't need to
+do that in \texttt{<shortentitydecl>}.
+\end{itemize}
+
+\subsection{Attributes}
+\label{sec:attributes}
+
+The following attributes are used by \fpy:
+\begin{description}
+\item[\texttt{optional}] --- the variable is moved to the end of
+ optional argument list of the wrapper function. Default value of an
+ optional argument can be specified using \texttt{<init\_expr>} in
+  \texttt{entitydecl}. You can also use the \texttt{optional} attribute for
+ \texttt{external} arguments (call-back functions), but it is your
+ responsibility to ensure that it is given by the user if Fortran
+ routine wants to call it.
+\item[\texttt{required}] --- the variable is considered as a required
+ argument (that is default). You will need this in order to overwrite
+ the \texttt{optional} attribute that is automatically set when
+ \texttt{<init\_expr>} is used. However, usage of this attribute
+ should be rare.
+\item[\texttt{dimension(<arrayspec>)}] --- used when the variable is
+ an array. For unbounded dimensions symbols `\texttt{*}' or
+ `\texttt{:}' can be used (then internally the corresponding
+ dimensions are set to -1; you'll notice this when certain exceptions
+ are raised).
+\item[\texttt{external}] --- the variable is a call-back function. \fpy will
+ construct a call-back mechanism for this function. Also call-back
+ functions must be defined by their signatures, and there are several
+ ways to do that. In most cases, \fpy will be able to determine the signatures
+ of call-back functions from the Fortran source code; then it
+ builds an additional \texttt{module} block with a name containing
+ string `\texttt{\_\_user\_\_}' (see Sec.~\ref{sec:cbmodule}) and
+ includes \texttt{use} statement to the routines signature. Anyway,
+ you should check that the generated signature is correct.
+
+ Alternatively, you can specify the signature by inserting to the
+ routines block a ``model'' how the call-back function would be called
+ from Fortran. For subroutines you should use\\
+ \hspace*{2em}\texttt{call <call-back name>(<arguments>)}\\
+ and for functions\\%
+ \hspace*{2em}\texttt{<return value> = <call-back name>(<arguments>)}\\
+ The variables in \texttt{<arguments>} and \texttt{<return value>}
+ must be defined as well. You can use the arguments of the main
+ routine, for instance.
+\item[\texttt{intent(<intentspec>)}] --- this specifies the
+ ``intention'' of the variable. \texttt{<intentspec>} is a comma
+ separated list of the following specifications:
+ \begin{description}
+ \item[\texttt{in}] --- the variable is considered to be an input
+ variable (default). It means that the Fortran function uses only
+ the value(s) of the variable and is assumed not to change it.
+ \item[\texttt{inout}] --- the variable is considered to be an
+ input/output variable which means that Fortran routine may change
+ the value(s) of the variable. Note that in Python only array
+ objects can be changed ``in place''. (\texttt{intent(outin)} is
+ \texttt{intent(inout)}.)
+ \item[\texttt{out}] --- the value of the (output) variable is
+ returned by the wrapper function: it is appended to the list of
+ \texttt{<returned variables>}. If \texttt{out} is specified alone,
+ also \texttt{hide} is assumed.
+ \item[\texttt{hide}] --- use this if the variable \emph{should not}
+ or \emph{need not} to be in the list of wrapper function arguments
+ (not even in optional ones). For example, this is assumed if
+ \texttt{intent(out)} is used. You can ``hide'' an argument if it
+ has always a constant value specified in \texttt{<init\_expr>},
+ for instance.
+ \end{description}
+ The following rules apply:
+ \begin{itemize}
+ \item if no \texttt{intent} attribute is specified, \texttt{intent(in)} is
+ assumed;
+ \item \texttt{intent(in,inout)} is \texttt{intent(in)};
+ \item \texttt{intent(in,hide)}, \texttt{intent(inout,hide)} are \texttt{intent(hide)};
+ \item \texttt{intent(out)} is \texttt{intent(out,hide)};
+\item \texttt{intent(inout)} is NOT \texttt{intent(in,out)}.
+ \end{itemize}
+ In conclusion, the following combinations are ``minimal'':
+ \texttt{intent(in)}, \texttt{intent(inout)}, \texttt{intent(out)},
+ \texttt{intent(hide)}, \texttt{intent(in,out)}, and
+ \texttt{intent(inout,out)}.
+\item[\texttt{check([<C-booleanexpr>])}] --- if
+ \texttt{<C-booleanexpr>} evaluates to zero, an exception is raised
+ about incorrect value or size or any other incorrectness of the
+ variable. If \texttt{check()} or \texttt{check} is used then \fpy
+ will not try to guess the checks automatically.
+\item[\texttt{depend([<names>])}] --- the variable depends on other
+ variables listed in \texttt{<names>}. These dependence relations
+ determine the order of internal initialization of the variables. If
+ you need to change these relations then be careful not to break the
+ dependence relations of other relevant variables. If
+ \texttt{depend()} or \texttt{depend} is used then \fpy will not try
+ to guess the dependence relations automatically.
+\item[\texttt{note(<LaTeX text>)}] --- with this attribute you can
+ include human readable documentation strings to the LaTeX document
+ that \fpy generates. Do not insert here information that \fpy can
+ establish by itself, such as, types, sizes, lengths of the
+ variables. Here you can insert almost arbitrary LaTeX text. Note
+ that \texttt{<LaTeX text>} is mainly used inside the LaTeX
+ \texttt{description} environment. Hint: you can use
+ \texttt{\bs{}texttt\{<name>\}} for typesetting variable \texttt{<name>}
+ in LaTeX. In order to get a new line to the LaTeX document, use
+ \texttt{\bs{}n} followed by a space. For longer text, you may want
+ to use line continuation feature of Fortran 90/95 language: set
+ \texttt{\&} (ampersand)
+ to be the last character in a line.
+\item[\texttt{parameter}] --- the variable is a parameter and it must
+ have a value. If the parameter is used in dimension specification,
+ it is replaced by its value. (Are there any other usages of
+ parameters except in dimension specifications? Let me know and I'll
+ add support for it).
+\end{description}
+
+
+\subsection{C-expressions}
+\label{sec:C-expr}
+
+The signature of a routine may contain C-expressions in
+\begin{itemize}
+\item \texttt{<init\_expr>} for initializing particular variable, or in
+\item \texttt{<C-booleanexpr>} of the \texttt{check} attribute, or in
+\item \texttt{<arrayspec>} of the \texttt{dimension} attribute.
+\end{itemize}
+A C-expression may contain
+\begin{itemize}
+\item standard C-statement,
+\item functions offered in \texttt{math.h},
+\item previously initialized variables (study
+the dependence relations) from the argument list, and
+\item the following CPP-macros:
+ \begin{description}
+ \item[\texttt{len(<name>)}] --- the length of an array \texttt{<name>};
+ \item[\texttt{shape(<name>,<n>)}] --- the $n$-th dimension of an array
+ \texttt{<name>};
+ \item[\texttt{rank(<name>)}] --- the rank of an array \texttt{<name>};
+ \item[\texttt{slen(<name>)}] --- the length of a string \texttt{<name>}.
+ \end{description}
+\end{itemize}
+
+
+In addition, when initializing arrays, an index vector \texttt{int
+ \_i[rank(<name>)];}
+is available: \texttt{\_i[0]} refers to
+the index of the first dimension, \texttt{\_i[1]} to the index of
+the second dimension, etc. For example, the argument type declaration\\
+\hspace*{2em}\texttt{integer a(10) = \_i[0]}\\
+is equivalent to the following Python statement\\
+\hspace*{2em}\texttt{a = array(range(10))}
+
+
+\subsection{Required/optional arguments}
+\label{sec:reqoptargs}
+
+When \texttt{optional} attribute is used (including the usage of
+\texttt{<init\_expr>} without the \texttt{required} attribute), the
+corresponding variable in the argument list of a Fortran routine is
+appended to the optional argument list of the wrapper function.
+
+For optional array argument all dimensions must be bounded (not
+\texttt{(*)} or \texttt{(:)}) and defined at the time of
+initialization (dependence relations).
+
+If the \texttt{None} object is passed in in place of a required array
+argument, it will be considered as optional: that is, the memory is
+allocated (of course, if it has unbounded dimensions, an exception
+will be raised), and if \texttt{<init\_expr>} is defined,
+initialization is carried out.
+
+
+\subsection{Internal checks}
+\label{sec:intchecks}
+
+All array arguments are checked against the correctness of their rank.
+If there is a mismatch, \fpy attempts to fix that by constructing an
+array with a correct rank from the given array argument (there will be
+no performance hit as no data is copied). The freedom to do so is
+given only if some dimensions are unbounded or their value is 1. An
+exception is raised when the sizes will not match.
+
+All bounded dimensions of an array are checked to be larger or equal
+to the dimensions specified in the signature.
+
+So, you don't need to give explicit \texttt{check} attributes to check
+these internal checks.
+
+
+\subsection{Call-back modules}
+\label{sec:cbmodule}
+
+A Fortran routine may have \texttt{external} arguments (call-back
+functions). The signatures of the call-back functions must be defined
+in a call-back \texttt{module} block (its name contains
+\texttt{\_\_user\_\_}), in general; other possibilities are described
+in the \texttt{external} attribute specification (see
+Sec.~\ref{sec:attributes}). For the signatures of call-back
+functions the following restrictions apply:
+\begin{itemize}
+\item Attributes \texttt{external}, \texttt{check(...)}, and
+ initialization statements are ignored.
+\item Attribute \texttt{optional} is used only for changing the order
+ of the arguments.
+\item For arrays all dimension bounds must be specified. They may be
+ C-expressions containing variables from the argument list.
+ Note that here CPP-macros \texttt{len}, \texttt{shape},
+ \texttt{rank}, and \texttt{slen} are not available.
+\end{itemize}
+
+
+\subsection{Common blocks}
+\label{sec:commonblocks}
+
+All fields in a common block are mapped to arrays of appropriate sizes
+and types. Scalars are mapped to rank-0 arrays. For multi-dimensional
+fields the corresponding arrays are transposed. In the type
+declarations of the variables representing the common block fields,
+only \texttt{dimension(<arrayspec>)}, \texttt{intent(hide)}, and
+\texttt{note(<LaTeX text>)} attributes are used, others are ignored.
+
+\subsection{Including files}
+\label{sec:include}
+
+You can include files to the signature file using
+\begin{verbatim}
+include '<filename>'
+\end{verbatim}
+statement. It can be used in any part of the signature file.
+If the file \texttt{<filename>} does not exist or is not in the path,
+the \texttt{include} line is ignored.
+
+\subsection{\fpy directives}
+\label{sec:directives}
+
+You can insert signature statements directly to Fortran source codes
+as comments. Anything that follows \texttt{<comment char>f2py} is
+regarded as a normal statement by \fpy.
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "f2py2e"
+%%% End:
+
diff --git a/numpy/f2py/doc/using_F_compiler.txt b/numpy/f2py/doc/using_F_compiler.txt
new file mode 100644
index 000000000..3067f0776
--- /dev/null
+++ b/numpy/f2py/doc/using_F_compiler.txt
@@ -0,0 +1,147 @@
+
+Title: Wrapping F compiled Fortran 90 modules with F2PY
+ ================================================
+
+Rationale: The F compiler does not support external procedures which
+ makes it impossible to use it in F2PY in a normal way.
+ This document describes a workaround to this problem so
+ that F compiled codes can be still wrapped with F2PY.
+
+Author: Pearu Peterson
+Date: May 8, 2002
+
+Acknowledgement: Thanks to Siegfried Gonzi who hammered me to produce
+ this document.
+
+Normally wrapping Fortran 90 modules to Python using F2PY is carried
+out with the following command
+
+ f2py -c -m fun foo.f90
+
+where file foo.f90 contains, for example,
+
+module foo
+ public :: bar
+ contains
+ subroutine bar (a)
+ integer,intent(inout) :: a
+ print *,"Hello from foo.bar"
+ print *,"a=",a
+ a = a + 5
+ print *,"a=",a
+ end subroutine bar
+end module foo
+
+Then with a supported F90 compiler (running `f2py -c --help-compiler'
+will display the found compilers) f2py will generate an extension
+module fun.so into the current directory and the Fortran module foo
+subroutine bar can be called from Python as follows
+
+>>> import fun
+>>> print fun.foo.bar.__doc__
+bar - Function signature:
+ bar(a)
+Required arguments:
+ a : in/output rank-0 array(int,'i')
+
+>>> from Numeric import array
+>>> a = array(3)
+>>> fun.foo.bar(a)
+ Hello from foo.bar
+ a= 3
+ a= 8
+>>> a
+8
+>>>
+
+This works nicely with all supported Fortran compilers.
+
+However, the F compiler (http://www.fortran.com/F/compilers.html) is
+an exception. Namely, the F compiler is designed to recognize only
+module procedures (and main programs, of course) but F2PY needs to
+compile also the so-called external procedures that it generates to
+facilitate accessing Fortran F90 module procedures from C and
+subsequently from Python. As a result, wrapping F compiled Fortran
+procedures to Python is _not_ possible using the simple procedure as
+described above. But, there is a workaround that I'll describe below
+in five steps.
+
+1) Compile foo.f90:
+
+ F -c foo.f90
+
+This creates an object file foo.o into the current directory.
+
+2) Create the signature file:
+
+ f2py foo.f90 -h foo.pyf
+
+This creates a file foo.pyf containing
+
+module foo ! in foo.f90
+ real public :: bar
+ subroutine bar(a) ! in foo.f90:foo
+ integer intent(inout) :: a
+ end subroutine bar
+end module foo
+
+3) Open the file foo.pyf with your favorite text editor and change the
+ above signature to
+
+python module foo
+ interface
+ subroutine bar(a)
+ fortranname foo_MP_bar
+ intent(c) bar
+ integer intent(in,out) :: a
+ end subroutine bar
+ end interface
+end python module foo
+
+The most important modifications are
+
+ a) adding `python' keyword everywhere before the `module' keyword
+
+ b) including an `interface' block around all the subroutine blocks.
+
+ c) specifying the real symbol name of the subroutine using
+ `fortranname' statement. F generated symbol names are in the form
+ <module name>_MP_<subroutine name>
+
+ d) specifying that subroutine is `intent(c)'.
+
+Notice that the `intent(inout)' attribute is changed to
+`intent(in,out)' that instructs the wrapper to return the modified
+value of `a'.
+
+4) Build the extension module
+
+ f2py -c foo.pyf foo.o --fcompiler=Gnu /opt/F/lib/quickfit.o \
+ /opt/F/lib/libf96.a
+
+This will create the extension module foo.so into the current
+directory. Notice that you must use Gnu compiler (gcc) for linking.
+And the paths to F specific object files and libraries may differ for
+your F installation.
+
+5) Finally, we can call the module subroutine `bar' from Python
+
+>>> import foo
+>>> print foo.bar.__doc__
+bar - Function signature:
+ a = bar(a)
+Required arguments:
+ a : input int
+Return objects:
+ a : int
+
+>>> foo.bar(3)
+8
+>>>
+
+Notice that the F compiled module procedures are called as ordinary
+external procedures. Also I/O seems to be lacking for F compiled
+Fortran modules.
+
+Enjoy,
+ Pearu
diff --git a/numpy/f2py/doc/win32_notes.txt b/numpy/f2py/doc/win32_notes.txt
new file mode 100644
index 000000000..1b7b9029c
--- /dev/null
+++ b/numpy/f2py/doc/win32_notes.txt
@@ -0,0 +1,85 @@
+The following notes are from Eric Jones.
+
+My Setup:
+
+For Python/Fortran development, I run Windows 2000 and use the mingw32
+(www.mingw.org) set of gcc/g77 compilers and tools (gcc 2.95.2) to build python
+extensions. I'll also occasionally use MSVC for extension development, but
+rarely on projects that include Fortran code. This short HOWTO describes how
+I use f2py in the Windows environment. Pretty much everything is done from
+a CMD (DOS) prompt, so you'll need to be familiar with using shell commands.
+
+Installing f2py:
+
+Before installing f2py, you'll need to install python. I use python2.1 (maybe
+python2.2 will be out by the time you read this). Any version of Python beyond
+version 1.52 should be fine. See www.python.org for info on installing Python.
+
+You'll also need Numeric which is available at
+http://sourceforge.net/projects/numpy/. The latest version is 20.3.
+
+Since Pearu has moved to a setup.py script, installation is pretty easy. You
+can download f2py from http://cens.ioc.ee/projects/f2py2e/. The latest public
+release is http://cens.ioc.ee/projects/f2py2e/rel-3.x/f2py-3.latest.tgz. Even
+though this is a .tgz file instead of a .zip file, most standard compression
+utilities such as WinZip (www.winzip.com) handle unpacking .tgz files
+automatically. Here are the download steps:
+
+ 1. Download the latest version of f2py and save it to disk.
+
+ 2. Use WinZip or some other tool to open the "f2py.xxx.tgz" file.
+ a. When WinZip says the archive contains one file, "f2py.xxx.tar",
+ and asks if it should open it, respond with "yes".
+ b. Extract (use the extract button at the top) all the files
+ in the archive into a directory. I'll use c:\f2py2e
+
+ 3. Open a cmd prompt by clicking start->run and typing "cmd.exe".
+ Now type the following commands.
+
+ C:\WINDOWS\SYSTEM32> cd c:\f2py2e
+ C:\F2PY2E> python setup.py install
+
+ This will install f2py in the c:\python21\f2py2e directory. It
+ also copies a few scripts into the c:\python21\Scripts directory.
+ That's all there is to installing f2py. Now let's set up the environment
+ so that f2py is easy to use.
+
+ 4. You need to set up a couple of environment variables. The path
+ "c:\python21\Scripts" needs to be added to your path variables.
+ To do this, go to the environment variables settings page. This is
+ where it is on windows 2000:
+
+ Desktop->(right click)My Computer->Properties->Advanced->
+ Environment Variables
+
+ a. Add "c:\python21\Scripts" to the end of the Path variable.
+ b. If it isn't already there, add ".py" to the PATHEXT variable.
+ This tells the OS to execute f2py.py even when just "f2py" is
+ typed at a command prompt.
+
+ 5. Well, there actually isn't anything to be done here. The Python
+ installation should have taken care of associating .py files with
+ Python for execution, so you shouldn't have to do anything to
+ registry settings.
+
+To test your installation, open a new cmd prompt, and type the following:
+
+ C:\WINDOWS\SYSTEM32> f2py
+ Usage:
+ f2py [<options>] <fortran files> [[[only:]||[skip:]] \
+ <fortran functions> ] \
+ [: <fortran files> ...]
+ ...
+
+This prints out the usage information for f2py. If it doesn't, there is
+something wrong with the installation.
+
+Testing:
+The f2py test scripts are kinda Unix-centric, so they don't work under windows.
+
+XXX include test script XXX.
+
+Compiler and setup.py issues:
+
+XXX
+
diff --git a/numpy/f2py/docs/FAQ.txt b/numpy/f2py/docs/FAQ.txt
new file mode 100644
index 000000000..e2ed79445
--- /dev/null
+++ b/numpy/f2py/docs/FAQ.txt
@@ -0,0 +1,615 @@
+
+======================================================================
+ F2PY Frequently Asked Questions
+======================================================================
+
+.. contents::
+
+General information
+===================
+
+Q: How to get started?
+----------------------
+
+First, install__ F2PY. Then check that F2PY installation works
+properly (see below__). Try out a `simple example`__.
+
+Read `F2PY Users Guide and Reference Manual`__. It contains lots
+of complete examples.
+
+If you have any questions/problems when using F2PY, don't hesitate to
+turn to `F2PY users mailing list`__ or directly to me.
+
+__ index.html#installation
+__ #testing
+__ index.html#usage
+__ usersguide/index.html
+__ index.html#mailing-list
+
+Q: When to report bugs?
+-----------------------
+
+* If F2PY scanning fails on Fortran sources that otherwise compile
+ fine.
+
+* After checking that you have the latest version of F2PY from its
+ CVS. It is possible that a bug has been fixed already. See also the
+ log entries in the file `HISTORY.txt`_ (`HISTORY.txt in CVS`_).
+
+* After checking that your Python and Numerical Python installations
+ work correctly.
+
+* After checking that your C and Fortran compilers work correctly.
+
+
+Q: How to report bugs?
+----------------------
+
+You can send bug reports directly to me. Please, include information
+about your platform (operating system, version) and
+compilers/linkers, e.g. the output (both stdout/stderr) of
+::
+
+ python -c 'import f2py2e.diagnose;f2py2e.diagnose.run()'
+
+Feel free to add any other relevant information. However, avoid
+sending the output of F2PY generated ``.pyf`` files (unless they are
+manually modified) or any binary files like shared libraries or object
+codes.
+
+While reporting bugs, you may find the following notes useful:
+
+* `How To Ask Questions The Smart Way`__ by E. S. Raymond and R. Moen.
+
+* `How to Report Bugs Effectively`__ by S. Tatham.
+
+__ http://www.catb.org/~esr/faqs/smart-questions.html
+__ http://www.chiark.greenend.org.uk/~sgtatham/bugs.html
+
+Installation
+============
+
+Q: How to use F2PY with different Python versions?
+--------------------------------------------------
+
+Run the installation command using the corresponding Python
+executable. For example,
+::
+
+ python2.1 setup.py install
+
+installs the ``f2py`` script as ``f2py2.1``.
+
+See `Distutils User Documentation`__ for more information how to
+install Python modules to non-standard locations.
+
+__ http://www.python.org/sigs/distutils-sig/doc/inst/inst.html
+
+
+Q: Why F2PY is not working after upgrading?
+-------------------------------------------
+
+If upgrading from F2PY version 2.3.321 or earlier then remove all f2py
+specific files from ``/path/to/python/bin`` directory before
+running installation command.
+
+Q: How to get/upgrade scipy_distutils when using F2PY from CVS?
+---------------------------------------------------------------
+
+To get scipy_distutils from SciPy CVS repository, run
+::
+
+ cd cvs/f2py2e/
+ make scipy_distutils
+
+This will checkout scipy_distutils to the current directory.
+
+You can upgrade scipy_distutils by executing
+::
+
+ cd cvs/f2py2e/scipy_distutils
+ cvs update -Pd
+
+and install it by executing
+::
+
+ cd cvs/f2py2e/scipy_distutils
+ python setup_scipy_distutils.py install
+
+Most of the time, f2py2e and scipy_distutils can be upgraded
+independently.
+
+Testing
+=======
+
+Q: How to test if F2PY is installed correctly?
+----------------------------------------------
+
+Run
+::
+
+ f2py
+
+without arguments. If F2PY is installed correctly then it should print
+the usage information for f2py.
+
+Q: How to test if F2PY is working correctly?
+--------------------------------------------
+
+For a quick test, try out an example problem from Usage__
+section in `README.txt`_.
+
+__ index.html#usage
+
+For running F2PY unit tests, see `TESTING.txt`_.
+
+
+Q: How to run tests and examples in f2py2e/test-suite/ directory?
+---------------------------------------------------------------------
+
+You shouldn't. These tests are obsolete and I have no intention of
+making them work. They will be removed in the future.
+
+
+Compiler/Platform-specific issues
+=================================
+
+Q: What are supported platforms and compilers?
+----------------------------------------------
+
+F2PY is developed on Linux system with a GCC compiler (versions
+2.95.x, 3.x). Fortran 90 related hooks are tested against Intel
+Fortran Compiler. F2PY should work under any platform where Python and
+Numeric are installed and a supported Fortran compiler is available.
+
+To see a list of supported compilers, execute::
+
+ f2py -c --help-fcompiler
+
+Example output::
+
+ List of available Fortran compilers:
+ --fcompiler=gnu GNU Fortran Compiler (3.3.4)
+ --fcompiler=intel Intel Fortran Compiler for 32-bit apps (8.0)
+ List of unavailable Fortran compilers:
+ --fcompiler=absoft Absoft Corp Fortran Compiler
+ --fcompiler=compaq Compaq Fortran Compiler
+ --fcompiler=compaqv DIGITAL|Compaq Visual Fortran Compiler
+ --fcompiler=hpux HP Fortran 90 Compiler
+ --fcompiler=ibm IBM XL Fortran Compiler
+ --fcompiler=intele Intel Fortran Compiler for Itanium apps
+ --fcompiler=intelev Intel Visual Fortran Compiler for Itanium apps
+ --fcompiler=intelv Intel Visual Fortran Compiler for 32-bit apps
+ --fcompiler=lahey Lahey/Fujitsu Fortran 95 Compiler
+ --fcompiler=mips MIPSpro Fortran Compiler
+ --fcompiler=nag NAGWare Fortran 95 Compiler
+ --fcompiler=pg Portland Group Fortran Compiler
+ --fcompiler=sun Sun|Forte Fortran 95 Compiler
+ --fcompiler=vast Pacific-Sierra Research Fortran 90 Compiler
+ List of unimplemented Fortran compilers:
+ --fcompiler=f Fortran Company/NAG F Compiler
+ For compiler details, run 'config_fc --verbose' setup command.
+
+
+Q: How to use the F compiler in F2PY?
+-------------------------------------
+
+Read `f2py2e/doc/using_F_compiler.txt`__. It describes why the F
+compiler cannot be used in a normal way (i.e. using ``-c`` switch) to
+build F2PY generated modules. It also gives a workaround to this
+problem.
+
+__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/doc/using_F_compiler.txt?rev=HEAD&content-type=text/vnd.viewcvs-markup
+
+Q: How to use F2PY under Windows?
+---------------------------------
+
+F2PY can be used both within Cygwin__ and MinGW__ environments under
+Windows; F2PY can also be used in the native Windows terminal.
+See the section `Setting up environment`__ for Cygwin and MinGW.
+
+__ http://cygwin.com/
+__ http://www.mingw.org/
+__ http://cens.ioc.ee/~pearu/scipy/BUILD_WIN32.html#setting-up-environment
+
+Install scipy_distutils and F2PY. Win32 installers of these packages
+are provided in `F2PY Download`__ section.
+
+__ http://cens.ioc.ee/projects/f2py2e/#download
+
+Use the ``--compiler=`` and ``--fcompiler=`` F2PY command line switches
+to specify which C and Fortran compilers F2PY should use, respectively.
+
+Under MinGW environment, ``mingw32`` is default for a C compiler.
+
+Supported and Unsupported Features
+==================================
+
+Q: Does F2PY support ``ENTRY`` statements?
+------------------------------------------
+
+Yes, starting at F2PY version higher than 2.39.235_1706.
+
+Q: Does F2PY support derived types in F90 code?
+-----------------------------------------------
+
+Not yet. However I do have plans to implement support for F90 TYPE
+constructs in the future. But note that the task is non-trivial and may
+require the next edition of F2PY for which I don't have resources to
+work with at the moment.
+
+Jeffrey Hagelberg from LLNL has made progress on adding
+support for derived types to f2py. He writes:
+
+ At this point, I have a version of f2py that supports derived types
+ for most simple cases. I have multidimensional arrays of derived
+ types and allocatable arrays of derived types working. I'm just now
+ starting to work on getting nested derived types to work. I also
+ haven't tried putting complex number in derived types yet.
+
+Hopefully he can contribute his changes to f2py soon.
+
+Q: Does F2PY support pointer data in F90 code?
+-----------------------------------------------
+
+No. I have never needed it and I haven't studied if there are any
+obstacles to add pointer data support to F2PY.
+
+Q: What if Fortran 90 code uses ``<type spec>(kind=KIND(..))``?
+---------------------------------------------------------------
+
+Currently, F2PY can handle only ``<type spec>(kind=<kindselector>)``
+declarations where ``<kindselector>`` is a numeric integer (e.g. 1, 2,
+4,...) but not a function call ``KIND(..)`` or any other
+expression. F2PY needs to know what would be the corresponding C type
+and a general solution for that would be too complicated to implement.
+
+However, F2PY provides a hook to overcome this difficulty, namely,
+users can define their own <Fortran type> to <C type> maps. For
+example, if Fortran 90 code contains::
+
+ REAL(kind=KIND(0.0D0)) ...
+
+then create a file ``.f2py_f2cmap`` (into the working directory)
+containing a Python dictionary::
+
+ {'real':{'KIND(0.0D0)':'double'}}
+
+for instance.
+
+Or more generally, the file ``.f2py_f2cmap`` must contain a dictionary
+with items::
+
+ <Fortran typespec> : {<selector_expr>:<C type>}
+
+that defines mapping between Fortran type::
+
+ <Fortran typespec>([kind=]<selector_expr>)
+
+and the corresponding ``<C type>``. ``<C type>`` can be one of the
+following::
+
+ char
+ signed_char
+ short
+ int
+ long_long
+ float
+ double
+ long_double
+ complex_float
+ complex_double
+ complex_long_double
+ string
+
+For more information, see ``f2py2e/capi_maps.py``.
+
+Related software
+================
+
+Q: How does F2PY differ from Pyfort?
+--------------------------------------
+
+F2PY and Pyfort have very similar aims and ideology of how they are
+targeted. Both projects started to evolve in the same year 1999
+independently. When we discovered each others projects, a discussion
+started to join the projects but that unfortunately failed for
+various reasons, e.g. both projects had evolved so far that merging
+the tools would have been impractical and giving up the efforts that
+the developers of both projects have made was unacceptable to both
+parties. And so, nowadays we have two tools for connecting Fortran
+with Python and this fact will hardly change in near future. To decide
+which one to choose is a matter of taste, I can only recommend to try
+out both to make up your choice.
+
+At the moment F2PY can handle more wrapping tasks than Pyfort,
+e.g. with F2PY one can wrap Fortran 77 common blocks, Fortran 90
+module routines, Fortran 90 module data (including allocatable
+arrays), one can call Python from Fortran, etc etc. F2PY scans Fortran
+codes to create signature (.pyf) files. F2PY is free from most of the
+limitations listed in `the corresponding section of Pyfort
+Reference Manual`__.
+
+__ http://pyfortran.sourceforge.net/pyfort/pyfort_reference.htm#pgfId-296925
+
+There is a conceptual difference on how F2PY and Pyfort handle the
+issue of different data ordering in Fortran and C multi-dimensional
+arrays. Pyfort generated wrapper functions have optional arguments
+TRANSPOSE and MIRROR that can be used to control explicitly how the array
+arguments and their dimensions are passed to Fortran routine in order
+to deal with the C/Fortran data ordering issue. F2PY generated wrapper
+functions hide the whole issue from an end-user so that translation
+between Fortran and C/Python loops and array element access codes is
+one-to-one. How the F2PY generated wrappers deal with the issue is
+determined by a person who creates a signature file via using
+attributes like ``intent(c)``, ``intent(copy|overwrite)``,
+``intent(inout|in,out|inplace)`` etc.
+
+For example, let's consider a typical usage of both F2PY and Pyfort
+when wrapping the following simple Fortran code:
+
+.. include:: simple.f
+ :literal:
+
+The comment lines starting with ``cf2py`` are read by F2PY (so that we
+don't need to generate/handwrite an intermediate signature file in
+this simple case) while for a Fortran compiler they are just comment
+lines.
+
+And here is a Python version of the Fortran code:
+
+.. include:: pytest.py
+ :literal:
+
+To generate a wrapper for subroutine ``foo`` using F2PY, execute::
+
+ $ f2py -m f2pytest simple.f -c
+
+that will generate an extension module ``f2pytest`` into the current
+directory.
+
+To generate a wrapper using Pyfort, create the following file
+
+.. include:: pyforttest.pyf
+ :literal:
+
+and execute::
+
+ $ pyfort pyforttest
+
+In Pyfort GUI add ``simple.f`` to the list of Fortran sources and
+check that the signature file is in free format. And then copy
+``pyforttest.so`` from the build directory to the current directory.
+
+Now, in Python
+
+.. include:: simple_session.dat
+ :literal:
+
+Q: Can Pyfort .pyf files be used with F2PY and vice versa?
+------------------------------------------------------------
+
+After some simple modifications, yes. You should take into account the
+following differences in Pyfort and F2PY .pyf files.
+
++ F2PY signature file contains ``python module`` and ``interface``
+ blocks that are equivalent to Pyfort ``module`` block usage.
+
++ F2PY attribute ``intent(inplace)`` is equivalent to Pyfort
+ ``intent(inout)``. F2PY ``intent(inout)`` is a strict (but safe)
+ version of ``intent(inplace)``, any mismatch in arguments with
+ expected type, size, or contiguousness will trigger an exception
+ while ``intent(inplace)`` (dangerously) modifies arguments
+ attributes in-place.
+
+Misc
+====
+
+Q: How to establish which Fortran compiler F2PY will use?
+---------------------------------------------------------
+
+This question may be relevant when using F2PY in Makefiles. Here
+follows a script demonstrating how to determine which Fortran compiler
+and flags F2PY will use::
+
+ # Using post-0.2.2 scipy_distutils
+ from scipy_distutils.fcompiler import new_fcompiler
+ compiler = new_fcompiler() # or new_fcompiler(compiler='intel')
+ compiler.dump_properties()
+
+ # Using pre-0.2.2 scipy_distutils
+ import os
+ from scipy_distutils.command.build_flib import find_fortran_compiler
+ def main():
+ fcompiler = os.environ.get('FC_VENDOR')
+ fcompiler_exec = os.environ.get('F77')
+ f90compiler_exec = os.environ.get('F90')
+ fc = find_fortran_compiler(fcompiler,
+ fcompiler_exec,
+ f90compiler_exec,
+ verbose = 0)
+ print 'FC=',fc.f77_compiler
+ print 'FFLAGS=',fc.f77_switches
+ print 'FOPT=',fc.f77_opt
+ if __name__ == "__main__":
+ main()
+
+Users feedback
+==============
+
+Q: Where to find additional information on using F2PY?
+------------------------------------------------------
+
+There are several F2PY related tutorials, slides, papers, etc
+available:
+
++ `Fortran to Python Interface Generator with an Application to
+ Aerospace Engineering`__ by P. Peterson, J. R. R. A. Martins, and
+ J. J. Alonso in `In Proceedings of the 9th International Python
+ Conference`__, Long Beach, California, 2001.
+
+__ http://www.python9.org/p9-cdrom/07/index.htm
+__ http://www.python9.org/
+
++ Section `Adding Fortran90 code`__ in the UG of `The Bolometer Data
+ Analysis Project`__.
+
+__ http://www.astro.rub.de/laboca/download/boa_master_doc/7_4Adding_Fortran90_code.html
+__ http://www.openboa.de/
+
++ Powerpoint presentation `Python for Scientific Computing`__ by Eric
+ Jones in `The Ninth International Python Conference`__.
+
+__ http://www.python9.org/p9-jones.ppt
+__ http://www.python9.org/
+
++ Paper `Scripting a Large Fortran Code with Python`__ by Alvaro Caceres
+ Calleja in `International Workshop on Software Engineering for High
+ Performance Computing System Applications`__.
+
+__ http://csdl.ics.hawaii.edu/se-hpcs/pdf/calleja.pdf
+__ http://csdl.ics.hawaii.edu/se-hpcs/
+
++ Section `Automatic building of C/Fortran extension for Python`__ by
+ Simon Lacoste-Julien in `Summer 2002 Report about Hybrid Systems
+ Modelling`__.
+
+__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html#tth_sEc3.4
+__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html
+
++ `Scripting for Computational Science`__ by Hans Petter Langtangen
+ (see the `Mixed language programming`__ and `NumPy array programming`__
+ sections for examples on using F2PY).
+
+__ http://www.ifi.uio.no/~inf3330/lecsplit/
+__ http://www.ifi.uio.no/~inf3330/lecsplit/slide662.html
+__ http://www.ifi.uio.no/~inf3330/lecsplit/slide718.html
+
++ Chapters 5 and 9 of `Python Scripting for Computational Science`__
+ by H. P. Langtangen for case studies on using F2PY.
+
+__ http://www.springeronline.com/3-540-43508-5
+
++ Section `Fortran Wrapping`__ in `Continuity`__, a computational tool
+ for continuum problems in bioengineering and physiology.
+
+__ http://www.continuity.ucsd.edu/cont6_html/docs_fram.html
+__ http://www.continuity.ucsd.edu/
+
++ Presentation `PYFORT and F2PY: 2 ways to bind C and Fortran with Python`__
+ by Reiner Vogelsang.
+
+__ http://www.prism.enes.org/WPs/WP4a/Slides/pyfort/pyfort.html
+
++ Lecture slides of `Extending Python: speed it up`__.
+
+__ http://www.astro.uni-bonn.de/~heith/lecture_pdf/friedrich5.pdf
+
+ Wiki topics on `Wrapping Tools`__ and `Wrapping Benchmarks`__ for Climate
+ System Center at the University of Chicago.
+
+__ https://geodoc.uchicago.edu/climatewiki/DiscussWrappingTools
+__ https://geodoc.uchicago.edu/climatewiki/WrappingBenchmarks
+
++ `Performance Python with Weave`__ by Prabhu Ramachandran.
+
+__ http://www.scipy.org/documentation/weave/weaveperformance.html
+
++ `How To Install py-f2py on Mac OSX`__
+
+__ http://py-f2py.darwinports.com/
+
+Please, let me know if there are any other sites that document F2PY
+usage in one or another way.
+
+Q: What projects use F2PY?
+--------------------------
+
++ `SciPy: Scientific tools for Python`__
+
+__ http://www.scipy.org/
+
++ `The Bolometer Data Analysis Project`__
+
+__ http://www.openboa.de/
+
++ `pywavelet`__
+
+__ http://www.met.wau.nl/index.html?http://www.met.wau.nl/medewerkers/moenea/python/pywavelet.html
+
++ `PyARTS: an ARTS related Python package`__.
+
+__ http://www.met.ed.ac.uk/~cory/PyARTS/
+
++ `Python interface to PSPLINE`__, a collection of Spline and
+ Hermite interpolation tools for 1D, 2D, and 3D datasets on
+ rectilinear grids.
+
+__ http://pypspline.sourceforge.net
+
++ `Markovian Analysis Package for Python`__.
+
+__ http://pymc.sourceforge.net
+
++ `Modular toolkit for Data Processing (MDP)`__
+
+__ http://mdp-toolkit.sourceforge.net/
+
+
+Please, send me a note if you are using F2PY in your project.
+
+Q: What do people think about F2PY?
+------------------------------------
+
+*F2PY is GOOD*:
+
+Here are some comments people have posted to f2py mailing list and c.l.py:
+
++ Ryan Krauss: I really appreciate f2py. It seems weird to say, but I
+ am excited about relearning FORTRAN to compliment my python stuff.
+
++ Fabien Wahl: f2py is great, and is used extensively over here...
+
++ Fernando Perez: Anyway, many many thanks for this amazing tool.
+
+ I haven't used pyfort, but I can definitely vouch for the amazing quality of
+ f2py. And since f2py is actively used by scipy, it won't go unmaintained.
+ It's quite impressive, and very easy to use.
+
++ Kevin Mueller: First off, thanks to those responsible for F2PY;
+ its been an integral tool of my research for years now.
+
++ David Linke: Best regards and thanks for the great tool!
+
++ Perrin Meyer: F2Py is really useful!
+
++ Hans Petter Langtangen: First of all, thank you for developing
+ F2py. This is a very important contribution to the scientific
+ computing community. We are using F2py a lot and are very happy with
+ it.
+
++ Berthold Höllmann: Thank's alot. It seems it is also working in my
+ 'real' application :-)
+
++ John Hunter: At first I wrapped them with f2py (unbelievably easy!)...
+
++ Cameron Laird: Among many other features, Python boasts a mature
+ f2py, which makes it particularly rewarding to yoke Fortran- and
+ Python-coded modules into finished applications.
+
++ Ryan Gutenkunst: f2py is sweet magic.
+
+*F2PY is BAD*:
+
++ `Is it worth using on a large scale python drivers for Fortran
+ subroutines, interfaced with f2py?`__
+
+__ http://sepwww.stanford.edu/internal/computing/python.html
+
+Additional comments on F2PY, good or bad, are welcome!
+
+.. References:
+.. _README.txt: index.html
+.. _HISTORY.txt: HISTORY.html
+.. _HISTORY.txt in CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup
+.. _TESTING.txt: TESTING.html
diff --git a/numpy/f2py/docs/HISTORY.txt b/numpy/f2py/docs/HISTORY.txt
new file mode 100644
index 000000000..876ab2362
--- /dev/null
+++ b/numpy/f2py/docs/HISTORY.txt
@@ -0,0 +1,1044 @@
+.. -*- rest -*-
+
+=========================
+ F2PY History
+=========================
+
+:Author: Pearu Peterson <pearu@cens.ioc.ee>
+:Web-site: http://cens.ioc.ee/projects/f2py2e/
+:Date: $Date: 2005/09/16 08:36:45 $
+:Revision: $Revision: 1.191 $
+
+.. Contents::
+
+Release 2.46.243
+=====================
+
+* common_rules.py
+
+ - Fixed compiler warnings.
+
+* fortranobject.c
+
+ - Fixed another dims calculation bug.
+ - Fixed dims calculation bug and added the corresponding check.
+ - Accept higher dimensional arrays if their effective rank matches.
+ Effective rank is multiplication of non-unit dimensions.
+
+* f2py2e.py
+
+ - Added support for scipy.distutils version 0.4.0.
+
+* Documentation
+
+ - Added example about ``intent(callback,hide)`` usage. Updates.
+ - Updated FAQ.
+
+* cb_rules.py
+
+ - Fixed missing need kw error.
+ - Fixed getting callback non-existing extra arguments.
+ - External callback functions and extra_args can be set via
+ ext.module namespace.
+ - Avoid crash when external callback function is not set.
+
+* rules.py
+
+ - Enabled ``intent(out)`` for ``intent(aux)`` non-complex scalars.
+ - Fixed splitting lines in F90 fixed form mode.
+ - Fixed FORTRANAME typo, relevant when wrapping scalar functions with
+ ``--no-wrap-functions``.
+ - Improved failure handling for callback functions.
+ - Fixed bug in writing F90 wrapper functions when a line length
+ is exactly 66.
+
+* cfuncs.py
+
+ - Fixed dependency issue with typedefs.
+ - Introduced ``-DUNDERSCORE_G77`` that causes an extra underscore to be
+ used for external names that contain an underscore.
+
+* capi_maps.py
+
+ - Fixed typos.
+ - Fixed using complex cb functions.
+
+* crackfortran.py
+
+ - Introduced parent_block key. Get ``use`` statements recursively
+ from parent blocks.
+ - Apply parameter values to kindselectors.
+ - Fixed bug evaluating ``selected_int_kind`` function.
+ - Ignore Name and Syntax errors when evaluating scalars.
+ - Treat ``<int>_intType`` as ``<int>`` in get_parameters.
+ - Added support for F90 line continuation in fix format mode.
+ - Include optional attribute of external to signature file.
+ - Add ``entry`` arguments to variable lists.
+ - Treat \xa0 character as space.
+ - Fixed bug where __user__ callback subroutine was added to its
+ argument list.
+ - In strict 77 mode read only the first 72 columns.
+ - Fixed parsing ``v(i) = func(r)``.
+ - Fixed parsing ``integer*4::``.
+ - Fixed parsing ``1.d-8`` when used as a parameter value.
+
+Release 2.45.241_1926
+=====================
+
+* diagnose.py
+
+ - Clean up output.
+
+* cb_rules.py
+
+ - Fixed ``_cpointer`` usage for subroutines.
+ - Fortran function ``_cpointer`` can be used for callbacks.
+
+* func2subr.py
+
+ - Use result name when wrapping functions with subroutines.
+
+* f2py2e.py
+
+ - Fixed ``--help-link`` switch.
+ - Fixed ``--[no-]lower`` usage with ``-c`` option.
+ - Added support for ``.pyf.src`` template files.
+
+* __init__.py
+
+ - Using ``exec_command`` in ``compile()``.
+
+* setup.py
+
+ - Clean up.
+ - Disabled ``need_scipy_distutils`` function. From now on it is assumed
+ that proper version of ``scipy_distutils`` is already installed.
+
+* capi_maps.py
+
+ - Added support for wrapping unsigned integers. In a .pyf file
+ ``integer(-1)``, ``integer(-2)``, ``integer(-4)`` correspond to
+ ``unsigned char``, ``unsigned short``, ``unsigned`` C types,
+ respectively.
+
+* tests/c/return_real.py
+
+ - Added tests to wrap C functions returning float/double.
+
+* fortranobject.c
+
+ - Added ``_cpointer`` attribute to wrapped objects.
+
+* rules.py
+
+ - ``_cpointer`` feature for wrapped module functions is not
+ functional at the moment.
+ - Introduced ``intent(aux)`` attribute. Useful to save a value
+ of a parameter to auxiliary C variable. Note that ``intent(aux)``
+ implies ``intent(c)``.
+ - Added ``usercode`` section. When ``usercode`` is used in ``python
+ module`` block twice then the contents of the second multi-line
+ block is inserted after the definition of external routines.
+ - Call-back function arguments can be CObjects.
+
+* cfuncs.py
+
+ - Allow call-back function arguments to be fortran objects.
+ - Allow call-back function arguments to be built-in functions.
+
+* crackfortran.py
+
+ - Fixed detection of a function signature from usage example.
+ - Cleaned up -h output for intent(callback) variables.
+ - Repair malformed argument list (missing argument name).
+ - Warn on the usage of multiple attributes without type specification.
+ - Evaluate only scalars ``<initexpr>`` (e.g. not of strings).
+ - Evaluate ``<initexpr>`` using parameters name space.
+ - Fixed resolving `<name>(<args>)[result(<result>)]` pattern.
+ - ``usercode`` can be used more than once in the same context.
+
+Release 2.43.239_1831
+=====================
+
+* auxfuncs.py
+
+ - Made ``intent(in,inplace)`` to mean ``intent(inplace)``.
+
+* f2py2e.py
+
+ - Introduced ``--help-link`` and ``--link-<resource>``
+ switches to link generated extension module with system
+ ``<resource>`` as defined by scipy_distutils/system_info.py.
+
+* fortranobject.c
+
+ - Patch to make PyArray_CanCastSafely safe on 64-bit machines.
+ Fixes incorrect results when passing ``array('l')`` to
+ ``real*8 intent(in,out,overwrite)`` arguments.
+
+* rules.py
+
+ - Avoid empty continuation lines in Fortran wrappers.
+
+* cfuncs.py
+
+ - Adding ``\0`` at the end of a space-padded string, fixes tests
+ on 64-bit Gentoo.
+
+* crackfortran.py
+
+ - Fixed splitting lines with string parameters.
+
+Release 2.43.239_1806
+=====================
+
+* Tests
+
+ - Fixed test site that failed after padding strings with spaces
+ instead of zeros.
+
+* Documentation
+
+ - Documented ``intent(inplace)`` attribute.
+ - Documented ``intent(callback)`` attribute.
+ - Updated FAQ, added Users Feedback section.
+
+* cfuncs.py
+
+ - Padding longer (than provided from Python side) strings with spaces
+ (that is Fortran behavior) instead of nulls (that is C strncpy behavior).
+
+* f90mod_rules.py
+
+ - Undoing rmbadnames in Python and Fortran layers.
+
+* common_rules.py
+
+ - Renaming common block items that have names identical to C keywords.
+ - Fixed wrapping blank common blocks.
+
+* fortranobject.h
+
+ - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller).
+
+* fortranobject.c
+
+ - Introduced ``intent(inplace)`` feature.
+ - Fix numarray reference counts (patch by Todd).
+ - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller).
+ - Enabled F2PY_REPORT_ON_ARRAY_COPY for Numarray.
+
+* capi_maps.py
+
+ - Always normalize .f2py_f2cmap keys to lower case.
+
+* rules.py
+
+ - Disabled ``index`` macro as it conflicts with the one defined
+ in string.h.
+ - Moved ``externroutines`` up to make it visible to ``usercode``.
+ - Fixed bug in f90 code generation: no empty line continuation is
+ allowed.
+ - Fixed undefined symbols failure when ``fortranname`` is used
+ to rename a wrapped function.
+ - Support for ``entry`` statement.
+
+* auxfuncs.py
+
+ - Made is* functions more robust with respect to parameters that
+ have no typespec specified.
+ - Using ``size_t`` instead of ``int`` as the type of string
+ length. Fixes issues on 64-bit platforms.
+
+* setup.py
+
+ - Fixed bug of installing ``f2py`` script as ``.exe`` file.
+
+* f2py2e.py
+
+ - ``--compiler=`` and ``--fcompiler=`` can be specified at the same time.
+
+* crackfortran.py
+
+ - Fixed dependency detection for non-intent(in|inout|inplace) arguments.
+ They must depend on their dimensions, not vice-versa.
+ - Don't match ``!!f2py`` as a start of f2py directive.
+ - Only effective intent attributes will be output to ``-h`` target.
+ - Introduced ``intent(callback)`` to build interface between Python
+ functions and Fortran external routines.
+ - Avoid including external arguments to __user__ modules.
+ - Initial hooks to evaluate ``kind`` and ``selected_int_kind``.
+ - Evaluating parameters in {char,kind}selectors and applying rmbadname.
+ - Evaluating parameters using also module parameters. Fixed the order
+ of parameter evaluation.
+ - Fixed silly bug: when block name was not lower cased, it was not
+ recognized correctly.
+ - Applying mapping '.false.'->'False', '.true.'->'True' to logical
+ parameters. TODO: Support for logical expressions is needed.
+ - Added support for multiple statements in one line (separated with semicolon).
+ - Impl. get_useparameters function for using parameter values from
+ other f90 modules.
+ - Applied Bertholds patch to fix bug in evaluating expressions
+ like ``1.d0/dvar``.
+ - Fixed bug in reading string parameters.
+ - Evaluating parameters in charselector. Code cleanup.
+ - Using F90 module parameters to resolve kindselectors.
+ - Made the evaluation of module data init-expression more robust.
+ - Support for ``entry`` statement.
+ - Fixed ``determineexprtype`` that in the case of parameters
+ returned non-dictionary objects.
+ - Use ``-*- fix -*-`` to specify that a file is in fixed format.
+
+Release 2.39.235_1693
+=====================
+
+* fortranobject.{h,c}
+
+ - Support for allocatable string arrays.
+
+* cfuncs.py
+
+ - Call-back arguments can now be also instances that have ``__call__`` method
+ as well as instance methods.
+
+* f2py2e.py
+
+ - Introduced ``--include_paths <path1>:<path2>:..`` command line
+ option.
+ - Added ``--compiler=`` support to change the C/C++ compiler from
+ f2py command line.
+
+* capi_maps.py
+
+ - Handle ``XDY`` parameter constants.
+
+* crackfortran.py
+
+ - Handle ``XDY`` parameter constants.
+
+ - Introduced formatpattern to workaround a corner case where reserved
+ keywords are used in format statement. Other than that, format pattern
+ has no use.
+
+ - Parameters are now fully evaluated.
+
+* More splitting of documentation strings.
+
+* func2subr.py - fixed bug for function names that f77 compiler
+ would set ``integer`` type.
+
+Release 2.39.235_1660
+=====================
+
+* f2py2e.py
+
+ - Fixed bug in using --f90flags=..
+
+* f90mod_rules.py
+
+ - Split generated documentation strings (to avoid MSVC issue when
+ string length>2k)
+
+ - Ignore ``private`` module data.
+
+Release 2.39.235_1644
+=====================
+
+:Date: 24 February 2004
+
+* Character arrays:
+
+ - Finished complete support for character arrays and arrays of strings.
+ - ``character*n a(m)`` is treated like ``character a(m,n)`` with ``intent(c)``.
+ - Character arrays are now considered as ordinary arrays (not as arrays
+ of strings which actually didn't work).
+
+* docs
+
+ - Initial f2py manpage file f2py.1.
+ - Updated usersguide and other docs when using scipy_distutils 0.2.2
+ and up.
+
+* capi_maps.py
+
+ - Try harder to use .f2py_f2cmap mappings when kind is used.
+
+* crackfortran.py
+
+ - Included files are first searched in the current directory and
+ then from the source file directory.
+ - Ignoring dimension and character selector changes.
+ - Fixed bug in Fortran 90 comments of fixed format.
+ - Warn when .pyf signatures contain undefined symbols.
+ - Better detection of source code formats. Using ``-*- fortran -*-``
+ or ``-*- f90 -*-`` in the first line of a Fortran source file is
+ recommended to help f2py detect the format, fixed or free,
+ respectively, correctly.
+
+* cfuncs.py
+
+ - Fixed intent(inout) scalars when typecode=='l'.
+ - Fixed intent(inout) scalars when not using numarray.
+ - Fixed intent(inout) scalars when using numarray.
+
+* diagnose.py
+
+ - Updated for scipy_distutils 0.2.2 and up.
+ - Added numarray support to diagnose.
+
+* fortranobject.c
+
+ - Fixed nasty bug with intent(in,copy) complex slice arrays.
+ - Applied Todd's patch to support numarray's byteswapped or
+ misaligned arrays, requires numarray-0.8 or higher.
+
+* f2py2e.py
+
+ - Applying new hooks for scipy_distutils 0.2.2 and up, keeping
+ backward compatibility with deprecation messages.
+ - Using always os.system on non-posix platforms in f2py2e.compile
+ function.
+
+* rules.py
+
+ - Changed the order of buildcallback and usercode junks.
+
+* setup.cfg
+
+ - Added so that docs/ and tests/ directories are included to RPMs.
+
+* setup.py
+
+ - Installing f2py.py instead of f2py.bat under NT.
+ - Introduced ``--with-scipy_distutils`` that is useful when making
+ f2py tar-ball with scipy_distutils included.
+
+Release 2.37.233-1545
+=====================
+
+:Date: 11 September 2003
+
+* rules.py
+
+ - Introduced ``interface_usercode`` replacement. When ``usercode``
+ statement is used inside the first interface block, its contents
+ will be inserted at the end of initialization function of a F2PY
+ generated extension module (feature request: Berthold Höllmann).
+ - Introduced auxiliary function ``as_column_major_storage`` that
+ converts input array to an array with column major storage order
+ (feature request: Hans Petter Langtangen).
+
+* crackfortran.py
+
+ - Introduced ``pymethoddef`` statement.
+
+* cfuncs.py
+
+ - Fixed "#ifdef in #define TRYPYARRAYTEMPLATE" bug (patch thanks
+ to Bernhard Gschaider)
+
+* auxfuncs.py
+
+ - Introduced ``getpymethod`` function.
+ - Enabled multi-line blocks in ``callprotoargument`` statement.
+
+* f90mod_rules.py
+
+ - Undone "Fixed Warning 43 emitted by Intel Fortran compiler" that
+ causes (curious) segfaults.
+
+* fortranobject.c
+
+ - Fixed segfaults (that were introduced with recent memory leak
+ fixes) when using allocatable arrays.
+ - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. If defined
+ then a message is printed to stderr whenever a copy of an array is
+ made and arrays size is larger than F2PY_REPORT_ON_ARRAY_COPY.
+
+Release 2.35.229-1505
+=====================
+
+:Date: 5 August 2003
+
+* General
+
+ - Introduced ``usercode`` statement (dropped ``c_code`` hooks).
+
+* setup.py
+
+ - Updated the CVS location of scipy_distutils.
+
+* auxfuncs.py
+
+ - Introduced ``isint1array(var)`` for fixing ``integer*1 intent(out)``
+ support.
+
+* tests/f77/callback.py
+
+ Introduced some basic tests.
+
+* src/fortranobject.{c,h}
+
+ - Fixed memory leaks when getting/setting allocatable arrays.
+ (Bug report by Bernhard Gschaider)
+
+ - Initial support for numarray (Todd Miller's patch). Use -DNUMARRAY
+ on the f2py command line to enable numarray support. Note that
+ there is no character arrays support and these hooks are not
+ tested with F90 compilers yet.
+
+* cfuncs.py
+
+ - Fixed reference counting bug that appeared when constructing extra
+ argument list to callback functions.
+ - Added ``PyArray_LONG != PyArray_INT`` test.
+
+* f2py2e.py
+
+ Undocumented ``--f90compiler``.
+
+* crackfortran.py
+
+ - Introduced ``usercode`` statement.
+ - Fixed newlines when outputting multi-line blocks.
+ - Optimized ``getlincoef`` loop and ``analyzevars`` for cases where
+ len(vars) is large.
+ - Fixed callback string argument detection.
+ - Fixed evaluating expressions: only int|float expressions are
+ evaluated successfully.
+
+* docs
+
+ Documented -DF2PY_REPORT_ATEXIT feature.
+
+* diagnose.py
+
+ Added CPU information and sys.prefix printout.
+
+* tests/run_all.py
+
+ Added cwd to PYTHONPATH.
+
+* tests/f??/return_{real,complex}.py
+
+ Pass "infinity" check in SunOS.
+
+* rules.py
+
+ - Fixed ``integer*1 intent(out)`` support
+ - Fixed free format continuation of f2py generated F90 files.
+
+* tests/mixed/
+
+ Introduced tests for mixing Fortran 77, Fortran 90 fixed and free
+ format codes in one module.
+
+* f90mod_rules.py
+
+ - Fixed non-prototype warnings.
+ - Fixed Warning 43 emitted by Intel Fortran compiler.
+ - Avoid long lines in Fortran codes to reduce possible problems with
+ continuations of lines.
+
+Public Release 2.32.225-1419
+============================
+
+:Date: 8 December 2002
+
+* docs/usersguide/
+
+ Complete revision of F2PY Users Guide
+
+* tests/run_all.py
+
+ - New file. A Python script to run all f2py unit tests.
+
+* Removed files: buildmakefile.py, buildsetup.py.
+
+* tests/f77/
+
+ - Added intent(out) scalar tests.
+
+* f2py_testing.py
+
+ - Introduced. It contains jiffies, memusage, run, cmdline functions
+ useful for f2py unit tests site.
+
+* setup.py
+
+ - Install scipy_distutils only if it is missing or is too old
+ for f2py.
+
+* f90modrules.py
+
+ - Fixed wrapping f90 module data.
+ - Fixed wrapping f90 module subroutines.
+ - Fixed f90 compiler warnings for wrapped functions by using interface
+ instead of external stmt for functions.
+
+* tests/f90/
+
+ - Introduced return_*.py tests.
+
+* func2subr.py
+
+ - Added optional signature argument to createfuncwrapper.
+ - In f2pywrappers routines, declare external, scalar, remaining
+ arguments in that order. Fixes compiler error 'Invalid declaration'
+ for::
+
+ real function foo(a,b)
+ integer b
+ real a(b)
+ end
+
+* crackfortran.py
+
+ - Removed first-line comment information support.
+ - Introduced multiline block. Currently usable only for
+ ``callstatement`` statement.
+ - Improved array length calculation in getarrlen(..).
+ - "From sky" program group is created only if ``groupcounter<1``.
+ See TODO.txt.
+ - Added support for ``dimension(n:*)``, ``dimension(*:n)``. They are
+ treated as ``dimension(*)`` by f2py.
+ - Fixed parameter substitution (this fixes TODO item by Patrick
+ LeGresley, 22 Aug 2001).
+
+* f2py2e.py
+
+ - Disabled all makefile, setup, manifest file generation hooks.
+ - Disabled --[no]-external-modroutines option. All F90 module
+ subroutines will have Fortran/C interface hooks.
+ - --build-dir can be used with -c option.
+ - only/skip modes can be used with -c option.
+ - Fixed and documented `-h stdout` feature.
+ - Documented extra options.
+ - Introduced --quiet and --verbose flags.
+
+* cb_rules.py
+
+ - Fixed debugcapi hooks for intent(c) scalar call-back arguments
+ (bug report: Pierre Schnizer).
+ - Fixed intent(c) for scalar call-back arguments.
+ - Improved failure reports.
+
+* capi_maps.py
+
+ - Fixed complex(kind=..) to C type mapping bug. The following hold
+ complex==complex(kind=4)==complex*8, complex(kind=8)==complex*16
+ - Using signed_char for integer*1 (bug report: Steve M. Robbins).
+ - Fixed logical*8 function bug: changed its C correspondence to
+ long_long.
+ - Fixed memory leak when returning complex scalar.
+
+* __init__.py
+
+ - Introduced a new function (for f2py test site, but could be useful
+ in general) ``compile(source[,modulename,extra_args])`` for
+ compiling fortran source codes directly from Python.
+
+* src/fortranobject.c
+
+ - Multi-dimensional common block members and allocatable arrays
+ are returned as Fortran-contiguous arrays.
+ - Fixed NULL return to Python without exception.
+ - Fixed memory leak in getattr(<fortranobj>,'__doc__').
+ - <fortranobj>.__doc__ is saved to <fortranobj>.__dict__ (previously
+ it was generated each time when requested).
+ - Fixed a nasty typo from the previous item that caused data
+ corruption and occasional SEGFAULTs.
+ - array_from_pyobj accepts arbitrary rank arrays if the last dimension
+ is undefined. E.g. dimension(3,*) accepts a(3,4,5) and the result is
+ array with dimension(3,20).
+ - Fixed (void*) casts to make g++ happy (bug report: eric).
+ - Changed the interface of ARR_IS_NULL macro to avoid "``NULL used in
+ arithmetics``" warnings from g++.
+
+* src/fortranobject.h
+
+ - Undone previous item. Defining NO_IMPORT_ARRAY for
+ src/fortranobject.c (bug report: travis)
+ - Ensured that PY_ARRAY_UNIQUE_SYMBOL is defined only for
+ src/fortranobject.c (bug report: eric).
+
+* rules.py
+
+ - Introduced dummy routine feature.
+ - F77 and F90 wrapper subroutines (if any) as saved to different
+ files, <modulename>-f2pywrappers.f and <modulename>-f2pywrappers2.f90,
+ respectively. Therefore, wrapping F90 requires scipy_distutils >=
+ 0.2.0_alpha_2.229.
+ - Fixed compiler warnings about meaningless ``const void (*f2py_func)(..)``.
+ - Improved error messages for ``*_from_pyobj``.
+ - Changed __CPLUSPLUS__ macros to __cplusplus (bug report: eric).
+ - Changed (void*) casts to (f2py_init_func) (bug report: eric).
+ - Removed unnecessary (void*) cast for f2py_has_column_major_storage
+ in f2py_module_methods definition (bug report: eric).
+ - Changed the interface of f2py_has_column_major_storage function:
+ removed const from the 1st argument.
+
+* cfuncs.py
+
+ - Introduced -DPREPEND_FORTRAN.
+ - Fixed bus error on SGI by using PyFloat_AsDouble when ``__sgi`` is defined.
+ This seems to be a `known bug`__ with Python 2.1 and SGI.
+ - string_from_pyobj accepts only arrays whose elements size==sizeof(char).
+ - logical scalars (intent(in),function) are normalized to 0 or 1.
+ - Removed NUMFROMARROBJ macro.
+ - (char|short)_from_pyobj now use int_from_pyobj.
+ - (float|long_double)_from_pyobj now use double_from_pyobj.
+ - complex_(float|long_double)_from_pyobj now use complex_double_from_pyobj.
+ - Rewrote ``*_from_pyobj`` to be more robust. This fixes segfaults if
+ getting * from a string. Note that int_from_pyobj differs
+ from PyNumber_Int in that it accepts also complex arguments
+ (takes the real part) and sequences (takes the 1st element).
+ - Removed unnecessary void* casts in NUMFROMARROBJ.
+ - Fixed casts in ``*_from_pyobj`` functions.
+ - Replaced CNUMFROMARROBJ with NUMFROMARROBJ.
+
+.. __: http://sourceforge.net/tracker/index.php?func=detail&aid=435026&group_id=5470&atid=105470
+
+* auxfuncs.py
+
+ - Introduced isdummyroutine().
+ - Fixed islong_* functions.
+ - Fixed isintent_in for intent(c) arguments (bug report: Pierre Schnizer).
+ - Introduced F2PYError and throw_error. Using throw_error, f2py
+ rejects illegal .pyf file constructs that otherwise would cause
+ compilation failures or python crashes.
+ - Fixed islong_long(logical*8)->True.
+ - Introduced islogical() and islogicalfunction().
+ - Fixed prototype string argument (bug report: eric).
+
+* Updated README.txt and doc strings. Starting to use docutils.
+
+* Speed up for ``*_from_pyobj`` functions if obj is a sequence.
+
+* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF``
+ in ``GETSCALARFROMPYTUPLE``.
+
+Older Releases
+==============
+
+::
+
+ *** Fixed missing includes when wrapping F90 module data.
+ *** Fixed typos in docs of build_flib options.
+ *** Implemented prototype calculator if no callstatement or
+ callprotoargument statements are used. A warning is issued if
+ callstatement is used without callprotoargument.
+ *** Fixed transposing issue with array arguments in callback functions.
+ *** Removed -pyinc command line option.
+ *** Complete tests for Fortran 77 functions returning scalars.
+ *** Fixed returning character bug if --no-wrap-functions.
+ *** Described how to wrap F compiled Fortran F90 module procedures
+ with F2PY. See doc/using_F_compiler.txt.
+ *** Fixed the order of build_flib options when using --fcompiler=...
+ *** Recognize .f95 and .F95 files as Fortran sources with free format.
+ *** Cleaned up the output of 'f2py -h': removed obsolete items,
+ added build_flib options section.
+ *** Added --help-compiler option: it lists available Fortran compilers
+ as detected by scipy_distutils/command/build_flib.py. This option
+ is available only with -c option.
+
+
+:Release: 2.13.175-1250
+:Date: 4 April 2002
+
+::
+
+ *** Fixed copying of non-contiguous 1-dimensional arrays bug.
+ (Thanks to Travis O.).
+
+
+:Release: 2.13.175-1242
+:Date: 26 March 2002
+
+::
+
+ *** Fixed ignoring type declarations.
+ *** Turned F2PY_REPORT_ATEXIT off by default.
+ *** Made MAX,MIN macros available by default so that they can be
+ always used in signature files.
+ *** Disabled F2PY_REPORT_ATEXIT for FreeBSD.
+
+
+:Release: 2.13.175-1233
+:Date: 13 March 2002
+
+::
+
+ *** Fixed Win32 port when using f2py.bat. (Thanks to Erik Wilsher).
+ *** F2PY_REPORT_ATEXIT is disabled for MACs.
+ *** Fixed incomplete dependency calculator.
+
+
+:Release: 2.13.175-1222
+:Date: 3 March 2002
+
+::
+
+ *** Plugged a memory leak for intent(out) arrays with overwrite=0.
+ *** Introduced CDOUBLE_to_CDOUBLE,.. functions for copy_ND_array.
+ These cast functions probably work incorrectly in Numeric.
+
+
+:Release: 2.13.175-1212
+:Date: 23 February 2002
+
+::
+
+ *** Updated f2py for the latest scipy_distutils.
+ *** A nasty bug with multi-dimensional Fortran arrays is fixed
+ (intent(out) arrays had wrong shapes). (Thanks to Eric for
+ pointing out this bug).
+ *** F2PY_REPORT_ATEXIT is disabled by default for __WIN32__.
+
+
+:Release: 2.11.174-1161
+:Date: 14 February 2002
+
+::
+
+ *** Updated f2py for the latest scipy_distutils.
+ *** Fixed raise error when f2py missed -m flag.
+ *** Script name `f2py' now depends on the name of python executable.
+ For example, `python2.2 setup.py install' will create a f2py
+ script with a name `f2py2.2'.
+ *** Introduced 'callprotoargument' statement so that proper prototypes
+ can be declared. This is crucial when wrapping C functions as it
+ will fix segmentation faults when these wrappers use non-pointer
+ arguments (thanks to R. Clint Whaley for explaining this to me).
+ Note that in f2py generated wrapper, the prototypes have
+ the following forms:
+ extern #rtype# #fortranname#(#callprotoargument#);
+ or
+ extern #rtype# F_FUNC(#fortranname#,#FORTRANNAME#)(#callprotoargument#);
+ *** Cosmetic fixes to F2PY_REPORT_ATEXIT feature.
+
+
+:Release: 2.11.174-1146
+:Date: 3 February 2002
+
+::
+
+ *** Reviewed reference counting in call-back mechanism. Fixed few bugs.
+ *** Enabled callstatement for complex functions.
+ *** Fixed bug with initializing capi_overwrite_<varname>
+ *** Introduced intent(overwrite) that is similar to intent(copy) but
+ has opposite effect. Renamed copy_<name>=1 to overwrite_<name>=0.
+ intent(overwrite) will make default overwrite_<name>=1.
+ *** Introduced intent(in|inout,out,out=<name>) attribute that renames
+ arguments name when returned. This renaming has effect only in
+ documentation strings.
+ *** Introduced 'callstatement' statement to pyf file syntax. With this
+ one can specify explicitly how wrapped function should be called
+ from the f2py generated module. WARNING: this is a dangerous feature
+ and should be used with care. It is introduced to provide a hack
+ to construct wrappers that may have very different signature
+ pattern from the wrapped function. Currently 'callstatement' can
+ be used only inside a subroutine or function block (it should be enough
+ though) and must be only in one continuous line. The syntax of the
+ statement is: callstatement <C-expression>;
+
+
+:Release: 2.11.174
+:Date: 18 January 2002
+
+::
+
+ *** Fixed memory-leak for PyFortranObject.
+ *** Introduced extra keyword argument copy_<varname> for intent(copy)
+ variables. It defaults to 1 and forces to make a copy for
+ intent(in) variables when passing on to wrapped functions (in case
+ they undesirably change the variable in-situ).
+ *** Introduced has_column_major_storage member function for all f2py
+ generated extension modules. It is equivalent to Python call
+ 'transpose(obj).iscontiguous()' but very efficient.
+ *** Introduced -DF2PY_REPORT_ATEXIT. If this is used when compiling,
+ a report is printed to stderr as python exits. The report includes
+ the following timings:
+ 1) time spent in all wrapped function calls;
+ 2) time spent in f2py generated interface around the wrapped
+ functions. This gives a hint whether one should worry
+ about storing data in proper order (C or Fortran).
+ 3) time spent in Python functions called by wrapped functions
+ through call-back interface.
+ 4) time spent in f2py generated call-back interface.
+ For now, -DF2PY_REPORT_ATEXIT is enabled by default. Use
+ -DF2PY_REPORT_ATEXIT_DISABLE to disable it (I am not sure if
+ Windows has needed tools, let me know).
+ Also, I appreciate if you could send me the output of 'F2PY
+ performance report' (with CPU and platform information) so that I
+ could optimize f2py generated interfaces for future releases.
+ *** Extension modules can be linked with dmalloc library. Use
+ -DDMALLOC when compiling.
+ *** Moved array_from_pyobj to fortranobject.c.
+ *** Usage of intent(inout) arguments is made more strict -- only
+ with proper type contiguous arrays are accepted. In general,
+ you should avoid using intent(inout) attribute as it makes
+ wrappers of C and Fortran functions asymmetric. I recommend using
+ intent(in,out) instead.
+ *** intent(..) has new keywords: copy,cache.
+ intent(copy,in) - forces a copy of an input argument; this
+ may be useful for cases where the wrapped function changes
+ the argument in situ and this may not be desired side effect.
+ Otherwise, it is safe to not use intent(copy) for the sake
+ of a better performance.
+ intent(cache,hide|optional) - just creates a chunk of memory.
+ It does not care about proper storage order. Can be also
+ intent(in) but then the corresponding argument must be a
+ contiguous array with a proper elsize.
+ *** intent(c) can be used also for subroutine names so that
+ -DNO_APPEND_FORTRAN can be avoided for C functions.
+
+ *** IMPORTANT BREAKING GOOD ... NEWS!!!:
+
+ From now on you don't have to worry about the proper storage order
+ in multi-dimensional arrays that was earlier a real headache when
+ wrapping Fortran functions. Now f2py generated modules take care
+ of the proper conversions when needed. I have carefully designed
+ and optimized this interface to avoid any unnecessary memory usage
+ or copying of data. However, it is wise to use input arrays that
+ has proper storage order: for C arguments it is row-major and for
+ Fortran arguments it is column-major. But you don't need to worry
+ about that when developing your programs. The optimization of
+ initializing the program with proper data for possibly better
+ memory usage can be safely postponed until the program is working.
+
+ This change also affects the signatures in .pyf files. If you have
+ created wrappers that take multi-dimensional arrays in arguments,
+ it is better to let f2py re-generate these files. Or you have to
+ manually do the following changes: reverse the axes indices in all
+ 'shape' macros. For example, if you have defined an array A(n,m)
+ and n=shape(A,1), m=shape(A,0) then you must change the last
+ statements to n=shape(A,0), m=shape(A,1).
+
+
+:Release: 2.8.172
+:Date: 13 January 2002
+
+::
+
+ *** Fixed -c process. Removed pyf_extensions function and pyf_file class.
+ *** Reorganized setup.py. It generates f2py or f2py.bat scripts
+ depending on the OS and the location of the python executable.
+ *** Started to use update_version from scipy_distutils that makes
+ f2py startup faster. As a side effect, the version number system
+ changed.
+ *** Introduced test-site/test_f2py2e.py script that runs all
+ tests.
+ *** Fixed global variables initialization problem in crackfortran
+ when run_main is called several times.
+ *** Added 'import Numeric' to C/API init<module> function.
+ *** Fixed f2py.bat in setup.py.
+ *** Switched over to scipy_distutils and dropped fortran_support.
+ *** On Windows create f2py.bat file.
+ *** Introduced -c option: read fortran or pyf files, construct extension
+ modules, build, and save them to current directory.
+ In one word: do-it-all-in-one-call.
+ *** Introduced pyf_extensions(sources,f2py_opts) function. It simplifies
+ the extension building process considerably. Only for internal use.
+ *** Converted tests to use scipy_distutils in order to improve portability:
+ a,b,c
+ *** f2py2e.run_main() returns a pyf_file class instance containing
+ information about f2py generated files.
+ *** Introduced `--build-dir <dirname>' command line option.
+ *** Fixed setup.py for bdist_rpm command.
+ *** Added --scipy-setup command line option.
+ *** Fixed crackfortran that did not recognized capitalized type
+ specification with --no-lower flag.
+ *** `-h stdout' writes signature to stdout.
+ *** Fixed incorrect message for check() with empty name list.
+
+
+:Release: 2.4.366
+:Date: 17 December 2001
+
+::
+
+ *** Added command line option --[no-]manifest.
+ *** `make test' should run on Windows, but the results are not truthful.
+ *** Reorganized f2py2e.py a bit. Introduced run_main(comline_list) function
+ that can be useful when running f2py from another Python module.
+ *** Removed command line options -f77,-fix,-f90 as the file format
+ is determined from the extension of the fortran file
+ or from its header (first line starting with `!%' and containing keywords
+ free, fix, or f77). The latter overrides the former.
+ *** Introduced command line options --[no-]makefile,--[no-]latex-doc.
+ Users must explicitly use --makefile,--latex-doc if Makefile-<modulename>,
+ <modulename>module.tex is desired. --setup is default. Use --no-setup
+ to disable setup_<modulename>.py generation. --overwrite-makefile
+ will set --makefile.
+ *** Added `f2py_rout_' to #capiname# in rules.py.
+ *** intent(...) statement with empty namelist forces intent(...) attribute for
+ all arguments.
+ *** Dropped DL_IMPORT and DL_EXPORT in fortranobject.h.
+ *** Added missing PyFortran_Type.ob_type initialization.
+ *** Added gcc-3.0 support.
+ *** Raising non-existing/broken Numeric as a FatalError exception.
+ *** Fixed Python 2.x specific += construct in fortran_support.py.
+ *** Fixed copy_ND_array for 1-rank arrays that used to call calloc(0,..)
+ and caused core dump with a non-gcc compiler (Thanks to Pierre Schnizer
+ for reporting this bug).
+ *** Fixed "warning: variable `..' might be clobbered by `longjmp' or `vfork'":
+ - Reorganized the structure of wrapper functions to get rid of
+ `goto capi_fail' statements that caused the above warning.
+
+
+:Release: 2.3.343
+:Date: 12 December 2001
+
+::
+
+ *** Issues with the Win32 support (thanks to Eric Jones and Tiffany Kamm):
+ - Using DL_EXPORT macro for init#modulename#.
+ - Changed PyObject_HEAD_INIT(&PyType_Type) to PyObject_HEAD_INIT(0).
+ - Initializing #name#_capi=NULL instead of Py_None in cb hooks.
+ *** Fixed some 'warning: function declaration isn't a prototype', mainly
+ in fortranobject.{c,h}.
+ *** Fixed 'warning: missing braces around initializer'.
+ *** Fixed reading a line containing only a label.
+ *** Fixed nonportable 'cp -fv' to shutil.copy in f2py2e.py.
+ *** Replaced PyEval_CallObject with PyObject_CallObject in cb_rules.
+ *** Replaced Py_DECREF with Py_XDECREF when freeing hidden arguments.
+ (Reason: Py_DECREF caused segfault when an error was raised)
+ *** Impl. support for `include "file"' (in addition to `include 'file'')
+ *** Fixed bugs (buildsetup.py missing in Makefile, in generated MANIFEST.in)
+
+
+:Release: 2.3.327
+:Date: 4 December 2001
+
+::
+
+ *** Sending out the third public release of f2py.
+ *** Support for Intel(R) Fortran Compiler (thanks to Patrick LeGresley).
+ *** Introduced `threadsafe' statement to pyf-files (or to be used with
+ the 'f2py' directive in fortran codes) to force
+ Py_BEGIN|END_ALLOW_THREADS block around the Fortran subroutine
+ calling statement in Python C/API. `threadsafe' statement has
+ an effect only inside a subroutine block.
+ *** Introduced `fortranname <name>' statement to be used only within
+ pyf-files. This is useful when the wrapper (Python C/API) function
+ has different name from the wrapped (Fortran) function.
+ *** Introduced `intent(c)' directive and statement. It is useful when
+ wrapping C functions. Use intent(c) for arguments that are
+ scalars (not pointers) or arrays (with row-ordering of elements).
+
+
+:Release: 2.3.321
+:Date: 3 December 2001
+
+::
+
+ *** f2py2e can be installed using distutils (run `python setup.py install').
+ *** f2py builds setup_<modulename>.py. Use --[no-]setup to control this
+ feature. setup_<modulename>.py uses fortran_support module (from SciPy),
+ but for your convenience it is included also with f2py as an additional
+ package. Note that it has not as many compilers supported as with
+ using Makefile-<modulename>, but new compilers should be added to
+ fortran_support module, not to f2py2e package.
+ *** Fixed some compiler warnings about else statements.
+
diff --git a/numpy/f2py/docs/OLDNEWS.txt b/numpy/f2py/docs/OLDNEWS.txt
new file mode 100644
index 000000000..b21215464
--- /dev/null
+++ b/numpy/f2py/docs/OLDNEWS.txt
@@ -0,0 +1,63 @@
+
+.. topic:: Old F2PY NEWS
+
+ March 30, 2004
+ F2PY bug fix release (version 2.39.235-1693). Two new command line switches:
+ ``--compiler`` and ``--include_paths``. Support for allocatable string arrays.
+ Callback arguments may now be arbitrary callable objects. Win32 installers
+ for F2PY and Scipy_core are provided.
+ `Differences with the previous release (version 2.37.235-1660)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.98&r2=1.87&f=h
+
+
+ March 9, 2004
+ F2PY bug fix release (version 2.39.235-1660).
+ `Differences with the previous release (version 2.37.235-1644)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.87&r2=1.83&f=h
+
+ February 24, 2004
+ Latest F2PY release (version 2.39.235-1644).
+ Support for scipy_distutils 0.2.2 and up (e.g. compiler flags can be
+ changed via f2py command line options). Implemented support for
+ character arrays and arrays of strings (e.g. ``character*(*) a(m,..)``).
+ *Important bug fixes regarding complex arguments, upgrading is
+ highly recommended*. Documentation updates.
+ `Differences with the previous release (version 2.37.233-1545)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.83&r2=1.58&f=h
+
+ September 11, 2003
+ Latest F2PY release (version 2.37.233-1545).
+ New statements: ``pymethoddef`` and ``usercode`` in interface blocks.
+ New function: ``as_column_major_storage``.
+ New CPP macro: ``F2PY_REPORT_ON_ARRAY_COPY``.
+ Bug fixes.
+ `Differences with the previous release (version 2.35.229-1505)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.58&r2=1.49&f=h
+
+ August 2, 2003
+ Latest F2PY release (version 2.35.229-1505).
+ `Differences with the previous release (version 2.32.225-1419)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.49&r2=1.28&f=h
+
+ April 2, 2003
+ Initial support for Numarray_ (thanks to Todd Miller).
+
+ December 8, 2002
+ Sixth public release of F2PY (version 2.32.225-1419). Comes with
+ revised `F2PY Users Guide`__, `new testing site`__, lots of fixes
+ and other improvements, see `HISTORY.txt`_ for details.
+
+ __ usersguide/index.html
+ __ TESTING.txt_
+
+.. References
+ ==========
+
+.. _HISTORY.txt: HISTORY.html
+.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray
+.. _TESTING.txt: TESTING.html \ No newline at end of file
diff --git a/numpy/f2py/docs/README.txt b/numpy/f2py/docs/README.txt
new file mode 100644
index 000000000..fc7149645
--- /dev/null
+++ b/numpy/f2py/docs/README.txt
@@ -0,0 +1,457 @@
+.. -*- rest -*-
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ F2PY: Fortran to Python interface generator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:Author: Pearu Peterson <pearu@cens.ioc.ee>
+:License: LGPL_.
+:Web-site: http://cens.ioc.ee/projects/f2py2e/
+:Discussions to: `f2py-users mailing list`_
+:Documentation: `User's Guide`__, FAQ__
+:Platforms: All
+:Date: $Date: 2005/01/30 18:54:53 $
+
+.. _f2py-users mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/
+__ usersguide/index.html
+__ FAQ.html
+
+-------------------------------
+
+.. topic:: NEWS!!!
+
+ January 30, 2005
+
+ Latest F2PY release (version 2.45.241_1926).
+ New features: wrapping unsigned integers, support for ``.pyf.src`` template files,
+ callback arguments can now be CObjects, fortran objects, built-in functions.
+ Introduced ``intent(aux)`` attribute. Wrapped objects have ``_cpointer``
+ attribute holding C pointer to wrapped functions or variables.
+ Many bug fixes and improvements, updated documentation.
+ `Differences with the previous release (version 2.43.239_1831)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.163&r2=1.137&f=h
+
+ October 4, 2004
+ F2PY bug fix release (version 2.43.239_1831).
+ Better support for 64-bit platforms.
+ Introduced ``--help-link`` and ``--link-<resource>`` options.
+ Bug fixes.
+ `Differences with the previous release (version 2.43.239_1806)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.137&r2=1.131&f=h
+
+ September 25, 2004
+ Latest F2PY release (version 2.43.239_1806).
+ Support for ``ENTRY`` statement. New attributes:
+ ``intent(inplace)``, ``intent(callback)``. Supports Numarray 1.1.
+ Introduced ``-*- fix -*-`` header content. Improved ``PARAMETER`` support.
+ Documentation updates. `Differences with the previous release
+ (version 2.39.235-1693)`__.
+
+ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.131&r2=1.98&f=h
+
+ `History of NEWS`__
+
+ __ OLDNEWS.html
+
+-------------------------------
+
+.. Contents::
+
+==============
+ Introduction
+==============
+
+The purpose of the F2PY --*Fortran to Python interface generator*--
+project is to provide connection between Python_ and Fortran
+languages. F2PY is a Python extension tool for creating Python C/API
+modules from (handwritten or F2PY generated) signature files (or
+directly from Fortran sources). The generated extension modules
+facilitate:
+
+* Calling Fortran 77/90/95, Fortran 90/95 module, and C functions from
+ Python.
+
+* Accessing Fortran 77 ``COMMON`` blocks and Fortran 90/95 module
+ data (including allocatable arrays) from Python.
+
+* Calling Python functions from Fortran or C (call-backs).
+
+* Automatically handling the difference in the data storage order of
+ multi-dimensional Fortran and Numerical Python (i.e. C) arrays.
+
+In addition, F2PY can build the generated extension modules to shared
+libraries with one command. F2PY uses the ``scipy_distutils`` module
+from SciPy_ that supports a number of major Fortran compilers.
+
+..
+ (see `COMPILERS.txt`_ for more information).
+
+F2PY generated extension modules depend on NumPy_ package that
+provides fast multi-dimensional array language facility to Python.
+
+
+---------------
+ Main features
+---------------
+
+Here follows a more detailed list of F2PY features:
+
+* F2PY scans real Fortran codes to produce the so-called signature
+ files (.pyf files). The signature files contain all the information
+ (function names, arguments and their types, etc.) that is needed to
+ construct Python bindings to Fortran (or C) functions.
+
+ The syntax of signature files is borrowed from the
+ Fortran 90/95 language specification and has some F2PY specific
+ extensions. The signature files can be modified to dictate how
+ Fortran (or C) programs are called from Python:
+
+ + F2PY solves dependencies between arguments (this is relevant for
+ the order of initializing variables in extension modules).
+
+ + Arguments can be specified to be optional or hidden that
+ simplifies calling Fortran programs from Python considerably.
+
+  + In principle, one can design any Python signature for a given
+    Fortran function, e.g. change the order of arguments, introduce
+ auxiliary arguments, hide the arguments, process the arguments
+ before passing to Fortran, return arguments as output of F2PY
+ generated functions, etc.
+
+* F2PY automatically generates __doc__ strings (and optionally LaTeX
+ documentation) for extension modules.
+
+* F2PY generated functions accept arbitrary (but sensible) Python
+ objects as arguments. The F2PY interface automatically takes care of
+ type-casting and handling of non-contiguous arrays.
+
+* The following Fortran constructs are recognized by F2PY:
+
+ + All basic Fortran types::
+
+ integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ]
+ integer*([ -1 | -2 | -4 | -8 ])
+ character[ | *(*) | *1 | *2 | *3 | ... ]
+ real[ | *4 | *8 | *16 ], double precision
+ complex[ | *8 | *16 | *32 ]
+
+ Negative ``integer`` kinds are used to wrap unsigned integers.
+
+ + Multi-dimensional arrays of all basic types with the following
+ dimension specifications::
+
+ <dim> | <start>:<end> | * | :
+
+ + Attributes and statements::
+
+ intent([ in | inout | out | hide | in,out | inout,out | c |
+ copy | cache | callback | inplace | aux ])
+ dimension(<dimspec>)
+ common, parameter
+ allocatable
+ optional, required, external
+ depend([<names>])
+ check([<C-booleanexpr>])
+ note(<LaTeX text>)
+ usercode, callstatement, callprotoargument, threadsafe, fortranname
+ pymethoddef
+ entry
+
+* Because there are only minor (and easily handled) differences
+  between calling C and Fortran functions from F2PY generated
+  extension modules, F2PY is also well suited for wrapping C
+ libraries to Python.
+
+* Practice has shown that F2PY generated interfaces (to C or Fortran
+ functions) are less error prone and even more efficient than
+ handwritten extension modules. The F2PY generated interfaces are
+  easy to maintain and any future optimization of F2PY generated
+  interfaces transparently applies to extension modules by just
+  regenerating them with the latest version of F2PY.
+
+* `F2PY Users Guide and Reference Manual`_
+
+
+===============
+ Prerequisites
+===============
+
+F2PY requires the following software installed:
+
+* Python_ (versions 1.5.2 or later; 2.1 and up are recommended).
+ You must have python-dev package installed.
+* NumPy_ (versions 13 or later; 20.x, 21.x, 22.x, 23.x are recommended)
+* Numarray_ (version 0.9 and up), optional, partial support.
+* Scipy_distutils (version 0.2.2 and up are recommended) from SciPy_
+ project. Get it from Scipy CVS or download it below.
+
+Python 1.x users also need distutils_.
+
+Of course, to build extension modules, you'll need also working C
+and/or Fortran compilers installed.
+
+==========
+ Download
+==========
+
+You can download the sources for the latest F2PY and scipy_distutils
+releases as:
+
+* `2.x`__/`F2PY-2-latest.tar.gz`__
+* `2.x`__/`scipy_distutils-latest.tar.gz`__
+
+Windows users might be interested in Win32 installer for F2PY and
+Scipy_distutils (these installers are built using Python 2.3):
+
+* `2.x`__/`F2PY-2-latest.win32.exe`__
+* `2.x`__/`scipy_distutils-latest.win32.exe`__
+
+Older releases are also available in the directories
+`rel-0.x`__, `rel-1.x`__, `rel-2.x`__, `rel-3.x`__, `rel-4.x`__, `rel-5.x`__,
+if you need them.
+
+.. __: 2.x/
+.. __: 2.x/F2PY-2-latest.tar.gz
+.. __: 2.x/
+.. __: 2.x/scipy_distutils-latest.tar.gz
+.. __: 2.x/
+.. __: 2.x/F2PY-2-latest.win32.exe
+.. __: 2.x/
+.. __: 2.x/scipy_distutils-latest.win32.exe
+.. __: rel-0.x
+.. __: rel-1.x
+.. __: rel-2.x
+.. __: rel-3.x
+.. __: rel-4.x
+.. __: rel-5.x
+
+Development version of F2PY from CVS is available as `f2py2e.tar.gz`__.
+
+__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/f2py2e.tar.gz?tarball=1
+
+Debian Sid users can simply install ``python-f2py`` package.
+
+==============
+ Installation
+==============
+
+Unpack the source file, change to directory ``F2PY-?-???/`` and run
+(you may need to become a root)::
+
+ python setup.py install
+
+The F2PY installation installs a Python package ``f2py2e`` to your
+Python ``site-packages`` directory and a script ``f2py`` to your
+Python executable path.
+
+See also Installation__ section in `F2PY FAQ`_.
+
+.. __: FAQ.html#installation
+
+Similarly, to install ``scipy_distutils``, unpack its tar-ball and run::
+
+ python setup.py install
+
+=======
+ Usage
+=======
+
+To check if F2PY is installed correctly, run
+::
+
+ f2py
+
+without any arguments. This should print out the usage information of
+the ``f2py`` program.
+
+Next, try out the following three steps:
+
+1) Create a Fortran file `hello.f`__ that contains::
+
+ C File hello.f
+ subroutine foo (a)
+ integer a
+ print*, "Hello from Fortran!"
+ print*, "a=",a
+ end
+
+__ hello.f
+
+2) Run
+
+ ::
+
+ f2py -c -m hello hello.f
+
+ This will build an extension module ``hello.so`` (or ``hello.sl``,
+ or ``hello.pyd``, etc. depending on your platform) into the current
+ directory.
+
+3) Now in Python try::
+
+ >>> import hello
+ >>> print hello.__doc__
+ >>> print hello.foo.__doc__
+ >>> hello.foo(4)
+ Hello from Fortran!
+ a= 4
+ >>>
+
+If the above works, then you can try out more thorough
+`F2PY unit tests`__ and read the `F2PY Users Guide and Reference Manual`_.
+
+__ FAQ.html#q-how-to-test-if-f2py-is-working-correctly
+
+===============
+ Documentation
+===============
+
+The documentation of the F2PY project is collected in ``f2py2e/docs/``
+directory. It contains the following documents:
+
+`README.txt`_ (in CVS__)
+ The first thing to read about F2PY -- this document.
+
+__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/README.txt?rev=HEAD&content-type=text/x-cvsweb-markup
+
+`usersguide/index.txt`_, `usersguide/f2py_usersguide.pdf`_
+ F2PY Users Guide and Reference Manual. Contains lots of examples.
+
+`FAQ.txt`_ (in CVS__)
+ F2PY Frequently Asked Questions.
+
+__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/FAQ.txt?rev=HEAD&content-type=text/x-cvsweb-markup
+
+`TESTING.txt`_ (in CVS__)
+ About F2PY testing site. What tests are available and how to run them.
+
+__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/TESTING.txt?rev=HEAD&content-type=text/x-cvsweb-markup
+
+`HISTORY.txt`_ (in CVS__)
+ A list of latest changes in F2PY. This is the most up-to-date
+ document on F2PY.
+
+__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup
+
+`THANKS.txt`_
+ Acknowledgments.
+
+..
+ `COMPILERS.txt`_
+ Compiler and platform specific notes.
+
+===============
+ Mailing list
+===============
+
+A mailing list f2py-users@cens.ioc.ee is open for F2PY related
+discussion/questions/etc.
+
+* `Subscribe..`__
+* `Archives..`__
+
+__ http://cens.ioc.ee/mailman/listinfo/f2py-users
+__ http://cens.ioc.ee/pipermail/f2py-users
+
+
+=====
+ CVS
+=====
+
+F2PY is being developed under CVS_. The CVS version of F2PY can be
+obtained as follows:
+
+1) First you need to login (the password is ``guest``)::
+
+ cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login
+
+2) and then do the checkout::
+
+ cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e
+
+3) You can update your local F2PY tree ``f2py2e/`` by executing::
+
+ cvs -z6 update -P -d
+
+You can browse the `F2PY CVS`_ repository.
+
+===============
+ Contributions
+===============
+
+* `A short introduction to F2PY`__ by Pierre Schnizer.
+
+* `F2PY notes`__ by Fernando Perez.
+
+* `Debian packages of F2PY`__ by José Fonseca. [OBSOLETE, Debian Sid
+ ships python-f2py package]
+
+__ http://fubphpc.tu-graz.ac.at/~pierre/f2py_tutorial.tar.gz
+__ http://cens.ioc.ee/pipermail/f2py-users/2003-April/000472.html
+__ http://jrfonseca.dyndns.org/debian/
+
+
+===============
+ Related sites
+===============
+
+* `Numerical Python`_ -- adds a fast array facility to the Python language.
+* Pyfort_ -- A Python-Fortran connection tool.
+* SciPy_ -- An open source library of scientific tools for Python.
+* `Scientific Python`_ -- A collection of Python modules that are
+ useful for scientific computing.
+* `The Fortran Company`_ -- A place to find products, services, and general
+ information related to the Fortran programming language.
+* `American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978`__
+* `J3`_ -- The US Fortran standards committee.
+* SWIG_ -- A software development tool that connects programs written
+ in C and C++ with a variety of high-level programming languages.
+* `Mathtools.net`_ -- A technical computing portal for all scientific
+ and engineering needs.
+
+.. __: http://www.fortran.com/fortran/F77_std/rjcnf.html
+
+.. References
+ ==========
+
+
+.. _F2PY Users Guide and Reference Manual: usersguide/index.html
+.. _usersguide/index.txt: usersguide/index.html
+.. _usersguide/f2py_usersguide.pdf: usersguide/f2py_usersguide.pdf
+.. _README.txt: README.html
+.. _COMPILERS.txt: COMPILERS.html
+.. _F2PY FAQ:
+.. _FAQ.txt: FAQ.html
+.. _HISTORY.txt: HISTORY.html
+.. _HISTORY.txt from CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup
+.. _THANKS.txt: THANKS.html
+.. _TESTING.txt: TESTING.html
+.. _F2PY CVS2: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/
+.. _F2PY CVS: http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/
+
+.. _CVS: http://www.cvshome.org/
+.. _Python: http://www.python.org/
+.. _SciPy: http://www.scipy.org/
+.. _NumPy: http://www.numpy.org/
+.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray
+.. _docutils: http://docutils.sourceforge.net/
+.. _distutils: http://www.python.org/sigs/distutils-sig/
+.. _LGPL: http://www.fsf.org/copyleft/lesser.html
+.. _Numerical Python: http://www.numpy.org/
+.. _Pyfort: http://pyfortran.sourceforge.net/
+.. _Scientific Python:
+ http://starship.python.net/crew/hinsen/scientific.html
+.. _The Fortran Company: http://www.fortran.com/fortran/
+.. _J3: http://www.j3-fortran.org/
+.. _Mathtools.net: http://www.mathtools.net/
+.. _SWIG: http://www.swig.org/
+
+..
+ Local Variables:
+ mode: indented-text
+ indent-tabs-mode: nil
+ sentence-end-double-space: t
+ fill-column: 70
+ End:
diff --git a/numpy/f2py/docs/TESTING.txt b/numpy/f2py/docs/TESTING.txt
new file mode 100644
index 000000000..feae18dc6
--- /dev/null
+++ b/numpy/f2py/docs/TESTING.txt
@@ -0,0 +1,108 @@
+
+=======================================================
+ F2PY unit testing site
+=======================================================
+
+.. Contents::
+
+Tests
+-----
+
+* To run all F2PY unit tests in one command::
+
+ cd tests
+ python run_all.py [<options>]
+
+ For example::
+
+ localhost:~/src_cvs/f2py2e/tests$ python2.2 run_all.py 100 --quiet
+ **********************************************
+ Running '/usr/bin/python2.2 f77/return_integer.py 100 --quiet'
+ run 1000 tests in 1.87 seconds
+ initial virtual memory size: 3952640 bytes
+ current virtual memory size: 3952640 bytes
+ ok
+ **********************************************
+ Running '/usr/bin/python2.2 f77/return_logical.py 100 --quiet'
+ run 1000 tests in 1.47 seconds
+ initial virtual memory size: 3952640 bytes
+ current virtual memory size: 3952640 bytes
+ ok
+ ...
+
+ If some tests fail, try to run the failing tests separately (without
+ the ``--quiet`` option) as described below to get more information
+ about the failure.
+
+* Test intent(in), intent(out) scalar arguments,
+ scalars returned by F77 functions
+ and F90 module functions::
+
+ tests/f77/return_integer.py
+ tests/f77/return_real.py
+ tests/f77/return_logical.py
+ tests/f77/return_complex.py
+ tests/f77/return_character.py
+ tests/f90/return_integer.py
+ tests/f90/return_real.py
+ tests/f90/return_logical.py
+ tests/f90/return_complex.py
+ tests/f90/return_character.py
+
+ Change to tests/ directory and run::
+
+ python f77/return_<type>.py [<options>]
+ python f90/return_<type>.py [<options>]
+
+ where ``<type>`` is integer, real, logical, complex, or character.
+ Test scripts options are described below.
+
+  A test is considered successful if the last printed line is "ok".
+
+ If you get import errors like::
+
+ ImportError: No module named f77_ext_return_integer
+
+  but ``f77_ext_return_integer.so`` exists in the current directory then
+  it means that the current directory is not included in `sys.path`
+  in your Python installation. As a fix, prepend ``.`` to ``PYTHONPATH``
+ environment variable and rerun the tests. For example::
+
+ PYTHONPATH=. python f77/return_integer.py
+
+* Test mixing Fortran 77, Fortran 90 fixed and free format codes::
+
+ tests/mixed/run.py
+
+* Test basic callback hooks::
+
+ tests/f77/callback.py
+
+Options
+-------
+
+You may want to use the following options when running the test
+scripts:
+
+``<integer>``
+  Run tests ``<integer>`` times. Useful for detecting memory leaks. Under
+  Linux, test scripts output the virtual memory size of the process
+  before and after calling the wrapped functions.
+
+``--quiet``
+ Suppress all messages. On success only "ok" should be displayed.
+
+``--fcompiler=<Gnu|Intel|...>``
+ Use::
+
+ f2py -c --help-fcompiler
+
+ to find out what compilers are available (or more precisely, which
+ ones are recognized by ``scipy_distutils``).
+
+Reporting failures
+------------------
+
+XXX: (1) make sure that failures are due to f2py and (2) send full
+stdout/stderr messages to me. Also add compiler,python,platform
+information.
diff --git a/numpy/f2py/docs/THANKS.txt b/numpy/f2py/docs/THANKS.txt
new file mode 100644
index 000000000..cbaa083fc
--- /dev/null
+++ b/numpy/f2py/docs/THANKS.txt
@@ -0,0 +1,63 @@
+
+=================
+ Acknowledgments
+=================
+
+F2PY__ is a LGPL'd Python package and command line tool developed and
+maintained by Pearu Peterson (me__).
+
+.. __: http://cens.ioc.ee/projects/f2py2e/
+.. __: http://cens.ioc.ee/~pearu/
+
+Many people have contributed to the F2PY project in terms of interest,
+encouragement, suggestions, criticism, bug reports, code
+contributions, and keeping me busy with developing F2PY. For all that
+I thank
+
+ James Amundson, John Barnard, David Beazley, Frank Bertoldi, Roman
+ Bertle, James Boyle, Moritz Braun, Rolv Erlend Bredesen, John
+ Chaffer, Fred Clare, Adam Collard, Ben Cornett, Jose L Gomez Dans,
+ Jaime D. Perea Duarte, Paul F Dubois, Thilo Ernst, Bonilla Fabian,
+ Martin Gelfand, Eduardo A. Gonzalez, Siegfried Gonzi, Bernhard
+ Gschaider, Charles Doutriaux, Jeff Hagelberg, Janko Hauser, Thomas
+ Hauser, Heiko Henkelmann, William Henney, Yueqiang Huang, Asim
+ Hussain, Berthold Höllmann, Vladimir Janku, Henk Jansen, Curtis
+ Jensen, Eric Jones, Tiffany Kamm, Andrey Khavryuchenko, Greg
+ Kochanski, Jochen Küpper, Simon Lacoste-Julien, Tim Lahey, Hans
+ Petter Langtangen, Jeff Layton, Matthew Lewis, Patrick LeGresley,
+ Joaquim R R A Martins, Paul Magwene Lionel Maziere, Craig McNeile,
+ Todd Miller, David C. Morrill, Dirk Muders, Kevin Mueller, Andrew
+ Mullhaupt, Vijayendra Munikoti, Travis Oliphant, Kevin O'Mara, Arno
+ Paehler, Fernando Perez, Didrik Pinte, Todd Alan Pitts, Prabhu
+ Ramachandran, Brad Reisfeld, Steve M. Robbins, Theresa Robinson,
+ Pedro Rodrigues, Les Schaffer, Christoph Scheurer, Herb Schilling,
+ Pierre Schnizer, Kevin Smith, Paulo Teotonio Sobrinho, José Rui
+ Faustino de Sousa, Andrew Swan, Dustin Tang, Charlie Taylor, Paul le
+ Texier, Michael Tiller, Semen Trygubenko, Ravi C Venkatesan, Peter
+ Verveer, Nils Wagner, R. Clint Whaley, Erik Wilsher, Martin
+ Wiechert, Gilles Zerah, SungPil Yoon.
+
+(This list may not be complete. Please forgive me if I have left you
+out and let me know, I'll add your name.)
+
+Special thanks are due to ...
+
+Eric Jones - he and Travis O. are responsible for starting the
+scipy_distutils project that made it possible to move most of the
+platform and compiler specific codes out from F2PY. This simplified
+maintaining the
+F2PY project considerably.
+
+Joaquim R R A Martins - he made it possible for me to test F2PY on
+IRIX64 platform. He also presented our paper about F2PY in the 9th
+Python Conference that I planned to attend but had to cancel at the
+very last minute.
+
+Travis Oliphant - his knowledge and experience on Numerical Python
+C/API has been invaluable in early development of the F2PY program.
+His major contributions are call-back mechanism and copying N-D arrays
+of arbitrary types.
+
+Todd Miller - he is responsible for Numarray support in F2PY.
+
+Thanks!
+ Pearu
diff --git a/numpy/f2py/docs/default.css b/numpy/f2py/docs/default.css
new file mode 100644
index 000000000..9289e2826
--- /dev/null
+++ b/numpy/f2py/docs/default.css
@@ -0,0 +1,180 @@
+/*
+:Author: David Goodger
+:Contact: goodger@users.sourceforge.net
+:date: $Date: 2002/08/01 20:52:44 $
+:version: $Revision: 1.1 $
+:copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+*/
+
+body {
+ background: #FFFFFF ;
+ color: #000000
+}
+
+a.footnote-reference {
+ font-size: smaller ;
+ vertical-align: super }
+
+a.target {
+ color: blue }
+
+a.toc-backref {
+ text-decoration: none ;
+ color: black }
+
+dd {
+ margin-bottom: 0.5em }
+
+div.abstract {
+ margin: 2em 5em }
+
+div.abstract p.topic-title {
+ font-weight: bold ;
+ text-align: center }
+
+div.attention, div.caution, div.danger, div.error, div.hint,
+div.important, div.note, div.tip, div.warning {
+ margin: 2em ;
+ border: medium outset ;
+ padding: 1em }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+ color: red ;
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.hint p.admonition-title, div.important p.admonition-title,
+div.note p.admonition-title, div.tip p.admonition-title {
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.dedication {
+ margin: 2em 5em ;
+ text-align: center ;
+ font-style: italic }
+
+div.dedication p.topic-title {
+ font-weight: bold ;
+ font-style: normal }
+
+div.figure {
+ margin-left: 2em }
+
+div.footer, div.header {
+ font-size: smaller }
+
+div.system-messages {
+ margin: 5em }
+
+div.system-messages h1 {
+ color: red }
+
+div.system-message {
+ border: medium outset ;
+ padding: 1em }
+
+div.system-message p.system-message-title {
+ color: red ;
+ font-weight: bold }
+
+div.topic {
+ margin: 2em }
+
+h1.title {
+ text-align: center }
+
+h2.subtitle {
+ text-align: center }
+
+hr {
+ width: 75% }
+
+ol.simple, ul.simple {
+ margin-bottom: 1em }
+
+ol.arabic {
+ list-style: decimal }
+
+ol.loweralpha {
+ list-style: lower-alpha }
+
+ol.upperalpha {
+ list-style: upper-alpha }
+
+ol.lowerroman {
+ list-style: lower-roman }
+
+ol.upperroman {
+ list-style: upper-roman }
+
+p.caption {
+ font-style: italic }
+
+p.credits {
+ font-style: italic ;
+ font-size: smaller }
+
+p.first {
+ margin-top: 0 }
+
+p.label {
+ white-space: nowrap }
+
+p.topic-title {
+ font-weight: bold }
+
+pre.literal-block, pre.doctest-block {
+ margin-left: 2em ;
+ margin-right: 2em ;
+ background-color: #eeeeee }
+
+span.classifier {
+ font-family: sans-serif ;
+ font-style: oblique }
+
+span.classifier-delimiter {
+ font-family: sans-serif ;
+ font-weight: bold }
+
+span.field-argument {
+ font-style: italic }
+
+span.interpreted {
+ font-family: sans-serif }
+
+span.option-argument {
+ font-style: italic }
+
+span.problematic {
+ color: red }
+
+table {
+ margin-top: 0.5em ;
+ margin-bottom: 0.5em }
+
+table.citation {
+ border-left: solid thin gray ;
+ padding-left: 0.5ex }
+
+table.docinfo {
+ margin: 2em 4em }
+
+table.footnote {
+ border-left: solid thin black ;
+ padding-left: 0.5ex }
+
+td, th {
+ padding-left: 0.5em ;
+ padding-right: 0.5em ;
+ vertical-align: baseline }
+
+td.docinfo-name {
+ font-weight: bold ;
+ text-align: right }
+
+td.field-name {
+ font-weight: bold }
diff --git a/numpy/f2py/docs/docutils.conf b/numpy/f2py/docs/docutils.conf
new file mode 100644
index 000000000..4e5a8425b
--- /dev/null
+++ b/numpy/f2py/docs/docutils.conf
@@ -0,0 +1,16 @@
+[general]
+
+# These entries affect all processing:
+#source-link: 1
+datestamp: %Y-%m-%d %H:%M UTC
+generator: 1
+
+# These entries affect HTML output:
+#stylesheet-path: pearu_style.css
+output-encoding: latin-1
+
+# These entries affect reStructuredText-style PEPs:
+#pep-template: pep-html-template
+#pep-stylesheet-path: stylesheets/pep.css
+#python-home: http://www.python.org
+#no-random: 1
diff --git a/numpy/f2py/docs/hello.f b/numpy/f2py/docs/hello.f
new file mode 100644
index 000000000..3e0dc6d21
--- /dev/null
+++ b/numpy/f2py/docs/hello.f
@@ -0,0 +1,7 @@
+C File hello.f
+ subroutine foo (a)
+ integer a
+ print*, "Hello from Fortran!"
+ print*, "a=",a
+ end
+
diff --git a/numpy/f2py/docs/pyforttest.pyf b/numpy/f2py/docs/pyforttest.pyf
new file mode 100644
index 000000000..79a9ae205
--- /dev/null
+++ b/numpy/f2py/docs/pyforttest.pyf
@@ -0,0 +1,5 @@
+subroutine foo(a,m,n)
+integer m = size(a,1)
+integer n = size(a,2)
+real, intent(inout) :: a(m,n)
+end subroutine foo
diff --git a/numpy/f2py/docs/pytest.py b/numpy/f2py/docs/pytest.py
new file mode 100644
index 000000000..abd3487df
--- /dev/null
+++ b/numpy/f2py/docs/pytest.py
@@ -0,0 +1,10 @@
+#File: pytest.py
+import Numeric
+def foo(a):
+ a = Numeric.array(a)
+ m,n = a.shape
+ for i in range(m):
+ for j in range(n):
+ a[i,j] = a[i,j] + 10*(i+1) + (j+1)
+ return a
+#eof
diff --git a/numpy/f2py/docs/simple.f b/numpy/f2py/docs/simple.f
new file mode 100644
index 000000000..ba468a509
--- /dev/null
+++ b/numpy/f2py/docs/simple.f
@@ -0,0 +1,13 @@
+cFile: simple.f
+ subroutine foo(a,m,n)
+ integer m,n,i,j
+ real a(m,n)
+cf2py intent(in,out) a
+cf2py intent(hide) m,n
+ do i=1,m
+ do j=1,n
+ a(i,j) = a(i,j) + 10*i+j
+ enddo
+ enddo
+ end
+cEOF
diff --git a/numpy/f2py/docs/simple_session.dat b/numpy/f2py/docs/simple_session.dat
new file mode 100644
index 000000000..10d9dc962
--- /dev/null
+++ b/numpy/f2py/docs/simple_session.dat
@@ -0,0 +1,51 @@
+>>> import pytest
+>>> import f2pytest
+>>> import pyforttest
+>>> print f2pytest.foo.__doc__
+foo - Function signature:
+ a = foo(a)
+Required arguments:
+ a : input rank-2 array('f') with bounds (m,n)
+Return objects:
+ a : rank-2 array('f') with bounds (m,n)
+
+>>> print pyforttest.foo.__doc__
+foo(a)
+
+>>> pytest.foo([[1,2],[3,4]])
+array([[12, 14],
+ [24, 26]])
+>>> f2pytest.foo([[1,2],[3,4]]) # F2PY can handle arbitrary input sequences
+array([[ 12., 14.],
+ [ 24., 26.]],'f')
+>>> pyforttest.foo([[1,2],[3,4]])
+Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+pyforttest.error: foo, argument A: Argument intent(inout) must be an array.
+
+>>> import Numeric
+>>> a=Numeric.array([[1,2],[3,4]],'f')
+>>> f2pytest.foo(a)
+array([[ 12., 14.],
+ [ 24., 26.]],'f')
+>>> a # F2PY makes a copy when input array is not Fortran contiguous
+array([[ 1., 2.],
+ [ 3., 4.]],'f')
+>>> a=Numeric.transpose(Numeric.array([[1,3],[2,4]],'f'))
+>>> a
+array([[ 1., 2.],
+ [ 3., 4.]],'f')
+>>> f2pytest.foo(a)
+array([[ 12., 14.],
+ [ 24., 26.]],'f')
+>>> a # F2PY passes Fortran contiguous input array directly to Fortran
+array([[ 12., 14.],
+ [ 24., 26.]],'f')
+# See intent(copy), intent(overwrite), intent(inplace), intent(inout)
+# attributes documentation to enhance the above behavior.
+
+>>> a=Numeric.array([[1,2],[3,4]],'f')
+>>> pyforttest.foo(a)
+>>> a # Huh? Pyfort 8.5 gives wrong results..
+array([[ 12., 23.],
+ [ 15., 26.]],'f')
diff --git a/numpy/f2py/docs/usersguide/allocarr.f90 b/numpy/f2py/docs/usersguide/allocarr.f90
new file mode 100644
index 000000000..e0d6c2ec8
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/allocarr.f90
@@ -0,0 +1,16 @@
+module mod
+ real, allocatable, dimension(:,:) :: b
+contains
+ subroutine foo
+ integer k
+ if (allocated(b)) then
+ print*, "b=["
+ do k = 1,size(b,1)
+ print*, b(k,1:size(b,2))
+ enddo
+ print*, "]"
+ else
+ print*, "b is not allocated"
+ endif
+ end subroutine foo
+end module mod
diff --git a/numpy/f2py/docs/usersguide/allocarr_session.dat b/numpy/f2py/docs/usersguide/allocarr_session.dat
new file mode 100644
index 000000000..fc91959b7
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/allocarr_session.dat
@@ -0,0 +1,27 @@
+>>> import allocarr
+>>> print allocarr.mod.__doc__
+b - 'f'-array(-1,-1), not allocated
+foo - Function signature:
+ foo()
+
+>>> allocarr.mod.foo()
+ b is not allocated
+>>> allocarr.mod.b = [[1,2,3],[4,5,6]] # allocate/initialize b
+>>> allocarr.mod.foo()
+ b=[
+ 1.000000 2.000000 3.000000
+ 4.000000 5.000000 6.000000
+ ]
+>>> allocarr.mod.b # b is Fortran-contiguous
+array([[ 1., 2., 3.],
+ [ 4., 5., 6.]],'f')
+>>> allocarr.mod.b = [[1,2,3],[4,5,6],[7,8,9]] # reallocate/initialize b
+>>> allocarr.mod.foo()
+ b=[
+ 1.000000 2.000000 3.000000
+ 4.000000 5.000000 6.000000
+ 7.000000 8.000000 9.000000
+ ]
+>>> allocarr.mod.b = None # deallocate array
+>>> allocarr.mod.foo()
+ b is not allocated
diff --git a/numpy/f2py/docs/usersguide/array.f b/numpy/f2py/docs/usersguide/array.f
new file mode 100644
index 000000000..ef20c9c20
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/array.f
@@ -0,0 +1,17 @@
+C FILE: ARRAY.F
+ SUBROUTINE FOO(A,N,M)
+C
+C INCREMENT THE FIRST ROW AND DECREMENT THE FIRST COLUMN OF A
+C
+ INTEGER N,M,I,J
+ REAL*8 A(N,M)
+Cf2py intent(in,out,copy) a
+Cf2py integer intent(hide),depend(a) :: n=shape(a,0), m=shape(a,1)
+ DO J=1,M
+ A(1,J) = A(1,J) + 1D0
+ ENDDO
+ DO I=1,N
+ A(I,1) = A(I,1) - 1D0
+ ENDDO
+ END
+C END OF FILE ARRAY.F
diff --git a/numpy/f2py/docs/usersguide/array_session.dat b/numpy/f2py/docs/usersguide/array_session.dat
new file mode 100644
index 000000000..f64933482
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/array_session.dat
@@ -0,0 +1,65 @@
+>>> import arr
+>>> from Numeric import array
+>>> print arr.foo.__doc__
+foo - Function signature:
+ a = foo(a,[overwrite_a])
+Required arguments:
+ a : input rank-2 array('d') with bounds (n,m)
+Optional arguments:
+ overwrite_a := 0 input int
+Return objects:
+ a : rank-2 array('d') with bounds (n,m)
+
+>>> a=arr.foo([[1,2,3],
+... [4,5,6]])
+copied an array using PyArray_CopyFromObject: size=6, elsize=8
+>>> print a
+[[ 1. 3. 4.]
+ [ 3. 5. 6.]]
+>>> a.iscontiguous(), arr.has_column_major_storage(a)
+(0, 1)
+>>> b=arr.foo(a) # even if a is proper-contiguous
+... # and has proper type, a copy is made
+... # forced by intent(copy) attribute
+... # to preserve its original contents
+...
+copied an array using copy_ND_array: size=6, elsize=8
+>>> print a
+[[ 1. 3. 4.]
+ [ 3. 5. 6.]]
+>>> print b
+[[ 1. 4. 5.]
+ [ 2. 5. 6.]]
+>>> b=arr.foo(a,overwrite_a=1) # a is passed directly to Fortran
+... # routine and its contents is discarded
+...
+>>> print a
+[[ 1. 4. 5.]
+ [ 2. 5. 6.]]
+>>> print b
+[[ 1. 4. 5.]
+ [ 2. 5. 6.]]
+>>> a is b # a and b are actually the same objects
+1
+>>> print arr.foo([1,2,3]) # different rank arrays are allowed
+copied an array using PyArray_CopyFromObject: size=3, elsize=8
+[ 1. 1. 2.]
+>>> print arr.foo([[[1],[2],[3]]])
+copied an array using PyArray_CopyFromObject: size=3, elsize=8
+[ [[ 1.]
+ [ 3.]
+ [ 4.]]]
+>>>
+>>> # Creating arrays with column major data storage order:
+...
+>>> s = arr.as_column_major_storage(array([[1,2,3],[4,5,6]]))
+copied an array using copy_ND_array: size=6, elsize=4
+>>> arr.has_column_major_storage(s)
+1
+>>> print s
+[[1 2 3]
+ [4 5 6]]
+>>> s2 = arr.as_column_major_storage(s)
+>>> s2 is s # an array with column major storage order
+ # is returned immediately
+1 \ No newline at end of file
diff --git a/numpy/f2py/docs/usersguide/calculate.f b/numpy/f2py/docs/usersguide/calculate.f
new file mode 100644
index 000000000..1cda1c8dd
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/calculate.f
@@ -0,0 +1,14 @@
+ subroutine calculate(x,n)
+cf2py intent(callback) func
+ external func
+c The following lines define the signature of func for F2PY:
+cf2py real*8 y
+cf2py y = func(y)
+c
+cf2py intent(in,out,copy) x
+ integer n,i
+ real*8 x(n)
+ do i=1,n
+ x(i) = func(x(i))
+ end do
+ end
diff --git a/numpy/f2py/docs/usersguide/calculate_session.dat b/numpy/f2py/docs/usersguide/calculate_session.dat
new file mode 100644
index 000000000..2fe64f522
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/calculate_session.dat
@@ -0,0 +1,6 @@
+>>> import foo
+>>> foo.calculate(range(5), lambda x: x*x)
+array([ 0., 1., 4., 9., 16.])
+>>> import math
+>>> foo.calculate(range(5), math.exp)
+array([ 1. , 2.71828175, 7.38905621, 20.08553696, 54.59814835])
diff --git a/numpy/f2py/docs/usersguide/callback.f b/numpy/f2py/docs/usersguide/callback.f
new file mode 100644
index 000000000..6e9bfb920
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/callback.f
@@ -0,0 +1,12 @@
+C FILE: CALLBACK.F
+ SUBROUTINE FOO(FUN,R)
+ EXTERNAL FUN
+ INTEGER I
+ REAL*8 R
+Cf2py intent(out) r
+ R = 0D0
+ DO I=-5,5
+ R = R + FUN(I)
+ ENDDO
+ END
+C END OF FILE CALLBACK.F
diff --git a/numpy/f2py/docs/usersguide/callback2.pyf b/numpy/f2py/docs/usersguide/callback2.pyf
new file mode 100644
index 000000000..3d77eed24
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/callback2.pyf
@@ -0,0 +1,19 @@
+! -*- f90 -*-
+python module __user__routines
+ interface
+ function fun(i) result (r)
+ integer :: i
+ real*8 :: r
+ end function fun
+ end interface
+end python module __user__routines
+
+python module callback2
+ interface
+ subroutine foo(f,r)
+ use __user__routines, f=>fun
+ external f
+ real*8 intent(out) :: r
+ end subroutine foo
+ end interface
+end python module callback2
diff --git a/numpy/f2py/docs/usersguide/callback_session.dat b/numpy/f2py/docs/usersguide/callback_session.dat
new file mode 100644
index 000000000..cd2f26084
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/callback_session.dat
@@ -0,0 +1,23 @@
+>>> import callback
+>>> print callback.foo.__doc__
+foo - Function signature:
+ r = foo(fun,[fun_extra_args])
+Required arguments:
+ fun : call-back function
+Optional arguments:
+ fun_extra_args := () input tuple
+Return objects:
+ r : float
+Call-back functions:
+ def fun(i): return r
+ Required arguments:
+ i : input int
+ Return objects:
+ r : float
+
+>>> def f(i): return i*i
+...
+>>> print callback.foo(f)
+110.0
+>>> print callback.foo(lambda i:1)
+11.0
diff --git a/numpy/f2py/docs/usersguide/common.f b/numpy/f2py/docs/usersguide/common.f
new file mode 100644
index 000000000..b098ab20c
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/common.f
@@ -0,0 +1,13 @@
+C FILE: COMMON.F
+ SUBROUTINE FOO
+ INTEGER I,X
+ REAL A
+ COMMON /DATA/ I,X(4),A(2,3)
+ PRINT*, "I=",I
+ PRINT*, "X=[",X,"]"
+ PRINT*, "A=["
+ PRINT*, "[",A(1,1),",",A(1,2),",",A(1,3),"]"
+ PRINT*, "[",A(2,1),",",A(2,2),",",A(2,3),"]"
+ PRINT*, "]"
+ END
+C END OF COMMON.F
diff --git a/numpy/f2py/docs/usersguide/common_session.dat b/numpy/f2py/docs/usersguide/common_session.dat
new file mode 100644
index 000000000..846fdaa07
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/common_session.dat
@@ -0,0 +1,27 @@
+>>> import common
+>>> print common.data.__doc__
+i - 'i'-scalar
+x - 'i'-array(4)
+a - 'f'-array(2,3)
+
+>>> common.data.i = 5
+>>> common.data.x[1] = 2
+>>> common.data.a = [[1,2,3],[4,5,6]]
+>>> common.foo()
+ I= 5
+ X=[ 0 2 0 0]
+ A=[
+ [ 1., 2., 3.]
+ [ 4., 5., 6.]
+ ]
+>>> common.data.a[1] = 45
+>>> common.foo()
+ I= 5
+ X=[ 0 2 0 0]
+ A=[
+ [ 1., 2., 3.]
+ [ 45., 45., 45.]
+ ]
+>>> common.data.a # a is Fortran-contiguous
+array([[ 1., 2., 3.],
+ [ 45., 45., 45.]],'f')
diff --git a/numpy/f2py/docs/usersguide/compile_session.dat b/numpy/f2py/docs/usersguide/compile_session.dat
new file mode 100644
index 000000000..0d8408198
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/compile_session.dat
@@ -0,0 +1,11 @@
+>>> import f2py2e
+>>> fsource = '''
+... subroutine foo
+... print*, "Hello world!"
+... end
+... '''
+>>> f2py2e.compile(fsource,modulename='hello',verbose=0)
+0
+>>> import hello
+>>> hello.foo()
+ Hello world!
diff --git a/numpy/f2py/docs/usersguide/default.css b/numpy/f2py/docs/usersguide/default.css
new file mode 100644
index 000000000..bb7226161
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/default.css
@@ -0,0 +1,180 @@
+/*
+:Author: David Goodger
+:Contact: goodger@users.sourceforge.net
+:date: $Date: 2002/12/07 23:59:33 $
+:version: $Revision: 1.2 $
+:copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+*/
+
+body {
+ background: #FFFFFF ;
+ color: #000000
+}
+
+a.footnote-reference {
+ font-size: smaller ;
+ vertical-align: super }
+
+a.target {
+ color: blue }
+
+a.toc-backref {
+ text-decoration: none ;
+ color: black }
+
+dd {
+ margin-bottom: 0.5em }
+
+div.abstract {
+ margin: 2em 5em }
+
+div.abstract p.topic-title {
+ font-weight: bold ;
+ text-align: center }
+
+div.attention, div.caution, div.danger, div.error, div.hint,
+div.important, div.note, div.tip, div.warning {
+ margin: 2em ;
+ border: medium outset ;
+ padding: 1em }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+ color: red ;
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.hint p.admonition-title, div.important p.admonition-title,
+div.note p.admonition-title, div.tip p.admonition-title {
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.dedication {
+ margin: 2em 5em ;
+ text-align: center ;
+ font-style: italic }
+
+div.dedication p.topic-title {
+ font-weight: bold ;
+ font-style: normal }
+
+div.figure {
+ margin-left: 2em }
+
+div.footer, div.header {
+ font-size: smaller }
+
+div.system-messages {
+ margin: 5em }
+
+div.system-messages h1 {
+ color: red }
+
+div.system-message {
+ border: medium outset ;
+ padding: 1em }
+
+div.system-message p.system-message-title {
+ color: red ;
+ font-weight: bold }
+
+div.topic {
+ margin: 2em }
+
+h1.title {
+ text-align: center }
+
+h2.subtitle {
+ text-align: center }
+
+hr {
+ width: 75% }
+
+ol.simple, ul.simple {
+ margin-bottom: 1em }
+
+ol.arabic {
+ list-style: decimal }
+
+ol.loweralpha {
+ list-style: lower-alpha }
+
+ol.upperalpha {
+ list-style: upper-alpha }
+
+ol.lowerroman {
+ list-style: lower-roman }
+
+ol.upperroman {
+ list-style: upper-roman }
+
+p.caption {
+ font-style: italic }
+
+p.credits {
+ font-style: italic ;
+ font-size: smaller }
+
+p.first {
+ margin-top: 0 }
+
+p.label {
+ white-space: nowrap }
+
+p.topic-title {
+ font-weight: bold }
+
+pre.literal-block, pre.doctest-block {
+ margin-left: 2em ;
+ margin-right: 2em ;
+ background-color: #ee9e9e }
+
+span.classifier {
+ font-family: sans-serif ;
+ font-style: oblique }
+
+span.classifier-delimiter {
+ font-family: sans-serif ;
+ font-weight: bold }
+
+span.field-argument {
+ font-style: italic }
+
+span.interpreted {
+ font-family: sans-serif }
+
+span.option-argument {
+ font-style: italic }
+
+span.problematic {
+ color: red }
+
+table {
+ margin-top: 0.5em ;
+ margin-bottom: 0.5em }
+
+table.citation {
+ border-left: solid thin gray ;
+ padding-left: 0.5ex }
+
+table.docinfo {
+ margin: 2em 4em }
+
+table.footnote {
+ border-left: solid thin black ;
+ padding-left: 0.5ex }
+
+td, th {
+ padding-left: 0.5em ;
+ padding-right: 0.5em ;
+ vertical-align: baseline }
+
+td.docinfo-name {
+ font-weight: bold ;
+ text-align: right }
+
+td.field-name {
+ font-weight: bold }
diff --git a/numpy/f2py/docs/usersguide/docutils.conf b/numpy/f2py/docs/usersguide/docutils.conf
new file mode 100644
index 000000000..b772fd137
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/docutils.conf
@@ -0,0 +1,16 @@
+[general]
+
+# These entries affect all processing:
+#source-link: 1
+datestamp: %Y-%m-%d %H:%M UTC
+generator: 1
+
+# These entries affect HTML output:
+#stylesheet-path: f2py_style.css
+output-encoding: latin-1
+
+# These entries affect reStructuredText-style PEPs:
+#pep-template: pep-html-template
+#pep-stylesheet-path: stylesheets/pep.css
+#python-home: http://www.python.org
+#no-random: 1
diff --git a/numpy/f2py/docs/usersguide/extcallback.f b/numpy/f2py/docs/usersguide/extcallback.f
new file mode 100644
index 000000000..9a800628e
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/extcallback.f
@@ -0,0 +1,14 @@
+ subroutine f1()
+ print *, "in f1, calling f2 twice.."
+ call f2()
+ call f2()
+ return
+ end
+
+ subroutine f2()
+cf2py intent(callback, hide) fpy
+ external fpy
+ print *, "in f2, calling f2py.."
+ call fpy()
+ return
+ end
diff --git a/numpy/f2py/docs/usersguide/extcallback_session.dat b/numpy/f2py/docs/usersguide/extcallback_session.dat
new file mode 100644
index 000000000..c22935ea0
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/extcallback_session.dat
@@ -0,0 +1,19 @@
+>>> import pfromf
+>>> pfromf.f2()
+Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+pfromf.error: Callback fpy not defined (as an argument or module pfromf attribute).
+
+>>> def f(): print "python f"
+...
+>>> pfromf.fpy = f
+>>> pfromf.f2()
+ in f2, calling f2py..
+python f
+>>> pfromf.f1()
+ in f1, calling f2 twice..
+ in f2, calling f2py..
+python f
+ in f2, calling f2py..
+python f
+>>> \ No newline at end of file
diff --git a/numpy/f2py/docs/usersguide/fib1.f b/numpy/f2py/docs/usersguide/fib1.f
new file mode 100644
index 000000000..cfbb1eea0
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/fib1.f
@@ -0,0 +1,18 @@
+C FILE: FIB1.F
+ SUBROUTINE FIB(A,N)
+C
+C CALCULATE FIRST N FIBONACCI NUMBERS
+C
+ INTEGER N
+ REAL*8 A(N)
+ DO I=1,N
+ IF (I.EQ.1) THEN
+ A(I) = 0.0D0
+ ELSEIF (I.EQ.2) THEN
+ A(I) = 1.0D0
+ ELSE
+ A(I) = A(I-1) + A(I-2)
+ ENDIF
+ ENDDO
+ END
+C END FILE FIB1.F
diff --git a/numpy/f2py/docs/usersguide/fib1.pyf b/numpy/f2py/docs/usersguide/fib1.pyf
new file mode 100644
index 000000000..3d6cc0a54
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/fib1.pyf
@@ -0,0 +1,12 @@
+! -*- f90 -*-
+python module fib2 ! in
+ interface ! in :fib2
+ subroutine fib(a,n) ! in :fib2:fib1.f
+ real*8 dimension(n) :: a
+ integer optional,check(len(a)>=n),depend(a) :: n=len(a)
+ end subroutine fib
+ end interface
+end python module fib2
+
+! This file was auto-generated with f2py (version:2.28.198-1366).
+! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/numpy/f2py/docs/usersguide/fib2.pyf b/numpy/f2py/docs/usersguide/fib2.pyf
new file mode 100644
index 000000000..4a5ae29f1
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/fib2.pyf
@@ -0,0 +1,9 @@
+! -*- f90 -*-
+python module fib2
+ interface
+ subroutine fib(a,n)
+ real*8 dimension(n),intent(out),depend(n) :: a
+ integer intent(in) :: n
+ end subroutine fib
+ end interface
+end python module fib2
diff --git a/numpy/f2py/docs/usersguide/fib3.f b/numpy/f2py/docs/usersguide/fib3.f
new file mode 100644
index 000000000..08b050cd2
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/fib3.f
@@ -0,0 +1,21 @@
+C FILE: FIB3.F
+ SUBROUTINE FIB(A,N)
+C
+C CALCULATE FIRST N FIBONACCI NUMBERS
+C
+ INTEGER N
+ REAL*8 A(N)
+Cf2py intent(in) n
+Cf2py intent(out) a
+Cf2py depend(n) a
+ DO I=1,N
+ IF (I.EQ.1) THEN
+ A(I) = 0.0D0
+ ELSEIF (I.EQ.2) THEN
+ A(I) = 1.0D0
+ ELSE
+ A(I) = A(I-1) + A(I-2)
+ ENDIF
+ ENDDO
+ END
+C END FILE FIB3.F
diff --git a/numpy/f2py/docs/usersguide/ftype.f b/numpy/f2py/docs/usersguide/ftype.f
new file mode 100644
index 000000000..cabbb9e2d
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/ftype.f
@@ -0,0 +1,9 @@
+C FILE: FTYPE.F
+ SUBROUTINE FOO(N)
+ INTEGER N
+Cf2py integer optional,intent(in) :: n = 13
+ REAL A,X
+ COMMON /DATA/ A,X(3)
+ PRINT*, "IN FOO: N=",N," A=",A," X=[",X(1),X(2),X(3),"]"
+ END
+C END OF FTYPE.F
diff --git a/numpy/f2py/docs/usersguide/ftype_session.dat b/numpy/f2py/docs/usersguide/ftype_session.dat
new file mode 100644
index 000000000..01f9febaf
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/ftype_session.dat
@@ -0,0 +1,21 @@
+>>> import ftype
+>>> print ftype.__doc__
+This module 'ftype' is auto-generated with f2py (version:2.28.198-1366).
+Functions:
+ foo(n=13)
+COMMON blocks:
+ /data/ a,x(3)
+.
+>>> type(ftype.foo),type(ftype.data)
+(<type 'fortran'>, <type 'fortran'>)
+>>> ftype.foo()
+ IN FOO: N= 13 A= 0. X=[ 0. 0. 0.]
+>>> ftype.data.a = 3
+>>> ftype.data.x = [1,2,3]
+>>> ftype.foo()
+ IN FOO: N= 13 A= 3. X=[ 1. 2. 3.]
+>>> ftype.data.x[1] = 45
+>>> ftype.foo(24)
+ IN FOO: N= 24 A= 3. X=[ 1. 45. 3.]
+>>> ftype.data.x
+array([ 1., 45., 3.],'f')
diff --git a/numpy/f2py/docs/usersguide/index.txt b/numpy/f2py/docs/usersguide/index.txt
new file mode 100644
index 000000000..9fafb99fb
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/index.txt
@@ -0,0 +1,1772 @@
+.. -*- rest -*-
+
+//////////////////////////////////////////////////////////////////////
+ F2PY Users Guide and Reference Manual
+//////////////////////////////////////////////////////////////////////
+
+:Author: Pearu Peterson
+:Contact: pearu@cens.ioc.ee
+:Web site: http://cens.ioc.ee/projects/f2py2e/
+:Date: $Date: 2005/04/02 10:03:26 $
+:Revision: $Revision: 1.27 $
+
+
+.. section-numbering::
+
+.. Contents::
+
+
+================
+ Introduction
+================
+
+The purpose of the F2PY_ --*Fortran to Python interface generator*--
+project is to provide a connection between Python and Fortran
+languages. F2PY is a Python_ package (with a command line tool
+``f2py`` and a module ``f2py2e``) that facilitates creating/building
+Python C/API extension modules that make it possible
+
+* to call Fortran 77/90/95 external subroutines and Fortran 90/95
+ module subroutines as well as C functions;
+* to access Fortran 77 ``COMMON`` blocks and Fortran 90/95 module data,
+ including allocatable arrays
+
+from Python. See F2PY_ web site for more information and installation
+instructions.
+
+======================================
+ Three ways to wrap - getting started
+======================================
+
+Wrapping Fortran or C functions to Python using F2PY consists of the
+following steps:
+
+* Creating the so-called signature file that contains descriptions of
+ wrappers to Fortran or C functions, also called as signatures of the
+ functions. In the case of Fortran routines, F2PY can create initial
+ signature file by scanning Fortran source codes and
+ catching all relevant information needed to create wrapper
+ functions.
+
+* Optionally, F2PY created signature files can be edited to optimize
+  wrapper functions, make them "smarter" and more "Pythonic".
+
+* F2PY reads a signature file and writes a Python C/API module containing
+ Fortran/C/Python bindings.
+
+* F2PY compiles all sources and builds an extension module containing
+ the wrappers. In building extension modules, F2PY uses
+ ``scipy_distutils`` that supports a number of Fortran 77/90/95
+ compilers, including Gnu, Intel,
+  Sun Forte, SGI MIPSpro, Absoft, NAG, Compaq etc. compilers.
+
+Depending on a particular situation, these steps can be carried out
+either by just in one command or step-by-step, some steps can be
+omitted or combined with others.
+
+Below I'll describe three typical approaches of using F2PY.
+The following `example Fortran 77 code`__ will be used for
+illustration:
+
+.. include:: fib1.f
+ :literal:
+
+__ fib1.f
+
+The quick way
+==============
+
+The quickest way to wrap the Fortran subroutine ``FIB`` to Python is
+to run
+
+::
+
+ f2py -c fib1.f -m fib1
+
+This command builds (see ``-c`` flag, execute ``f2py`` without
+arguments to see the explanation of command line options) an extension
+module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in
+Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``::
+
+ >>> import Numeric
+ >>> import fib1
+ >>> print fib1.fib.__doc__
+ fib - Function signature:
+ fib(a,[n])
+ Required arguments:
+ a : input rank-1 array('d') with bounds (n)
+ Optional arguments:
+ n := len(a) input int
+
+ >>> a=Numeric.zeros(8,'d')
+ >>> fib1.fib(a)
+ >>> print a
+ [ 0. 1. 1. 2. 3. 5. 8. 13.]
+
+.. topic:: Comments
+
+ * Note that F2PY found that the second argument ``n`` is the
+ dimension of the first array argument ``a``. Since by default all
+ arguments are input-only arguments, F2PY concludes that ``n`` can
+ be optional with the default value ``len(a)``.
+
+ * One can use different values for optional ``n``::
+
+ >>> a1=Numeric.zeros(8,'d')
+ >>> fib1.fib(a1,6)
+ >>> print a1
+ [ 0. 1. 1. 2. 3. 5. 0. 0.]
+
+ but an exception is raised when it is incompatible with the input
+ array ``a``::
+
+ >>> fib1.fib(a,10)
+ fib:n=10
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ fib.error: (len(a)>=n) failed for 1st keyword n
+ >>>
+
+    This demonstrates one of the useful features in F2PY, that is,
+ F2PY implements basic compatibility checks between related
+ arguments in order to avoid any unexpected crashes.
+
+ * When a Numeric array, that is Fortran contiguous and has a typecode
+ corresponding to presumed Fortran type, is used as an input array
+ argument, then its C pointer is directly passed to Fortran.
+
+ Otherwise F2PY makes a contiguous copy (with a proper typecode) of
+ the input array and passes C pointer of the copy to Fortran
+ subroutine. As a result, any possible changes to the (copy of)
+ input array have no effect to the original argument, as
+ demonstrated below::
+
+ >>> a=Numeric.ones(8,'i')
+ >>> fib1.fib(a)
+ >>> print a
+ [1 1 1 1 1 1 1 1]
+
+ Clearly, this is not an expected behaviour. The fact that the
+ above example worked with ``typecode='d'`` is considered
+ accidental.
+
+ F2PY provides ``intent(inplace)`` attribute that would modify
+ the attributes of an input array so that any changes made by
+ Fortran routine will be effective also in input argument. For example,
+ if one specifies ``intent(inplace) a`` (see below, how), then
+ the example above would read:
+
+ >>> a=Numeric.ones(8,'i')
+ >>> fib1.fib(a)
+ >>> print a
+ [ 0. 1. 1. 2. 3. 5. 8. 13.]
+
+ However, the recommended way to get changes made by Fortran
+ subroutine back to python is to use ``intent(out)`` attribute. It
+ is more efficient and a cleaner solution.
+
+ * The usage of ``fib1.fib`` in Python is very similar to using
+ ``FIB`` in Fortran. However, using *in situ* output arguments in
+ Python indicates a poor style as there is no safety mechanism
+ in Python with respect to wrong argument types. When using Fortran
+ or C, compilers naturally discover any type mismatches during
+ compile time but in Python the types must be checked in
+ runtime. So, using *in situ* output arguments in Python may cause
+ difficult to find bugs, not to mention that the codes will be less
+ readable when all required type checks are implemented.
+
+ Though the demonstrated way of wrapping Fortran routines to Python
+ is very straightforward, it has several drawbacks (see the comments
+ above). These drawbacks are due to the fact that there is no way
+  that F2PY can determine what is the actual intention of one or the
+ other argument, is it input or output argument, or both, or
+ something else. So, F2PY conservatively assumes that all arguments
+ are input arguments by default.
+
+ However, there are ways (see below) how to "teach" F2PY about the
+ true intentions (among other things) of function arguments; and then
+ F2PY is able to generate more Pythonic (more explicit, easier to
+ use, and less error prone) wrappers to Fortran functions.
+
+The smart way
+==============
+
+Let's apply the steps of wrapping Fortran functions to Python one by
+one.
+
+* First, we create a signature file from ``fib1.f`` by running
+
+ ::
+
+ f2py fib1.f -m fib2 -h fib1.pyf
+
+ The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and
+ its contents is shown below.
+
+ .. include:: fib1.pyf
+ :literal:
+
+* Next, we'll teach F2PY that the argument ``n`` is an input argument
+ (use ``intent(in)`` attribute) and that the result, i.e. the
+ contents of ``a`` after calling Fortran function ``FIB``, should be
+ returned to Python (use ``intent(out)`` attribute). In addition, an
+ array ``a`` should be created dynamically using the size given by
+ the input argument ``n`` (use ``depend(n)`` attribute to indicate
+ dependence relation).
+
+ The content of a modified version of ``fib1.pyf`` (saved as
+ ``fib2.pyf``) is as follows:
+
+ .. include:: fib2.pyf
+ :literal:
+
+* And finally, we build the extension module by running
+
+ ::
+
+ f2py -c fib2.pyf fib1.f
+
+In Python::
+
+ >>> import fib2
+ >>> print fib2.fib.__doc__
+ fib - Function signature:
+ a = fib(n)
+ Required arguments:
+ n : input int
+ Return objects:
+ a : rank-1 array('d') with bounds (n)
+
+ >>> print fib2.fib(8)
+ [ 0. 1. 1. 2. 3. 5. 8. 13.]
+
+.. topic:: Comments
+
+ * Clearly, the signature of ``fib2.fib`` now corresponds to the
+ intention of Fortran subroutine ``FIB`` more closely: given the
+ number ``n``, ``fib2.fib`` returns the first ``n`` Fibonacci numbers
+ as a Numeric array. Also, the new Python signature ``fib2.fib``
+ rules out any surprises that we experienced with ``fib1.fib``.
+
+ * Note that by default using single ``intent(out)`` also implies
+ ``intent(hide)``. Argument that has ``intent(hide)`` attribute
+ specified, will not be listed in the argument list of a wrapper
+ function.
+
+The quick and smart way
+========================
+
+The "smart way" of wrapping Fortran functions, as explained above, is
+suitable for wrapping (e.g. third party) Fortran codes for which
+modifications to their source codes are not desirable nor even
+possible.
+
+However, if editing Fortran codes is acceptable, then the generation
+of an intermediate signature file can be skipped in most
+cases. Namely, F2PY specific attributes can be inserted directly to
+Fortran source codes using the so-called F2PY directive. A F2PY
+directive defines special comment lines (starting with ``Cf2py``, for
+example) which are ignored by Fortran compilers but F2PY interprets
+them as normal lines.
+
+Here is shown a `modified version of the example Fortran code`__, saved
+as ``fib3.f``:
+
+.. include:: fib3.f
+ :literal:
+
+__ fib3.f
+
+Building the extension module can be now carried out in one command::
+
+ f2py -c -m fib3 fib3.f
+
+Notice that the resulting wrapper to ``FIB`` is as "smart" as in
+previous case::
+
+ >>> import fib3
+ >>> print fib3.fib.__doc__
+ fib - Function signature:
+ a = fib(n)
+ Required arguments:
+ n : input int
+ Return objects:
+ a : rank-1 array('d') with bounds (n)
+
+ >>> print fib3.fib(8)
+ [ 0. 1. 1. 2. 3. 5. 8. 13.]
+
+
+==================
+ Signature file
+==================
+
+The syntax specification for signature files (.pyf files) is borrowed
+from the Fortran 90/95 language specification. Almost all Fortran
+90/95 standard constructs are understood, both in free and fixed
+format (recall that Fortran 77 is a subset of Fortran 90/95). F2PY
+introduces also some extensions to Fortran 90/95 language
+specification that help designing Fortran to Python interface, make it
+more "Pythonic".
+
+Signature files may contain arbitrary Fortran code (so that Fortran
+codes can be considered as signature files). F2PY silently ignores
+Fortran constructs that are irrelevant for creating the interface.
+However, this also includes syntax errors. So, be careful not to
+make any ;-).
+
+In general, the contents of signature files is case-sensitive. When
+scanning Fortran codes and writing a signature file, F2PY lowers all
+cases automatically except in multi-line blocks or when ``--no-lower``
+option is used.
+
+The syntax of signature files is overviewed below.
+
+Python module block
+=====================
+
+A signature file may contain one (recommended) or more ``python
+module`` blocks. ``python module`` block describes the contents of
+a Python/C extension module ``<modulename>module.c`` that F2PY
+generates.
+
+Exception: if ``<modulename>`` contains a substring ``__user__``, then
+the corresponding ``python module`` block describes the signatures of
+so-called call-back functions (see `Call-back arguments`_).
+
+A ``python module`` block has the following structure::
+
+ python module <modulename>
+ [<usercode statement>]...
+ [
+ interface
+ <usercode statement>
+ <Fortran block data signatures>
+ <Fortran/C routine signatures>
+ end [interface]
+ ]...
+ [
+ interface
+ module <F90 modulename>
+ [<F90 module data type declarations>]
+ [<F90 module routine signatures>]
+ end [module [<F90 modulename>]]
+ end [interface]
+ ]...
+ end [python module [<modulename>]]
+
+Here brackets ``[]`` indicate an optional part, dots ``...`` indicate
+one or more of a previous part. So, ``[]...`` reads zero or more of a
+previous part.
+
+
+Fortran/C routine signatures
+=============================
+
+The signature of a Fortran routine has the following structure::
+
+ [<typespec>] function | subroutine <routine name> \
+ [ ( [<arguments>] ) ] [ result ( <entityname> ) ]
+ [<argument/variable type declarations>]
+ [<argument/variable attribute statements>]
+ [<use statements>]
+ [<common block statements>]
+ [<other statements>]
+ end [ function | subroutine [<routine name>] ]
+
+From a Fortran routine signature F2PY generates a Python/C extension
+function that has the following signature::
+
+ def <routine name>(<required arguments>[,<optional arguments>]):
+ ...
+ return <return variables>
+
+The signature of a Fortran block data has the following structure::
+
+ block data [ <block data name> ]
+ [<variable type declarations>]
+ [<variable attribute statements>]
+ [<use statements>]
+ [<common block statements>]
+ [<include statements>]
+ end [ block data [<block data name>] ]
+
+Type declarations
+-------------------
+
+ The definition of the ``<argument/variable type declaration>`` part
+ is
+
+ ::
+
+ <typespec> [ [<attrspec>] :: ] <entitydecl>
+
+ where
+
+ ::
+
+ <typespec> := byte | character [<charselector>]
+ | complex [<kindselector>] | real [<kindselector>]
+ | double complex | double precision
+ | integer [<kindselector>] | logical [<kindselector>]
+
+ <charselector> := * <charlen>
+ | ( [len=] <len> [ , [kind=] <kind>] )
+ | ( kind= <kind> [ , len= <len> ] )
+ <kindselector> := * <intlen> | ( [kind=] <kind> )
+
+ <entitydecl> := <name> [ [ * <charlen> ] [ ( <arrayspec> ) ]
+ | [ ( <arrayspec> ) ] * <charlen> ]
+ | [ / <init_expr> / | = <init_expr> ] \
+ [ , <entitydecl> ]
+
+ and
+
+ + ``<attrspec>`` is a comma separated list of attributes_;
+
+ + ``<arrayspec>`` is a comma separated list of dimension bounds;
+
+ + ``<init_expr>`` is a `C expression`__.
+
+  + ``<intlen>`` may be a negative integer for ``integer`` type
+    specifications. In such cases ``integer*<negintlen>`` represents
+    unsigned C integers.
+
+__ `C expressions`_
+
+ If an argument has no ``<argument type declaration>``, its type is
+ determined by applying ``implicit`` rules to its name.
+
+
+Statements
+------------
+
+Attribute statements:
+
+ The ``<argument/variable attribute statement>`` is
+ ``<argument/variable type declaration>`` without ``<typespec>``.
+ In addition, in an attribute statement one cannot use other
+ attributes, also ``<entitydecl>`` can be only a list of names.
+
+Use statements:
+
+ The definition of the ``<use statement>`` part is
+
+ ::
+
+ use <modulename> [ , <rename_list> | , ONLY : <only_list> ]
+
+ where
+
+ ::
+
+ <rename_list> := <local_name> => <use_name> [ , <rename_list> ]
+
+ Currently F2PY uses ``use`` statement only for linking call-back
+ modules and ``external`` arguments (call-back functions), see
+ `Call-back arguments`_.
+
+Common block statements:
+
+ The definition of the ``<common block statement>`` part is
+
+ ::
+
+ common / <common name> / <shortentitydecl>
+
+ where
+
+ ::
+
+ <shortentitydecl> := <name> [ ( <arrayspec> ) ] [ , <shortentitydecl> ]
+
+ One ``python module`` block should not contain two or more
+ ``common`` blocks with the same name. Otherwise, the latter ones are
+ ignored. The types of variables in ``<shortentitydecl>`` are defined
+ using ``<argument type declarations>``. Note that the corresponding
+ ``<argument type declarations>`` may contain array specifications;
+ then you don't need to specify these in ``<shortentitydecl>``.
+
+Other statements:
+
+ The ``<other statement>`` part refers to any other Fortran language
+ constructs that are not described above. F2PY ignores most of them
+ except
+
+ + ``call`` statements and function calls of ``external`` arguments
+ (`more details`__?);
+
+__ external_
+
+ + ``include`` statements
+
+ ::
+
+ include '<filename>'
+ include "<filename>"
+
+ If a file ``<filename>`` does not exist, the ``include``
+ statement is ignored. Otherwise, the file ``<filename>`` is
+ included to a signature file. ``include`` statements can be used
+ in any part of a signature file, also outside the Fortran/C
+ routine signature blocks.
+
+ + ``implicit`` statements
+
+ ::
+
+ implicit none
+ implicit <list of implicit maps>
+
+ where
+
+ ::
+
+ <implicit map> := <typespec> ( <list of letters or range of letters> )
+
+      Implicit rules are used to determine the type specification of
+      a variable (from the first letter of its name) if the variable
+      is not defined using ``<variable type declaration>``. Default
+ implicit rule is given by
+
+ ::
+
+ implicit real (a-h,o-z,$_), integer (i-m)
+
+ + ``entry`` statements
+
+ ::
+
+ entry <entry name> [([<arguments>])]
+
+ F2PY generates wrappers to all entry names using the signature
+ of the routine block.
+
+ Tip: ``entry`` statement can be used to describe the signature
+ of an arbitrary routine allowing F2PY to generate a number of
+ wrappers from only one routine block signature. There are few
+ restrictions while doing this: ``fortranname`` cannot be used,
+ ``callstatement`` and ``callprotoargument`` can be used only if
+ they are valid for all entry routines, etc.
+
+ In addition, F2PY introduces the following statements:
+
+ + ``threadsafe``
+ Use ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block
+ around the call to Fortran/C function.
+
+ + ``callstatement <C-expr|multi-line block>``
+ Replace F2PY generated call statement to Fortran/C function with
+ ``<C-expr|multi-line block>``. The wrapped Fortran/C function
+ is available as ``(*f2py_func)``. To raise an exception, set
+ ``f2py_success = 0`` in ``<C-expr|multi-line block>``.
+
+ + ``callprotoargument <C-typespecs>``
+ When ``callstatement`` statement is used then F2PY may not
+ generate proper prototypes for Fortran/C functions (because
+ ``<C-expr>`` may contain any function calls and F2PY has no way
+ to determine what should be the proper prototype). With this
+    statement you can explicitly specify the arguments of the
+    corresponding prototype::
+
+ extern <return type> FUNC_F(<routine name>,<ROUTINE NAME>)(<callprotoargument>);
+
+  + ``fortranname [<actual Fortran/C routine name>]``
+    You can use arbitrary ``<routine name>`` for a given Fortran/C
+    function. Then you have to specify
+    ``<actual Fortran/C routine name>`` with this statement.
+
+    If ``fortranname`` statement is used without
+    ``<actual Fortran/C routine name>`` then a dummy wrapper is
+    generated.
+
+ + ``usercode <multi-line block>``
+ When used inside ``python module`` block, then given C code
+ will be inserted to generated C/API source just before
+ wrapper function definitions. Here you can define arbitrary
+ C functions to be used in initialization of optional arguments,
+    for example. If ``usercode`` is used twice inside ``python
+    module`` block then the second multi-line block is inserted
+    after the definition of external routines.
+
+    When used inside ``<routine signature>``, then given C code will
+ be inserted to the corresponding wrapper function just after
+ declaring variables but before any C statements. So, ``usercode``
+ follow-up can contain both declarations and C statements.
+
+ When used inside the first ``interface`` block, then given C
+ code will be inserted at the end of the initialization
+ function of the extension module. Here you can modify extension
+ modules dictionary. For example, for defining additional
+ variables etc.
+
+ + ``pymethoddef <multi-line block>``
+ Multiline block will be inserted to the definition of
+ module methods ``PyMethodDef``-array. It must be a
+ comma-separated list of C arrays (see `Extending and Embedding`__
+ Python documentation for details).
+ ``pymethoddef`` statement can be used only inside
+ ``python module`` block.
+
+ __ http://www.python.org/doc/current/ext/ext.html
+
+Attributes
+------------
+
+The following attributes are used by F2PY:
+
+``optional``
+ The corresponding argument is moved to the end of ``<optional
+ arguments>`` list. A default value for an optional argument can be
+ specified ``<init_expr>``, see ``entitydecl`` definition. Note that
+ the default value must be given as a valid C expression.
+
+ Note that whenever ``<init_expr>`` is used, ``optional`` attribute
+ is set automatically by F2PY.
+
+ For an optional array argument, all its dimensions must be bounded.
+
+``required``
+ The corresponding argument is considered as a required one. This is
+ default. You need to specify ``required`` only if there is a need to
+ disable automatic ``optional`` setting when ``<init_expr>`` is used.
+
+  If Python ``None`` object is used as a required argument, the
+  argument is treated as optional. That is, in the case of array
+  argument, the memory is allocated. And if ``<init_expr>`` is given,
+  the corresponding initialization is carried out.
+
+``dimension(<arrayspec>)``
+ The corresponding variable is considered as an array with given
+ dimensions in ``<arrayspec>``.
+
+``intent(<intentspec>)``
+ This specifies the "intention" of the corresponding
+ argument. ``<intentspec>`` is a comma separated list of the
+ following keys:
+
+ + ``in``
+ The argument is considered as an input-only argument. It means
+ that the value of the argument is passed to Fortran/C function and
+ that function is expected not to change the value of an argument.
+
+ + ``inout``
+ The argument is considered as an input/output or *in situ*
+ output argument. ``intent(inout)`` arguments can be only
+ "contiguous" Numeric arrays with proper type and size. Here
+ "contiguous" can be either in Fortran or C sense. The latter one
+ coincides with the contiguous concept used in Numeric and is
+ effective only if ``intent(c)`` is used. Fortran-contiguousness
+ is assumed by default.
+
+ Using ``intent(inout)`` is generally not recommended, use
+ ``intent(in,out)`` instead. See also ``intent(inplace)`` attribute.
+
+ + ``inplace``
+ The argument is considered as an input/output or *in situ*
+ output argument. ``intent(inplace)`` arguments must be
+ Numeric arrays with proper size. If the type of an array is
+ not "proper" or the array is non-contiguous then the array
+ will be changed in-place to fix the type and make it contiguous.
+
+ Using ``intent(inplace)`` is generally not recommended either.
+ For example, when slices have been taken from an
+ ``intent(inplace)`` argument then after in-place changes,
+ slices data pointers may point to unallocated memory area.
+
+ + ``out``
+    The argument is considered as a return variable. It is appended
+    to the ``<returned variables>`` list. Using ``intent(out)``
+ sets ``intent(hide)`` automatically, unless also
+ ``intent(in)`` or ``intent(inout)`` were used.
+
+ By default, returned multidimensional arrays are
+ Fortran-contiguous. If ``intent(c)`` is used, then returned
+ multi-dimensional arrays are C-contiguous.
+
+ + ``hide``
+ The argument is removed from the list of required or optional
+ arguments. Typically ``intent(hide)`` is used with ``intent(out)``
+ or when ``<init_expr>`` completely determines the value of the
+ argument like in the following example::
+
+ integer intent(hide),depend(a) :: n = len(a)
+ real intent(in),dimension(n) :: a
+
+ + ``c``
+ The argument is treated as a C scalar or C array argument. In
+ the case of a scalar argument, its value is passed to C function
+ as a C scalar argument (recall that Fortran scalar arguments are
+ actually C pointer arguments). In the case of an array
+ argument, the wrapper function is assumed to treat
+ multi-dimensional arrays as C-contiguous arrays.
+
+ There is no need to use ``intent(c)`` for one-dimensional
+ arrays, no matter if the wrapped function is either a Fortran or
+ a C function. This is because the concepts of Fortran- and
+ C-contiguousness overlap in one-dimensional cases.
+
+    If ``intent(c)`` is used as a statement but without entity
+    declaration list, then F2PY adds ``intent(c)`` attribute to all
+    arguments.
+
+ Also, when wrapping C functions, one must use ``intent(c)``
+ attribute for ``<routine name>`` in order to disable Fortran
+ specific ``F_FUNC(..,..)`` macros.
+
+ + ``cache``
+    The argument is treated as a chunk of memory. No Fortran nor C
+ contiguousness checks are carried out. Using ``intent(cache)``
+ makes sense only for array arguments, also in connection with
+ ``intent(hide)`` or ``optional`` attributes.
+
+ + ``copy``
+ Ensure that the original contents of ``intent(in)`` argument is
+ preserved. Typically used in connection with ``intent(in,out)``
+ attribute. F2PY creates an optional argument
+ ``overwrite_<argument name>`` with the default value ``0``.
+
+ + ``overwrite``
+ The original contents of the ``intent(in)`` argument may be
+ altered by the Fortran/C function. F2PY creates an optional
+ argument ``overwrite_<argument name>`` with the default value
+ ``1``.
+
+ + ``out=<new name>``
+ Replace the return name with ``<new name>`` in the ``__doc__``
+ string of a wrapper function.
+
+ + ``callback``
+ Construct an external function suitable for calling Python function
+ from Fortran. ``intent(callback)`` must be specified before the
+ corresponding ``external`` statement. If 'argument' is not in
+ argument list then it will be added to Python wrapper but only
+ initializing external function.
+
+ Use ``intent(callback)`` in situations where a Fortran/C code
+ assumes that a user implements a function with given prototype
+ and links it to an executable. Don't use ``intent(callback)``
+ if function appears in the argument list of a Fortran routine.
+
+ With ``intent(hide)`` or ``optional`` attributes specified and
+ using a wrapper function without specifying the callback argument
+ in argument list then call-back function is looked in the
+ namespace of F2PY generated extension module where it can be
+ set as a module attribute by a user.
+
+ + ``aux``
+ Define auxiliary C variable in F2PY generated wrapper function.
+ Useful to save parameter values so that they can be accessed
+ in initialization expression of other variables. Note that
+ ``intent(aux)`` silently implies ``intent(c)``.
+
+ The following rules apply:
+
+ + If no ``intent(in | inout | out | hide)`` is specified,
+ ``intent(in)`` is assumed.
+ + ``intent(in,inout)`` is ``intent(in)``.
+ + ``intent(in,hide)`` or ``intent(inout,hide)`` is
+ ``intent(hide)``.
+ + ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or
+ ``intent(inout)`` is specified.
+ + If ``intent(copy)`` or ``intent(overwrite)`` is used, then an
+ additional optional argument is introduced with a name
+ ``overwrite_<argument name>`` and a default value 0 or 1, respectively.
+ + ``intent(inout,inplace)`` is ``intent(inplace)``.
+ + ``intent(in,inplace)`` is ``intent(inplace)``.
+ + ``intent(hide)`` disables ``optional`` and ``required``.
+
+``check([<C-booleanexpr>])``
+ Perform consistency check of arguments by evaluating
+ ``<C-booleanexpr>``; if ``<C-booleanexpr>`` returns 0, an exception
+ is raised.
+
+ If ``check(..)`` is not used then F2PY generates few standard checks
+ (e.g. in a case of an array argument, check for the proper shape
+ and size) automatically. Use ``check()`` to disable checks generated
+ by F2PY.
+
+``depend([<names>])``
+ This declares that the corresponding argument depends on the values
+ of variables in the list ``<names>``. For example, ``<init_expr>``
+ may use the values of other arguments. Using information given by
+ ``depend(..)`` attributes, F2PY ensures that arguments are
+ initialized in a proper order. If ``depend(..)`` attribute is not
+ used then F2PY determines dependence relations automatically. Use
+ ``depend()`` to disable dependence relations generated by F2PY.
+
+ When you edit dependence relations that were initially generated by
+ F2PY, be careful not to break the dependence relations of other
+ relevant variables. Another thing to watch out is cyclic
+ dependencies. F2PY is able to detect cyclic dependencies
+ when constructing wrappers and it complains if any are found.
+
+``allocatable``
+ The corresponding variable is Fortran 90 allocatable array defined
+ as Fortran 90 module data.
+
+.. _external:
+
+``external``
+ The corresponding argument is a function provided by user. The
+ signature of this so-called call-back function can be defined
+
+ - in ``__user__`` module block,
+ - or by demonstrative (or real, if the signature file is a real Fortran
+ code) call in the ``<other statements>`` block.
+
+ For example, F2PY generates from
+
+ ::
+
+ external cb_sub, cb_fun
+ integer n
+ real a(n),r
+ call cb_sub(a,n)
+ r = cb_fun(4)
+
+ the following call-back signatures::
+
+ subroutine cb_sub(a,n)
+ real dimension(n) :: a
+ integer optional,check(len(a)>=n),depend(a) :: n=len(a)
+ end subroutine cb_sub
+ function cb_fun(e_4_e) result (r)
+ integer :: e_4_e
+ real :: r
+ end function cb_fun
+
+  The corresponding user-provided Python functions are then::
+
+ def cb_sub(a,[n]):
+ ...
+ return
+ def cb_fun(e_4_e):
+ ...
+ return r
+
+ See also ``intent(callback)`` attribute.
+
+``parameter``
+ The corresponding variable is a parameter and it must have a fixed
+ value. F2PY replaces all parameter occurrences by their
+ corresponding values.
+
+Extensions
+============
+
+F2PY directives
+-----------------
+
+The so-called F2PY directives allow using F2PY signature file
+constructs also in Fortran 77/90 source codes. With this feature you
+can skip (almost) completely intermediate signature file generations
+and apply F2PY directly to Fortran source codes.
+
+F2PY directive has the following form::
+
+ <comment char>f2py ...
+
+where allowed comment characters for fixed and free format Fortran
+codes are ``cC*!#`` and ``!``, respectively. Everything that follows
+``<comment char>f2py`` is ignored by a compiler but read by F2PY as a
+normal Fortran (non-comment) line:
+
+ When F2PY finds a line with F2PY directive, the directive is first
+ replaced by 5 spaces and then the line is reread.
+
+For fixed format Fortran codes, ``<comment char>`` must be at the
+first column of a file, of course. For free format Fortran codes,
+F2PY directives can appear anywhere in a file.
+
+C expressions
+--------------
+
+C expressions are used in the following parts of signature files:
+
+* ``<init_expr>`` of variable initialization;
+* ``<C-booleanexpr>`` of the ``check`` attribute;
+* ``<arrayspec>`` of the ``dimension`` attribute;
+* ``callstatement`` statement, here also a C multi-line block can be used.
+
+A C expression may contain:
+
+* standard C constructs;
+* functions from ``math.h`` and ``Python.h``;
+* variables from the argument list, presumably initialized before
+ according to given dependence relations;
+* the following CPP macros:
+
+ ``rank(<name>)``
+ Returns the rank of an array ``<name>``.
+ ``shape(<name>,<n>)``
+ Returns the ``<n>``-th dimension of an array ``<name>``.
+  ``len(<name>)``
+    Returns the length of an array ``<name>``.
+ ``size(<name>)``
+ Returns the size of an array ``<name>``.
+ ``slen(<name>)``
+ Returns the length of a string ``<name>``.
+
+For initializing an array ``<array name>``, F2PY generates a loop over
+all indices and dimensions that executes the following
+pseudo-statement::
+
+ <array name>(_i[0],_i[1],...) = <init_expr>;
+
+where ``_i[<i>]`` refers to the ``<i>``-th index value and that runs
+from ``0`` to ``shape(<array name>,<i>)-1``.
+
+For example, a function ``myrange(n)`` generated from the following
+signature
+
+::
+
+ subroutine myrange(a,n)
+ fortranname ! myrange is a dummy wrapper
+ integer intent(in) :: n
+ real*8 intent(c,out),dimension(n),depend(n) :: a = _i[0]
+ end subroutine myrange
+
+is equivalent to ``Numeric.arange(n,typecode='d')``.
+
+.. topic:: Warning!
+
+ F2PY may lower cases also in C expressions when scanning Fortran codes
+ (see ``--[no]-lower`` option).
+
+Multi-line blocks
+------------------
+
+A multi-line block starts with ``'''`` (triple single-quotes) and ends
+with ``'''`` in some *strictly* subsequent line. Multi-line blocks can
+be used only within .pyf files. The contents of a multi-line block can
+be arbitrary (except that it cannot contain ``'''``) and no
+transformations (e.g. lowering cases) are applied to it.
+
+Currently, multi-line blocks can be used in the following constructs:
+
++ as a C expression of the ``callstatement`` statement;
+
++ as a C type specification of the ``callprotoargument`` statement;
+
++ as a C code block of the ``usercode`` statement;
+
++ as a list of C arrays of the ``pymethoddef`` statement;
+
++ as documentation string.
+
+==================================
+Using F2PY bindings in Python
+==================================
+
+All wrappers (to Fortran/C routines or to common blocks or to Fortran
+90 module data) generated by F2PY are exposed to Python as ``fortran``
+type objects. Routine wrappers are callable ``fortran`` type objects
+while wrappers to Fortran data have attributes referring to data
+objects.
+
+All ``fortran`` type objects have attribute ``_cpointer`` that contains
+a CObject referring to the C pointer of the corresponding Fortran/C
+function or variable at C level. Such CObjects can be used as a
+callback argument of F2PY generated functions to bypass Python C/API
+layer of calling Python functions from Fortran or C when the
+computational part of such functions is implemented in C or Fortran
+and wrapped with F2PY (or any other tool capable of providing CObject
+of a function).
+
+.. topic:: Example
+
+ Consider a `Fortran 77 file`__ ``ftype.f``:
+
+ .. include:: ftype.f
+ :literal:
+
+ and build a wrapper using::
+
+ f2py -c ftype.f -m ftype
+
+ __ ftype.f
+
+ In Python:
+
+ .. include:: ftype_session.dat
+ :literal:
+
+
+Scalar arguments
+=================
+
+In general, a scalar argument of a F2PY generated wrapper function can
+be an ordinary Python scalar (integer, float, complex number) as well as
+an arbitrary sequence object (list, tuple, array, string) of
+scalars. In the latter case, the first element of the sequence object
+is passed to Fortran routine as a scalar argument.
+
+Note that when type-casting is required and there is possible loss of
+information (e.g. when type-casting float to integer or complex to
+float), F2PY does not raise any exception. In complex to real
+type-casting only the real part of a complex number is used.
+
+``intent(inout)`` scalar arguments are assumed to be array objects in
+order for *in situ* changes to be effective. It is recommended to use
+arrays with proper type but also other types work.
+
+.. topic:: Example
+
+ Consider the following `Fortran 77 code`__:
+
+ .. include:: scalar.f
+ :literal:
+
+ and wrap it using ``f2py -c -m scalar scalar.f``.
+
+ __ scalar.f
+
+ In Python:
+
+ .. include:: scalar_session.dat
+ :literal:
+
+
+String arguments
+=================
+
+F2PY generated wrapper functions accept (almost) any Python object as
+a string argument, ``str`` is applied for non-string objects.
+Exceptions are Numeric arrays that must have type code ``'c'`` or
+``'1'`` when used as string arguments.
+
+A string can have arbitrary length when using it as a string argument
+to F2PY generated wrapper function. If the length is greater than
+expected, the string is truncated. If the length is smaller than
+expected, additional memory is allocated and filled with ``\0``.
+
+Because Python strings are immutable, an ``intent(inout)`` argument
+expects an array version of a string in order for *in situ* changes to
+be effective.
+
+.. topic:: Example
+
+ Consider the following `Fortran 77 code`__:
+
+ .. include:: string.f
+ :literal:
+
+ and wrap it using ``f2py -c -m mystring string.f``.
+
+ __ string.f
+
+ Python session:
+
+ .. include:: string_session.dat
+ :literal:
+
+
+Array arguments
+================
+
+In general, array arguments of F2PY generated wrapper functions accept
+arbitrary sequences that can be transformed to Numeric array objects.
+An exception is ``intent(inout)`` array arguments that always must be
+proper-contiguous and have proper type, otherwise an exception is
+raised. Another exception is ``intent(inplace)`` array arguments whose
+attributes will be changed in-situ if the argument has different type
+than expected (see ``intent(inplace)`` attribute for more
+information).
+
+In general, if a Numeric array is proper-contiguous and has a proper
+type then it is directly passed to wrapped Fortran/C function.
+Otherwise, an element-wise copy of an input array is made and the
+copy, being proper-contiguous and with proper type, is used as an
+array argument.
+
+There are two types of proper-contiguous Numeric arrays:
+
+* Fortran-contiguous arrays when data is stored column-wise,
+ i.e. indexing of data as stored in memory starts from the lowest
+ dimension;
+* C-contiguous or simply contiguous arrays when data is stored
+ row-wise, i.e. indexing of data as stored in memory starts from the
+ highest dimension.
+
+For one-dimensional arrays these notions coincide.
+
+For example, an 2x2 array ``A`` is Fortran-contiguous if its elements
+are stored in memory in the following order::
+
+ A[0,0] A[1,0] A[0,1] A[1,1]
+
+and C-contiguous if the order is as follows::
+
+ A[0,0] A[0,1] A[1,0] A[1,1]
+
+To test whether an array is C-contiguous, use ``.iscontiguous()``
+method of Numeric arrays. To test for Fortran-contiguousness, all
+F2PY generated extension modules provide a function
+``has_column_major_storage(<array>)``. This function is equivalent to
+``Numeric.transpose(<array>).iscontiguous()`` but more efficient.
+
+Usually there is no need to worry about how the arrays are stored in
+memory and whether the wrapped functions, being either Fortran or C
+functions, assume one or another storage order. F2PY automatically
+ensures that wrapped functions get arguments with proper storage
+order; the corresponding algorithm is designed to make copies of
+arrays only when absolutely necessary. However, when dealing with very
+large multi-dimensional input arrays with sizes close to the size of
+the physical memory in your computer, then care must be taken to
+always use proper-contiguous and proper type arguments.
+
+To transform input arrays to column major storage order before passing
+them to Fortran routines, use a function
+``as_column_major_storage(<array>)`` that is provided by all F2PY
+generated extension modules.
+
+.. topic:: Example
+
+ Consider `Fortran 77 code`__:
+
+ .. include:: array.f
+ :literal:
+
+ and wrap it using ``f2py -c -m arr array.f -DF2PY_REPORT_ON_ARRAY_COPY=1``.
+
+ __ array.f
+
+ In Python:
+
+ .. include:: array_session.dat
+ :literal:
+
+Call-back arguments
+====================
+
+F2PY supports calling Python functions from Fortran or C codes.
+
+
+.. topic:: Example
+
+ Consider the following `Fortran 77 code`__
+
+ .. include:: callback.f
+ :literal:
+
+ and wrap it using ``f2py -c -m callback callback.f``.
+
+ __ callback.f
+
+ In Python:
+
+ .. include:: callback_session.dat
+ :literal:
+
+In the above example F2PY was able to guess accurately the signature
+of a call-back function. However, sometimes F2PY cannot establish the
+signature as one would wish and then the signature of a call-back
+function must be modified in the signature file manually. Namely,
+signature files may contain special modules (the names of such modules
+contain a substring ``__user__``) that collect various signatures of
+call-back functions. Callback arguments in routine signatures have
+attribute ``external`` (see also ``intent(callback)`` attribute). To
+relate a callback argument and its signature in ``__user__`` module
+block, use ``use`` statement as illustrated below. The same signature
+of a callback argument can be referred in different routine
+signatures.
+
+.. topic:: Example
+
+ We use the same `Fortran 77 code`__ as in previous example but now
+ we'll pretend that F2PY was not able to guess the signatures of
+ call-back arguments correctly. First, we create an initial signature
+ file ``callback2.pyf`` using F2PY::
+
+ f2py -m callback2 -h callback2.pyf callback.f
+
+ Then modify it as follows
+
+ .. include:: callback2.pyf
+ :literal:
+
+ Finally, build the extension module using::
+
+ f2py -c callback2.pyf callback.f
+
+ An example Python session would be identical to the previous example
+ except that argument names would differ.
+
+ __ callback.f
+
+Sometimes a Fortran package may require that users provide routines
+that the package will use. F2PY can construct an interface to such
+routines so that Python functions could be called from Fortran.
+
+.. topic:: Example
+
+ Consider the following `Fortran 77 subroutine`__ that takes an array
+ and applies a function ``func`` to its elements.
+
+ .. include:: calculate.f
+ :literal:
+
+ __ calculate.f
+
+ It is expected that function ``func`` has been defined
+ externally. In order to use a Python function as ``func``, it must
+ have an attribute ``intent(callback)`` (it must be specified before
+ the ``external`` statement).
+
+ Finally, build an extension module using::
+
+ f2py -c -m foo calculate.f
+
+ In Python:
+
+ .. include:: calculate_session.dat
+ :literal:
+
+The function is included as an argument to the python function call to
+the FORTRAN subroutine even though it was NOT in the FORTRAN subroutine argument
+list. The "external" refers to the C function generated by f2py, not the python
+function itself. The python function must be supplied to the C function.
+
+The callback function may also be explicitly set in the module.
+Then it is not necessary to pass the function in the argument list to
+the FORTRAN function. This may be desired if the FORTRAN function calling
+the python callback function is itself called by another FORTRAN function.
+
+.. topic:: Example
+
+ Consider the following `Fortran 77 subroutine`__.
+
+ .. include:: extcallback.f
+ :literal:
+
+ __ extcallback.f
+
+ and wrap it using ``f2py -c -m pfromf extcallback.f``.
+
+ In Python:
+
+ .. include:: extcallback_session.dat
+ :literal:
+
+Resolving arguments to call-back functions
+------------------------------------------
+
+F2PY generated interface is very flexible with respect to call-back
+arguments. For each call-back argument an additional optional
+argument ``<name>_extra_args`` is introduced by F2PY. This argument
+can be used to pass extra arguments to user provided call-back
+arguments.
+
+If a F2PY generated wrapper function expects the following call-back
+argument::
+
+ def fun(a_1,...,a_n):
+ ...
+ return x_1,...,x_k
+
+but the following Python function
+
+::
+
+ def gun(b_1,...,b_m):
+ ...
+ return y_1,...,y_l
+
+is provided by an user, and in addition,
+
+::
+
+ fun_extra_args = (e_1,...,e_p)
+
+is used, then the following rules are applied when a Fortran or C
+function calls the call-back argument ``gun``:
+
+* If ``p==0`` then ``gun(a_1,...,a_q)`` is called, here
+ ``q=min(m,n)``.
+* If ``n+p<=m`` then ``gun(a_1,...,a_n,e_1,...,e_p)`` is called.
+* If ``p<=m<n+p`` then ``gun(a_1,...,a_q,e_1,...,e_p)`` is called, here
+ ``q=m-p``.
+* If ``p>m`` then ``gun(e_1,...,e_m)`` is called.
+* If ``n+p`` is less than the number of required arguments to ``gun``
+ then an exception is raised.
+
+The function ``gun`` may return any number of objects as a tuple. Then
+following rules are applied:
+
+* If ``k<l``, then ``y_{k+1},...,y_l`` are ignored.
+* If ``k>l``, then only ``x_1,...,x_l`` are set.
+
+
+
+Common blocks
+==============
+
+F2PY generates wrappers to ``common`` blocks defined in a routine
+signature block. Common blocks are visible by all Fortran codes linked
+with the current extension module, but not to other extension modules
+(this restriction is due to how Python imports shared libraries). In
+Python, the F2PY wrappers to ``common`` blocks are ``fortran`` type
+objects that have (dynamic) attributes related to data members of
+common blocks. When accessed, these attributes return as Numeric array
+objects (multi-dimensional arrays are Fortran-contiguous) that
+directly link to data members in common blocks. Data members can be
+changed by direct assignment or by in-place changes to the
+corresponding array objects.
+
+.. topic:: Example
+
+ Consider the following `Fortran 77 code`__
+
+ .. include:: common.f
+ :literal:
+
+ and wrap it using ``f2py -c -m common common.f``.
+
+ __ common.f
+
+ In Python:
+
+ .. include:: common_session.dat
+ :literal:
+
+Fortran 90 module data
+=======================
+
+The F2PY interface to Fortran 90 module data is similar to Fortran 77
+common blocks.
+
+.. topic:: Example
+
+ Consider the following `Fortran 90 code`__
+
+ .. include:: moddata.f90
+ :literal:
+
+ and wrap it using ``f2py -c -m moddata moddata.f90``.
+
+ __ moddata.f90
+
+ In Python:
+
+ .. include:: moddata_session.dat
+ :literal:
+
+Allocatable arrays
+-------------------
+
+F2PY has basic support for Fortran 90 module allocatable arrays.
+
+.. topic:: Example
+
+ Consider the following `Fortran 90 code`__
+
+ .. include:: allocarr.f90
+ :literal:
+
+ and wrap it using ``f2py -c -m allocarr allocarr.f90``.
+
+ __ allocarr.f90
+
+ In Python:
+
+ .. include:: allocarr_session.dat
+ :literal:
+
+
+===========
+Using F2PY
+===========
+
+F2PY can be used either as a command line tool ``f2py`` or as a Python
+module ``f2py2e``.
+
+Command ``f2py``
+=================
+
+When used as a command line tool, ``f2py`` has three major modes,
+distinguished by the usage of ``-c`` and ``-h`` switches:
+
+1. To scan Fortran sources and generate a signature file, use
+
+ ::
+
+ f2py -h <filename.pyf> <options> <fortran files> \
+ [[ only: <fortran functions> : ] \
+ [ skip: <fortran functions> : ]]... \
+ [<fortran files> ...]
+
+ Note that a Fortran source file can contain many routines, and not
+ necessarily all routines are needed to be used from Python. So, you
+ can either specify which routines should be wrapped (in ``only: .. :``
+ part) or which routines F2PY should ignore (in ``skip: .. :`` part).
+
+ If ``<filename.pyf>`` is specified as ``stdout`` then signatures
+ are sent to standard output instead of a file.
+
+ Among other options (see below), the following options can be used
+ in this mode:
+
+ ``--overwrite-signature``
+ Overwrite existing signature file.
+
+2. To construct an extension module, use
+
+ ::
+
+ f2py <options> <fortran files> \
+ [[ only: <fortran functions> : ] \
+ [ skip: <fortran functions> : ]]... \
+ [<fortran files> ...]
+
+ The constructed extension module is saved as
+ ``<modulename>module.c`` to the current directory.
+
+ Here ``<fortran files>`` may also contain signature files.
+ Among other options (see below), the following options can be used
+ in this mode:
+
+ ``--debug-capi``
+ Add debugging hooks to the extension module. When using this
+ extension module, various information about the wrapper is printed
+ to standard output, for example, the values of variables, the
+ steps taken, etc.
+
+ ``-include'<includefile>'``
+ Add a CPP ``#include`` statement to the extension module source.
+ ``<includefile>`` should be given in one of the following forms::
+
+ "filename.ext"
+ <filename.ext>
+
+ The include statement is inserted just before the wrapper
+ functions. This feature enables using arbitrary C functions
+ (defined in ``<includefile>``) in F2PY generated wrappers.
+
+ This option is deprecated. Use ``usercode`` statement to specify
+ C codelets directly in signature files.
+
+
+ ``--[no-]wrap-functions``
+
+ Create Fortran subroutine wrappers to Fortran functions.
+ ``--wrap-functions`` is default because it ensures maximum
+ portability and compiler independence.
+
+ ``--include-paths <path1>:<path2>:..``
+ Search include files from given directories.
+
+ ``--help-link [<list of resources names>]``
+ List system resources found by ``scipy_distutils/system_info.py``.
+ For example, try ``f2py --help-link lapack_opt``.
+
+3. To build an extension module, use
+
+ ::
+
+ f2py -c <options> <fortran files> \
+ [[ only: <fortran functions> : ] \
+ [ skip: <fortran functions> : ]]... \
+ [ <fortran/c source files> ] [ <.o, .a, .so files> ]
+
+ If ``<fortran files>`` contains a signature file, then a source for
+ an extension module is constructed, all Fortran and C sources are
+ compiled, and finally all object and library files are linked to the
+ extension module ``<modulename>.so`` which is saved into the current
+ directory.
+
+ If ``<fortran files>`` does not contain a signature file, then an
+ extension module is constructed by scanning all Fortran source codes
+ for routine signatures.
+
+ Among other options (see below) and options described in previous
+ mode, the following options can be used in this mode:
+
+ ``--help-fcompiler``
+ List available Fortran compilers.
+ ``--help-compiler`` [deprecated]
+ List available Fortran compilers.
+ ``--fcompiler=<Vendor>``
+ Specify Fortran compiler type by vendor.
+ ``--f77exec=<path>``
+ Specify the path to F77 compiler
+ ``--fcompiler-exec=<path>`` [deprecated]
+ Specify the path to F77 compiler
+ ``--f90exec=<path>``
+ Specify the path to F90 compiler
+ ``--f90compiler-exec=<path>`` [deprecated]
+ Specify the path to F90 compiler
+
+ ``--f77flags=<string>``
+ Specify F77 compiler flags
+ ``--f90flags=<string>``
+ Specify F90 compiler flags
+ ``--opt=<string>``
+ Specify optimization flags
+ ``--arch=<string>``
+ Specify architecture specific optimization flags
+ ``--noopt``
+ Compile without optimization
+ ``--noarch``
+ Compile without arch-dependent optimization
+ ``--debug``
+ Compile with debugging information
+
+ ``-l<libname>``
+ Use the library ``<libname>`` when linking.
+ ``-D<macro>[=<defn=1>]``
+ Define macro ``<macro>`` as ``<defn>``.
+ ``-U<macro>``
+ Undefine macro ``<macro>``.
+ ``-I<dir>``
+ Append directory ``<dir>`` to the list of directories searched for
+ include files.
+ ``-L<dir>``
+ Add directory ``<dir>`` to the list of directories to be searched
+ for ``-l``.
+
+ ``--link-<resource>``
+
+ Link extension module with <resource> as defined by
+ ``scipy_distutils/system_info.py``. E.g. to link with optimized
+ LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use
+ ``--link-lapack_opt``. See also ``--help-link`` switch.
+
+ When building an extension module, a combination of the following
+ macros may be required for non-gcc Fortran compilers::
+
+ -DPREPEND_FORTRAN
+ -DNO_APPEND_FORTRAN
+ -DUPPERCASE_FORTRAN
+
+ To test the performance of F2PY generated interfaces, use
+ ``-DF2PY_REPORT_ATEXIT``. Then a report of various timings is
+ printed out at the exit of Python. This feature may not work on
+ all platforms, currently only Linux platform is supported.
+
+ To see whether F2PY generated interface performs copies of array
+ arguments, use ``-DF2PY_REPORT_ON_ARRAY_COPY=<int>``. When the size
+ of an array argument is larger than ``<int>``, a message about
+ the copying is sent to ``stderr``.
+
+Other options:
+
+``-m <modulename>``
+ Name of an extension module. Default is ``untitled``. Don't use this option
+ if a signature file (*.pyf) is used.
+``--[no-]lower``
+ Do [not] lower the cases in ``<fortran files>``. By default,
+ ``--lower`` is assumed with ``-h`` switch, and ``--no-lower``
+ without the ``-h`` switch.
+``--build-dir <dirname>``
+ All F2PY generated files are created in ``<dirname>``. Default is
+ ``tempfile.mktemp()``.
+``--quiet``
+ Run quietly.
+``--verbose``
+ Run with extra verbosity.
+``-v``
+ Print f2py version ID and exit.
+
+Execute ``f2py`` without any options to get an up-to-date list of
+available options.
+
+Python module ``f2py2e``
+=========================
+
+.. topic:: Warning
+
+ The current Python interface to ``f2py2e`` module is not mature and
+ may change in future depending on users' needs.
+
+The following functions are provided by the ``f2py2e`` module:
+
+``run_main(<list>)``
+ Equivalent to running::
+
+ f2py <args>
+
+ where ``<args>=string.join(<list>,' ')``, but in Python. Unless
+ ``-h`` is used, this function returns a dictionary containing
+ information on generated modules and their dependencies on source
+ files. For example, the command ``f2py -m scalar scalar.f`` can be
+ executed from Python as follows
+
+ .. include:: run_main_session.dat
+ :literal:
+
+ You cannot build extension modules with this function, that is,
+ using ``-c`` is not allowed. Use ``compile`` command instead, see
+ below.
+
+``compile(source, modulename='untitled', extra_args='', verbose=1, source_fn=None)``
+
+ Build extension module from Fortran 77 source string ``source``.
+ Return 0 if successful.
+ Note that this function actually calls ``f2py -c ..`` from shell to
+ ensure safety of the current Python process.
+ For example,
+
+ .. include:: compile_session.dat
+ :literal:
+
+==========================
+Using ``scipy_distutils``
+==========================
+
+``scipy_distutils`` is part of the SciPy_ project and aims to extend
+standard Python ``distutils`` to deal with Fortran sources and F2PY
+signature files, e.g. compile Fortran sources, call F2PY to construct
+extension modules, etc.
+
+.. topic:: Example
+
+ Consider the following `setup file`__:
+
+ .. include:: setup_example.py
+ :literal:
+
+ Running
+
+ ::
+
+ python setup_example.py build
+
+ will build two extension modules ``scalar`` and ``fib2`` to the
+ build directory.
+
+ __ setup_example.py
+
+``scipy_distutils`` extends ``distutils`` with the following features:
+
+* ``Extension`` class argument ``sources`` may contain Fortran source
+ files. In addition, the list ``sources`` may contain at most one
+ F2PY signature file, and then the name of an Extension module must
+ match with the ``<modulename>`` used in signature file. It is
+ assumed that an F2PY signature file contains exactly one ``python
+ module`` block.
+
+ If ``sources`` does not contain a signature file, then F2PY is used
+ to scan Fortran source files for routine signatures to construct the
+ wrappers to Fortran codes.
+
+ Additional options to F2PY process can be given using ``Extension``
+ class argument ``f2py_options``.
+
+``scipy_distutils`` 0.2.2 and up
+================================
+
+* The following new ``distutils`` commands are defined:
+
+ ``build_src``
+ to construct Fortran wrapper extension modules, among many other things.
+ ``config_fc``
+ to change Fortran compiler options
+
+ as well as ``build_ext`` and ``build_clib`` commands are enhanced
+ to support Fortran sources.
+
+ Run
+
+ ::
+
+ python <setup.py file> config_fc build_src build_ext --help
+
+ to see available options for these commands.
+
+* When building Python packages containing Fortran sources, then one
+ can choose different Fortran compilers by using ``build_ext``
+ command option ``--fcompiler=<Vendor>``. Here ``<Vendor>`` can be one of the
+ following names::
+
+ absoft sun mips intel intelv intele intelev nag compaq compaqv gnu vast pg hpux
+
+ See ``scipy_distutils/fcompiler.py`` for up-to-date list of
+ supported compilers or run
+
+ ::
+
+ f2py -c --help-fcompiler
+
+``scipy_distutils`` pre 0.2.2
+=============================
+
+* The following new ``distutils`` commands are defined:
+
+ ``build_flib``
+ to build f77/f90 libraries used by Python extensions;
+ ``run_f2py``
+ to construct Fortran wrapper extension modules.
+
+ Run
+
+ ::
+
+ python <setup.py file> build_flib run_f2py --help
+
+ to see available options for these commands.
+
+* When building Python packages containing Fortran sources, then one
+ can choose different Fortran compilers either by using ``build_flib``
+ command option ``--fcompiler=<Vendor>`` or by defining environment
+ variable ``FC_VENDOR=<Vendor>``. Here ``<Vendor>`` can be one of the
+ following names::
+
+ Absoft Sun SGI Intel Itanium NAG Compaq Digital Gnu VAST PG
+
+ See ``scipy_distutils/command/build_flib.py`` for up-to-date list of
+ supported compilers.
+
+======================
+ Extended F2PY usages
+======================
+
+Adding self-written functions to F2PY generated modules
+=======================================================
+
+Self-written Python C/API functions can be defined inside
+signature files using ``usercode`` and ``pymethoddef`` statements
+(they must be used inside the ``python module`` block). For
+example, the following signature file ``spam.pyf``
+
+.. include:: spam.pyf
+ :literal:
+
+wraps the C library function ``system()``::
+
+ f2py -c spam.pyf
+
+In Python:
+
+.. include:: spam_session.dat
+ :literal:
+
+Modifying the dictionary of a F2PY generated module
+===================================================
+
+The following example illustrates how to add user-defined
+variables to a F2PY generated extension module. Given the following
+signature file
+
+.. include:: var.pyf
+ :literal:
+
+compile it as ``f2py -c var.pyf``.
+
+Notice that the second ``usercode`` statement must be defined inside
+an ``interface`` block, where the module dictionary is available through
+the variable ``d`` (see ``f2py var.pyf``-generated ``varmodule.c`` for
+additional details).
+
+In Python:
+
+.. include:: var_session.dat
+ :literal:
+
+.. References
+ ==========
+.. _F2PY: http://cens.ioc.ee/projects/f2py2e/
+.. _Python: http://www.python.org/
+.. _NumPy: http://www.numpy.org/
+.. _SciPy: http://www.scipy.org/
diff --git a/numpy/f2py/docs/usersguide/moddata.f90 b/numpy/f2py/docs/usersguide/moddata.f90
new file mode 100644
index 000000000..0e98f0467
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/moddata.f90
@@ -0,0 +1,18 @@
+module mod
+ integer i
+ integer :: x(4)
+ real, dimension(2,3) :: a
+ real, allocatable, dimension(:,:) :: b
+contains
+ subroutine foo
+ integer k
+ print*, "i=",i
+ print*, "x=[",x,"]"
+ print*, "a=["
+ print*, "[",a(1,1),",",a(1,2),",",a(1,3),"]"
+ print*, "[",a(2,1),",",a(2,2),",",a(2,3),"]"
+ print*, "]"
+ print*, "Setting a(1,2)=a(1,2)+3"
+ a(1,2) = a(1,2)+3
+ end subroutine foo
+end module mod
diff --git a/numpy/f2py/docs/usersguide/moddata_session.dat b/numpy/f2py/docs/usersguide/moddata_session.dat
new file mode 100644
index 000000000..1ec212f8b
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/moddata_session.dat
@@ -0,0 +1,23 @@
+>>> import moddata
+>>> print moddata.mod.__doc__
+i - 'i'-scalar
+x - 'i'-array(4)
+a - 'f'-array(2,3)
+foo - Function signature:
+ foo()
+
+
+>>> moddata.mod.i = 5
+>>> moddata.mod.x[:2] = [1,2]
+>>> moddata.mod.a = [[1,2,3],[4,5,6]]
+>>> moddata.mod.foo()
+ i= 5
+ x=[ 1 2 0 0 ]
+ a=[
+ [ 1.000000 , 2.000000 , 3.000000 ]
+ [ 4.000000 , 5.000000 , 6.000000 ]
+ ]
+ Setting a(1,2)=a(1,2)+3
+>>> moddata.mod.a # a is Fortran-contiguous
+array([[ 1., 5., 3.],
+ [ 4., 5., 6.]],'f')
diff --git a/numpy/f2py/docs/usersguide/run_main_session.dat b/numpy/f2py/docs/usersguide/run_main_session.dat
new file mode 100644
index 000000000..29ecc3dfe
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/run_main_session.dat
@@ -0,0 +1,14 @@
+>>> import f2py2e
+>>> r=f2py2e.run_main(['-m','scalar','docs/usersguide/scalar.f'])
+Reading fortran codes...
+ Reading file 'docs/usersguide/scalar.f'
+Post-processing...
+ Block: scalar
+ Block: FOO
+Building modules...
+ Building module "scalar"...
+ Wrote C/API module "scalar" to file "./scalarmodule.c"
+>>> print r
+{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py2e/src/fortranobject.h'],
+ 'csrc': ['./scalarmodule.c',
+ '/home/users/pearu/src_cvs/f2py2e/src/fortranobject.c']}}
diff --git a/numpy/f2py/docs/usersguide/scalar.f b/numpy/f2py/docs/usersguide/scalar.f
new file mode 100644
index 000000000..c22f639ed
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/scalar.f
@@ -0,0 +1,12 @@
+C FILE: SCALAR.F
+ SUBROUTINE FOO(A,B)
+ REAL*8 A, B
+Cf2py intent(in) a
+Cf2py intent(inout) b
+ PRINT*, " A=",A," B=",B
+ PRINT*, "INCREMENT A AND B"
+ A = A + 1D0
+ B = B + 1D0
+ PRINT*, "NEW A=",A," B=",B
+ END
+C END OF FILE SCALAR.F
diff --git a/numpy/f2py/docs/usersguide/scalar_session.dat b/numpy/f2py/docs/usersguide/scalar_session.dat
new file mode 100644
index 000000000..4fe8c03b1
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/scalar_session.dat
@@ -0,0 +1,21 @@
+>>> import scalar
+>>> print scalar.foo.__doc__
+foo - Function signature:
+ foo(a,b)
+Required arguments:
+ a : input float
+ b : in/output rank-0 array(float,'d')
+
+>>> scalar.foo(2,3)
+ A= 2. B= 3.
+ INCREMENT A AND B
+ NEW A= 3. B= 4.
+>>> import Numeric
+>>> a=Numeric.array(2) # these are integer rank-0 arrays
+>>> b=Numeric.array(3)
+>>> scalar.foo(a,b)
+ A= 2. B= 3.
+ INCREMENT A AND B
+ NEW A= 3. B= 4.
+>>> print a,b # note that only b is changed in situ
+2 4 \ No newline at end of file
diff --git a/numpy/f2py/docs/usersguide/setup_example.py b/numpy/f2py/docs/usersguide/setup_example.py
new file mode 100644
index 000000000..a7d27403a
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/setup_example.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# File: setup_example.py
+
+from scipy_distutils.core import Extension
+
+ext1 = Extension(name = 'scalar',
+ sources = ['scalar.f'])
+ext2 = Extension(name = 'fib2',
+ sources = ['fib2.pyf','fib1.f'])
+
+if __name__ == "__main__":
+ from scipy_distutils.core import setup
+ setup(name = 'f2py_example',
+ description = "F2PY Users Guide examples",
+ author = "Pearu Peterson",
+ author_email = "pearu@cens.ioc.ee",
+ ext_modules = [ext1,ext2]
+ )
+# End of setup_example.py
diff --git a/numpy/f2py/docs/usersguide/spam.pyf b/numpy/f2py/docs/usersguide/spam.pyf
new file mode 100644
index 000000000..21ea18b77
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/spam.pyf
@@ -0,0 +1,19 @@
+! -*- f90 -*-
+python module spam
+ usercode '''
+ static char doc_spam_system[] = "Execute a shell command.";
+ static PyObject *spam_system(PyObject *self, PyObject *args)
+ {
+ char *command;
+ int sts;
+
+ if (!PyArg_ParseTuple(args, "s", &command))
+ return NULL;
+ sts = system(command);
+ return Py_BuildValue("i", sts);
+ }
+ '''
+ pymethoddef '''
+ {"system", spam_system, METH_VARARGS, doc_spam_system},
+ '''
+end python module spam
diff --git a/numpy/f2py/docs/usersguide/spam_session.dat b/numpy/f2py/docs/usersguide/spam_session.dat
new file mode 100644
index 000000000..7f99d13f9
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/spam_session.dat
@@ -0,0 +1,5 @@
+>>> import spam
+>>> status = spam.system('whoami')
+pearu
+>> status = spam.system('blah')
+sh: line 1: blah: command not found \ No newline at end of file
diff --git a/numpy/f2py/docs/usersguide/string.f b/numpy/f2py/docs/usersguide/string.f
new file mode 100644
index 000000000..9246f02e7
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/string.f
@@ -0,0 +1,21 @@
+C FILE: STRING.F
+ SUBROUTINE FOO(A,B,C,D)
+ CHARACTER*5 A, B
+ CHARACTER*(*) C,D
+Cf2py intent(in) a,c
+Cf2py intent(inout) b,d
+ PRINT*, "A=",A
+ PRINT*, "B=",B
+ PRINT*, "C=",C
+ PRINT*, "D=",D
+ PRINT*, "CHANGE A,B,C,D"
+ A(1:1) = 'A'
+ B(1:1) = 'B'
+ C(1:1) = 'C'
+ D(1:1) = 'D'
+ PRINT*, "A=",A
+ PRINT*, "B=",B
+ PRINT*, "C=",C
+ PRINT*, "D=",D
+ END
+C END OF FILE STRING.F
diff --git a/numpy/f2py/docs/usersguide/string_session.dat b/numpy/f2py/docs/usersguide/string_session.dat
new file mode 100644
index 000000000..64ebcb3f4
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/string_session.dat
@@ -0,0 +1,27 @@
+>>> import mystring
+>>> print mystring.foo.__doc__
+foo - Function signature:
+ foo(a,b,c,d)
+Required arguments:
+ a : input string(len=5)
+ b : in/output rank-0 array(string(len=5),'c')
+ c : input string(len=-1)
+ d : in/output rank-0 array(string(len=-1),'c')
+
+>>> import Numeric
+>>> a=Numeric.array('123')
+>>> b=Numeric.array('123')
+>>> c=Numeric.array('123')
+>>> d=Numeric.array('123')
+>>> mystring.foo(a,b,c,d)
+ A=123
+ B=123
+ C=123
+ D=123
+ CHANGE A,B,C,D
+ A=A23
+ B=B23
+ C=C23
+ D=D23
+>>> a.tostring(),b.tostring(),c.tostring(),d.tostring()
+('123', 'B23', '123', 'D23') \ No newline at end of file
diff --git a/numpy/f2py/docs/usersguide/var.pyf b/numpy/f2py/docs/usersguide/var.pyf
new file mode 100644
index 000000000..8275ff3af
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/var.pyf
@@ -0,0 +1,11 @@
+! -*- f90 -*-
+python module var
+ usercode '''
+ int BAR = 5;
+ '''
+ interface
+ usercode '''
+ PyDict_SetItemString(d,"BAR",PyInt_FromLong(BAR));
+ '''
+ end interface
+end python module
diff --git a/numpy/f2py/docs/usersguide/var_session.dat b/numpy/f2py/docs/usersguide/var_session.dat
new file mode 100644
index 000000000..fb0f798bf
--- /dev/null
+++ b/numpy/f2py/docs/usersguide/var_session.dat
@@ -0,0 +1,3 @@
+>>> import var
+>>> var.BAR
+5 \ No newline at end of file
diff --git a/numpy/f2py/f2py.1 b/numpy/f2py/f2py.1
new file mode 100644
index 000000000..3b9f054af
--- /dev/null
+++ b/numpy/f2py/f2py.1
@@ -0,0 +1,209 @@
+.TH "F2PY" 1
+.SH NAME
+f2py \- Fortran to Python interface generator
+.SH SYNOPSIS
+(1) To construct extension module sources:
+
+.B f2py
+[<options>] <fortran files> [[[only:]||[skip:]] <fortran functions> ] [: <fortran files> ...]
+
+(2) To compile fortran files and build extension modules:
+
+.B f2py
+-c [<options>, <config_fc options>, <extra options>] <fortran files>
+
+(3) To generate signature files:
+
+.B f2py
+-h <filename.pyf> ...< same options as in (1) >
+.SH DESCRIPTION
+This program generates a Python C/API file (<modulename>module.c)
+that contains wrappers for given Fortran or C functions so that they
+can be called from Python.
+With the -c option the corresponding
+extension modules are built.
+.SH OPTIONS
+.TP
+.B \-h <filename>
+Write signatures of the fortran routines to file <filename> and
+exit. You can then edit <filename> and use it instead of <fortran
+files>. If <filename>==stdout then the signatures are printed to
+stdout.
+.TP
+.B <fortran functions>
+Names of fortran routines for which Python C/API functions will be
+generated. Default is all that are found in <fortran files>.
+.TP
+.B skip:
+Ignore fortran functions that follow until `:'.
+.TP
+.B only:
+Use only fortran functions that follow until `:'.
+.TP
+.B :
+Get back to <fortran files> mode.
+.TP
+.B \-m <modulename>
+Name of the module; f2py generates a Python/C API file
+<modulename>module.c or extension module <modulename>. Default is
+\'untitled\'.
+.TP
+.B \-\-[no\-]lower
+Do [not] lower the cases in <fortran files>. By default, --lower is
+assumed with -h key, and --no-lower without -h key.
+.TP
+.B \-\-build\-dir <dirname>
+All f2py generated files are created in <dirname>. Default is tempfile.mktemp().
+.TP
+.B \-\-overwrite\-signature
+Overwrite existing signature file.
+.TP
+.B \-\-[no\-]latex\-doc
+Create (or not) <modulename>module.tex. Default is --no-latex-doc.
+.TP
+.B \-\-short\-latex
+Create 'incomplete' LaTeX document (without commands \\documentclass,
+\\tableofcontents, and \\begin{document}, \\end{document}).
+.TP
+.B \-\-[no\-]rest\-doc
+Create (or not) <modulename>module.rst. Default is --no-rest-doc.
+.TP
+.B \-\-debug\-capi
+Create C/API code that reports the state of the wrappers during
+runtime. Useful for debugging.
+.TP
+.B \-include\'<includefile>\'
+Add CPP #include statement to the C/API code. <includefile> should be
+in the format of either `"filename.ext"' or `<filename.ext>'. As a
+result <includefile> will be included just before wrapper functions
+part in the C/API code. The option is deprecated, use `usercode`
+statement in signature files instead.
+.TP
+.B \-\-[no\-]wrap\-functions
+Create Fortran subroutine wrappers to Fortran 77
+functions. --wrap-functions is default because it ensures maximum
+portability/compiler independence.
+.TP
+.B \-\-help\-link [..]
+List system resources found by system_info.py. [..] may contain
+a list of resources names. See also --link-<resource> switch below.
+.TP
+.B \-\-quiet
+Run quietly.
+.TP
+.B \-\-verbose
+Run with extra verbosity.
+.TP
+.B \-v
+Print f2py version ID and exit.
+.TP
+.B \-\-include_paths path1:path2:...
+Search include files (that f2py will scan) from the given directories.
+.SH "CONFIG_FC OPTIONS"
+The following options are effective only when -c switch is used.
+.TP
+.B \-\-help-compiler
+List available Fortran compilers [DEPRECATED].
+.TP
+.B \-\-fcompiler=<name>
+Specify Fortran compiler type by vendor.
+.TP
+.B \-\-compiler=<name>
+Specify C compiler type (as defined by distutils)
+.TP
+.B \-\-fcompiler-exec=<path>
+Specify the path to F77 compiler [DEPRECATED].
+.TP
+.B \-\-f90compiler\-exec=<path>
+Specify the path to F90 compiler [DEPRECATED].
+.TP
+.B \-\-help\-fcompiler
+List available Fortran compilers and exit.
+.TP
+.B \-\-f77exec=<path>
+Specify the path to F77 compiler.
+.TP
+.B \-\-f90exec=<path>
+Specify the path to F90 compiler.
+.TP
+.B \-\-f77flags="..."
+Specify F77 compiler flags.
+.TP
+.B \-\-f90flags="..."
+Specify F90 compiler flags.
+.TP
+.B \-\-opt="..."
+Specify optimization flags.
+.TP
+.B \-\-arch="..."
+Specify architecture specific optimization flags.
+.TP
+.B \-\-noopt
+Compile without optimization.
+.TP
+.B \-\-noarch
+Compile without arch-dependent optimization.
+.TP
+.B \-\-debug
+Compile with debugging information.
+.SH "EXTRA OPTIONS"
+The following options are effective only when -c switch is used.
+.TP
+.B \-\-link-<resource>
+Link extension module with <resource> as defined by
+scipy_distutils/system_info.py. E.g. to link with optimized LAPACK
+libraries (vecLib on MacOSX, ATLAS elsewhere), use
+--link-lapack_opt. See also --help-link switch.
+
+.TP
+.B -L/path/to/lib/ -l<libname>
+.TP
+.B -D<define> -U<name> -I/path/to/include/
+.TP
+.B <filename>.o <filename>.so <filename>.a
+
+.TP
+.B -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN -DUNDERSCORE_G77
+Macros that might be required with non-gcc Fortran compilers.
+
+.TP
+.B -DF2PY_REPORT_ATEXIT
+To print out a performance report of F2PY interface when python
+exits. Available for Linux.
+
+.TP
+.B -DF2PY_REPORT_ON_ARRAY_COPY=<int>
+To send a message to stderr whenever F2PY interface makes a copy of an
+array. Integer <int> sets the threshold for array sizes when a message
+should be shown.
+
+.SH REQUIREMENTS
+Python 1.5.2 or higher (2.x is supported).
+
+Numerical Python 13 or higher (20.x,21.x,22.x,23.x are supported).
+
+Optional Numarray 0.9 or higher partially supported.
+
+scipy_distutils from Scipy (can be downloaded from F2PY homepage)
+.SH "SEE ALSO"
+python(1)
+.SH BUGS
+For instructions on reporting bugs, see
+
+ http://cens.ioc.ee/projects/f2py2e/FAQ.html
+.SH AUTHOR
+Pearu Peterson <pearu@cens.ioc.ee>
+.SH "INTERNET RESOURCES"
+Main website: http://cens.ioc.ee/projects/f2py2e/
+
+User's Guide: http://cens.ioc.ee/projects/f2py2e/usersguide/
+
+Mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/
+
+Scipy website: http://www.scipy.org
+.SH COPYRIGHT
+Copyright (c) 1999, 2000, 2001, 2002, 2003, 2004, 2005 Pearu Peterson
+.SH LICENSE
+LGPL (see http://www.fsf.org)
+.SH VERSION
+2.45.241
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
new file mode 100755
index 000000000..6230590d1
--- /dev/null
+++ b/numpy/f2py/f2py2e.py
@@ -0,0 +1,555 @@
+#!/usr/bin/env python
+"""
+
+f2py2e - Fortran to Python C/API generator. 2nd Edition.
+ See __usage__ below.
+
+Copyright 1999--2005 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 08:31:19 $
+Pearu Peterson
+"""
+__version__ = "$Revision: 1.90 $"[10:-1]
+
+import __version__
+f2py_version = __version__.version
+
+import sys,os,string,pprint,shutil,types,re
+errmess=sys.stderr.write
+#outmess=sys.stdout.write
+show=pprint.pprint
+
+import crackfortran
+import rules
+import cb_rules
+import common_rules
+import auxfuncs
+import cfuncs
+import capi_maps
+import func2subr
+import f90mod_rules
+
+outmess = auxfuncs.outmess
+
+try:
+ from scipy import __core_version__ as scipy_core_version
+except ImportError:
+ scipy_distutils_version = 'N/A'
+
+__usage__ = """\
+Usage:
+
+1) To construct extension module sources:
+
+ f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
+ <fortran functions> ] \\
+ [: <fortran files> ...]
+
+2) To compile fortran files and build extension modules:
+
+ f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
+
+3) To generate signature files:
+
+ f2py -h <filename.pyf> ...< same options as in (1) >
+
+Description: This program generates a Python C/API file (<modulename>module.c)
+ that contains wrappers for given fortran functions so that they
+ can be called from Python. With the -c option the corresponding
+ extension modules are built.
+
+Options:
+
+ -h <filename> Write signatures of the fortran routines to file <filename>
+ and exit. You can then edit <filename> and use it instead
+ of <fortran files>. If <filename>==stdout then the
+ signatures are printed to stdout.
+ <fortran functions> Names of fortran routines for which Python C/API
+ functions will be generated. Default is all that are found
+ in <fortran files>.
+ <fortran files> Paths to fortran/signature files that will be scanned for
+ <fortran functions> in order to determine their signatures.
+ skip: Ignore fortran functions that follow until `:'.
+ only: Use only fortran functions that follow until `:'.
+ : Get back to <fortran files> mode.
+
+ -m <modulename> Name of the module; f2py generates a Python/C API
+ file <modulename>module.c or extension module <modulename>.
+ Default is 'untitled'.
+
+ --[no-]lower Do [not] lower the cases in <fortran files>. By default,
+ --lower is assumed with -h key, and --no-lower without -h key.
+
+ --build-dir <dirname> All f2py generated files are created in <dirname>.
+ Default is tempfile.mktemp().
+
+ --overwrite-signature Overwrite existing signature file.
+
+ --[no-]latex-doc Create (or not) <modulename>module.tex.
+ Default is --no-latex-doc.
+ --short-latex Create 'incomplete' LaTeX document (without commands
+ \\documentclass, \\tableofcontents, and \\begin{document},
+ \\end{document}).
+
+ --[no-]rest-doc Create (or not) <modulename>module.rst.
+ Default is --no-rest-doc.
+
+ --debug-capi Create C/API code that reports the state of the wrappers
+ during runtime. Useful for debugging.
+
+ --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
+ functions. --wrap-functions is default because it ensures
+ maximum portability/compiler independence.
+
+ --include_paths <path1>:<path2>:... Search include files from the given
+ directories.
+
+ --help-link [..] List system resources found by system_info.py. See also
+ --link-<resource> switch below. [..] is optional list
+ of resources names. E.g. try 'f2py --help-link lapack_opt'.
+
+ --quiet Run quietly.
+ --verbose Run with extra verbosity.
+ -v Print f2py version ID and exit.
+
+
+scipy.distutils options (only effective with -c):
+
+ --fcompiler= Specify Fortran compiler type by vendor
+ --compiler= Specify C compiler type (as defined by distutils)
+
+ --help-fcompiler List available Fortran compilers and exit
+ --f77exec= Specify the path to F77 compiler
+ --f90exec= Specify the path to F90 compiler
+ --f77flags= Specify F77 compiler flags
+ --f90flags= Specify F90 compiler flags
+ --opt= Specify optimization flags
+ --arch= Specify architecture specific optimization flags
+ --noopt Compile without optimization
+ --noarch Compile without arch-dependent optimization
+ --debug Compile with debugging information
+
+Extra options (only effective with -c):
+
+ --link-<resource> Link extension module with <resource> as defined
+ by scipy.distutils/system_info.py. E.g. to link
+ with optimized LAPACK libraries (vecLib on MacOSX,
+ ATLAS elsewhere), use --link-lapack_opt.
+ See also --help-link switch.
+
+ -L/path/to/lib/ -l<libname>
+ -D<define> -U<name>
+ -I/path/to/include/
+ <filename>.o <filename>.so <filename>.a
+
+ Using the following macros may be required with non-gcc Fortran
+ compilers:
+ -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
+ -DUNDERSCORE_G77
+
+ When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
+ interface is printed out at exit (platforms: Linux).
+
+ When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
+ sent to stderr whenever F2PY interface makes a copy of an
+ array. Integer <int> sets the threshold for array sizes when
+ a message should be shown.
+
+Version: %s
+scipy_core Version: %s
+Requires: Python 2.3 or higher.
+License: LGPL (see http://www.fsf.org)
+Copyright 1999 - 2005 Pearu Peterson all rights reserved.
+http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, scipy_core_version)
+
+
+def scaninputline(inputline):
+ files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[]
+ f,f2,f3,f4,f5,f6,f7=1,0,0,0,0,0,0
+ verbose = 1
+ dolc=-1
+ dolatexdoc = 0
+ dorestdoc = 0
+ wrapfuncs = 1
+ buildpath = '.'
+ include_paths = []
+ signsfile,modulename=None,None
+ options = {'buildpath':buildpath}
+ for l in inputline:
+ if l=='': pass
+ elif l=='only:': f=0
+ elif l=='skip:': f=-1
+ elif l==':': f=1;f4=0
+ elif l[:8]=='--debug-': debug.append(l[8:])
+ elif l=='--lower': dolc=1
+ elif l=='--build-dir': f6=1
+ elif l=='--no-lower': dolc=0
+ elif l=='--quiet': verbose = 0
+ elif l=='--verbose': verbose += 1
+ elif l=='--latex-doc': dolatexdoc=1
+ elif l=='--no-latex-doc': dolatexdoc=0
+ elif l=='--rest-doc': dorestdoc=1
+ elif l=='--no-rest-doc': dorestdoc=0
+ elif l=='--wrap-functions': wrapfuncs=1
+ elif l=='--no-wrap-functions': wrapfuncs=0
+ elif l=='--short-latex': options['shortlatex']=1
+ elif l=='--overwrite-signature': options['h-overwrite']=1
+ elif l=='-h': f2=1
+ elif l=='-m': f3=1
+ elif l[:2]=='-v':
+ print f2py_version
+ sys.exit()
+ elif l=='--show-compilers':
+ f5=1
+ elif l[:8]=='-include':
+ cfuncs.outneeds['userincludes'].append(l[9:-1])
+ cfuncs.userincludes[l[9:-1]]='#include '+l[8:]
+ elif l[:15]=='--include_paths':
+ f7=1
+ elif l[0]=='-':
+ errmess('Unknown option %s\n'%`l`)
+ sys.exit()
+ elif f2: f2=0;signsfile=l
+ elif f3: f3=0;modulename=l
+ elif f6: f6=0;buildpath=l
+ elif f7: f7=0;include_paths.extend(l.split(os.pathsep))
+ elif f==1:
+ try:
+ open(l).close()
+ files.append(l)
+ except IOError,detail:
+ errmess('IOError: %s. Skipping file "%s".\n'%(str(detail),l))
+ elif f==-1: skipfuncs.append(l)
+ elif f==0: onlyfuncs.append(l)
+ if not f5 and not files and not modulename:
+ print __usage__
+ sys.exit()
+ if not os.path.isdir(buildpath):
+ if not verbose:
+ outmess('Creating build directory %s'%(buildpath))
+ os.mkdir(buildpath)
+ if signsfile:
+ signsfile = os.path.join(buildpath,signsfile)
+ if signsfile and os.path.isfile(signsfile) and not options.has_key('h-overwrite'):
+ errmess('Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n'%(signsfile))
+ sys.exit()
+
+ options['debug']=debug
+ options['verbose']=verbose
+ if dolc==-1 and not signsfile: options['do-lower']=0
+ else: options['do-lower']=dolc
+ if modulename: options['module']=modulename
+ if signsfile: options['signsfile']=signsfile
+ if onlyfuncs: options['onlyfuncs']=onlyfuncs
+ if skipfuncs: options['skipfuncs']=skipfuncs
+ options['dolatexdoc'] = dolatexdoc
+ options['dorestdoc'] = dorestdoc
+ options['wrapfuncs'] = wrapfuncs
+ options['buildpath']=buildpath
+ options['include_paths']=include_paths
+ return files,options
+
+def callcrackfortran(files,options):
+ rules.options=options
+ funcs=[]
+ crackfortran.debug=options['debug']
+ crackfortran.verbose=options['verbose']
+ if options.has_key('module'):
+ crackfortran.f77modulename=options['module']
+ if options.has_key('skipfuncs'):
+ crackfortran.skipfuncs=options['skipfuncs']
+ if options.has_key('onlyfuncs'):
+ crackfortran.onlyfuncs=options['onlyfuncs']
+ crackfortran.include_paths[:]=options['include_paths']
+ crackfortran.dolowercase=options['do-lower']
+ postlist=crackfortran.crackfortran(files)
+ if options.has_key('signsfile'):
+ outmess('Saving signatures to file "%s"\n'%(options['signsfile']))
+ pyf=crackfortran.crack2fortran(postlist)
+ if options['signsfile'][-6:]=='stdout':
+ sys.stdout.write(pyf)
+ else:
+ f=open(options['signsfile'],'w')
+ f.write(pyf)
+ f.close()
+ return postlist
+
+def buildmodules(list):
+ cfuncs.buildcfuncs()
+ outmess('Building modules...\n')
+ modules,mnames,isusedby=[],[],{}
+ for i in range(len(list)):
+ if string.find(list[i]['name'],'__user__')>=0:
+ cb_rules.buildcallbacks(list[i])
+ else:
+ if list[i].has_key('use'):
+ for u in list[i]['use'].keys():
+ if not isusedby.has_key(u): isusedby[u]=[]
+ isusedby[u].append(list[i]['name'])
+ modules.append(list[i])
+ mnames.append(list[i]['name'])
+ ret = {}
+ for i in range(len(mnames)):
+ if isusedby.has_key(mnames[i]):
+ outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],string.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]]),',')))
+ else:
+ um=[]
+ if modules[i].has_key('use'):
+ for u in modules[i]['use'].keys():
+ if isusedby.has_key(u) and u in mnames:
+ um.append(modules[mnames.index(u)])
+ else:
+ outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i],u))
+ ret[mnames[i]] = {}
+ dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um))
+ return ret
+
+def dict_append(d_out,d_in):
+ for (k,v) in d_in.items():
+ if not d_out.has_key(k):
+ d_out[k] = []
+ if type(v) is types.ListType:
+ d_out[k] = d_out[k] + v
+ else:
+ d_out[k].append(v)
+
+def run_main(comline_list):
+ """Run f2py as if string.join(comline_list,' ') is used as a command line.
+ In case of using -h flag, return None.
+ """
+ reload(crackfortran)
+ f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__))
+ fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h')
+ fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c')
+ files,options=scaninputline(comline_list)
+ auxfuncs.options=options
+ postlist=callcrackfortran(files,options)
+ isusedby={}
+ for i in range(len(postlist)):
+ if postlist[i].has_key('use'):
+ for u in postlist[i]['use'].keys():
+ if not isusedby.has_key(u): isusedby[u]=[]
+ isusedby[u].append(postlist[i]['name'])
+ for i in range(len(postlist)):
+ if postlist[i]['block']=='python module' and string.find(postlist[i]['name'],'__user__')<0:
+ if isusedby.has_key(postlist[i]['name']):
+ #if not quiet:
+ outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],string.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']]),',')))
+ if options.has_key('signsfile'):
+ if options['verbose']>1:
+ outmess('Stopping. Edit the signature file and then run f2py on the signature file: ')
+ outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile']))
+ return
+ for i in range(len(postlist)):
+ if postlist[i]['block']!='python module':
+ if not options.has_key('python module'):
+ errmess('Tip: If your original code is Fortran 77 then you must use -m option.\n')
+ raise TypeError,'All blocks must be module blocks but got %s'%(`postlist[i]['block']`)
+ auxfuncs.debugoptions=options['debug']
+ f90mod_rules.options=options
+ auxfuncs.wrapfuncs=options['wrapfuncs']
+
+ ret=buildmodules(postlist)
+
+ for mn in ret.keys():
+ dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc})
+ return ret
+
+def filter_files(prefix,suffix,files,remove_prefix=None):
+ """
+ Filter files by prefix and suffix.
+ """
+ filtered,rest = [],[]
+ match = re.compile(prefix+r'.*'+suffix+r'\Z').match
+ if remove_prefix:
+ ind = len(prefix)
+ else:
+ ind = 0
+ for file in map(string.strip,files):
+ if match(file): filtered.append(file[ind:])
+ else: rest.append(file)
+ return filtered,rest
+
+def get_prefix(module):
+ p = os.path.dirname(os.path.dirname(module.__file__))
+ return p
+
+def run_compile():
+ """
+ Do it all in one call!
+ """
+ import tempfile,os,shutil
+
+ i = sys.argv.index('-c')
+ del sys.argv[i]
+
+ remove_build_dir = 0
+ try: i = sys.argv.index('--build-dir')
+ except ValueError: i=None
+ if i is not None:
+ build_dir = sys.argv[i+1]
+ del sys.argv[i+1]
+ del sys.argv[i]
+ else:
+ remove_build_dir = 1
+ build_dir = os.path.join(tempfile.mktemp())
+
+ sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:])
+ sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv)
+ if sysinfo_flags:
+ sysinfo_flags = [f[7:] for f in sysinfo_flags]
+
+ f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:])
+ sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv)
+ f2py_flags2 = []
+ fl = 0
+ for a in sys.argv[1:]:
+ if a in ['only:','skip:']:
+ fl = 1
+ elif a==':':
+ fl = 0
+ if fl or a==':':
+ f2py_flags2.append(a)
+ if f2py_flags2 and f2py_flags2[-1]!=':':
+ f2py_flags2.append(':')
+ f2py_flags.extend(f2py_flags2)
+
+ sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv)
+
+ flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:])
+ sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv)
+ fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:])
+ sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv)
+
+ if 1:
+ del_list = []
+ for s in flib_flags:
+ v = '--fcompiler='
+ if s[:len(v)]==v:
+ from scipy.distutils import fcompiler
+ allowed_keys = fcompiler.fcompiler_class.keys()
+ nv = ov = s[len(v):].lower()
+ if ov not in allowed_keys:
+ vmap = {} # XXX
+ try:
+ nv = vmap[ov]
+ except KeyError:
+ if ov not in vmap.values():
+ print 'Unknown vendor: "%s"' % (s[len(v):])
+ nv = ov
+ i = flib_flags.index(s)
+ flib_flags[i] = '--fcompiler=' + nv
+ continue
+ for s in del_list:
+ i = flib_flags.index(s)
+ del flib_flags[i]
+ assert len(flib_flags)<=2,`flib_flags`
+ setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:])
+ sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv)
+ if '--quiet' in f2py_flags:
+ setup_flags.append('--quiet')
+
+ modulename = 'untitled'
+ sources = sys.argv[1:]
+ if '-m' in sys.argv:
+ i = sys.argv.index('-m')
+ modulename = sys.argv[i+1]
+ del sys.argv[i+1],sys.argv[i]
+ sources = sys.argv[1:]
+ else:
+ from scipy.distutils.command.build_src import get_f2py_modulename
+ pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources)
+ sources = pyf_files + sources
+ for f in pyf_files:
+ modulename = get_f2py_modulename(f)
+ if modulename:
+ break
+
+ extra_objects, sources = filter_files('','[.](o|a|so)',sources)
+ include_dirs, sources = filter_files('-I','',sources,remove_prefix=1)
+ library_dirs, sources = filter_files('-L','',sources,remove_prefix=1)
+ libraries, sources = filter_files('-l','',sources,remove_prefix=1)
+ undef_macros, sources = filter_files('-U','',sources,remove_prefix=1)
+ define_macros, sources = filter_files('-D','',sources,remove_prefix=1)
+ using_numarray = 0
+ using_numeric = 0
+ for i in range(len(define_macros)):
+ name_value = string.split(define_macros[i],'=',1)
+ if len(name_value)==1:
+ name_value.append(None)
+ if len(name_value)==2:
+ define_macros[i] = tuple(name_value)
+ else:
+ print 'Invalid use of -D:',name_value
+
+ from scipy.distutils.system_info import get_info
+
+ num_include_dir = None
+ num_info = {}
+ #import scipy
+ #n = 'scipy'
+ #p = get_prefix(scipy)
+ #from scipy.distutils.misc_util import get_scipy_include_dirs
+ #num_info = {'include_dirs': get_scipy_include_dirs()}
+
+ if num_info:
+ include_dirs.extend(num_info.get('include_dirs',[]))
+
+ from scipy.distutils.core import setup,Extension
+ ext_args = {'name':modulename,'sources':sources,
+ 'include_dirs': include_dirs,
+ 'library_dirs': library_dirs,
+ 'libraries': libraries,
+ 'define_macros': define_macros,
+ 'undef_macros': undef_macros,
+ 'extra_objects': extra_objects,
+ 'f2py_options': f2py_flags,
+ }
+
+ if sysinfo_flags:
+ from scipy.distutils.misc_util import dict_append
+ for n in sysinfo_flags:
+ i = get_info(n)
+ if not i:
+ outmess('No %s resources found in system'\
+ ' (try `f2py --help-link`)\n' % (`n`))
+ dict_append(ext_args,**i)
+
+ ext = Extension(**ext_args)
+ sys.argv = [sys.argv[0]] + setup_flags
+ sys.argv.extend(['build',
+ '--build-temp',build_dir,
+ '--build-base',build_dir,
+ '--build-platlib','.'])
+ if fc_flags:
+ sys.argv.extend(['config_fc']+fc_flags)
+ if flib_flags:
+ sys.argv.extend(['build_ext']+flib_flags)
+
+ setup(ext_modules = [ext])
+
+ if remove_build_dir and os.path.exists(build_dir):
+ outmess('Removing build directory %s\n'%(build_dir))
+ shutil.rmtree(build_dir)
+
+def main():
+ if '--help-link' in sys.argv[1:]:
+ sys.argv.remove('--help-link')
+ from scipy.distutils.system_info import show_all
+ show_all()
+ return
+ if '-c' in sys.argv[1:]:
+ run_compile()
+ else:
+ run_main(sys.argv[1:])
+
+#if __name__ == "__main__":
+# main()
+
+
+# EOF
+
diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py
new file mode 100644
index 000000000..1126c3085
--- /dev/null
+++ b/numpy/f2py/f2py_testing.py
@@ -0,0 +1,74 @@
+
+import os,sys,re,time
+
+def cmdline():
+ m=re.compile(r'\A\d+\Z')
+ args = []
+ repeat = 1
+ for a in sys.argv[1:]:
+ if m.match(a):
+ repeat = eval(a)
+ else:
+ args.append(a)
+ f2py_opts = ' '.join(args)
+ return repeat,f2py_opts
+
+if sys.platform[:5]=='linux':
+ def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
+ _load_time=time.time()):
+ """ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc. """
+ try:
+ f=open(_proc_pid_stat,'r')
+ l = f.readline().split(' ')
+ f.close()
+ return int(l[13])
+ except:
+ return int(100*(time.time()-_load_time))
+
+ def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
+ """ Return virtual memory size in bytes of the running python.
+ """
+ try:
+ f=open(_proc_pid_stat,'r')
+ l = f.readline().split(' ')
+ f.close()
+ return int(l[22])
+ except:
+ return
+else:
+ def jiffies(_load_time=time.time()):
+ """ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. [Emulation with time.time]. """
+ return int(100*(time.time()-_load_time))
+
+ def memusage():
+ pass
+
+def run(runtest,test_functions,repeat=1):
+ l = [(t,repr(t.__doc__.split('\n')[1].strip())) for t in test_functions]
+ #l = [(t,'') for t in test_functions]
+ start_memusage = memusage()
+ diff_memusage = None
+ start_jiffies = jiffies()
+ i = 0
+ while i<repeat:
+ i += 1
+ for t,fname in l:
+ runtest(t)
+ if start_memusage is None: continue
+ if diff_memusage is None:
+ diff_memusage = memusage() - start_memusage
+ else:
+ diff_memusage2 = memusage() - start_memusage
+ if diff_memusage2!=diff_memusage:
+ print 'memory usage change at step %i:' % i,\
+ diff_memusage2-diff_memusage,\
+ fname
+ diff_memusage = diff_memusage2
+ current_memusage = memusage()
+ print 'run',repeat*len(test_functions),'tests',\
+ 'in %.2f seconds' % ((jiffies()-start_jiffies)/100.0)
+ if start_memusage:
+ print 'initial virtual memory size:',start_memusage,'bytes'
+ print 'current virtual memory size:',current_memusage,'bytes'
diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py
new file mode 100644
index 000000000..9c0d54900
--- /dev/null
+++ b/numpy/f2py/f90mod_rules.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+"""
+
+Build F90 module support for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/02/03 19:30:23 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.27 $"[10:-1]
+
+f2py_version='See `f2py -v`'
+
+import pprint
+import sys,string,time,types,copy
+errmess=sys.stderr.write
+outmess=sys.stdout.write
+show=pprint.pprint
+
+from auxfuncs import *
+import capi_maps
+import cfuncs
+import rules
+import func2subr
+from crackfortran import undo_rmbadname, undo_rmbadname1
+
+options={}
+
+def findf90modules(m):
+ if ismodule(m): return [m]
+ if not hasbody(m): return []
+ ret = []
+ for b in m['body']:
+ if ismodule(b): ret.append(b)
+ else: ret=ret+findf90modules(b)
+ return ret
+
+fgetdims1 = """\
+ external f2pysetdata
+ logical ns
+ integer s(*),r,i,j
+ ns = .FALSE.
+ if (allocated(d)) then
+ do i=1,r
+ if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then
+ ns = .TRUE.
+ end if
+ end do
+ if (ns) then
+ deallocate(d)
+ end if
+ end if
+ if ((.not.allocated(d)).and.(s(1).ge.1)) then"""
+
+fgetdims2="""\
+ end if
+ if (allocated(d)) then
+ do i=1,r
+ s(i) = size(d,i)
+ end do
+ end if
+ flag = 1
+ call f2pysetdata(d,allocated(d))"""
+
+fgetdims2_sa="""\
+ end if
+ if (allocated(d)) then
+ do i=1,r
+ s(i) = size(d,i)
+ end do
+ !s(r) must be equal to len(d(1))
+ end if
+ flag = 2
+ call f2pysetdata(d,allocated(d))"""
+
+
+def buildhooks(pymod):
+ global fgetdims1,fgetdims2
+ ret = {'f90modhooks':[],'initf90modhooks':[],'body':[],
+ 'need':['F_FUNC','arrayobject.h'],
+ 'separatorsfor':{'includes0':'\n','includes':'\n'},
+ 'docs':['"Fortran 90/95 modules:\\n"'],
+ 'latexdoc':[]}
+ fhooks=['']
+ def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0],line)
+ doc = ['']
+ def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line)
+ for m in findf90modules(pymod):
+ sargs,fargs,efargs,modobjs,notvars,onlyvars=[],[],[],[],[m['name']],[]
+ sargsp = []
+ ifargs = []
+ mfargs = []
+ if hasbody(m):
+ for b in m['body']: notvars.append(b['name'])
+ for n in m['vars'].keys():
+ var = m['vars'][n]
+ if (n not in notvars) and (not l_or(isintent_hide,isprivate)(var)):
+ onlyvars.append(n)
+ mfargs.append(n)
+ outmess('\t\tConstructing F90 module support for "%s"...\n'%(m['name']))
+ if onlyvars:
+ outmess('\t\t Variables: %s\n'%(string.join(onlyvars)))
+ chooks=['']
+ def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line)
+ ihooks=['']
+ def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line)
+
+ vrd=capi_maps.modsign2map(m)
+ cadd('static FortranDataDef f2py_%s_def[] = {'%(m['name']))
+ dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n'%(m['name']))
+ if hasnote(m):
+ note = m['note']
+ if type(note) is type([]): note=string.join(note,'\n')
+ dadd(note)
+ if onlyvars:
+ dadd('\\begin{description}')
+ for n in onlyvars:
+ var = m['vars'][n]
+ modobjs.append(n)
+ ct = capi_maps.getctype(var)
+ at = capi_maps.c2capi_map[ct]
+ dm = capi_maps.getarrdims(n,var)
+ dms = string.strip(string.replace(dm['dims'],'*','-1'))
+ dms = string.strip(string.replace(dms,':','-1'))
+ if not dms: dms='-1'
+ use_fgetdims2 = fgetdims2
+ if isstringarray(var):
+ if var.has_key('charselector') and var['charselector'].has_key('len'):
+ cadd('\t{"%s",%s,{{%s,%s}},%s},'\
+ %(undo_rmbadname1(n),dm['rank'],dms,var['charselector']['len'],at))
+ use_fgetdims2 = fgetdims2_sa
+ else:
+ cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at))
+ else:
+ cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at))
+ dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,var)))
+ if hasnote(var):
+ note = var['note']
+ if type(note) is type([]): note=string.join(note,'\n')
+ dadd('--- %s'%(note))
+ if isallocatable(var):
+ fargs.append('f2py_%s_getdims_%s'%(m['name'],n))
+ efargs.append(fargs[-1])
+ sargs.append('void (*%s)(int*,int*,void(*)(char*,int*),int*)'%(n))
+ sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)')
+ iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'],n))
+ fadd('subroutine %s(r,s,f2pysetdata,flag)'%(fargs[-1]))
+ fadd('use %s, only: d => %s\n'%(m['name'],undo_rmbadname1(n)))
+ fadd('integer flag\n')
+ fhooks[0]=fhooks[0]+fgetdims1
+ dms = eval('range(1,%s+1)'%(dm['rank']))
+ fadd(' allocate(d(%s))\n'%(string.join(map(lambda i:'s(%s)'%i,dms),',')))
+ fhooks[0]=fhooks[0]+use_fgetdims2
+ fadd('end subroutine %s'%(fargs[-1]))
+ else:
+ fargs.append(n)
+ sargs.append('char *%s'%(n))
+ sargsp.append('char*')
+ iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],n))
+ if onlyvars:
+ dadd('\\end{description}')
+ if hasbody(m):
+ for b in m['body']:
+ if not isroutine(b):
+ print 'Skipping',b['block'],b['name']
+ continue
+ modobjs.append('%s()'%(b['name']))
+ b['modulename'] = m['name']
+ api,wrap=rules.buildapi(b)
+ if isfunction(b):
+ fhooks[0]=fhooks[0]+wrap
+ fargs.append('f2pywrap_%s_%s'%(m['name'],b['name']))
+ #efargs.append(fargs[-1])
+ ifargs.append(func2subr.createfuncwrapper(b,signature=1))
+ else:
+ fargs.append(b['name'])
+ mfargs.append(fargs[-1])
+ #if options.has_key('--external-modroutines') and options['--external-modroutines']:
+ # outmess('\t\t\tapplying --external-modroutines for %s\n'%(b['name']))
+ # efargs.append(fargs[-1])
+ api['externroutines']=[]
+ ar=applyrules(api,vrd)
+ ar['docs']=[]
+ ar['docshort']=[]
+ ret=dictappend(ret,ar)
+ cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'],m['name'],b['name'],m['name'],b['name']))
+ sargs.append('char *%s'%(b['name']))
+ sargsp.append('char *')
+ iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],b['name']))
+ cadd('\t{NULL}\n};\n')
+ iadd('}')
+ ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'],string.join(sargs,','),ihooks[0])
+ if '_' in m['name']:
+ F_FUNC='F_FUNC_US'
+ else:
+ F_FUNC='F_FUNC'
+ iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'\
+ %(F_FUNC,m['name'],string.upper(m['name']),string.join(sargsp,',')))
+ iadd('static void f2py_init_%s(void) {'%(m['name']))
+ iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\
+ %(F_FUNC,m['name'],string.upper(m['name']),m['name']))
+ iadd('}\n')
+ ret['f90modhooks']=ret['f90modhooks']+chooks+ihooks
+ ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'],m['name'],m['name'])]+ret['initf90modhooks']
+ fadd('')
+ fadd('subroutine f2pyinit%s(f2pysetupfunc)'%(m['name']))
+ #fadd('use %s'%(m['name']))
+ if mfargs:
+ for a in undo_rmbadname(mfargs):
+ fadd('use %s, only : %s'%(m['name'],a))
+ if ifargs:
+ fadd(string.join(['interface']+ifargs))
+ fadd('end interface')
+ fadd('external f2pysetupfunc')
+ if efargs:
+ for a in undo_rmbadname(efargs):
+ fadd('external %s'%(a))
+ fadd('call f2pysetupfunc(%s)'%(string.join(undo_rmbadname(fargs),',')))
+ fadd('end subroutine f2pyinit%s\n'%(m['name']))
+
+ dadd(string.replace(string.join(ret['latexdoc'],'\n'),r'\subsection{',r'\subsubsection{'))
+
+ ret['latexdoc']=[]
+ ret['docs'].append('"\t%s --- %s"'%(m['name'],
+ string.join(undo_rmbadname(modobjs),',')))
+
+ ret['routine_defs']=''
+ ret['doc']=[]
+ ret['docshort']=[]
+ ret['latexdoc']=doc[0]
+ if len(ret['docs'])<=1: ret['docs']=''
+ return ret,fhooks[0]
+
+
diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py
new file mode 100644
index 000000000..4039c9996
--- /dev/null
+++ b/numpy/f2py/func2subr.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+"""
+
+Rules for building C/API module with f2py2e.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2004/11/26 11:13:06 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.16 $"[10:-1]
+
+f2py_version='See `f2py -v`'
+
+import pprint,copy
+import sys,string,time,types,copy
+errmess=sys.stderr.write
+outmess=sys.stdout.write
+show=pprint.pprint
+
+from auxfuncs import *
+def var2fixfortran(vars,a,fa=None,f90mode=None):
+ if fa is None:
+ fa = a
+ if not vars.has_key(a):
+ show(vars)
+ outmess('var2fixfortran: No definition for argument "%s".\n'%a)
+ return ''
+ if not vars[a].has_key('typespec'):
+ show(vars[a])
+ outmess('var2fixfortran: No typespec for argument "%s".\n'%a)
+ return ''
+ vardef=vars[a]['typespec']
+ if vardef=='type' and vars[a].has_key('typename'):
+ vardef='%s(%s)'%(vardef,vars[a]['typename'])
+ selector={}
+ lk = ''
+ if vars[a].has_key('kindselector'):
+ selector=vars[a]['kindselector']
+ lk = 'kind'
+ elif vars[a].has_key('charselector'):
+ selector=vars[a]['charselector']
+ lk = 'len'
+ if selector.has_key('*'):
+ if f90mode:
+ if selector['*'] in ['*',':','(*)']:
+ vardef='%s(len=*)'%(vardef)
+ else:
+ vardef='%s(%s=%s)'%(vardef,lk,selector['*'])
+ else:
+ if selector['*'] in ['*',':']:
+ vardef='%s*(%s)'%(vardef,selector['*'])
+ else:
+ vardef='%s*%s'%(vardef,selector['*'])
+ else:
+ if selector.has_key('len'):
+ vardef='%s(len=%s'%(vardef,selector['len'])
+ if selector.has_key('kind'):
+ vardef='%s,kind=%s)'%(vardef,selector['kind'])
+ else:
+ vardef='%s)'%(vardef)
+ elif selector.has_key('kind'):
+ vardef='%s(kind=%s)'%(vardef,selector['kind'])
+
+ vardef='%s %s'%(vardef,fa)
+ if vars[a].has_key('dimension'):
+ vardef='%s(%s)'%(vardef,string.join(vars[a]['dimension'],','))
+ return vardef
+
+def createfuncwrapper(rout,signature=0):
+ assert isfunction(rout)
+ ret = ['']
+ def add(line,ret=ret):
+ ret[0] = '%s\n %s'%(ret[0],line)
+ name = rout['name']
+ fortranname = getfortranname(rout)
+ f90mode = ismoduleroutine(rout)
+ newname = '%sf2pywrap'%(name)
+ vars = rout['vars']
+ if not vars.has_key(newname):
+ vars[newname] = vars[name]
+ args = [newname]+rout['args'][1:]
+ else:
+ args = [newname]+rout['args']
+
+ l = var2fixfortran(vars,name,newname,f90mode)
+ return_char_star = 0
+ if l[:13]=='character*(*)':
+ return_char_star = 1
+ if f90mode: l = 'character(len=10)'+l[13:]
+ else: l = 'character*10'+l[13:]
+ charselect = vars[name]['charselector']
+ if charselect.get('*','')=='(*)':
+ charselect['*'] = '10'
+ if f90mode:
+ sargs = string.join(args,', ')
+ add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs))
+ if not signature:
+ add('use %s, only : %s'%(rout['modulename'],fortranname))
+ else:
+ add('subroutine f2pywrap%s (%s)'%(name,string.join(args,', ')))
+ add('external %s'%(fortranname))
+ #if not return_char_star:
+ l = l + ', '+fortranname
+ args = args[1:]
+ dumped_args = []
+ for a in args:
+ if isexternal(vars[a]):
+ add('external %s'%(a))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args: continue
+ if isscalar(vars[a]):
+ add(var2fixfortran(vars,a,f90mode=f90mode))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args: continue
+ add(var2fixfortran(vars,a,f90mode=f90mode))
+
+ add(l)
+
+ if not signature:
+ if islogicalfunction(rout):
+ add('%s = .not.(.not.%s(%s))'%(newname,fortranname,string.join(args,', ')))
+ else:
+ add('%s = %s(%s)'%(newname,fortranname,string.join(args,', ')))
+ if f90mode:
+ add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name))
+ else:
+ add('end')
+ #print '**'*10
+ #print ret[0]
+ #print '**'*10
+ return ret[0]
+
+def assubr(rout):
+ if not isfunction_wrap(rout): return rout,''
+ fortranname = getfortranname(rout)
+ name = rout['name']
+ outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name,fortranname))
+ rout = copy.copy(rout)
+ fname = name
+ rname = fname
+ if rout.has_key('result'):
+ rname = rout['result']
+ rout['vars'][fname]=rout['vars'][rname]
+ fvar = rout['vars'][fname]
+ if not isintent_out(fvar):
+ if not fvar.has_key('intent'): fvar['intent']=[]
+ fvar['intent'].append('out')
+ flag=1
+ for i in fvar['intent']:
+ if i.startswith('out='):
+ flag = 0
+ break
+ if flag:
+ fvar['intent'].append('out=%s' % (rname))
+
+ rout['args'] = [fname] + rout['args']
+ return rout,createfuncwrapper(rout)
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
new file mode 100644
index 000000000..cb2d78e65
--- /dev/null
+++ b/numpy/f2py/rules.py
@@ -0,0 +1,1345 @@
+#!/usr/bin/env python
+"""
+
+Rules for building C/API module with f2py2e.
+
+Here is a skeleton of a new wrapper function (13Dec2001):
+
+wrapper_function(args)
+ declarations
+ get_python_arguments, say, `a' and `b'
+
+ get_a_from_python
+ if (successful) {
+
+ get_b_from_python
+ if (successful) {
+
+ callfortran
+        if (successful) {
+
+ put_a_to_python
+          if (successful) {
+
+ put_b_to_python
+            if (successful) {
+
+ buildvalue = ...
+
+ }
+
+ }
+
+ }
+
+ }
+ cleanup_b
+
+ }
+ cleanup_a
+
+ return buildvalue
+"""
+"""
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/08/30 08:58:42 $
+Pearu Peterson
+"""
+
# CVS keyword expansion; [10:-1] strips the "$Revision: " prefix and " $".
__version__ = "$Revision: 1.129 $"[10:-1]

# NOTE: this import shadows the revision string above with the f2py
# __version__ *module*; f2py_version is what is reported to users.
import __version__
f2py_version = __version__.version

import pprint
import sys,string,time,types,copy
# Diagnostic shorthands used throughout f2py2e.
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint

from auxfuncs import *
import capi_maps
from capi_maps import *
import cfuncs
import common_rules
import use_rules
import f90mod_rules
import func2subr
# Build options; filled in by the f2py2e driver before rules are applied.
options={}
+
# Separator strings used when joining the generated pieces of each C/API
# section; every section listed here is newline-separated.
#for k in ['need_cfuncs']: sepdict[k]=','
sepdict = dict.fromkeys(['decl',
                         'frompyobj',
                         'cleanupfrompyobj',
                         'topyarr', 'method',
                         'pyobjfrom', 'closepyobjfrom',
                         'freemem',
                         'userincludes',
                         'includes0', 'includes', 'typedefs', 'typedefs_generated',
                         'cppmacros', 'cfuncs', 'callbacks',
                         'latexdoc',
                         'restdoc',
                         'routine_defs', 'externroutines',
                         'initf2pywraphooks',
                         'commonhooks', 'initcommonhooks',
                         'f90modhooks', 'initf90modhooks'], '\n')
+
+#################### Rules for C/API module #################
+
# Template for the whole generated #modulename#module.c file.  '#name#'
# markers are substituted by the rule engine; gentitle() inserts banner
# comments.  NOTE: '$R'+'evision:$' is deliberately split so CVS does not
# expand the keyword inside this template.
module_rules={
    'modulebody':"""\
/* File: #modulename#module.c
 * This file is auto-generated with f2py (version:#f2py_version#).
 * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
 * written by Pearu Peterson <pearu@cens.ioc.ee>.
 * See http://cens.ioc.ee/projects/f2py2e/
 * Generation date: """+time.asctime(time.localtime(time.time()))+"""
 * $R"""+"""evision:$
 * $D"""+"""ate:$
 * Do not edit this file directly unless you know what you are doing!!!
 */
#ifdef __cplusplus
extern \"C\" {
#endif

"""+gentitle("See f2py2e/cfuncs.py: includes")+"""
#includes#
#includes0#

"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+"""
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;

"""+gentitle("See f2py2e/cfuncs.py: typedefs")+"""
#typedefs#

"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+"""
#typedefs_generated#

"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+"""
#cppmacros#

"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+"""
#cfuncs#

"""+gentitle("See f2py2e/cfuncs.py: userincludes")+"""
#userincludes#

"""+gentitle("See f2py2e/capi_rules.py: usercode")+"""
#usercode#

/* See f2py2e/rules.py */
#externroutines#

"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#

"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#

"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#

"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#

"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""

"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#

"""+gentitle("See f2py2e/rules.py")+"""

static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};

static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};

DL_EXPORT(void) init#modulename#(void) {
\tint i;
\tPyObject *m,*d, *s;
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
\tPyFortran_Type.ob_type = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\tPy_FatalError(\"can't initialize module #modulename# (failed to import scipy.base)\");
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\ts = PyString_FromString(\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
\tif (PyErr_Occurred())
\t\tPy_FatalError(\"can't initialize module #modulename#\");

#ifdef F2PY_REPORT_ATEXIT
\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif

}
#ifdef __cplusplus
}
#endif
""",
    # Separators used when a documentation slot collects multiple pieces.
    'separatorsfor':{'latexdoc':'\n\n',
                     'restdoc':'\n\n'},
    'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n',
                '#modnote#\n',
                '#latexdoc#'],
    'restdoc':['Module #modulename#\n'+'='*80,
               '\n#restdoc#']
    }
+
# Default (end-of-section) values for the module template slots; applied
# before any routine-specific rules so that every slot is always defined.
defmod_rules=[
    {'body':'/*eof body*/',
     'method':'/*eof method*/',
     'externroutines':'/*eof externroutines*/',
     'routine_defs':'/*eof routine_defs*/',
     'initf90modhooks':'/*eof initf90modhooks*/',
     'initf2pywraphooks':'/*eof initf2pywraphooks*/',
     'initcommonhooks':'/*eof initcommonhooks*/',
     'latexdoc':'',
     'restdoc':'',
     # Conditional rule: emit '#note#' only when the module has a note.
     'modnote':{hasnote:'#note#',l_not(hasnote):''},
     }
    ]
+
# Template for one generated wrapper function ('#apiname#'), together with
# its FortranDataDef registration entry, docstring and LaTeX/reST fragments.
routine_rules={
    'separatorsfor':sepdict,
    'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\nFunction signature:\\n\\\n\t#docreturn##name#(#docsignatureshort#)\\n\\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
                           PyObject *capi_args,
                           PyObject *capi_keywds,
                           #functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
  f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
    'routine_defs':'#routine_def#',
    'initf2pywraphooks':'#initf2pywraphook#',
    'externroutines':'#declfortranroutine#',
    'doc':'#docreturn##name#(#docsignature#)',
    'docshort':'#docreturn##name#(#docsignatureshort#)',
    'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n',
    'need':['arrayobject.h','CFUNCSMESS','MINMAX'],
    # Only defined when f2py runs with --debug-capi.
    'cppmacros':{debugcapi:'#define DEBUGCFUNCS'},
    'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n',
                """
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#

#latexdocstrsigns#
"""],
    'restdoc':['Wrapped function ``#name#``\n'+'-'*80,

               ]
    }
+
+################## Rules for C/API function ##############
+
# Per-routine rule dictionaries.  The first ("Init") dict fills every
# template slot with a default; each following dict is applied only when
# its '_check' predicate holds for the routine being wrapped.
rout_rules=[
    { # Init
        'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n',
                          'routdebugleave':'\n','routdebugfailure':'\n',
                          'setjmpbuf':' || ',
                          'docstrreq':'\n','docstropt':'\n','docstrout':'\n',
                          'docstrcbs':'\n','docstrsigns':'\\n"\n"',
                          'latexdocstrsigns':'\n',
                          'latexdocstrreq':'\n','latexdocstropt':'\n',
                          'latexdocstrout':'\n','latexdocstrcbs':'\n',
                          },
        'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'',
        'docsign':'','docsignopt':'','decl':'/*decl*/',
        'freemem':'/*freemem*/',
        'docsignshort':'','docsignoptshort':'',
        'docstrsigns':'','latexdocstrsigns':'',
        'docstrreq':'Required arguments:',
        'docstropt':'Optional arguments:',
        'docstrout':'Return objects:',
        'docstrcbs':'Call-back functions:',
        'latexdocstrreq':'\\noindent Required arguments:',
        'latexdocstropt':'\\noindent Optional arguments:',
        'latexdocstrout':'\\noindent Return objects:',
        'latexdocstrcbs':'\\noindent Call-back functions:',
        'args_capi':'','keys_capi':'','functype':'',
        'frompyobj':'/*frompyobj*/',
        'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed
        'pyobjfrom':'/*pyobjfrom*/',
        'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed
        'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/',
        'routdebugenter':'/*routdebugenter*/',
        'routdebugfailure':'/*routdebugfailure*/',
        'callfortranroutine':'/*callfortranroutine*/',
        'argformat':'','keyformat':'','need_cfuncs':'',
        'docreturn':'','return':'','returnformat':'','rformat':'',
        'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'',
        'initf2pywraphook':'',
        'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
        },{ # Plain (non-module) routine naming.
        'apiname':'f2py_rout_#modulename#_#name#',
        'pyname':'#modulename#.#name#',
        'decl':'',
        '_check':l_not(ismoduleroutine)
        },{ # Routine living inside a Fortran 90 module.
        'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
        'pyname':'#modulename#.#f90modulename#.#name#',
        'decl':'',
        '_check':ismoduleroutine
        },{ # Subroutine
        'functype':'void',
        'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
                              l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
                              ismoduleroutine:'',
                              isdummyroutine:''
                              },
        'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
                       l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
                       l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
                       },
        'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'},
        'callfortranroutine':[
            {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
            {hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
            {isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
            {hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
            {l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
            {isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
            {hasexternals:"""\t\t}"""}
            ],
        '_check':issubroutine,
        },{ # Wrapped function (function converted to subroutine by func2subr)
        'functype':'void',
        'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
                              isdummyroutine:'',
                              },

        'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
                       isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
                       },
        # Attach the raw Fortran function pointer as the '_cpointer'
        # attribute of the wrapped function object.
        'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
    {
      extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
      PyObject* o = PyDict_GetItemString(d,"#name#");
      PyObject_SetAttrString(o,"_cpointer", PyCObject_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
    }
    '''},
        'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
        'callfortranroutine':[
            {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
            {hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
            {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
            {l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
            {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
            {isthreadsafe:'\tPy_END_ALLOW_THREADS'},
            {hasexternals:'\t}'}
            ],
        '_check':isfunction_wrap,
        },{ # Function
        'functype':'#ctype#',
        'docreturn':{l_not(isintent_hide):'#rname#,'},
        'docstrout':'\t#pydocsignout#',
        'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
                          {hasresultnote:'--- #resultnote#'}],
        'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
                              {l_and(debugcapi,l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
                              ],
        '_check':l_and(isfunction,l_not(isfunction_wrap))
        },{ # Scalar function
        'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
                              l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
                              isdummyroutine:''
                              },
        'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
                       l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
                       isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
                       },
        'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
                 l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
                {iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
                ],
        'callfortranroutine':[
            {hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
            {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
            {hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
            {l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
            {isthreadsafe:'\tPy_END_ALLOW_THREADS'},
            {hasexternals:'\t}'},
            {l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
            {l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
        'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
        'need':[{l_not(isdummyroutine):'F_FUNC'},
                {iscomplexfunction:'pyobj_from_#ctype#1'},
                {islong_longfunction:'long_long'},
                {islong_doublefunction:'long_double'}],
        'returnformat':{l_not(isintent_hide):'#rformat#'},
        'return':{iscomplexfunction:',#name#_return_value_capi',
                  l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'},
        '_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap))
        },{ # String function # in use for --no-wrap
        'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
        'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)):
#                       '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},',
                       '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
                       l_and(l_not(ismoduleroutine),isintent_c):
#                       '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},'
                       '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
                       },
        'decl':['\t#ctype# #name#_return_value = NULL;',
                '\tint #name#_return_value_len = 0;'],
        'callfortran':'#name#_return_value,#name#_return_value_len,',
        'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
                              '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
                              '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
                              '\t\tf2py_success = 0;',
                              '\t} else {',
                              "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
                              '\t}',
                              '\tif (f2py_success) {',
                              {hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
                              {isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
                              """\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
                              {isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
                              {hasexternals:'\t\t}'},
                              {debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
                              '\t} /* if (f2py_success) after (string)malloc */',
                              ],
        'returnformat':'#rformat#',
        'return':',#name#_return_value',
        'freemem':'\tSTRINGFREE(#name#_return_value);',
        'need':['F_FUNC','#ctype#','STRINGFREE'],
        '_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete
        },
    { # Debugging
        'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
        'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
        'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
        '_check':debugcapi
        }
    ]
+
+################ Rules for arguments ##################
+
# Map type predicates (from auxfuncs) to the cfuncs typedef name that the
# generated C module needs when a variable of that kind appears.
typedef_need_dict = {islong_long:'long_long',
                     islong_double:'long_double',
                     islong_complex:'complex_long_double',
                     isunsigned_char:'unsigned_char',
                     isunsigned_short:'unsigned_short',
                     isunsigned:'unsigned',
                     isunsigned_long_long:'unsigned_long_long'}
+
# Rules for auxiliary variables (local variables of the wrapper that are
# not passed in from Python); applied per-variable via '_check' predicates.
aux_rules=[
    {
        'separatorsfor':sepdict
        },
    { # Common
        'frompyobj':['\t/* Processing auxiliary variable #varname# */',
                     {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
        'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
        'need':typedef_need_dict,
        },
# Scalars (not complex)
    { # Common
        'decl':'\t#ctype# #varname# = 0;',
        'need':{hasinitvalue:'math.h'},
        'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
        '_check':l_and(isscalar,l_not(iscomplex)),
        },
    {
        'return':',#varname#',
        'docstrout':'\t#pydocsignout#',
        'docreturn':'#outvarname#,',
        'returnformat':'#varrformat#',
        '_check':l_and(isscalar,l_not(iscomplex),isintent_out),
        },
# Complex scalars
    { # Common
        'decl':'\t#ctype# #varname#;',
        'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
        '_check':iscomplex
        },
# String
    { # Common
        'decl':['\t#ctype# #varname# = NULL;',
                '\tint slen(#varname#);',
                ],
        'need':['len..'],
        '_check':isstring
        },
# Array
    { # Common
        'decl':['\t#ctype# *#varname# = NULL;',
                '\tintp #varname#_Dims[#rank#] = {#rank*[-1]#};',
                '\tconst int #varname#_Rank = #rank#;',
                ],
        'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
        '_check':isarray
        },
# Scalararray
    { # Common
        '_check':l_and(isarray,l_not(iscomplexarray))
        },{ # Not hidden
        '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
        },
# Integer*1 array
    {'need':'#ctype#',
     '_check':isint1array,
     '_depend':''
     },
# Integer*-1 array
    {'need':'#ctype#',
     '_check':isunsigned_chararray,
     '_depend':''
     },
# Integer*-2 array
    {'need':'#ctype#',
     '_check':isunsigned_shortarray,
     '_depend':''
     },
# Integer*-8 array
    {'need':'#ctype#',
     '_check':isunsigned_long_longarray,
     '_depend':''
     },
# Complexarray
    {'need':'#ctype#',
     '_check':iscomplexarray,
     '_depend':''
     },
# Stringarray
    {
        'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
        'need':'string',
        '_check':isstringarray
        }
    ]
+
+arg_rules=[
+ {
+ 'separatorsfor':sepdict
+ },
+ { # Common
+ 'frompyobj':['\t/* Processing variable #varname# */',
+ {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
+ 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
+ '_depend':'',
+ 'need':typedef_need_dict,
+ },
+# Doc signatures
+ {
+ 'docstropt':{l_and(isoptional,isintent_nothide):'\t#pydocsign#'},
+ 'docstrreq':{l_and(isrequired,isintent_nothide):'\t#pydocsign#'},
+ 'docstrout':{isintent_out:'\t#pydocsignout#'},
+ 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote:'--- #note#'}]},
+ 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote:'--- #note#'}]},
+ 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {l_and(hasnote,isintent_hide):'--- #note#',
+ l_and(hasnote,isintent_nothide):'--- See above.'}]},
+ 'depend':''
+ },
+# Required/Optional arguments
+ {
+ 'kwlist':'"#varname#",',
+ 'docsign':'#varname#,',
+ '_check':l_and(isintent_nothide,l_not(isoptional))
+ },
+ {
+ 'kwlistopt':'"#varname#",',
+ 'docsignopt':'#varname#=#showinit#,',
+ 'docsignoptshort':'#varname#,',
+ '_check':l_and(isintent_nothide,isoptional)
+ },
+# Docstring/BuildValue
+ {
+ 'docreturn':'#outvarname#,',
+ 'returnformat':'#varrformat#',
+ '_check':isintent_out
+ },
+# Externals (call-back functions)
+ { # Common
+ 'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
+ 'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
+ 'docstropt':{isintent_nothide:'\t#varname#_extra_args := () input tuple'},
+ 'docstrcbs':'#cbdocstr#',
+ 'latexdocstrcbs':'\\item[] #cblatexdocstr#',
+ 'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
+ 'decl':['\tPyObject *#varname#_capi = Py_None;',
+ '\tPyTupleObject *#varname#_xa_capi = NULL;',
+ '\tPyTupleObject *#varname#_args_capi = NULL;',
+ '\tint #varname#_nofargs_capi = 0;',
+ {l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
+ ],
+ 'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
+ 'argformat':{isrequired:'O'},
+ 'keyformat':{isoptional:'O'},
+ 'xaformat':{isintent_nothide:'O!'},
+ 'args_capi':{isrequired:',&#varname#_capi'},
+ 'keys_capi':{isoptional:',&#varname#_capi'},
+ 'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
+ 'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
+ 'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
+ 'need':['#cbname#','setjmp.h'],
+ '_check':isexternal
+ },
+ {
+ 'frompyobj':[{l_not(isintent_callback):"""\
+if(PyCObject_Check(#varname#_capi)) {
+ #varname#_cptr = PyCObject_AsVoidPtr(#varname#_capi);
+} else {
+ #varname#_cptr = #cbname#;
+}
+"""},{isintent_callback:"""\
+if (#varname#_capi==Py_None) {
+ #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
+ if (#varname#_capi) {
+ if (#varname#_xa_capi==NULL) {
+ if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
+ PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
+ if (capi_tmp)
+ #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+ else
+ #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
+ if (#varname#_xa_capi==NULL) {
+ PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
+ return NULL;
+ }
+ }
+ }
+ }
+ if (#varname#_capi==NULL) {
+ PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
+ return NULL;
+ }
+}
+"""},
+## {l_not(isintent_callback):"""\
+## if (#varname#_capi==Py_None) {
+## printf(\"hoi\\n\");
+## }
+## """},
+"""\
+\t#varname#_nofargs_capi = #cbname#_nofargs;
+\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
+\t\tjmp_buf #varname#_jmpbuf;""",
+{debugcapi:["""\
+\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
+\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
+{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
+ """\
+\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
+\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
+\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
+\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
+ ],
+'cleanupfrompyobj':
+"""\
+\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
+\t\t#cbname#_capi = #varname#_capi;
+\t\tPy_DECREF(#cbname#_args_capi);
+\t\t#cbname#_args_capi = #varname#_args_capi;
+\t\t#cbname#_nofargs = #varname#_nofargs_capi;
+\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
+\t}""",
+ 'need':['SWAP','create_cb_arglist'],
+ '_check':isexternal,
+ '_depend':''
+ },
+# Scalars (not complex)
+ { # Common
+ 'decl':'\t#ctype# #varname# = 0;',
+ 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
+ 'return':{isintent_out:',#varname#'},
+ '_check':l_and(isscalar,l_not(iscomplex))
+ },{
+ 'need':{hasinitvalue:'math.h'},
+ '_check':l_and(isscalar,l_not(iscomplex)),
+ #'_depend':''
+ },{ # Not hidden
+ 'decl':'\tPyObject *#varname#_capi = Py_None;',
+ 'argformat':{isrequired:'O'},
+ 'keyformat':{isoptional:'O'},
+ 'args_capi':{isrequired:',&#varname#_capi'},
+ 'keys_capi':{isoptional:',&#varname#_capi'},
+ 'pyobjfrom':{isintent_inout:"""\
+\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+\tif (f2py_success) {"""},
+ 'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
+ 'need':{isintent_inout:'try_pyarr_from_#ctype#'},
+ '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide)
+ },{
+ 'frompyobj':[
+# hasinitvalue...
+# if pyobj is None:
+# varname = init
+# else
+# from_pyobj(varname)
+#
+# isoptional and noinitvalue...
+# if pyobj is not None:
+# from_pyobj(varname)
+# else:
+# varname is uninitialized
+#
+# ...
+# from_pyobj(varname)
+#
+ {hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
+ '_depend':''},
+ {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
+ '_depend':''},
+ {l_not(islogical):'''\
+\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
+\tif (f2py_success) {'''},
+ {islogical:'''\
+\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
+\t\tf2py_success = 1;
+\tif (f2py_success) {'''},
+ ],
+ 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/',
+ 'need':{l_not(islogical):'#ctype#_from_pyobj'},
+ '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide),
+ '_depend':''
+# },{ # Hidden
+# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
+ },{ # Hidden
+ 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
+ 'need':typedef_need_dict,
+ '_check':l_and(isscalar,l_not(iscomplex),isintent_hide),
+ '_depend':''
+ },{ # Common
+ 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ '_check':l_and(isscalar,l_not(iscomplex)),
+ '_depend':''
+ },
+# Complex scalars
+ { # Common
+ 'decl':'\t#ctype# #varname#;',
+ 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
+ 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ 'return':{isintent_out:',#varname#_capi'},
+ '_check':iscomplex
+ },{ # Not hidden
+ 'decl':'\tPyObject *#varname#_capi = Py_None;',
+ 'argformat':{isrequired:'O'},
+ 'keyformat':{isoptional:'O'},
+ 'args_capi':{isrequired:',&#varname#_capi'},
+ 'keys_capi':{isoptional:',&#varname#_capi'},
+ 'need':{isintent_inout:'try_pyarr_from_#ctype#'},
+ 'pyobjfrom':{isintent_inout:"""\
+\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+\t\tif (f2py_success) {"""},
+ 'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
+ '_check':l_and(iscomplex,isintent_nothide)
+ },{
+ 'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
+ {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'},
+# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");'
+ '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
+ '\n\tif (f2py_success) {'],
+ 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/',
+ 'need':['#ctype#_from_pyobj'],
+ '_check':l_and(iscomplex,isintent_nothide),
+ '_depend':''
+ },{ # Hidden
+ 'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'},
+ '_check':l_and(iscomplex,isintent_hide)
+ },{
+ 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ '_check':l_and(iscomplex,isintent_hide),
+ '_depend':''
+ },{ # Common
+ 'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
+ 'need':['pyobj_from_#ctype#1'],
+ '_check':iscomplex
+ },{
+ 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ '_check':iscomplex,
+ '_depend':''
+ },
+# String
+ { # Common
+ 'decl':['\t#ctype# #varname# = NULL;',
+ '\tint slen(#varname#);',
+ '\tPyObject *#varname#_capi = Py_None;'],
+ 'callfortran':'#varname#,',
+ 'callfortranappend':'slen(#varname#),',
+ 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+# 'freemem':'\tSTRINGFREE(#varname#);',
+ 'return':{isintent_out:',#varname#'},
+ 'need':['len..'],#'STRINGFREE'],
+ '_check':isstring
+ },{ # Common
+ 'frompyobj':"""\
+\tslen(#varname#) = #length#;
+\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
+\tif (f2py_success) {""",
+ 'cleanupfrompyobj':"""\
+\t\tSTRINGFREE(#varname#);
+\t} /*if (f2py_success) of #varname#*/""",
+ 'need':['#ctype#_from_pyobj','len..','STRINGFREE'],
+ '_check':isstring,
+ '_depend':''
+ },{ # Not hidden
+ 'argformat':{isrequired:'O'},
+ 'keyformat':{isoptional:'O'},
+ 'args_capi':{isrequired:',&#varname#_capi'},
+ 'keys_capi':{isoptional:',&#varname#_capi'},
+ 'pyobjfrom':{isintent_inout:'''\
+\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
+\tif (f2py_success) {'''},
+ 'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
+ 'need':{isintent_inout:'try_pyarr_from_#ctype#'},
+ '_check':l_and(isstring,isintent_nothide)
+ },{ # Hidden
+ '_check':l_and(isstring,isintent_hide)
+ },{
+ 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ '_check':isstring,
+ '_depend':''
+ },
+# Array
+ { # Common
+ 'decl':['\t#ctype# *#varname# = NULL;',
+ '\tintp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ '\tconst int #varname#_Rank = #rank#;',
+ '\tPyArrayObject *capi_#varname#_tmp = NULL;',
+ '\tint capi_#varname#_intent = 0;',
+ ],
+ 'callfortran':'#varname#,',
+ 'return':{isintent_out:',capi_#varname#_tmp'},
+ 'need':'len..',
+ '_check':isarray
+ },{ # intent(overwrite) array
+ 'decl':'\tint capi_overwrite_#varname# = 1;',
+ 'kwlistxa':'"overwrite_#varname#",',
+ 'xaformat':'i',
+ 'keys_xa':',&capi_overwrite_#varname#',
+ 'docsignxa':'overwrite_#varname#=1,',
+ 'docsignxashort':'overwrite_#varname#,',
+ 'docstropt':'\toverwrite_#varname# := 1 input int',
+ '_check':l_and(isarray,isintent_overwrite),
+ },{
+ 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ '_check':l_and(isarray,isintent_overwrite),
+ '_depend':'',
+ },
+ { # intent(copy) array
+ 'decl':'\tint capi_overwrite_#varname# = 0;',
+ 'kwlistxa':'"overwrite_#varname#",',
+ 'xaformat':'i',
+ 'keys_xa':',&capi_overwrite_#varname#',
+ 'docsignxa':'overwrite_#varname#=0,',
+ 'docsignxashort':'overwrite_#varname#,',
+ 'docstropt':'\toverwrite_#varname# := 0 input int',
+ '_check':l_and(isarray,isintent_copy),
+ },{
+ 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ '_check':l_and(isarray,isintent_copy),
+ '_depend':'',
+ },{
+ 'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
+ '_check':isarray,
+ '_depend':''
+ },{ # Not hidden
+ 'decl':'\tPyObject *#varname#_capi = Py_None;',
+ 'argformat':{isrequired:'O'},
+ 'keyformat':{isoptional:'O'},
+ 'args_capi':{isrequired:',&#varname#_capi'},
+ 'keys_capi':{isoptional:',&#varname#_capi'},
+# 'pyobjfrom':{isintent_inout:"""\
+# /* Partly because of the following hack, intent(inout) is depreciated,
+# Use intent(in,out) instead.
+
+# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
+# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
+# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
+# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
+# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
+# \t\t} else
+# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
+# \t}
+# */
+# """},
+# 'need':{isintent_inout:'copy_ND_array'},
+ '_check':l_and(isarray,isintent_nothide)
+ },{
+ 'frompyobj':['\t#setdims#;',
+ '\tcapi_#varname#_intent |= #intent#;',
+ {isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
+ {isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
+ """\
+\tif (capi_#varname#_tmp == NULL) {
+\t\tif (!PyErr_Occurred())
+\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
+\t} else {
+\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
+""",
+{hasinitvalue:[
+ {isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
+ {isintent_hide:'\t{'},
+ {iscomplexarray:'\t\t#ctype# capi_c;'},
+ """\
+\t\tint *_i,capi_i=0;
+\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
+\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
+\t\t\twhile ((_i = nextforcomb()))
+\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
+\t\t} else {
+\t\t\tif (!PyErr_Occurred())
+\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
+\t\t\tf2py_success = 0;
+\t\t}
+\t}
+\tif (f2py_success) {"""]},
+ ],
+ 'cleanupfrompyobj':[ # note that this list will be reversed
+ '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
+ {l_not(l_or(isintent_out,isintent_hide)):"""\
+\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
+\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
+ {l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
+ {hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'},
+ ],
+ '_check':isarray,
+ '_depend':''
+ },
+# { # Hidden
+# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'},
+# '_check':l_and(isarray,isintent_hide)
+# },
+# Scalararray
+ { # Common
+ '_check':l_and(isarray,l_not(iscomplexarray))
+ },{ # Not hidden
+ '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
+ },
+# Integer*1 array
+ {'need':'#ctype#',
+ '_check':isint1array,
+ '_depend':''
+ },
+# Integer*-1 array
+ {'need':'#ctype#',
+ '_check':isunsigned_chararray,
+ '_depend':''
+ },
+# Integer*-2 array
+ {'need':'#ctype#',
+ '_check':isunsigned_shortarray,
+ '_depend':''
+ },
+# Integer*-8 array
+ {'need':'#ctype#',
+ '_check':isunsigned_long_longarray,
+ '_depend':''
+ },
+# Complexarray
+ {'need':'#ctype#',
+ '_check':iscomplexarray,
+ '_depend':''
+ },
+# Stringarray
+ {
+ 'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
+ 'need':'string',
+ '_check':isstringarray
+ }
+ ]
+
+################# Rules for checking ###############
+
# check_rules: rule dictionaries applied once for every C-expression listed
# in a variable's 'check' attribute.  The first dict is unconditional (debug
# tracing plus the 'len..' need); each later dict selects one CHECK* C macro
# by variable kind and stops further matching via '_break'.  The final dict
# has no '_check' and is the generic fallback (CHECKGENERIC).
check_rules=[
    {
        # Unconditional: trace the check expression when debugcapi is on.
        'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
        'need':'len..'
    },{
        # Non-complex scalars.
        'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
        'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
        'need':'CHECKSCALAR',
        '_check':l_and(isscalar,l_not(iscomplex)),
        '_break':''
    },{
        # Strings.
        'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
        'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
        'need':'CHECKSTRING',
        '_check':isstring,
        '_break':''
    },{
        # Arrays.
        'need':'CHECKARRAY',
        'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
        'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
        '_check':isarray,
        '_break':''
    },{
        # Fallback for everything else (e.g. complex scalars).
        'need':'CHECKGENERIC',
        'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
        'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/',
    }
]
+
+########## Applying the rules. No need to modify what follows #############
+
+#################### Build C/API module #######################
+
def buildmodule(m,um):
    """Generate the C/API extension module source for module block `m`.

    `um` is the list of use-module blocks referenced by `m`.  Returns a
    dict with key 'csrc' (path of the generated <module>module.c) and,
    when wrappers/docs were produced, 'fsrc' (Fortran wrapper source)
    and 'ltx' (LaTeX documentation file).
    """
    global f2py_version,options
    outmess('\tBuilding module "%s"...\n'%(m['name']))
    ret = {}
    mod_rules=defmod_rules[:]
    vrd=modsign2map(m)
    rd=dictappend({'f2py_version':f2py_version},vrd)
    funcwrappers = []
    funcwrappers2 = [] # F90 codes
    # Locate the body of each interfaced routine and run the per-routine
    # rules; an 'entry' statement expands into one pseudo-routine per
    # entry point.
    for n in m['interfaced']:
        nb=None
        for bi in m['body']:
            if not bi['block']=='interface':
                errmess('buildmodule: Expected interface block. Skipping.\n')
                continue
            for b in bi['body']:
                if b['name']==n: nb=b;break

        if not nb:
            # NOTE(review): message grammar - should read "Could not find ...".
            errmess('buildmodule: Could not found the body of interfaced routine "%s". Skipping.\n'%(n))
            continue
        nb_list = [nb]
        if nb.has_key('entry'):
            for k,a in nb['entry'].items():
                nb1 = copy.deepcopy(nb)
                del nb1['entry']
                nb1['name'] = k
                nb1['args'] = a
                nb_list.append(nb1)
        for nb in nb_list:
            api,wrap=buildapi(nb)
            if wrap:
                if ismoduleroutine(nb):
                    funcwrappers2.append(wrap)
                else:
                    funcwrappers.append(wrap)
            ar=applyrules(api,vrd)
            rd=dictappend(rd,ar)

    # Construct COMMON block support
    cr,wrap = common_rules.buildhooks(m)
    if wrap:
        funcwrappers.append(wrap)
    ar=applyrules(cr,vrd)
    rd=dictappend(rd,ar)

    # Construct F90 module support
    mr,wrap = f90mod_rules.buildhooks(m)
    if wrap:
        funcwrappers2.append(wrap)
    ar=applyrules(mr,vrd)
    rd=dictappend(rd,ar)

    for u in um:
        ar=use_rules.buildusevars(u,m['use'][u['name']])
        rd=dictappend(rd,ar)

    # Resolve every registered "need" into its C code snippet, grouped by
    # need-list name; unknown needs are reported and skipped.
    needs=cfuncs.get_needs()
    code={}
    for n in needs.keys():
        code[n]=[]
        for k in needs[n]:
            c=''
            if cfuncs.includes0.has_key(k): c=cfuncs.includes0[k]
            elif cfuncs.includes.has_key(k): c=cfuncs.includes[k]
            elif cfuncs.userincludes.has_key(k): c=cfuncs.userincludes[k]
            elif cfuncs.typedefs.has_key(k): c=cfuncs.typedefs[k]
            elif cfuncs.typedefs_generated.has_key(k):
                c=cfuncs.typedefs_generated[k]
            elif cfuncs.cppmacros.has_key(k): c=cfuncs.cppmacros[k]
            elif cfuncs.cfuncs.has_key(k): c=cfuncs.cfuncs[k]
            elif cfuncs.callbacks.has_key(k): c=cfuncs.callbacks[k]
            elif cfuncs.f90modhooks.has_key(k): c=cfuncs.f90modhooks[k]
            elif cfuncs.commonhooks.has_key(k): c=cfuncs.commonhooks[k]
            else: errmess('buildmodule: unknown need %s.\n'%(`k`));continue
            code[n].append(c)
    mod_rules.append(code)
    for r in mod_rules:
        if (r.has_key('_check') and r['_check'](m)) or (not r.has_key('_check')):
            ar=applyrules(r,vrd,m)
            rd=dictappend(rd,ar)
    ar=applyrules(module_rules,rd)

    # Write out the C/API source; template tabs become two spaces.
    fn = os.path.join(options['buildpath'],vrd['modulename']+'module.c')
    ret['csrc'] = fn
    f=open(fn,'w')
    f.write(string.replace(ar['modulebody'],'\t',2*' '))
    f.close()
    outmess('\tWrote C/API module "%s" to file "%s/%smodule.c"\n'%(m['name'],options['buildpath'],vrd['modulename']))

    if options['dorestdoc']:
        fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest')
        f=open(fn,'w')
        f.write('.. -*- rest -*-\n')
        f.write(string.join(ar['restdoc'],'\n'))
        f.close()
        outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename']))
    if options['dolatexdoc']:
        fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex')
        ret['ltx'] = fn
        f=open(fn,'w')
        f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
        if not options.has_key('shortlatex'):
            f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
        f.write(string.join(ar['latexdoc'],'\n'))
        if not options.has_key('shortlatex'):
            f.write('\\end{document}')
        f.close()
        outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename']))
    if funcwrappers:
        # Fortran 77 wrappers: fold continuation lines at column 66.
        wn = os.path.join(options['buildpath'],'%s-f2pywrappers.f'%(vrd['modulename']))
        ret['fsrc'] = wn
        f=open(wn,'w')
        f.write('C -*- fortran -*-\n')
        f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
        f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
        lines = []
        for l in string.split(string.join(funcwrappers,'\n\n')+'\n','\n'):
            if l and l[0]==' ':
                while len(l)>=66:
                    lines.append(l[:66]+'\n &')
                    l = l[66:]
                lines.append(l+'\n')
            else: lines.append(l+'\n')
        lines = string.join(lines,'').replace('\n &\n','\n')
        f.write(lines)
        f.close()
        outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
    if funcwrappers2:
        # Fortran 90 wrappers: free-form continuation with trailing '&'.
        wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename']))
        ret['fsrc'] = wn
        f=open(wn,'w')
        f.write('! -*- f90 -*-\n')
        f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
        f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
        lines = []
        for l in string.split(string.join(funcwrappers2,'\n\n')+'\n','\n'):
            if len(l)>72 and l[0]==' ':
                lines.append(l[:72]+'&\n &')
                l = l[72:]
                while len(l)>66:
                    lines.append(l[:66]+'&\n &')
                    l = l[66:]
                lines.append(l+'\n')
            else: lines.append(l+'\n')
        lines = string.join(lines,'').replace('\n &\n','\n')
        f.write(lines)
        f.close()
        outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
    return ret
+
+################## Build C/API function #############
+
# Ordinal suffixes keyed by the last decimal digit, used to build messages
# like "1st argument", "12th keyword" (note: 11/12/13 get st/nd/rd).
stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'}
def buildapi(rout):
    """Apply the rule tables to one routine block.

    Returns (ar, wrap) where `ar` is the accumulated rule dictionary for
    the routine's C wrapper and `wrap` is the Fortran wrapper source
    produced by func2subr.assubr (empty when none is needed).
    """
    rout,wrap = func2subr.assubr(rout)
    args,depargs=getargs2(rout)
    capi_maps.depargs=depargs
    var=rout['vars']
    # NOTE(review): `auxvars` is computed but never used below.
    auxvars = [a for a in var.keys() if isintent_aux(var[a])]

    if ismoduleroutine(rout):
        outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name']))
    else:
        outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name']))
    # Routine-level rules.
    vrd=routsign2map(rout)
    rd=dictappend({},vrd)
    for r in rout_rules:
        if (r.has_key('_check') and r['_check'](rout)) or (not r.has_key('_check')):
            ar=applyrules(r,vrd,rout)
            rd=dictappend(rd,ar)

    # Argument rules: first pass for non-dependent rules, second pass for
    # '_depend' rules (applied in dependency order via depargs).
    nth,nthk=0,0
    savevrd={}
    for a in args:
        vrd=sign2map(a,var[a])
        if isintent_aux(var[a]):
            _rules = aux_rules
        else:
            _rules = arg_rules
        if not isintent_hide(var[a]):
            if not isoptional(var[a]):
                nth=nth+1
                vrd['nth']=`nth`+stnd[nth%10]+' argument'
            else:
                nthk=nthk+1
                vrd['nth']=`nthk`+stnd[nthk%10]+' keyword'
        else: vrd['nth']='hidden'
        savevrd[a]=vrd
        for r in _rules:
            if r.has_key('_depend'): continue
            if (r.has_key('_check') and r['_check'](var[a])) or (not r.has_key('_check')):
                ar=applyrules(r,vrd,var[a])
                rd=dictappend(rd,ar)
                if r.has_key('_break'): break
    for a in depargs:
        if isintent_aux(var[a]):
            _rules = aux_rules
        else:
            _rules = arg_rules
        vrd=savevrd[a]
        for r in _rules:
            if not r.has_key('_depend'): continue
            if (r.has_key('_check') and r['_check'](var[a])) or (not r.has_key('_check')):
                ar=applyrules(r,vrd,var[a])
                rd=dictappend(rd,ar)
                if r.has_key('_break'): break
        if var[a].has_key('check'):
            for c in var[a]['check']:
                vrd['check']=c
                ar=applyrules(check_rules,vrd,var[a])
                rd=dictappend(rd,ar)
    # Cleanup hooks were collected in construction order; emit them reversed
    # so resources are released LIFO.
    if type(rd['cleanupfrompyobj']) is types.ListType:
        rd['cleanupfrompyobj'].reverse()
    if type(rd['closepyobjfrom']) is types.ListType:
        rd['closepyobjfrom'].reverse()
    # Assemble the documentation signatures.
    rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
                                          {'docsign':rd['docsign'],
                                           'docsignopt':rd['docsignopt'],
                                           'docsignxa':rd['docsignxa']}))
    optargs=stripcomma(replace('#docsignopt##docsignxa#',
                               {'docsignxa':rd['docsignxashort'],
                                'docsignopt':rd['docsignoptshort']}
                               ))
    if optargs=='':
        rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
    else:
        rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
                                        {'docsign':rd['docsign'],
                                         'docsignopt':optargs,
                                         })
    rd['latexdocsignatureshort']=string.replace(rd['docsignatureshort'],'_','\\_')
    rd['latexdocsignatureshort']=string.replace(rd['latexdocsignatureshort'],',',', ')
    # Build the Fortran call argument list; Compaq Fortran needs an extra
    # `0,` before appended (string-length) arguments.
    cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
    if len(rd['callfortranappend'])>1:
        rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
    else:
        rd['callcompaqfortran']=cfs
    rd['callfortran']=cfs
    if type(rd['docreturn'])==types.ListType:
        rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
    rd['docstrsigns']=[]
    rd['latexdocstrsigns']=[]
    for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
        if rd.has_key(k) and type(rd[k])==types.ListType:
            rd['docstrsigns']=rd['docstrsigns']+rd[k]
        k='latex'+k
        if rd.has_key(k) and type(rd[k])==types.ListType:
            rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
                ['\\begin{description}']+rd[k][1:]+\
                ['\\end{description}']
    ar=applyrules(routine_rules,rd)
    if ismoduleroutine(rout):
        outmess('\t\t\t %s\n'%(ar['docshort']))
    else:
        outmess('\t\t %s\n'%(ar['docshort']))
    return ar,wrap
+
+
+#################### EOF rules.py #######################
diff --git a/numpy/f2py/setup.cfg b/numpy/f2py/setup.cfg
new file mode 100644
index 000000000..14669544c
--- /dev/null
+++ b/numpy/f2py/setup.cfg
@@ -0,0 +1,3 @@
+[bdist_rpm]
+doc_files = docs/
+ tests/ \ No newline at end of file
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
new file mode 100755
index 000000000..2616ccbd5
--- /dev/null
+++ b/numpy/f2py/setup.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+setup.py for installing F2PY
+
+Usage:
+ python setup.py install
+
+Copyright 2001-2005 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Revision: 1.32 $
+$Date: 2005/01/30 17:22:14 $
+Pearu Peterson
+"""
+
+__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
+
+import os
+import sys
+from distutils.dep_util import newer
+from scipy.distutils.core import setup
+from scipy.distutils.misc_util import Configuration
+
+from __version__ import version
+
def configuration(parent_package='',top_path=None):
    """Return the scipy.distutils Configuration for the f2py package.

    Registers the docs data dir, the fortranobject C sources and man
    page, and a generated `f2py` console script.
    """
    config = Configuration('f2py', parent_package, top_path)

    config.add_data_dir('docs')

    config.add_data_files('src/fortranobject.c',
                          'src/fortranobject.h',
                          'f2py.1'
                          )

    config.make_svn_version_py()

    def generate_f2py_py(build_dir):
        # Name the script after the interpreter, e.g. python2.4 -> f2py2.4;
        # on Windows (.exe interpreter / bdist_wininst) force a .py suffix.
        f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:]
        if f2py_exe[-4:]=='.exe':
            f2py_exe = f2py_exe[:-4] + '.py'
        if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py':
            f2py_exe = f2py_exe + '.py'
        target = os.path.join(build_dir,f2py_exe)
        # Only regenerate when this setup.py is newer than the target.
        if newer(__file__,target):
            print 'Creating',target
            f = open(target,'w')
            f.write('''\
#!/usr/bin/env %s
# See http://cens.ioc.ee/projects/f2py2e/
import os
os.environ["NO_SCIPY_IMPORT"]="f2py"
import scipy.f2py as f2py
f2py.main()
'''%(os.path.basename(sys.executable)))
            f.close()
        return target

    config.add_scripts(generate_f2py_py)

    print 'F2PY Version',config.get_version()

    return config
+
+if __name__ == "__main__":
+
+ config = configuration(top_path='')
+ version = config.get_version()
+ print 'F2PY Version',version
+ config = config.todict()
+
+ if sys.version[:3]>='2.3':
+ config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
+ "/F2PY-2-latest.tar.gz"
+ config['classifiers'] = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+ 'Natural Language :: English',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: C',
+ 'Programming Language :: Fortran',
+ 'Programming Language :: Python',
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Software Development :: Code Generators',
+ ]
+ setup(version=version,
+ description = "F2PY - Fortran to Python Interface Generaton",
+ author = "Pearu Peterson",
+ author_email = "pearu@cens.ioc.ee",
+ maintainer = "Pearu Peterson",
+ maintainer_email = "pearu@cens.ioc.ee",
+ license = "LGPL",
+ platforms = "Unix, Windows (mingw|cygwin), Mac OSX",
+ long_description = """\
+The Fortran to Python Interface Generator, or F2PY for short, is a
+command line tool (f2py) for generating Python C/API modules for
+wrapping Fortran 77/90/95 subroutines, accessing common blocks from
+Python, and calling Python functions from Fortran (call-backs).
+Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
+ url = "http://cens.ioc.ee/projects/f2py2e/",
+ keywords = ['Fortran','f2py'],
+ **config)
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
new file mode 100644
index 000000000..d5da43a88
--- /dev/null
+++ b/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,756 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+ Author: Pearu Peterson <pearu@cens.ioc.ee>
+ $Revision: 1.52 $
+ $Date: 2005/07/11 07:44:20 $
+*/
+
+/************************* FortranObject *******************************/
+
+typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *);
+
/*
 * Construct a fortran object from a NULL-terminated table of
 * FortranDataDef entries.  `init`, when non-NULL, is called first to
 * initialize F90 module data.  Routine entries (rank == -1) become
 * nested fortran objects; entries with static data become array views
 * sharing the Fortran storage (FARRAY_FLAGS, i.e. Fortran order).
 *
 * NOTE(review): the early `return NULL` paths (failed PyDict_New or
 * PyArray_New) leak `fp` and a previously created `v`; only the `fail`
 * label performs cleanup.
 */
PyObject *
PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) {
    int i;
    PyFortranObject *fp = NULL;
    PyObject *v = NULL;
    if (init!=NULL) /* Initialize F90 module objects */
        (*(init))();
    if ((fp = PyObject_New(PyFortranObject, &PyFortran_Type))==NULL) return NULL;
    if ((fp->dict = PyDict_New())==NULL) return NULL;
    fp->len = 0;
    /* Table is terminated by a NULL name. */
    while (defs[fp->len].name != NULL) fp->len++;
    if (fp->len == 0) goto fail;
    fp->defs = defs;
    for (i=0;i<fp->len;i++)
        if (fp->defs[i].rank == -1) { /* Is Fortran routine */
            v = PyFortranObject_NewAsAttr(&(fp->defs[i]));
            if (v==NULL) return NULL;
            PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
        } else
            if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */
                v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d,
                                fp->defs[i].type, NULL, fp->defs[i].data, 0, FARRAY_FLAGS,
                                NULL);
                if (v==NULL) return NULL;
                PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
            }
    /* Drop our reference; the dict now owns each inserted value.
       NOTE(review): only the *last* v is released here - earlier loop
       iterations rely on this XDECREF never being skipped. */
    Py_XDECREF(v);
    return (PyObject *)fp;
 fail:
    Py_XDECREF(v);
    return NULL;
}
+
/*
 * Wrap a single FortranDataDef as a fortran object; used to expose F90
 * module routines as attributes of the module's fortran object.
 * NOTE(review): `fp` is leaked if PyDict_New fails.
 */
PyObject *
PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */
    PyFortranObject *fp = NULL;
    fp = PyObject_New(PyFortranObject, &PyFortran_Type);
    if (fp == NULL) return NULL;
    if ((fp->dict = PyDict_New())==NULL) return NULL;
    fp->len = 1;
    fp->defs = defs;
    return (PyObject *)fp;
}
+
+/* Fortran methods */
+
/* Destructor: release the attribute dict, then the object storage. */
static void
fortran_dealloc(PyFortranObject *fp) {
    Py_XDECREF(fp->dict);
    PyMem_Del(fp);
}
+
+
/* No regular methods; all attribute access is routed through
   fortran_getattr below. */
static PyMethodDef fortran_methods[] = {
    {NULL, NULL} /* sentinel */
};
+
+
/*
 * Build a one-line doc string for a single FortranDataDef entry:
 * "<name> - <doc or shape/type description>\n".
 *
 * NOTE(review): every sprintf(p,"%s...",p) call here passes `p` as both
 * destination and source argument, which is undefined behavior per the
 * C standard (overlapping objects); it happens to work on common libcs.
 * Also, the strlen(p)>size check runs only *after* a potential buffer
 * overflow, and malloc's result is not checked.  Flagging for a rewrite
 * using snprintf with an explicit offset.
 */
static PyObject *
fortran_doc (FortranDataDef def) {
    char *p;
    PyObject *s = NULL;
    int i;
    unsigned size=100;   /* base buffer size; grown only by def.doc length */
    if (def.doc!=NULL)
        size += strlen(def.doc);
    p = (char*)malloc (size);
    if (sprintf(p,"%s - ",def.name)==0) goto fail;
    if (def.rank==-1) {
        /* Routine entry: use the supplied doc text if any. */
        if (def.doc==NULL) {
            if (sprintf(p,"%sno docs available",p)==0)
                goto fail;
        } else {
            if (sprintf(p,"%s%s",p,def.doc)==0)
                goto fail;
        }
    } else {
        /* Data entry: describe type char and shape (or "scalar"). */
        PyArray_Descr *d = PyArray_DescrFromType(def.type);
        if (sprintf(p,"%s'%c'-",p,d->type)==0) goto fail;
        if (def.data==NULL) {
            if (sprintf(p,"%sarray(%" INTP_FMT,p,def.dims.d[0])==0) goto fail;
            for(i=1;i<def.rank;++i)
                if (sprintf(p,"%s,%" INTP_FMT,p,def.dims.d[i])==0) goto fail;
            if (sprintf(p,"%s), not allocated",p)==0) goto fail;
        } else {
            if (def.rank>0) {
                if (sprintf(p,"%sarray(%"INTP_FMT,p,def.dims.d[0])==0) goto fail;
                for(i=1;i<def.rank;i++)
                    if (sprintf(p,"%s,%" INTP_FMT,p,def.dims.d[i])==0) goto fail;
                if (sprintf(p,"%s)",p)==0) goto fail;
            } else {
                if (sprintf(p,"%sscalar",p)==0) goto fail;
            }
        }
    }
    if (sprintf(p,"%s\n",p)==0) goto fail;
    if (strlen(p)>size) {
        fprintf(stderr,"fortranobject.c:fortran_doc:len(p)=%zd>%d(size): too long doc string required, increase size\n",strlen(p),size);
        goto fail;
    }
    s = PyString_FromString(p);
 fail:
    free(p);
    return s;
}
+
/* Module-global target for the Fortran set_data callback below; points
   at the allocatable-array definition currently being queried/updated.
   NOTE(review): this makes getattr/setattr of allocatables non-reentrant. */
static FortranDataDef *save_def; /* save pointer of an allocatable array */
static void set_data(char *d,intp *f) { /* callback from Fortran */
    if (*f) /* In fortran f=allocated(d) */
        save_def->data = d;
    else
        save_def->data = NULL;
    /* printf("set_data: d=%p,f=%d\n",d,*f); */
}
+
/*
 * Attribute lookup for fortran objects.  Order: the instance dict,
 * then allocatable-array defs (queried live via the Fortran func
 * callback), then the special names __dict__, __doc__ and _cpointer,
 * finally the (empty) method table.
 */
static PyObject *
fortran_getattr(PyFortranObject *fp, char *name) {
    int i,j,k,flag;
    if (fp->dict != NULL) {
        PyObject *v = PyDict_GetItemString(fp->dict, name);
        if (v != NULL) {
            Py_INCREF(v);   /* PyDict_GetItemString returns a borrowed ref */
            return v;
        }
    }
    /* Linear scan of defs; on exit j==0 iff defs[i] matches `name`. */
    for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++);
    if (j==0)
        if (fp->defs[i].rank!=-1) { /* F90 allocatable array */
            if (fp->defs[i].func==NULL) return NULL;
            for(k=0;k<fp->defs[i].rank;++k)
                fp->defs[i].dims.d[k]=-1;
            /* Ask Fortran for the current allocation status/shape via
               the set_data callback (fills save_def->data). */
            save_def = &fp->defs[i];
            (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag);
            if (flag==2)
                k = fp->defs[i].rank + 1;
            else
                k = fp->defs[i].rank;
            if (fp->defs[i].data !=NULL) { /* array is allocated */
                PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d,
                                          fp->defs[i].type, NULL, fp->defs[i].data, 0, FARRAY_FLAGS,
                                          NULL);
                if (v==NULL) return NULL;
                /* Py_INCREF(v); */
                return v;
            } else { /* array is not allocated */
                Py_INCREF(Py_None);
                return Py_None;
            }
        }
    if (strcmp(name,"__dict__")==0) {
        Py_INCREF(fp->dict);
        return fp->dict;
    }
    if (strcmp(name,"__doc__")==0) {
        /* Concatenate per-entry docs; the result is cached in the dict. */
        PyObject *s = PyString_FromString("");
        for (i=0;i<fp->len;i++)
            PyString_ConcatAndDel(&s,fortran_doc(fp->defs[i]));
        if (PyDict_SetItemString(fp->dict, name, s))
            return NULL;
        return s;
    }
    if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) {
        /* Expose the raw data/function pointer of a single-entry object. */
        PyObject *cobj = PyCObject_FromVoidPtr((void *)(fp->defs[0].data),NULL);
        if (PyDict_SetItemString(fp->dict, name, cobj))
            return NULL;
        return cobj;
    }
    return Py_FindMethod(fortran_methods, (PyObject *)fp, name);
}
+
/*
 * Attribute assignment.  Matching data defs get the value copied into
 * the Fortran storage (allocating/reallocating allocatable arrays via
 * the Fortran func callback; v==None deallocates).  Routine entries
 * (rank==-1) cannot be overwritten.  Unmatched names fall through to
 * the instance dict (v==NULL deletes).
 */
static int
fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) {
    int i,j,flag;
    PyArrayObject *arr = NULL;
    /* Linear scan; j==0 iff defs[i] matches `name`. */
    for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++);
    if (j==0) {
        if (fp->defs[i].rank==-1) {
            PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine");
            return -1;
        }
        if (fp->defs[i].func!=NULL) { /* is allocatable array */
            intp dims[F2PY_MAX_DIMS];
            int k;
            save_def = &fp->defs[i];
            if (v!=Py_None) { /* set new value (reallocate if needed --
                                 see f2py generated code for more
                                 details ) */
                for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
                if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
                    return -1;
                (*(fp->defs[i].func))(&fp->defs[i].rank,arr->dimensions,set_data,&flag);
            } else { /* deallocate */
                for(k=0;k<fp->defs[i].rank;k++) dims[k]=0;
                (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag);
                for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
            }
            memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(intp));
        } else { /* not allocatable array */
            if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
                return -1;
        }
        if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */
            intp s = PyArray_MultiplyList(fp->defs[i].dims.d,arr->nd);
            if (s==-1)
                s = PyArray_MultiplyList(arr->dimensions,arr->nd);
            /* NOTE(review): memcpy never returns NULL; this condition can
               only trip on s<0, so a zero-size copy is effectively a no-op
               check. */
            if (s<0 ||
                (memcpy(fp->defs[i].data,arr->data,s*PyArray_ITEMSIZE(arr)))==NULL) {
                if ((PyObject*)arr!=v) {
                    Py_DECREF(arr);
                }
                return -1;
            }
            /* array_from_pyobj may have returned v itself; only release
               a freshly created temporary. */
            if ((PyObject*)arr!=v) {
                Py_DECREF(arr);
            }
        } else return (fp->defs[i].func==NULL?-1:0);
        return 0; /* succesful */
    }
    if (fp->dict == NULL) {
        fp->dict = PyDict_New();
        if (fp->dict == NULL)
            return -1;
    }
    if (v == NULL) {
        int rv = PyDict_DelItemString(fp->dict, name);
        if (rv < 0)
            PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute");
        return rv;
    }
    else
        return PyDict_SetItemString(fp->dict, name, v);
}
+
/*
 * tp_call slot: a fortran object is callable when its first definition
 * entry is a routine (rank == -1).  The stored func pointer is the
 * generated C/API wrapper; defs[0].data, when set, is passed through as
 * its fourth argument.
 */
static PyObject*
fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) {
    int i = 0;
    /* printf("fortran call
       name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
       fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
    if (fp->defs[i].rank==-1) {/* is Fortran routine */
        if ((fp->defs[i].func==NULL)) {
            PyErr_Format(PyExc_RuntimeError, "no function to call");
            return NULL;
        }
        else if (fp->defs[i].data==NULL)
            /* dummy routine */
            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL);
        else
            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,
                                                        (void *)fp->defs[i].data);
    }
    PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
    return NULL;
}
+
+
/*
 * Type object for fortran wrapper objects.  Old-style positional
 * initializer: only dealloc, getattr, setattr and call are provided;
 * all trailing slots are implicitly zero.
 */
PyTypeObject PyFortran_Type = {
    PyObject_HEAD_INIT(0)
    0,                            /*ob_size*/
    "fortran",                    /*tp_name*/
    sizeof(PyFortranObject),      /*tp_basicsize*/
    0,                            /*tp_itemsize*/
    /* methods */
    (destructor)fortran_dealloc,  /*tp_dealloc*/
    0,                            /*tp_print*/
    (getattrfunc)fortran_getattr, /*tp_getattr*/
    (setattrfunc)fortran_setattr, /*tp_setattr*/
    0,                            /*tp_compare*/
    0,                            /*tp_repr*/
    0,                            /*tp_as_number*/
    0,                            /*tp_as_sequence*/
    0,                            /*tp_as_mapping*/
    0,                            /*tp_hash*/
    (ternaryfunc)fortran_call,    /*tp_call*/
};
+
+/************************* f2py_report_atexit *******************************/
+
/*
 * Optional wall-clock instrumentation (enabled by -DF2PY_REPORT_ATEXIT).
 * Millisecond timers based on ftime(): the plain clocks accumulate time
 * spent in the f2py interface layer vs. the wrapped Fortran/C calls;
 * the cb_* clocks do the same for Python call-backs invoked from
 * Fortran.  Stopping a call clock restarts the interface clock, so the
 * two never run concurrently.
 * NOTE(review): counters are plain ints - not thread-safe, and may
 * overflow on very long runs.
 */
#ifdef F2PY_REPORT_ATEXIT
static int passed_time = 0;
static int passed_counter = 0;
static int passed_call_time = 0;
static struct timeb start_time;
static struct timeb stop_time;
static struct timeb start_call_time;
static struct timeb stop_call_time;
static int cb_passed_time = 0;
static int cb_passed_counter = 0;
static int cb_passed_call_time = 0;
static struct timeb cb_start_time;
static struct timeb cb_stop_time;
static struct timeb cb_start_call_time;
static struct timeb cb_stop_call_time;

extern void f2py_start_clock(void) { ftime(&start_time); }
extern
void f2py_start_call_clock(void) {
    f2py_stop_clock();
    ftime(&start_call_time);
}
extern
void f2py_stop_clock(void) {
    ftime(&stop_time);
    passed_time += 1000*(stop_time.time - start_time.time);
    passed_time += stop_time.millitm - start_time.millitm;
}
extern
void f2py_stop_call_clock(void) {
    ftime(&stop_call_time);
    passed_call_time += 1000*(stop_call_time.time - start_call_time.time);
    passed_call_time += stop_call_time.millitm - start_call_time.millitm;
    passed_counter += 1;
    f2py_start_clock();
}

extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); }
extern
void f2py_cb_start_call_clock(void) {
    f2py_cb_stop_clock();
    ftime(&cb_start_call_time);
}
extern
void f2py_cb_stop_clock(void) {
    ftime(&cb_stop_time);
    cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time);
    cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
}
extern
void f2py_cb_stop_call_clock(void) {
    ftime(&cb_stop_call_time);
    cb_passed_call_time += 1000*(cb_stop_call_time.time - cb_start_call_time.time);
    cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm;
    cb_passed_counter += 1;
    f2py_cb_start_clock();
}

/* Print the accumulated report once; subsequent calls (one per loaded
   f2py module) only append the module name.
   NOTE(review): "acctual" in the report text is a typo ("actual"). */
static int f2py_report_on_exit_been_here = 0;
extern
void f2py_report_on_exit(int exit_flag,void *name) {
    if (f2py_report_on_exit_been_here) {
        fprintf(stderr," %s\n",(char*)name);
        return;
    }
    f2py_report_on_exit_been_here = 1;
    fprintf(stderr," /-----------------------\\\n");
    fprintf(stderr," < F2PY performance report >\n");
    fprintf(stderr," \\-----------------------/\n");
    fprintf(stderr,"Overall time spent in ...\n");
    fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n",
            passed_call_time);
    fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n",
            passed_counter,passed_time);
    fprintf(stderr,"(c) call-back (Python) functions : %8d msec\n",
            cb_passed_call_time);
    fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n",
            cb_passed_counter,cb_passed_time);

    fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n",
            passed_call_time-cb_passed_call_time-cb_passed_time);
    fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n");
    fprintf(stderr,"Exit status: %d\n",exit_flag);
    fprintf(stderr,"Modules : %s\n",(char*)name);
}
#endif
+
+/********************** report on array copy ****************************/
+
/*
 * Optional diagnostics (-DF2PY_REPORT_ON_ARRAY_COPY=<threshold>):
 * report to stderr whenever array_from_pyobj copies an array larger
 * than the threshold, or creates an array from a generic object.
 * When disabled the two macros expand to nothing.
 */
#ifdef F2PY_REPORT_ON_ARRAY_COPY
static void f2py_report_on_array_copy(PyArrayObject* arr) {
    const long arr_size = PyArray_Size((PyObject *)arr);
    if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) {
        fprintf(stderr,"copied an array: size=%ld, elsize=%d\n",
                arr_size, PyArray_ITEMSIZE(arr));
    }
}
static void f2py_report_on_array_copy_fromany(void) {
    fprintf(stderr,"created an array from object\n");
}

#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr)
#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany()
#else
#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR
#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY
#endif
+
+
+/************************* array_from_obj *******************************/
+
+/*
+ * File: array_from_pyobj.c
+ *
+ * Description:
+ * ------------
+ * Provides array_from_pyobj function that returns a contigious array
+ * object with the given dimensions and required storage order, either
+ * in row-major (C) or column-major (Fortran) order. The function
+ * array_from_pyobj is very flexible about its Python object argument
+ * that can be any number, list, tuple, or array.
+ *
+ * array_from_pyobj is used in f2py generated Python extension
+ * modules.
+ *
+ * Author: Pearu Peterson <pearu@cens.ioc.ee>
+ * Created: 13-16 January 2002
+ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
+ */
+
/* Return how many of dims[0..rank-1] are <= 0, i.e. how many
   dimensions are still undefined/unset. */
static int
count_nonpos(const int rank,
             const intp *dims) {
    int i=0,r=0;
    while (i<rank) {
        if (dims[i] <= 0) ++r;
        ++i;
    }
    return r;
}

/* Defined later in this file; validates `arr` against the requested
   rank and fills in any unset (-1) entries of `dims`. */
static int check_and_fix_dimensions(const PyArrayObject* arr,
                                    const int rank,
                                    intp *dims);
+
/* Debug helper (compiled only with -DDEBUG_COPY_ND_ARRAY): print an
   array's rank, flags, size, strides and dimensions to stdout. */
#ifdef DEBUG_COPY_ND_ARRAY
void dump_attrs(const PyArrayObject* arr) {
    int rank = arr->nd;
    intp size = PyArray_Size((PyObject *)arr);
    int i;
    printf("\trank = %d, flags = %d, size = %" INTP_FMT "\n",
           rank,arr->flags,size);
    printf("\tstrides = [");
    for(i=0;i<rank;++i) {
        printf("%3" INTP_FMT,arr->strides[i]);
    }
    printf("]\n\t dimensions = [");
    for(i=0;i<rank;++i) {
        printf("%3" INTP_FMT, arr->dimensions[i]);
    }
    printf("]\n");
}
#endif
+
+#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; }
+
+/* Exchange the contents (data pointer, shape, strides, base, descr,
+ flags) of two array objects in place, so that the Python-level
+ identity of arr1 is preserved while it takes over arr2's buffer.
+ Used for intent(inplace). Always returns 0.
+ NOTE: weakreflist is deliberately left un-swapped (see commented
+ line below) so weak references keep following the original object. */
+static int swap_arrays(PyArrayObject* arr1, PyArrayObject* arr2) {
+ SWAPTYPE(arr1->data,arr2->data,char*);
+ SWAPTYPE(arr1->nd,arr2->nd,int);
+ SWAPTYPE(arr1->dimensions,arr2->dimensions,intp*);
+ SWAPTYPE(arr1->strides,arr2->strides,intp*);
+ SWAPTYPE(arr1->base,arr2->base,PyObject*);
+ SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*);
+ SWAPTYPE(arr1->flags,arr2->flags,int);
+ /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
+ return 0;
+}
+
+#define ARRAY_ISCOMPATIBLE(arr,type_num) \
+( (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) \
+ ||(PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) \
+ ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) \
+)
+
+/*
+ * Return a PyArrayObject of the given type_num/rank that satisfies the
+ * f2py intent flags, built from an arbitrary Python object obj.
+ * dims is in/out: entries <= 0 are filled in from obj's shape by
+ * check_and_fix_dimensions(). Returns NULL on error (a Python
+ * exception is set on most, but not all, failure paths -- see the
+ * XXX markers below).
+ */
+extern
+PyArrayObject* array_from_pyobj(const int type_num,
+ intp *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj) {
+ /* Note about reference counting
+ -----------------------------
+ If the caller returns the array to Python, it must be done with
+ Py_BuildValue("N",arr).
+ Otherwise, if obj!=arr then the caller must call Py_DECREF(arr).
+
+ Note on intent(cache,out,..)
+ ---------------------
+ Don't expect correct data when returning intent(cache) array.
+
+ */
+ char mess[200];
+ PyArrayObject *arr = NULL;
+ PyArray_Descr *descr = PyArray_DescrFromType(type_num);
+
+ /* Case 1: no usable input object -- allocate a fresh array.
+ Applies to intent(hide), and to intent(cache)/optional when the
+ caller passed None. */
+ if ((intent & F2PY_INTENT_HIDE)
+ || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None))
+ || ((intent & F2PY_OPTIONAL) && (obj==Py_None))
+ ) {
+ /* intent(cache), optional, intent(hide) */
+ if (count_nonpos(rank,dims)) {
+ int i;
+ sprintf(mess,"failed to create intent(cache|hide)|optional array"
+ "-- must have defined dimensions but got (");
+ for(i=0;i<rank;++i)
+ sprintf(mess+strlen(mess),"%" INTP_FMT ",",dims[i]);
+ sprintf(mess+strlen(mess),")");
+ PyErr_SetString(PyExc_ValueError,mess);
+ return NULL;
+ }
+ arr = (PyArrayObject *)
+ PyArray_New(&PyArray_Type, rank, dims, type_num,
+ NULL,NULL,0,
+ !(intent&F2PY_INTENT_C),
+ NULL);
+ /* NOTE(review): arr is not checked for NULL before FILLWBYTE --
+ confirm PyArray_New cannot fail here or add a check. */
+ if (!(intent & F2PY_INTENT_CACHE))
+ PyArray_FILLWBYTE(arr, 0);
+ return arr;
+ }
+
+ /* Case 2: input is already an ndarray. */
+ if (PyArray_Check(obj)) {
+ arr = (PyArrayObject *)obj;
+
+ if (intent & F2PY_INTENT_CACHE) {
+ /* intent(cache): only requires one segment of sufficiently
+ large elements; contents/type are not converted. */
+ if (PyArray_ISONESEGMENT(obj)
+ && PyArray_ITEMSIZE((PyArrayObject *)obj)>=descr->elsize) {
+ if (check_and_fix_dimensions((PyArrayObject *)obj,rank,dims))
+ return NULL; /*XXX: set exception */
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(obj);
+ return (PyArrayObject *)obj;
+ }
+ sprintf(mess,"failed to initialize intent(cache) array");
+ if (!PyArray_ISONESEGMENT(obj))
+ sprintf(mess+strlen(mess)," -- input must be in one segment");
+ if (PyArray_ITEMSIZE(arr)<descr->elsize)
+ sprintf(mess+strlen(mess)," -- expected at least elsize=%d but got %d",
+ descr->elsize,PyArray_ITEMSIZE(arr)
+ );
+ PyErr_SetString(PyExc_ValueError,mess);
+ return NULL;
+ }
+
+ /* here we have always intent(in) or intent(inout) or intent(inplace) */
+
+ if (check_and_fix_dimensions(arr,rank,dims))
+ return NULL; /*XXX: set exception */
+
+ /* Fast path: element size, type family and memory layout already
+ match the request -- return the input array itself. */
+ if ((! (intent & F2PY_INTENT_COPY))
+ && PyArray_ITEMSIZE(arr)==descr->elsize
+ && ARRAY_ISCOMPATIBLE(arr,type_num)
+ ) {
+ if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY(arr):PyArray_ISFARRAY(arr)) {
+ if ((intent & F2PY_INTENT_OUT)) {
+ Py_INCREF(arr);
+ }
+ /* Returning input array */
+ return arr;
+ }
+ }
+
+ /* intent(inout) requires the input to be usable as-is; a copy
+ would break the in/out aliasing, so fail with a diagnostic. */
+ if (intent & F2PY_INTENT_INOUT) {
+ sprintf(mess,"failed to initialize intent(inout) array");
+ if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr))
+ sprintf(mess+strlen(mess)," -- input not contiguous");
+ if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr))
+ sprintf(mess+strlen(mess)," -- input not fortran contiguous");
+ if (PyArray_ITEMSIZE(arr)!=descr->elsize)
+ sprintf(mess+strlen(mess)," -- expected elsize=%d but got %d",
+ descr->elsize,
+ PyArray_ITEMSIZE(arr)
+ );
+ if (!(ARRAY_ISCOMPATIBLE(arr,type_num)))
+ sprintf(mess+strlen(mess)," -- input '%c' not compatible to '%c'",
+ arr->descr->type,descr->type);
+ PyErr_SetString(PyExc_ValueError,mess);
+ return NULL;
+ }
+
+ /* here we have always intent(in) or intent(inplace) */
+
+ /* Make a properly-laid-out copy; for intent(inplace) swap its
+ guts into the original object so callers holding obj see the
+ new buffer (see swap_arrays above). */
+ {
+ PyArrayObject *retarr = (PyArrayObject *) \
+ PyArray_New(&PyArray_Type, arr->nd, arr->dimensions, type_num,
+ NULL,NULL,0,
+ !(intent&F2PY_INTENT_C),
+ NULL);
+ if (retarr==NULL)
+ return NULL;
+ F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+ if (PyArray_CopyInto(retarr, arr)) {
+ Py_DECREF(retarr);
+ return NULL;
+ }
+ if (intent & F2PY_INTENT_INPLACE) {
+ if (swap_arrays(arr,retarr))
+ return NULL; /* XXX: set exception */
+ Py_XDECREF(retarr);
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(arr);
+ } else {
+ arr = retarr;
+ }
+ }
+ return arr;
+ }
+
+ /* Case 3: obj is not an ndarray. inout/inplace/cache need a real
+ array to alias, so they cannot proceed. */
+ if ((intent & F2PY_INTENT_INOUT)
+ || (intent & F2PY_INTENT_INPLACE)
+ || (intent & F2PY_INTENT_CACHE)) {
+ sprintf(mess,"failed to initialize intent(inout|inplace|cache) array"
+ " -- input must be array but got %s",
+ PyString_AsString(PyObject_Str(PyObject_Type(obj)))
+ );
+ PyErr_SetString(PyExc_TypeError,mess);
+ return NULL;
+ }
+
+ /* Case 4: build a new array from the generic object (number, list,
+ tuple, ...), forcing cast and the requested memory order. */
+ {
+ F2PY_REPORT_ON_ARRAY_COPY_FROMANY;
+ arr = (PyArrayObject *) \
+ PyArray_FromAny(obj,PyArray_DescrFromType(type_num), 0,0,
+ ((intent & F2PY_INTENT_C)?CARRAY_FLAGS:FARRAY_FLAGS) \
+ | FORCECAST );
+ if (arr==NULL)
+ return NULL;
+ /* NOTE(review): on this failure path arr is returned NULL without
+ a Py_DECREF of the freshly created array -- possible leak;
+ confirm against the upstream fortranobject.c. */
+ if (check_and_fix_dimensions(arr,rank,dims))
+ return NULL; /*XXX: set exception */
+ return arr;
+ }
+
+}
+
+ /*****************************************/
+ /* Helper functions for array_from_pyobj */
+ /*****************************************/
+
+/* Reconcile the requested shape dims[0..rank-1] with arr's actual
+ shape. Entries <= 0 in dims are treated as free and filled in from
+ arr; fixed entries must match. Returns 0 on success, 1 on mismatch.
+ NOTE: diagnostics go to stderr via fprintf -- no Python exception is
+ set here (callers mark this with "XXX: set exception"). */
+static
+int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,intp *dims) {
+ /*
+ This function fills in blanks (that are -1\'s) in dims list using
+ the dimensions from arr. It also checks that non-blank dims will
+ match with the corresponding values in arr dimensions.
+ */
+ const intp arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1;
+
+ if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
+ intp new_size = 1;
+ int free_axe = -1;
+ int i;
+ /* Fill dims where -1 or 0; check dimensions; calc new_size; */
+ for(i=0;i<arr->nd;++i) {
+ if (dims[i] >= 0) {
+ if (dims[i]!=arr->dimensions[i]) {
+ fprintf(stderr,"%d-th dimension must be fixed to %" INTP_FMT
+ " but got %" INTP_FMT "\n",
+ i,dims[i], arr->dimensions[i]);
+ return 1;
+ }
+ if (!dims[i]) dims[i] = 1;
+ } else {
+ dims[i] = arr->dimensions[i] ? arr->dimensions[i] : 1;
+ }
+ new_size *= dims[i];
+ }
+ /* The extra trailing axes must all be 1, except at most one free
+ axis which absorbs the remaining size. */
+ for(i=arr->nd;i<rank;++i)
+ if (dims[i]>1) {
+ fprintf(stderr,"%d-th dimension must be %" INTP_FMT
+ " but got 0 (not defined).\n",
+ i,dims[i]);
+ return 1;
+ } else if (free_axe<0)
+ free_axe = i;
+ else
+ dims[i] = 1;
+ if (free_axe>=0) {
+ dims[free_axe] = arr_size/new_size;
+ new_size *= dims[free_axe];
+ }
+ /* Final consistency check: total element counts must agree. */
+ if (new_size != arr_size) {
+ fprintf(stderr,"confused: new_size=%" INTP_FMT
+ ", arr_size=%" INTP_FMT " (maybe too many free"
+ " indices)\n", new_size,arr_size);
+ return 1;
+ }
+ } else { /* [[1,2]] -> [[1],[2]] */
+ /* rank <= arr->nd: squeeze size-1 axes of arr onto the requested
+ rank; j walks arr's dimensions skipping lengths < 2. */
+ int i,j;
+ intp d;
+ int effrank;
+ intp size;
+ for (i=0,effrank=0;i<arr->nd;++i)
+ if (arr->dimensions[i]>1) ++effrank;
+ /* NOTE(review): the effrank check is only applied when the last
+ requested dim is fixed (dims[rank-1]>=0) -- confirm intended. */
+ if (dims[rank-1]>=0)
+ if (effrank>rank) {
+ fprintf(stderr,"too many axes: %d (effrank=%d), expected rank=%d\n",
+ arr->nd,effrank,rank);
+ return 1;
+ }
+ for (i=0,j=0;i<rank;++i) {
+ while (j<arr->nd && arr->dimensions[j]<2) ++j;
+ if (j>=arr->nd) d = 1;
+ else d = arr->dimensions[j++];
+ if (dims[i]>=0) {
+ if (d>1 && d!=dims[i]) {
+ fprintf(stderr,"%d-th dimension must be fixed to %" INTP_FMT
+ " but got %" INTP_FMT " (real index=%d)\n",
+ i,dims[i],d,j-1);
+ return 1;
+ }
+ if (!dims[i]) dims[i] = 1;
+ } else
+ dims[i] = d;
+ }
+ /* Any remaining axes of arr get folded into the last requested
+ dimension (j continues from the loop above). */
+ for (i=rank;i<arr->nd;++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */
+ while (j<arr->nd && arr->dimensions[j]<2) ++j;
+ if (j>=arr->nd) d = 1;
+ else d = arr->dimensions[j++];
+ dims[rank-1] *= d;
+ }
+ for (i=0,size=1;i<rank;++i) size *= dims[i];
+ if (size != arr_size) {
+ fprintf(stderr,"confused: size=%" INTP_FMT ", arr_size=%" INTP_FMT
+ ", rank=%d, effrank=%d, arr.nd=%d, dims=[",
+ size,arr_size,rank,effrank,arr->nd);
+ for (i=0;i<rank;++i) fprintf(stderr," %" INTP_FMT,dims[i]);
+ fprintf(stderr," ], arr.dims=[");
+ for (i=0;i<arr->nd;++i) fprintf(stderr," %" INTP_FMT,arr->dimensions[i]);
+ fprintf(stderr," ]\n");
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* End of file: array_from_pyobj.c */
+
+/************************* copy_ND_array *******************************/
+
+/* Copy arr into out (shapes handled by PyArray_CopyInto), optionally
+ reporting the copy. Returns PyArray_CopyInto's status: 0 on success,
+ -1 on error with a Python exception set. */
+extern
+int copy_ND_array(const PyArrayObject *arr, PyArrayObject *out)
+{
+ F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+ return PyArray_CopyInto(out, (PyArrayObject *)arr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+/************************* EOF fortranobject.c *******************************/
diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h
new file mode 100644
index 000000000..680e6690e
--- /dev/null
+++ b/numpy/f2py/src/fortranobject.h
@@ -0,0 +1,123 @@
+#ifndef Py_FORTRANOBJECT_H
+#define Py_FORTRANOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "Python.h"
+
+#ifdef FORTRANOBJECT_C
+#define NO_IMPORT_ARRAY
+#endif
+#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
+#include "scipy/arrayobject.h"
+
+ /*
+#ifdef F2PY_REPORT_ATEXIT_DISABLE
+#undef F2PY_REPORT_ATEXIT
+#else
+
+#ifndef __FreeBSD__
+#ifndef __WIN32__
+#ifndef __APPLE__
+#define F2PY_REPORT_ATEXIT
+#endif
+#endif
+#endif
+
+#endif
+ */
+
+#ifdef F2PY_REPORT_ATEXIT
+#include <sys/timeb.h>
+ extern void f2py_start_clock(void);
+ extern void f2py_stop_clock(void);
+ extern void f2py_start_call_clock(void);
+ extern void f2py_stop_call_clock(void);
+ extern void f2py_cb_start_clock(void);
+ extern void f2py_cb_stop_clock(void);
+ extern void f2py_cb_start_call_clock(void);
+ extern void f2py_cb_stop_call_clock(void);
+ extern void f2py_report_on_exit(int,void*);
+#endif
+
+#ifdef DMALLOC
+#include "dmalloc.h"
+#endif
+
+/* Fortran object interface */
+
+/*
+123456789-123456789-123456789-123456789-123456789-123456789-123456789-12
+
+PyFortranObject represents various Fortran objects:
+Fortran (module) routines, COMMON blocks, module data.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+*/
+
+#define F2PY_MAX_DIMS 40
+
+/* Callback signatures used by the Fortran-side setup/init glue. */
+typedef void (*f2py_set_data_func)(char*,intp*);
+typedef void (*f2py_void_func)(void);
+typedef void (*f2py_init_func)(int*,intp*,f2py_set_data_func,int*);
+
+ /*typedef void* (*f2py_c_func)(void*,...);*/
+
+typedef void *(*f2pycfunc)(void);
+
+/* One attribute of a PyFortranObject: either a data array (rank >= 0)
+ or a wrapped Fortran routine (rank == -1). */
+typedef struct {
+ char *name; /* attribute (array||routine) name */
+ int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS,
+ || rank=-1 for Fortran routine */
+ struct {intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */
+ int type; /* PyArray_<type> || not used */
+ char *data; /* pointer to array || Fortran routine */
+ f2py_init_func func; /* initialization function for
+ allocatable arrays:
+ func(&rank,dims,set_ptr_func,name,len(name))
+ || C/API wrapper for Fortran routine */
+ char *doc; /* documentation string; only recommended
+ for routines. */
+} FortranDataDef;
+
+/* A Fortran module / COMMON block / routine exposed to Python. */
+typedef struct {
+ PyObject_HEAD
+ int len; /* Number of attributes */
+ FortranDataDef *defs; /* An array of FortranDataDef's */
+ PyObject *dict; /* Fortran object attribute dictionary */
+} PyFortranObject;
+
+#define PyFortran_Check(op) ((op)->ob_type == &PyFortran_Type)
+#define PyFortran_Check1(op) (0==strcmp((op)->ob_type->tp_name,"fortran"))
+
+ extern PyTypeObject PyFortran_Type;
+ extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init);
+ extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs);
+
+/* Bit flags describing the f2py intent of an argument; combined with |
+ and tested in array_from_pyobj (fortranobject.c). */
+#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS)
+#define F2PY_INTENT_IN 1
+#define F2PY_INTENT_INOUT 2
+#define F2PY_INTENT_OUT 4
+#define F2PY_INTENT_HIDE 8
+#define F2PY_INTENT_CACHE 16
+#define F2PY_INTENT_COPY 32
+#define F2PY_INTENT_C 64
+#define F2PY_OPTIONAL 128
+#define F2PY_INTENT_INPLACE 256
+
+ extern PyArrayObject* array_from_pyobj(const int type_num,
+ intp *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj);
+ extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out);
+
+#ifdef DEBUG_COPY_ND_ARRAY
+ extern void dump_attrs(const PyArrayObject* arr);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_FORTRANOBJECT_H */
diff --git a/numpy/f2py/src/test/Makefile b/numpy/f2py/src/test/Makefile
new file mode 100644
index 000000000..0f8869f72
--- /dev/null
+++ b/numpy/f2py/src/test/Makefile
@@ -0,0 +1,96 @@
+# -*- makefile -*-
+# File: Makefile-foo
+# Usage:
+# make -f Makefile-foo [MODE=opt|debug]
+# Notes:
+# 1) You must use GNU make; try `gmake ..' if `make' fails.
+# 2) This file is auto-generated with f2py (version 2.264).
+# f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
+# written by Pearu Peterson <pearu@ioc.ee>.
+# See http://cens.ioc.ee/projects/f2py2e/
+# Generation date: Wed Sep 13 16:22:55 2000
+# $Revision: 1.2 $
+# $Date: 2000/09/17 16:10:27 $
+
+# Recommendation notes produced by f2py2e/buildmakefile.py:
+# ***
+
+PYINC = -I/numeric/include/python1.5/Numeric -I/numeric/include/python1.5
+INCLUDES = -I..
+LIBS = -L$(shell gcc -v 2>&1 | grep specs | sed -e 's/Reading specs from //g' | sed -e 's/\/specs//g') -lg2c
+LIBS=-L$$ABSOFT/lib -lfio -lf77math -lf90math
+LIBS=-L/numeric/bin -lvast90 -L/usr/lib/gcc-lib/i586-mandrake-linux/2.95.2 -lg2c
+
+# Wrapper generator:
+F2PY = /home/pearu/bin/f2py-cvs
+
+# Fortran compiler: Absoft f95
+FC = f95
+FC = f90
+FOPT =
+FDEBUG =
+FFLAGS = -B108 -YCFRL=1 -YCOM_NAMES=LCS -YCOM_PFX -YCOM_SFX=_ -YEXT_PFX -YEXT_NAMES=LCS
+FFLAGS =
+# C compiler: cc ('gcc 2.x.x' 2.95.2)
+CC = cc
+COPT =
+CDEBUG =
+CFLAGS = -fpic
+
+# Linker: ld ('GNU ld' 2.9.5)
+LD = ld
+LDFLAGS = -shared -s
+SO = .so
+
+ifeq '$(MODE)' 'debug'
+FFLAGS += $(FDEBUG)
+CFLAGS += $(CDEBUG)
+endif
+ifeq '$(MODE)' 'opt'
+FFLAGS += $(FOPT)
+CFLAGS += $(COPT)
+endif
+FFLAGS += $(INCLUDES)
+CFLAGS += $(PYINC) $(INCLUDES)
+
+SRCC = ../fortranobject.c
+SRCF = mod.f90 bar.f foo90.f90 wrap.f
+SRCS = $(SRCC) $(SRCF)
+OBJC = $(filter %.o,$(SRCC:.c=.o) $(SRCC:.cc=.o) $(SRCC:.C=.o))
+OBJF = $(filter %.o,$(SRCF:.f90=.o) $(SRCF:.f=.o) $(SRCF:.F=.o) $(SRCF:.for=.o))
+OBJS = $(OBJC) $(OBJF)
+
+INSTALLNAME = f2py2e-apps
+INSTALLDIRECTORY = /numeric/lib/python1.5/site-packages/$(INSTALLNAME)
+INSTALLDIR = install -d -c
+INSTALLEXEC = install -m 755 -c
+
+all: foo
+
+foo: foomodule$(SO)
+foomodule$(SO) : foomodule.o $(OBJS)
+ $(LD) $(LDFLAGS) -o $@ $< $(OBJS) $(LIBS)
+
+foomodule.o: foomodule.c
+
+
+$(OBJS) : $(SRCS)
+%.o : %.f ; $(FC) -c $(FFLAGS) $<
+%.o : %.f90 ; $(FC) -c $(FFLAGS) $<
+
+test: foomodule$(SO)
+ python -c 'import foo;print foo.__doc__'
+install: foomodule$(SO)
+ $(INSTALLDIR) $(INSTALLDIRECTORY)
+ $(INSTALLEXEC) foomodule$(SO) $(INSTALLDIRECTORY)
+ cd $(INSTALLDIRECTORY) && echo "$(INSTALLNAME)" > ../$(INSTALLNAME).pth
+
+.PHONY: clean distclean debug test install foo
+debug:
+ echo "OBJS=$(OBJS)"
+ echo "SRCS=$(SRCS)"
+clean:
+ $(RM) *.o *.mod core foomodule.{dvi,log} $(OBJS)
+distclean: clean
+ $(RM) *.so *.sl foomodule.{tex,so}
+ $(RM) .f2py_get_compiler_*
diff --git a/numpy/f2py/src/test/bar.f b/numpy/f2py/src/test/bar.f
new file mode 100644
index 000000000..5354ceaf9
--- /dev/null
+++ b/numpy/f2py/src/test/bar.f
@@ -0,0 +1,11 @@
+C Test routine: store fixed values into the /foodata/ COMMON block
+C and echo them to stdout.
+ subroutine bar()
+ integer a
+ real*8 b,c(3)
+ common /foodata/ a,b,c
+ a = 4
+ b = 6.7
+ c(2) = 3.0
+ write(*,*) "bar:a=",a
+ write(*,*) "bar:b=",b
+ write(*,*) "bar:c=",c
+ end
diff --git a/numpy/f2py/src/test/foo.f b/numpy/f2py/src/test/foo.f
new file mode 100644
index 000000000..5354ceaf9
--- /dev/null
+++ b/numpy/f2py/src/test/foo.f
@@ -0,0 +1,11 @@
+C NOTE(review): file foo.f defines subroutine bar, byte-identical to
+C bar.f; foomodule.c also references a foo symbol (defined in
+C foo90.f90) -- confirm this duplicate is intentional.
+ subroutine bar()
+ integer a
+ real*8 b,c(3)
+ common /foodata/ a,b,c
+ a = 4
+ b = 6.7
+ c(2) = 3.0
+ write(*,*) "bar:a=",a
+ write(*,*) "bar:b=",b
+ write(*,*) "bar:c=",c
+ end
diff --git a/numpy/f2py/src/test/foo90.f90 b/numpy/f2py/src/test/foo90.f90
new file mode 100644
index 000000000..dbca7e95b
--- /dev/null
+++ b/numpy/f2py/src/test/foo90.f90
@@ -0,0 +1,13 @@
+! Test routine: store fixed values into the /foodata/ COMMON block,
+! announcing itself on stdout first.
+subroutine foo()
+ integer a
+ real*8 b,c(3)
+ common /foodata/ a,b,c
+ print*, " F: in foo"
+ a = 5
+ b = 6.3
+ c(2) = 9.1
+end subroutine foo
+
+
+
+
diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c
new file mode 100644
index 000000000..0a954676e
--- /dev/null
+++ b/numpy/f2py/src/test/foomodule.c
@@ -0,0 +1,143 @@
+/* File: foomodule.c
+ * Example of FortranObject usage. See also wrap.f foo.f foo90.f90.
+ * Author: Pearu Peterson <pearu@ioc.ee>.
+ * http://cens.ioc.ee/projects/f2py2e/
+ * $Revision: 1.2 $
+ * $Date: 2000/09/17 16:10:27 $
+ */
+#ifdef __CPLUSPLUS__
+extern "C" {
+#endif
+
+#include "Python.h"
+#include "fortranobject.h"
+
+static PyObject *foo_error;
+
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F
+#else
+#define F_FUNC(f,F) f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F##_
+#else
+#define F_FUNC(f,F) f##_
+#endif
+#endif
+
+ /************* foo_bar *************/
+ static char doc_foo_bar[] = "\
+Function signature:\n\
+ bar()\n\
+";
+ /* Generic C/API wrapper for an argument-less Fortran routine: parse
+ (no) arguments, call f2py_func, return None. On parse failure
+ capi_buildvalue stays NULL (the capi_fail label is reached both on
+ success and failure). */
+ static PyObject *foo_bar(PyObject *capi_self, PyObject *capi_args,
+ PyObject *capi_keywds, void (*f2py_func)()) {
+ PyObject *capi_buildvalue = NULL;
+ static char *capi_kwlist[] = {NULL};
+ if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\
+ "|:foo.bar",\
+ capi_kwlist))
+ goto capi_fail;
+ (*f2py_func)();
+ capi_buildvalue = Py_BuildValue("");
+ capi_fail:
+ return capi_buildvalue;
+ }
+ /************ mod_init **************/
+ /* Wrapper for the module's init routine; identical structure to
+ foo_bar above (parse no args, call f2py_func, return None). */
+ static PyObject *mod_init(PyObject *capi_self, PyObject *capi_args,
+ PyObject *capi_keywds, void (*f2py_func)()) {
+ PyObject *capi_buildvalue = NULL;
+ static char *capi_kwlist[] = {NULL};
+ if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\
+ "|:mod.init",\
+ capi_kwlist))
+ goto capi_fail;
+ (*f2py_func)();
+ capi_buildvalue = Py_BuildValue("");
+ capi_fail:
+ return capi_buildvalue;
+ }
+
+ /* F90 module: attribute table for the "mod" Fortran object.
+ Scalars a,b; fixed-size array c; allocatable array d (rank with
+ unknown extent -1, resolved via the func callback); routine init. */
+ static FortranDataDef f2py_mod_def[] = {
+ {"a",0, {}, PyArray_INT},
+ {"b",0, {}, PyArray_DOUBLE},
+ {"c",1, {3}, PyArray_DOUBLE},
+ {"d",1, {-1}, PyArray_DOUBLE},
+ {"init",-1,{},0,NULL,(void *)mod_init},
+ {NULL}
+ };
+ /* Called back from Fortran (f2pyinitmod) with the addresses of the
+ module variables; stores them into the table above. */
+ static void f2py_setup_mod(char *a,char *b,char *c,void (*d)(),char *init) {
+ f2py_mod_def[0].data = a;
+ f2py_mod_def[1].data = b;
+ f2py_mod_def[2].data = c;
+ f2py_mod_def[3].func = d;
+ f2py_mod_def[4].data = init;
+ }
+ extern void F_FUNC(f2pyinitmod,F2PYINITMOD)();
+ static void f2py_init_mod() {
+ F_FUNC(f2pyinitmod,F2PYINITMOD)(f2py_setup_mod);
+ }
+
+ /* COMMON block: attribute table for the /foodata/ block, wired up
+ the same way via f2pyinitfoodata (see wrap.f). */
+ static FortranDataDef f2py_foodata_def[] = {
+ {"a",0, {}, PyArray_INT},
+ {"b",0, {}, PyArray_DOUBLE},
+ {"c",1, {3}, PyArray_DOUBLE},
+ {NULL}
+ };
+ static void f2py_setup_foodata(char *a,char *b,char *c) {
+ f2py_foodata_def[0].data = a;
+ f2py_foodata_def[1].data = b;
+ f2py_foodata_def[2].data = c;
+ }
+ extern void F_FUNC(f2pyinitfoodata,F2PYINITFOODATA)();
+ static void f2py_init_foodata() {
+ F_FUNC(f2pyinitfoodata,F2PYINITFOODATA)(f2py_setup_foodata);
+ }
+
+ /* Fortran routines (needs no initialization/setup function) */
+ extern void F_FUNC(bar,BAR)();
+ extern void F_FUNC(foo,FOO)();
+ static FortranDataDef f2py_routines_def[] = {
+ {"bar",-1, {}, 0, (char *)F_FUNC(bar,BAR),(void *)foo_bar,doc_foo_bar},
+ {"foo",-1, {}, 0, (char *)F_FUNC(foo,FOO),(void *)foo_bar,doc_foo_bar},
+ {NULL}
+ };
+
+static PyMethodDef foo_module_methods[] = {
+/*eof method*/
+ {NULL,NULL}
+};
+
+/* Python 2 module init: create module "foo" and publish the Fortran
+ objects (mod, foodata) and the wrapped routines (bar, foo). */
+void initfoo() {
+ int i;
+ PyObject *m, *d, *s;
+ PyTypeObject *t; /* NOTE(review): t and f are unused locals */
+ PyObject *f;
+ import_array();
+
+ m = Py_InitModule("foo", foo_module_methods);
+
+ d = PyModule_GetDict(m);
+ s = PyString_FromString("This module 'foo' demonstrates the usage of fortranobject.");
+ PyDict_SetItemString(d, "__doc__", s);
+
+ /* Fortran objects: */
+ PyDict_SetItemString(d, "mod", PyFortranObject_New(f2py_mod_def,f2py_init_mod));
+ PyDict_SetItemString(d, "foodata", PyFortranObject_New(f2py_foodata_def,f2py_init_foodata));
+ for(i=0;f2py_routines_def[i].name!=NULL;i++)
+ PyDict_SetItemString(d, f2py_routines_def[i].name,
+ PyFortranObject_NewAsAttr(&f2py_routines_def[i]));
+
+ Py_DECREF(s);
+
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module foo");
+}
+#ifdef __CPLUSCPLUS__
+}
+#endif
diff --git a/numpy/f2py/src/test/wrap.f b/numpy/f2py/src/test/wrap.f
new file mode 100644
index 000000000..9414eb9f6
--- /dev/null
+++ b/numpy/f2py/src/test/wrap.f
@@ -0,0 +1,70 @@
+C Query/(re)allocate the allocatable module array "d": deallocate if
+C the current shape conflicts with the requested f2py_s, allocate if
+C needed, then report the actual shape back and hand the data pointer
+C to C via f2py_set. Dimensions are reversed (Fortran vs C order).
+ subroutine f2py_mod_get_dims(f2py_r,f2py_s,f2py_set,f2py_n)
+ use mod
+ external f2py_set
+ logical f2py_ns
+ integer f2py_s(*),f2py_r,f2py_i,f2py_j
+ character*(*) f2py_n
+ if ("d".eq.f2py_n) then
+ f2py_ns = .FALSE.
+ if (allocated(d)) then
+ do f2py_i=1,f2py_r
+ if ((size(d,f2py_r-f2py_i+1).ne.f2py_s(f2py_i)).and.
+ c (f2py_s(f2py_i).ge.0)) then
+ f2py_ns = .TRUE.
+ end if
+ end do
+ if (f2py_ns) then
+ deallocate(d)
+ end if
+ end if
+ if (.not.allocated(d)) then
+ allocate(d(f2py_s(1)))
+ end if
+ if (allocated(d)) then
+ do f2py_i=1,f2py_r
+ f2py_s(f2py_i) = size(d,f2py_r-f2py_i+1)
+ end do
+ call f2py_set(d)
+ end if
+ end if
+ end subroutine f2py_mod_get_dims
+C Specialized variant for array "d" only: same reallocate-on-shape-
+C mismatch logic as f2py_mod_get_dims, but also passes allocated(d)
+C to set_data and only allocates when s(1) >= 1.
+ subroutine f2py_mod_get_dims_d(r,s,set_data)
+ use mod, only: d => d
+ external set_data
+ logical ns
+ integer s(*),r,i,j
+ ns = .FALSE.
+ if (allocated(d)) then
+ do i=1,r
+ if ((size(d,r-i+1).ne.s(i)).and.(s(i).ge.0)) then
+ ns = .TRUE.
+ end if
+ end do
+ if (ns) then
+ deallocate(d)
+ end if
+ end if
+ if (.not.allocated(d).and.(s(1).ge.1)) then
+ allocate(d(s(1)))
+ end if
+ if (allocated(d)) then
+ do i=1,r
+ s(i) = size(d,r-i+1)
+ end do
+ end if
+ call set_data(d,allocated(d))
+ end subroutine f2py_mod_get_dims_d
+
+C Hand the module variables' addresses to C (f2py_setup_mod in
+C foomodule.c) so the Python-side attribute table can be filled.
+ subroutine f2pyinitmod(setupfunc)
+ use mod
+ external setupfunc,f2py_mod_get_dims_d,init
+ call setupfunc(a,b,c,f2py_mod_get_dims_d,init)
+ end subroutine f2pyinitmod
+
+C Same for the /foodata/ COMMON block (f2py_setup_foodata in C).
+ subroutine f2pyinitfoodata(setupfunc)
+ external setupfunc
+ integer a
+ real*8 b,c(3)
+ common /foodata/ a,b,c
+ call setupfunc(a,b,c)
+ end subroutine f2pyinitfoodata
diff --git a/numpy/f2py/tests/array_from_pyobj/__init__.py b/numpy/f2py/tests/array_from_pyobj/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/f2py/tests/array_from_pyobj/__init__.py
diff --git a/numpy/f2py/tests/array_from_pyobj/setup.py b/numpy/f2py/tests/array_from_pyobj/setup.py
new file mode 100644
index 000000000..ff7ff8cfc
--- /dev/null
+++ b/numpy/f2py/tests/array_from_pyobj/setup.py
@@ -0,0 +1,26 @@
+
+import os
+def configuration(parent_name='',top_path=None):
+ from scipy.distutils.misc_util import Configuration
+
+ config = Configuration('array_from_pyobj',parent_name,top_path)
+ #import scipy.f2py as f2py
+ #f2pydir=os.path.dirname(os.path.abspath(f2py.__file__))
+ f2pydir=os.path.join(config.local_path,'..','..')
+ fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h')
+ fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c')
+ config.add_extension('wrap',
+ sources = ['wrapmodule.c',fobjcsrc],
+ include_dirs = [os.path.dirname(fobjhsrc)],
+ depends = [fobjhsrc,fobjcsrc],
+ define_macros = [('DEBUG_COPY_ND_ARRAY',1),
+ #('F2PY_REPORT_ON_ARRAY_COPY',1),
+ #('F2PY_REPORT_ATEXIT',1)
+ ]
+ )
+
+ return config
+
+if __name__ == "__main__":
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/numpy/f2py/tests/array_from_pyobj/tests/test_array_from_pyobj.py b/numpy/f2py/tests/array_from_pyobj/tests/test_array_from_pyobj.py
new file mode 100644
index 000000000..309ad03f6
--- /dev/null
+++ b/numpy/f2py/tests/array_from_pyobj/tests/test_array_from_pyobj.py
@@ -0,0 +1,512 @@
+import unittest
+import sys
+import copy
+
+from scipy.test.testing import *
+from scipy.base import array, typeinfo, alltrue, ndarray, asarray, can_cast,zeros
+set_package_path()
+from array_from_pyobj import wrap
+del sys.path[0]
+
+def flags_info(arr):
+ """Return the list of flag names set on arr, read via the wrap
+ extension's array_attrs (index 6 is the flags word)."""
+ flags = wrap.array_attrs(arr)[6]
+ return flags2names(flags)
+
+def flags2names(flags):
+ """Translate a numpy flags bitmask into the list of flag names
+ (as exported by the wrap extension) that are set in it."""
+ info = []
+ for flagname in ['CONTIGUOUS','FORTRAN','OWNDATA','ENSURECOPY',
+ 'ENSUREARRAY','ALIGNED','NOTSWAPPED','WRITEABLE',
+ 'UPDATEIFCOPY','BEHAVED_FLAGS','BEHAVED_FLAGS_RO',
+ 'CARRAY_FLAGS','FARRAY_FLAGS'
+ ]:
+ # abs() lets callers pass a negated/differenced mask (see the
+ # flags2names(0*...-...) call in Array) -- NOTE(review): confirm
+ if abs(flags) & getattr(wrap,flagname):
+ info.append(flagname)
+ return info
+
+class Intent:
+ """Builder for f2py intent flag combinations.
+
+ Attribute access chains intents: intent.in_.c.copy produces a new
+ Intent whose .flags is the OR of the wrap.F2PY_* constants
+ ('in_' maps to 'in'; 'optional' maps to F2PY_OPTIONAL).
+ """
+ def __init__(self,intent_list=[]):
+ # mutable default is safe here: it is copied, never mutated
+ self.intent_list = intent_list[:]
+ flags = 0
+ for i in intent_list:
+ if i=='optional':
+ flags |= wrap.F2PY_OPTIONAL
+ else:
+ flags |= getattr(wrap,'F2PY_INTENT_'+i.upper())
+ self.flags = flags
+ def __getattr__(self,name):
+ # any unknown attribute extends the intent list with that name
+ name = name.lower()
+ if name=='in_': name='in'
+ return self.__class__(self.intent_list+[name])
+ def __str__(self):
+ return 'intent(%s)' % (','.join(self.intent_list))
+ def __repr__(self):
+ return 'Intent(%r)' % (self.intent_list)
+ def is_intent(self,*names):
+ # True if every given name is present (superset allowed)
+ for name in names:
+ if name not in self.intent_list:
+ return False
+ return True
+ def is_intent_exact(self,*names):
+ # True if the intent list is exactly the given names (any order)
+ return len(self.intent_list)==len(names) and self.is_intent(*names)
+
+intent = Intent()
+
+class Type(object):
+ """Cached wrapper around a numpy scalar type name.
+
+ Exposes the type's number, dtype, element size and char (from
+ scipy.base.typeinfo) plus helpers listing types it can be cast
+ from and types with fewer/equal/more bits. Instances are
+ interned per upper-cased name via _type_cache.
+ """
+
+ _type_names = ['BOOL','BYTE','UBYTE','SHORT','USHORT','INT','UINT',
+ 'LONG','ULONG','LONGLONG','ULONGLONG',
+ 'FLOAT','DOUBLE','LONGDOUBLE','CFLOAT','CDOUBLE',
+ 'CLONGDOUBLE']
+ _type_cache = {}
+
+ # _cast_dict[NAME] lists the type names safely castable to NAME.
+ _cast_dict = {'BOOL':['BOOL']}
+ _cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE']
+ _cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE']
+ # NOTE(review): the two entries above are immediately overwritten
+ # below, dropping BOOL from BYTE/UBYTE -- confirm intentional.
+ _cast_dict['BYTE'] = ['BYTE']
+ _cast_dict['UBYTE'] = ['UBYTE']
+ _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE','SHORT']
+ _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE','USHORT']
+ _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT','INT']
+ _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT','UINT']
+
+ _cast_dict['LONG'] = _cast_dict['INT'] + ['LONG']
+ _cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG']
+
+ _cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG']
+ _cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG']
+
+ _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT','FLOAT']
+ _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT','FLOAT','DOUBLE']
+ _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + ['ULONG','FLOAT','DOUBLE','LONGDOUBLE']
+
+ _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
+ _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT','CDOUBLE']
+ _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + ['CFLOAT','CDOUBLE','CLONGDOUBLE']
+
+
+ def __new__(cls,name):
+ # Accept either a type name or a dtype; map a dtype back to its
+ # typeinfo name. NOTE(review): if the dtype is not found, name
+ # stays None and name.upper() below raises -- confirm inputs.
+ if isinstance(name,type):
+ dtype = name
+ name = None
+ for n,i in typeinfo.items():
+ if isinstance(i,tuple) and dtype is i[-1]:
+ name = n
+ break
+ obj = cls._type_cache.get(name.upper(),None)
+ if obj is not None:
+ return obj
+ obj = object.__new__(cls)
+ obj._init(name)
+ cls._type_cache[name.upper()] = obj
+ return obj
+
+ def _init(self,name):
+ # Populate NAME/type_num/dtype/elsize/dtypechar from typeinfo;
+ # bits (typeinfo[...][2]) are converted to bytes.
+ self.NAME = name.upper()
+ self.type_num = getattr(wrap,'PyArray_'+self.NAME)
+ assert_equal(self.type_num,typeinfo[self.NAME][1])
+ self.dtype = typeinfo[self.NAME][-1]
+ self.elsize = typeinfo[self.NAME][2] / 8
+ self.dtypechar = typeinfo[self.NAME][0]
+
+ def cast_types(self):
+ # Types that can be safely cast to this one (per _cast_dict).
+ return map(self.__class__,self._cast_dict[self.NAME])
+
+ def all_types(self):
+ return map(self.__class__,self._type_names)
+
+ def smaller_types(self):
+ # Types with strictly fewer bits than this one.
+ bits = typeinfo[self.NAME][3]
+ types = []
+ for name in self._type_names:
+ if typeinfo[name][3]<bits:
+ types.append(Type(name))
+ return types
+
+ def equal_types(self):
+ # Other types with exactly the same bit width.
+ bits = typeinfo[self.NAME][3]
+ types = []
+ for name in self._type_names:
+ if name==self.NAME: continue
+ if typeinfo[name][3]==bits:
+ types.append(Type(name))
+ return types
+
+ def larger_types(self):
+ # Types with strictly more bits than this one.
+ bits = typeinfo[self.NAME][3]
+ types = []
+ for name in self._type_names:
+ if typeinfo[name][3]>bits:
+ types.append(Type(name))
+ return types
+
+class Array:
+ """Test harness: call wrap.call (array_from_pyobj) with (type, dims,
+ intent, obj), build the expected array with scipy.base, and assert
+ that attributes, flags, element sizes and contents agree. The
+ asserts in __init__ are the actual checks of the test suite.
+ """
+ def __init__(self,typ,dims,intent,obj):
+ self.type = typ
+ self.dims = dims
+ self.intent = intent
+ self.obj_copy = copy.deepcopy(obj)
+ self.obj = obj
+
+ # arr.dtypechar may be different from typ.dtypechar
+ self.arr = wrap.call(typ.type_num,dims,intent.flags,obj)
+
+ self.arr_attr = wrap.array_attrs(self.arr)
+
+ # Layout checks: intent(c) must give a C-contiguous result,
+ # otherwise Fortran-contiguous (only meaningful for ndim > 1).
+ if len(dims)>1:
+ if self.intent.is_intent('c'):
+ assert intent.flags & wrap.F2PY_INTENT_C
+ assert not self.arr.flags['FORTRAN'],`self.arr.flags,obj.flags`
+ assert self.arr.flags['CONTIGUOUS']
+ assert not self.arr_attr[6] & wrap.FORTRAN
+ else:
+ assert not intent.flags & wrap.F2PY_INTENT_C
+ assert self.arr.flags['FORTRAN']
+ assert not self.arr.flags['CONTIGUOUS']
+ assert self.arr_attr[6] & wrap.FORTRAN
+
+ if obj is None:
+ self.pyarr = None
+ self.pyarr_attr = None
+ return
+
+ # Build the reference array the pure-Python way.
+ if intent.is_intent('cache'):
+ assert isinstance(obj,ndarray),`type(obj)`
+ self.pyarr = array(obj).reshape(*dims)
+
+ else:
+ self.pyarr = array(array(obj,
+ dtype = typ.dtypechar).reshape(*dims),
+ fortran=not self.intent.is_intent('c'))
+ assert self.pyarr.dtypechar==typ.dtypechar,\
+ `self.pyarr.dtypechar,typ.dtypechar`
+ assert self.pyarr.flags['OWNDATA']
+ self.pyarr_attr = wrap.array_attrs(self.pyarr)
+
+ if len(dims)>1:
+ if self.intent.is_intent('c'):
+ assert not self.pyarr.flags['FORTRAN']
+ assert self.pyarr.flags['CONTIGUOUS']
+ assert not self.pyarr_attr[6] & wrap.FORTRAN
+ else:
+ assert self.pyarr.flags['FORTRAN']
+ assert not self.pyarr.flags['CONTIGUOUS']
+ assert self.pyarr_attr[6] & wrap.FORTRAN
+
+
+ # Compare wrapped vs reference attribute tuples.
+ assert self.arr_attr[1]==self.pyarr_attr[1] # nd
+ assert self.arr_attr[2]==self.pyarr_attr[2] # dimensions
+ if self.arr_attr[1]<=1:
+ assert self.arr_attr[3]==self.pyarr_attr[3],\
+ `self.arr_attr[3],self.pyarr_attr[3],self.arr.tostring(),self.pyarr.tostring()` # strides
+ assert self.arr_attr[5][-2:]==self.pyarr_attr[5][-2:],\
+ `self.arr_attr[5],self.pyarr_attr[5]` # descr
+ assert self.arr_attr[6]==self.pyarr_attr[6],\
+ `self.arr_attr[6],self.pyarr_attr[6],flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]),flags2names(self.arr_attr[6]),intent` # flags
+
+ # intent(cache) only guarantees elements at least this large.
+ if intent.is_intent('cache'):
+ assert self.arr_attr[5][3]>=self.type.elsize,\
+ `self.arr_attr[5][3],self.type.elsize`
+ else:
+ assert self.arr_attr[5][3]==self.type.elsize,\
+ `self.arr_attr[5][3],self.type.elsize`
+ assert self.arr_equal(self.pyarr,self.arr)
+
+ if isinstance(self.obj,ndarray):
+ if typ.elsize==Type(obj.dtype).elsize:
+ if not intent.is_intent('copy') and self.arr_attr[1]<=1:
+ assert self.has_shared_memory()
+
+ def arr_equal(self,arr1,arr2):
+ # Elementwise equality with a shape pre-check.
+ if arr1.shape != arr2.shape:
+ return False
+ return alltrue(arr1==arr2)
+
+ def __str__(self):
+ return str(self.arr)
+
+ def has_shared_memory(self):
+ """Check that created array shares data with input array.
+ """
+ if self.obj is self.arr:
+ return True
+ if not isinstance(self.obj,ndarray):
+ return False
+ obj_attr = wrap.array_attrs(self.obj)
+ # attribute 0 is compared as the data pointer of both arrays
+ return obj_attr[0]==self.arr_attr[0]
+
+##################################################
+
+class test_intent(unittest.TestCase):
+ # Sanity checks for the Intent builder's chaining and queries.
+ def check_in_out(self):
+ assert_equal(str(intent.in_.out),'intent(in,out)')
+ assert intent.in_.c.is_intent('c')
+ assert not intent.in_.c.is_intent_exact('c')
+ assert intent.in_.c.is_intent_exact('c','in')
+ assert intent.in_.c.is_intent_exact('in','c')
+ assert not intent.in_.is_intent('c')
+
+class _test_shared_memory:
+ num2seq = [1,2]
+ num23seq = [[1,2,3],[4,5,6]]
+ def check_in_from_2seq(self):
+ a = self.array([2],intent.in_,self.num2seq)
+ assert not a.has_shared_memory()
+
+ def check_in_from_2casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num2seq,dtype=t.dtype)
+ a = self.array([len(self.num2seq)],intent.in_,obj)
+ if t.elsize==self.type.elsize:
+ assert a.has_shared_memory(),`self.type.dtype,t.dtype`
+ else:
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_inout_2seq(self):
+ obj = array(self.num2seq,dtype=self.type.dtype)
+ a = self.array([len(self.num2seq)],intent.inout,obj)
+ assert a.has_shared_memory()
+
+ try:
+ a = self.array([2],intent.in_.inout,self.num2seq)
+ except TypeError,msg:
+ if not str(msg).startswith('failed to initialize intent(inout|inplace|cache) array'):
+ raise
+ else:
+ raise SystemError,'intent(inout) should have failed on sequence'
+
+ def check_f_inout_23seq(self):
+ obj = array(self.num23seq,dtype=self.type.dtype,fortran=1)
+ shape = (len(self.num23seq),len(self.num23seq[0]))
+ a = self.array(shape,intent.in_.inout,obj)
+ assert a.has_shared_memory()
+
+ obj = array(self.num23seq,dtype=self.type.dtype,fortran=0)
+ shape = (len(self.num23seq),len(self.num23seq[0]))
+ try:
+ a = self.array(shape,intent.in_.inout,obj)
+ except ValueError,msg:
+ if not str(msg).startswith('failed to initialize intent(inout) array'):
+ raise
+ else:
+ raise SystemError,'intent(inout) should have failed on improper array'
+
+ def check_c_inout_23seq(self):
+ obj = array(self.num23seq,dtype=self.type.dtype)
+ shape = (len(self.num23seq),len(self.num23seq[0]))
+ a = self.array(shape,intent.in_.c.inout,obj)
+ assert a.has_shared_memory()
+
+ def check_in_copy_from_2casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num2seq,dtype=t.dtype)
+ a = self.array([len(self.num2seq)],intent.in_.copy,obj)
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_c_in_from_23seq(self):
+ a = self.array([len(self.num23seq),len(self.num23seq[0])],
+ intent.in_,self.num23seq)
+ assert not a.has_shared_memory()
+
+ def check_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num23seq,dtype=t.dtype)
+ a = self.array([len(self.num23seq),len(self.num23seq[0])],
+ intent.in_,obj)
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_f_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num23seq,dtype=t.dtype,fortran=1)
+ a = self.array([len(self.num23seq),len(self.num23seq[0])],
+ intent.in_,obj)
+ if t.elsize==self.type.elsize:
+ assert a.has_shared_memory(),`t.dtype`
+ else:
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_c_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num23seq,dtype=t.dtype)
+ a = self.array([len(self.num23seq),len(self.num23seq[0])],
+ intent.in_.c,obj)
+ if t.elsize==self.type.elsize:
+ assert a.has_shared_memory(),`t.dtype`
+ else:
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_f_copy_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num23seq,dtype=t.dtype,fortran=1)
+ a = self.array([len(self.num23seq),len(self.num23seq[0])],
+ intent.in_.copy,obj)
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_c_copy_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = array(self.num23seq,dtype=t.dtype)
+ a = self.array([len(self.num23seq),len(self.num23seq[0])],
+ intent.in_.c.copy,obj)
+ assert not a.has_shared_memory(),`t.dtype`
+
+ def check_in_cache_from_2casttype(self):
+ for t in self.type.all_types():
+ if t.elsize != self.type.elsize:
+ continue
+ obj = array(self.num2seq,dtype=t.dtype)
+ shape = (len(self.num2seq),)
+ a = self.array(shape,intent.in_.c.cache,obj)
+ assert a.has_shared_memory(),`t.dtype`
+
+ a = self.array(shape,intent.in_.cache,obj)
+ assert a.has_shared_memory(),`t.dtype`
+
+ obj = array(self.num2seq,dtype=t.dtype,fortran=1)
+ a = self.array(shape,intent.in_.c.cache,obj)
+ assert a.has_shared_memory(),`t.dtype`
+
+ a = self.array(shape,intent.in_.cache,obj)
+ assert a.has_shared_memory(),`t.dtype`
+
+ try:
+ a = self.array(shape,intent.in_.cache,obj[::-1])
+ except ValueError,msg:
+ if not str(msg).startswith('failed to initialize intent(cache) array'):
+ raise
+ else:
+ raise SystemError,'intent(cache) should have failed on multisegmented array'
+ def check_in_cache_from_2casttype_failure(self):
+ for t in self.type.all_types():
+ if t.elsize >= self.type.elsize:
+ continue
+ obj = array(self.num2seq,dtype=t.dtype)
+ shape = (len(self.num2seq),)
+ try:
+ a = self.array(shape,intent.in_.cache,obj)
+ except ValueError,msg:
+ if not str(msg).startswith('failed to initialize intent(cache) array'):
+ raise
+ else:
+ raise SystemError,'intent(cache) should have failed on smaller array'
+
+ def check_cache_hidden(self):
+ shape = (2,)
+ a = self.array(shape,intent.cache.hide,None)
+ assert a.arr.shape==shape
+
+ shape = (2,3)
+ a = self.array(shape,intent.cache.hide,None)
+ assert a.arr.shape==shape
+
+ shape = (-1,3)
+ try:
+ a = self.array(shape,intent.cache.hide,None)
+ except ValueError,msg:
+ if not str(msg).startswith('failed to create intent(cache|hide)|optional array'):
+ raise
+ else:
+ raise SystemError,'intent(cache) should have failed on undefined dimensions'
+
+ def check_hidden(self):
+ shape = (2,)
+ a = self.array(shape,intent.hide,None)
+ assert a.arr.shape==shape
+ assert a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))
+
+ shape = (2,3)
+ a = self.array(shape,intent.hide,None)
+ assert a.arr.shape==shape
+ assert a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))
+ assert a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']
+
+ shape = (2,3)
+ a = self.array(shape,intent.c.hide,None)
+ assert a.arr.shape==shape
+ assert a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))
+ assert not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']
+
+ shape = (-1,3)
+ try:
+ a = self.array(shape,intent.hide,None)
+ except ValueError,msg:
+ if not str(msg).startswith('failed to create intent(cache|hide)|optional array'):
+ raise
+ else:
+ raise SystemError,'intent(hide) should have failed on undefined dimensions'
+
+ def check_optional_none(self):
+ shape = (2,)
+ a = self.array(shape,intent.optional,None)
+ assert a.arr.shape==shape
+ assert a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))
+
+ shape = (2,3)
+ a = self.array(shape,intent.optional,None)
+ assert a.arr.shape==shape
+ assert a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))
+ assert a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']
+
+ shape = (2,3)
+ a = self.array(shape,intent.c.optional,None)
+ assert a.arr.shape==shape
+ assert a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))
+ assert not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']
+
+ def check_optional_from_2seq(self):
+ obj = self.num2seq
+ shape = (len(obj),)
+ a = self.array(shape,intent.optional,obj)
+ assert a.arr.shape==shape
+ assert not a.has_shared_memory()
+
+ def check_optional_from_23seq(self):
+ obj = self.num23seq
+ shape = (len(obj),len(obj[0]))
+ a = self.array(shape,intent.optional,obj)
+ assert a.arr.shape==shape
+ assert not a.has_shared_memory()
+
+ a = self.array(shape,intent.optional.c,obj)
+ assert a.arr.shape==shape
+ assert not a.has_shared_memory()
+
+ def check_inplace(self):
+ obj = array(self.num23seq,dtype=self.type.dtype)
+ assert not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']
+ shape = obj.shape
+ a = self.array(shape,intent.inplace,obj)
+ assert obj[1][2]==a.arr[1][2],`obj,a.arr`
+ a.arr[1][2]=54
+ assert obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`
+ assert a.arr is obj
+ assert obj.flags['FORTRAN'] # obj attributes are changed inplace!
+ assert not obj.flags['CONTIGUOUS']
+
+ def check_inplace_from_casttype(self):
+ for t in self.type.cast_types():
+ if t is self.type:
+ continue
+ obj = array(self.num23seq,dtype=t.dtype)
+ assert obj.dtype==t.dtype
+ assert obj.dtype is not self.type.dtype
+ assert not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']
+ shape = obj.shape
+ a = self.array(shape,intent.inplace,obj)
+ assert obj[1][2]==a.arr[1][2],`obj,a.arr`
+ a.arr[1][2]=54
+ assert obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`
+ assert a.arr is obj
+ assert obj.flags['FORTRAN'] # obj attributes are changed inplace!
+ assert not obj.flags['CONTIGUOUS']
+ assert obj.dtype is self.type.dtype # obj type is changed inplace!
+
+for t in Type._type_names:
+ exec '''\
+class test_%s_gen(unittest.TestCase,
+ _test_shared_memory
+ ):
+ type = Type(%r)
+ array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
+''' % (t,t,t)
+
+if __name__ == "__main__":
+ ScipyTest().run()
diff --git a/numpy/f2py/tests/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/array_from_pyobj/wrapmodule.c
new file mode 100644
index 000000000..0d9e41f1c
--- /dev/null
+++ b/numpy/f2py/tests/array_from_pyobj/wrapmodule.c
@@ -0,0 +1,194 @@
+/* File: wrapmodule.c
+ * This file is auto-generated with f2py (version:2_1330).
+ * Hand edited by Pearu.
+ * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
+ * written by Pearu Peterson <pearu@cens.ioc.ee>.
+ * See http://cens.ioc.ee/projects/f2py2e/
+ * Generation date: Fri Oct 21 22:41:12 2005
+ * $Revision:$
+ * $Date:$
+ * Do not edit this file directly unless you know what you are doing!!!
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*********************** See f2py2e/cfuncs.py: includes ***********************/
+#include "Python.h"
+#include "fortranobject.h"
+#include <math.h>
+
+static PyObject *wrap_error;
+static PyObject *wrap_module;
+
+/************************************ call ************************************/
+static char doc_f2py_rout_wrap_call[] = "\
+Function signature:\n\
+ arr = call(type_num,dims,intent,obj)\n\
+Required arguments:\n"
+" type_num : input int\n"
+" dims : input int-sequence\n"
+" intent : input int\n"
+" obj : input python object\n"
+"Return objects:\n"
+" arr : array";
+static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
+ PyObject *capi_args) {
+ PyObject * volatile capi_buildvalue = NULL;
+ int type_num = 0;
+ intp *dims = NULL;
+ PyObject *dims_capi = Py_None;
+ int rank = 0;
+ int intent = 0;
+ PyArrayObject *capi_arr_tmp = NULL;
+ PyObject *arr_capi = Py_None;
+ int i;
+
+ if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\
+ &type_num,&dims_capi,&intent,&arr_capi))
+ return NULL;
+ rank = PySequence_Length(dims_capi);
+ dims = malloc(rank*sizeof(intp));
+ for (i=0;i<rank;++i)
+ dims[i] = (intp)PyInt_AsLong(PySequence_GetItem(dims_capi,i));
+
+ capi_arr_tmp = array_from_pyobj(type_num,dims,rank,intent|F2PY_INTENT_OUT,arr_capi);
+ if (capi_arr_tmp == NULL)
+ return NULL;
+ capi_buildvalue = Py_BuildValue("N",capi_arr_tmp);
+ free(dims);
+ return capi_buildvalue;
+}
+
+static char doc_f2py_rout_wrap_attrs[] = "\
+Function signature:\n\
+ arr = array_attrs(arr)\n\
+Required arguments:\n"
+" arr : input array object\n"
+"Return objects:\n"
+" data : data address in hex\n"
+" nd : int\n"
+" dimensions : tuple\n"
+" strides : tuple\n"
+" base : python object\n"
+" (kind,type,type_num,elsize,alignment) : 4-tuple\n"
+" flags : int\n"
+" itemsize : int\n"
+;
+static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
+ PyObject *capi_args) {
+ PyObject *arr_capi = Py_None;
+ PyArrayObject *arr = NULL;
+ PyObject *dimensions = NULL;
+ PyObject *strides = NULL;
+ char s[100];
+ int i;
+ memset(s,0,100*sizeof(char));
+ if (!PyArg_ParseTuple(capi_args,"O!|:wrap.attrs",
+ &PyArray_Type,&arr_capi))
+ return NULL;
+ arr = (PyArrayObject *)arr_capi;
+ sprintf(s,"%p",arr->data);
+ dimensions = PyTuple_New(arr->nd);
+ strides = PyTuple_New(arr->nd);
+ for (i=0;i<arr->nd;++i) {
+ PyTuple_SetItem(dimensions,i,PyInt_FromLong(arr->dimensions[i]));
+ PyTuple_SetItem(strides,i,PyInt_FromLong(arr->strides[i]));
+ }
+ return Py_BuildValue("siOOO(cciii)ii",s,arr->nd,
+ dimensions,strides,
+ (arr->base==NULL?Py_None:arr->base),
+ arr->descr->kind,
+ arr->descr->type,
+ arr->descr->type_num,
+ arr->descr->elsize,
+ arr->descr->alignment,
+ arr->flags,
+ arr->itemsize);
+}
+
+static PyMethodDef f2py_module_methods[] = {
+
+ {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call},
+ {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs},
+ {NULL,NULL}
+};
+
+DL_EXPORT(void) initwrap(void) {
+ PyObject *m,*d, *s;
+ m = wrap_module = Py_InitModule("wrap", f2py_module_methods);
+ PyFortran_Type.ob_type = &PyType_Type;
+ import_array();
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module wrap (failed to import scipy.base)");
+ d = PyModule_GetDict(m);
+ s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
+" arr = call(type_num,dims,intent,obj)\n"
+".");
+ PyDict_SetItemString(d, "__doc__", s);
+ wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
+ Py_DECREF(s);
+ PyDict_SetItemString(d, "F2PY_INTENT_IN", PyInt_FromLong(F2PY_INTENT_IN));
+ PyDict_SetItemString(d, "F2PY_INTENT_INOUT", PyInt_FromLong(F2PY_INTENT_INOUT));
+ PyDict_SetItemString(d, "F2PY_INTENT_OUT", PyInt_FromLong(F2PY_INTENT_OUT));
+ PyDict_SetItemString(d, "F2PY_INTENT_HIDE", PyInt_FromLong(F2PY_INTENT_HIDE));
+ PyDict_SetItemString(d, "F2PY_INTENT_CACHE", PyInt_FromLong(F2PY_INTENT_CACHE));
+ PyDict_SetItemString(d, "F2PY_INTENT_COPY", PyInt_FromLong(F2PY_INTENT_COPY));
+ PyDict_SetItemString(d, "F2PY_INTENT_C", PyInt_FromLong(F2PY_INTENT_C));
+ PyDict_SetItemString(d, "F2PY_OPTIONAL", PyInt_FromLong(F2PY_OPTIONAL));
+ PyDict_SetItemString(d, "F2PY_INTENT_INPLACE", PyInt_FromLong(F2PY_INTENT_INPLACE));
+ PyDict_SetItemString(d, "PyArray_BOOL", PyInt_FromLong(PyArray_BOOL));
+ PyDict_SetItemString(d, "PyArray_BYTE", PyInt_FromLong(PyArray_BYTE));
+ PyDict_SetItemString(d, "PyArray_UBYTE", PyInt_FromLong(PyArray_UBYTE));
+ PyDict_SetItemString(d, "PyArray_SHORT", PyInt_FromLong(PyArray_SHORT));
+ PyDict_SetItemString(d, "PyArray_USHORT", PyInt_FromLong(PyArray_USHORT));
+ PyDict_SetItemString(d, "PyArray_INT", PyInt_FromLong(PyArray_INT));
+ PyDict_SetItemString(d, "PyArray_UINT", PyInt_FromLong(PyArray_UINT));
+ PyDict_SetItemString(d, "PyArray_INTP", PyInt_FromLong(PyArray_INTP));
+ PyDict_SetItemString(d, "PyArray_UINTP", PyInt_FromLong(PyArray_UINTP));
+ PyDict_SetItemString(d, "PyArray_LONG", PyInt_FromLong(PyArray_LONG));
+ PyDict_SetItemString(d, "PyArray_ULONG", PyInt_FromLong(PyArray_ULONG));
+ PyDict_SetItemString(d, "PyArray_LONGLONG", PyInt_FromLong(PyArray_LONGLONG));
+ PyDict_SetItemString(d, "PyArray_ULONGLONG", PyInt_FromLong(PyArray_ULONGLONG));
+ PyDict_SetItemString(d, "PyArray_FLOAT", PyInt_FromLong(PyArray_FLOAT));
+ PyDict_SetItemString(d, "PyArray_DOUBLE", PyInt_FromLong(PyArray_DOUBLE));
+ PyDict_SetItemString(d, "PyArray_LONGDOUBLE", PyInt_FromLong(PyArray_LONGDOUBLE));
+ PyDict_SetItemString(d, "PyArray_CFLOAT", PyInt_FromLong(PyArray_CFLOAT));
+ PyDict_SetItemString(d, "PyArray_CDOUBLE", PyInt_FromLong(PyArray_CDOUBLE));
+ PyDict_SetItemString(d, "PyArray_CLONGDOUBLE", PyInt_FromLong(PyArray_CLONGDOUBLE));
+ PyDict_SetItemString(d, "PyArray_OBJECT", PyInt_FromLong(PyArray_OBJECT));
+ PyDict_SetItemString(d, "PyArray_STRING", PyInt_FromLong(PyArray_STRING));
+ PyDict_SetItemString(d, "PyArray_UNICODE", PyInt_FromLong(PyArray_UNICODE));
+ PyDict_SetItemString(d, "PyArray_VOID", PyInt_FromLong(PyArray_VOID));
+ PyDict_SetItemString(d, "PyArray_NTYPES", PyInt_FromLong(PyArray_NTYPES));
+ PyDict_SetItemString(d, "PyArray_NOTYPE", PyInt_FromLong(PyArray_NOTYPE));
+ PyDict_SetItemString(d, "PyArray_USERDEF", PyInt_FromLong(PyArray_USERDEF));
+
+ PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(CONTIGUOUS));
+ PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(FORTRAN));
+ PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(OWNDATA));
+ PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(ENSURECOPY));
+ PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(ENSUREARRAY));
+ PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(ALIGNED));
+ PyDict_SetItemString(d, "NOTSWAPPED", PyInt_FromLong(NOTSWAPPED));
+ PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(WRITEABLE));
+ PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(UPDATEIFCOPY));
+
+ PyDict_SetItemString(d, "BEHAVED_FLAGS", PyInt_FromLong(BEHAVED_FLAGS));
+ PyDict_SetItemString(d, "BEHAVED_FLAGS_RO", PyInt_FromLong(BEHAVED_FLAGS_RO));
+ PyDict_SetItemString(d, "CARRAY_FLAGS", PyInt_FromLong(CARRAY_FLAGS));
+ PyDict_SetItemString(d, "FARRAY_FLAGS", PyInt_FromLong(FARRAY_FLAGS));
+ PyDict_SetItemString(d, "DEFAULT_FLAGS", PyInt_FromLong(DEFAULT_FLAGS));
+ PyDict_SetItemString(d, "UPDATE_ALL_FLAGS", PyInt_FromLong(UPDATE_ALL_FLAGS));
+
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module wrap");
+
+#ifdef F2PY_REPORT_ATEXIT
+ on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call");
+#endif
+
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/numpy/f2py/tests/c/return_real.py b/numpy/f2py/tests/c/return_real.py
new file mode 100644
index 000000000..7d35b28cf
--- /dev/null
+++ b/numpy/f2py/tests/c/return_real.py
@@ -0,0 +1,108 @@
+__usage__ = """
+Run:
+ python return_real.py [<f2py options>]
+Examples:
+ python return_real.py --fcompiler=Gnu --no-wrap-functions
+ python return_real.py --quiet
+"""
+
+
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ try:
+ import c_ext_return_real
+ except ImportError:
+ assert not f2py2e.compile('''\
+python module c_ext_return_real
+usercode \'\'\'
+float t4(float value) { return value; }
+void s4(float *t4, float value) { *t4 = value; }
+double t8(double value) { return value; }
+void s8(double *t8, double value) { *t8 = value; }
+\'\'\'
+interface
+ function t4(value)
+ real*4 intent(c) :: t4,value
+ end
+ function t8(value)
+ real*8 intent(c) :: t8,value
+ end
+ subroutine s4(t4,value)
+ intent(c) s4
+ real*4 intent(out) :: t4
+ real*4 intent(c) :: value
+ end
+ subroutine s8(t8,value)
+ intent(c) s8
+ real*8 intent(out) :: t8
+ real*8 intent(c) :: value
+ end
+end interface
+end python module c_ext_return_real
+''','c_ext_return_real',f2py_opts,source_fn='c_ret_real.pyf')
+
+ from c_ext_return_real import t4,t8,s4,s8
+ test_functions = [t4,t8,s4,s8]
+ return test_functions
+
+def runtest(t):
+ import sys
+ if t.__doc__.split()[0] in ['t0','t4','s0','s4']:
+ err = 1e-5
+ else:
+ err = 0.0
+ assert abs(t(234)-234.0)<=err
+ assert abs(t(234.6)-234.6)<=err
+ assert abs(t(234l)-234.0)<=err
+ if sys.version[:3]<'2.3':
+ assert abs(t(234.6+3j)-234.6)<=err
+ assert abs(t('234')-234)<=err
+ assert abs(t('234.6')-234.6)<=err
+ assert abs(t(-234)+234)<=err
+ assert abs(t([234])-234)<=err
+ assert abs(t((234,))-234.)<=err
+ assert abs(t(array(234))-234.)<=err
+ assert abs(t(array([234]))-234.)<=err
+ assert abs(t(array([[234]]))-234.)<=err
+ assert abs(t(array([234],'1'))+22)<=err
+ assert abs(t(array([234],'s'))-234.)<=err
+ assert abs(t(array([234],'i'))-234.)<=err
+ assert abs(t(array([234],'l'))-234.)<=err
+ assert abs(t(array([234],'b'))-234.)<=err
+ assert abs(t(array([234],'f'))-234.)<=err
+ assert abs(t(array([234],'d'))-234.)<=err
+ if sys.version[:3]<'2.3':
+ assert abs(t(array([234+3j],'F'))-234.)<=err
+ assert abs(t(array([234],'D'))-234.)<=err
+ if t.__doc__.split()[0] in ['t0','t4','s0','s4']:
+ assert t(1e200)==t(1e300) # inf
+
+ try: raise RuntimeError,`t(array([234],'c'))`
+ except ValueError: pass
+ try: raise RuntimeError,`t('abc')`
+ except ValueError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ try:
+ try: raise RuntimeError,`t(10l**400)`
+ except OverflowError: pass
+ except RuntimeError:
+ r = t(10l**400); assert `r` in ['inf','Infinity'],`r`
+
+if __name__=='__main__':
+ #import libwadpy
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/f77/callback.py b/numpy/f2py/tests/f77/callback.py
new file mode 100644
index 000000000..672504bc7
--- /dev/null
+++ b/numpy/f2py/tests/f77/callback.py
@@ -0,0 +1,99 @@
+
+__usage__ = """
+Run:
+ python callback.py [<f2py options>]
+Examples:
+ python callback.py --fcompiler=Gnu --no-wrap-functions
+ python callback.py --quiet
+"""
+
+import f2py2e
+import math
+import sys
+from Numeric import array
+
+def build(f2py_opts):
+ try:
+ import f77_ext_callback
+ except ImportError:
+ assert not f2py2e.compile('''\
+ subroutine t(fun,a)
+ integer a
+cf2py intent(out) a
+ external fun
+ call fun(a)
+ end
+
+ subroutine func(a)
+cf2py intent(in,out) a
+ integer a
+ a = a + 11
+ end
+
+ subroutine func0(a)
+cf2py intent(out) a
+ integer a
+ a = 11
+ end
+
+ subroutine t2(a)
+cf2py intent(callback) fun
+ integer a
+cf2py intent(out) a
+ external fun
+ call fun(a)
+ end
+
+''','f77_ext_callback',f2py_opts,source_fn='f77_callback.f')
+
+ from f77_ext_callback import t,t2
+ test_functions = [t,t2]
+ return test_functions
+
+def runtest(t):
+ r = t(lambda : 4)
+ assert r==4,`r`
+ r = t(lambda a:5,fun_extra_args=(6,))
+ assert r==5,`r`
+ r = t(lambda a:a,fun_extra_args=(6,))
+ assert r==6,`r`
+ r = t(lambda a:5+a,fun_extra_args=(7,))
+ assert r==12,`r`
+ if sys.version[:3]>='2.3':
+ r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,))
+ assert r==180,`r`
+ r = t(math.degrees,fun_extra_args=(math.pi,))
+ assert r==180,`r`
+ from f77_ext_callback import func,func0
+ r = t(func,fun_extra_args=(6,))
+ assert r==17,`r`
+ r = t(func0)
+ assert r==11,`r`
+ r = t(func0._cpointer)
+ assert r==11,`r`
+ class A:
+ def __call__(self):
+ return 7
+ def mth(self):
+ return 9
+ a = A()
+ r = t(a)
+ assert r==7,`r`
+ r = t(a.mth)
+ assert r==9,`r`
+
+if __name__=='__main__':
+ #import libwadpy
+ status = 1
+ try:
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
+ status = 0
+ finally:
+ if status:
+ print '*'*20
+ print 'Running f2py2e.diagnose'
+ import f2py2e.diagnose
+ f2py2e.diagnose.run()
diff --git a/numpy/f2py/tests/f77/return_character.py b/numpy/f2py/tests/f77/return_character.py
new file mode 100644
index 000000000..3361c11f5
--- /dev/null
+++ b/numpy/f2py/tests/f77/return_character.py
@@ -0,0 +1,100 @@
+
+__usage__ = """
+Run:
+ python return_character.py [<f2py options>]
+Examples:
+ python return_character.py --fcompiler=Gnu --no-wrap-functions
+ python return_character.py --quiet
+"""
+
+import sys
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ try:
+ import f77_ext_return_character
+ except ImportError:
+ assert not f2py2e.compile('''\
+ function t0(value)
+ character value
+ character t0
+ t0 = value
+ end
+ function t1(value)
+ character*1 value
+ character*1 t1
+ t1 = value
+ end
+ function t5(value)
+ character*5 value
+ character*5 t5
+ t5 = value
+ end
+ function ts(value)
+ character*(*) value
+ character*(*) ts
+ ts = value
+ end
+
+ subroutine s0(t0,value)
+ character value
+ character t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s1(t1,value)
+ character*1 value
+ character*1 t1
+cf2py intent(out) t1
+ t1 = value
+ end
+ subroutine s5(t5,value)
+ character*5 value
+ character*5 t5
+cf2py intent(out) t5
+ t5 = value
+ end
+ subroutine ss(ts,value)
+ character*(*) value
+ character*10 ts
+cf2py intent(out) ts
+ ts = value
+ end
+''','f77_ext_return_character',f2py_opts,source_fn='f77_ret_char.f')
+
+ from f77_ext_return_character import t0,t1,t5,s0,s1,s5,ss
+ test_functions = [t0,t1,t5,s0,s1,s5,ss]
+ if sys.platform!='win32': # this is actually a compiler-dependent case
+ from f77_ext_return_character import ts
+ test_functions.append(ts)
+
+ return test_functions
+
+def runtest(t):
+ tname = t.__doc__.split()[0]
+ if tname in ['t0','t1','s0','s1']:
+ assert t(23)=='2'
+ r = t('ab');assert r=='a',`r`
+ r = t(array('ab'));assert r=='a',`r`
+ r = t(array(77,'1'));assert r=='M',`r`
+ try: raise RuntimeError,`t(array([77,87]))`
+ except ValueError: pass
+ try: raise RuntimeError,`t(array(77))`
+ except ValueError: pass
+ elif tname in ['ts','ss']:
+ assert t(23)=='23 ',`t(23)`
+ assert t('123456789abcdef')=='123456789a'
+ elif tname in ['t5','s5']:
+ assert t(23)=='23 ',`t(23)`
+ assert t('ab')=='ab ',`t('ab')`
+ assert t('123456789abcdef')=='12345'
+ else:
+ raise NotImplementedError
+
+if __name__=='__main__':
+ #import libwadpy
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/f77/return_complex.py b/numpy/f2py/tests/f77/return_complex.py
new file mode 100644
index 000000000..39743a9f6
--- /dev/null
+++ b/numpy/f2py/tests/f77/return_complex.py
@@ -0,0 +1,125 @@
+__usage__ = """
+Run:
+ python return_complex.py [<f2py options>]
+Examples:
+ python return_complex.py --fcompiler=Gnu --no-wrap-functions
+ python return_complex.py --quiet
+"""
+
+
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ try:
+ import f77_ext_return_complex
+ except ImportError:
+ assert not f2py2e.compile('''\
+ function t0(value)
+ complex value
+ complex t0
+ t0 = value
+ end
+ function t8(value)
+ complex*8 value
+ complex*8 t8
+ t8 = value
+ end
+ function t16(value)
+ complex*16 value
+ complex*16 t16
+ t16 = value
+ end
+ function td(value)
+ double complex value
+ double complex td
+ td = value
+ end
+
+ subroutine s0(t0,value)
+ complex value
+ complex t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s8(t8,value)
+ complex*8 value
+ complex*8 t8
+cf2py intent(out) t8
+ t8 = value
+ end
+ subroutine s16(t16,value)
+ complex*16 value
+ complex*16 t16
+cf2py intent(out) t16
+ t16 = value
+ end
+ subroutine sd(td,value)
+ double complex value
+ double complex td
+cf2py intent(out) td
+ td = value
+ end
+''','f77_ext_return_complex',f2py_opts)
+
+ from f77_ext_return_complex import t0,t8,t16,td,s0,s8,s16,sd
+ test_functions = [t0,t8,t16,td,s0,s8,s16,sd]
+ return test_functions
+
+
+def runtest(t):
+ tname = t.__doc__.split()[0]
+ if tname in ['t0','t8','s0','s8']:
+ err = 1e-5
+ else:
+ err = 0.0
+ assert abs(t(234j)-234.0j)<=err
+ assert abs(t(234.6)-234.6)<=err
+ assert abs(t(234l)-234.0)<=err
+ assert abs(t(234.6+3j)-(234.6+3j))<=err
+ #assert abs(t('234')-234.)<=err
+ #assert abs(t('234.6')-234.6)<=err
+ assert abs(t(-234)+234.)<=err
+ assert abs(t([234])-234.)<=err
+ assert abs(t((234,))-234.)<=err
+ assert abs(t(array(234))-234.)<=err
+ assert abs(t(array(23+4j,'F'))-(23+4j))<=err
+ assert abs(t(array([234]))-234.)<=err
+ assert abs(t(array([[234]]))-234.)<=err
+ assert abs(t(array([234],'1'))+22.)<=err
+ assert abs(t(array([234],'s'))-234.)<=err
+ assert abs(t(array([234],'i'))-234.)<=err
+ assert abs(t(array([234],'l'))-234.)<=err
+ assert abs(t(array([234],'b'))-234.)<=err
+ assert abs(t(array([234],'f'))-234.)<=err
+ assert abs(t(array([234],'d'))-234.)<=err
+ assert abs(t(array([234+3j],'F'))-(234+3j))<=err
+ assert abs(t(array([234],'D'))-234.)<=err
+
+ try: raise RuntimeError,`t(array([234],'c'))`
+ except TypeError: pass
+ try: raise RuntimeError,`t('abc')`
+ except TypeError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ try:
+ try: raise RuntimeError,`t(10l**400)`
+ except OverflowError: pass
+ except RuntimeError:
+ r = t(10l**400); assert `r` in ['(inf+0j)','(Infinity+0j)'],`r`
+
+if __name__=='__main__':
+ #import libwadpy
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/f77/return_integer.py b/numpy/f2py/tests/f77/return_integer.py
new file mode 100644
index 000000000..f50ab52da
--- /dev/null
+++ b/numpy/f2py/tests/f77/return_integer.py
@@ -0,0 +1,148 @@
+
+__usage__ = """
+Run:
+ python return_integer.py [<f2py options>]
+Examples:
+ python return_integer.py --fcompiler=Gnu --no-wrap-functions
+ python return_integer.py --quiet
+"""
+
+import scipy.f2py as f2py2e
+from scipy.base import array
+
+def build(f2py_opts):
+ try:
+ import f77_ext_return_integer
+ except ImportError:
+ assert not f2py2e.compile('''\
+ function t0(value)
+ integer value
+ integer t0
+ t0 = value
+ end
+ function t1(value)
+ integer*1 value
+ integer*1 t1
+ t1 = value
+ end
+ function t2(value)
+ integer*2 value
+ integer*2 t2
+ t2 = value
+ end
+ function t4(value)
+ integer*4 value
+ integer*4 t4
+ t4 = value
+ end
+ function t8(value)
+ integer*8 value
+ integer*8 t8
+ t8 = value
+ end
+
+ subroutine s0(t0,value)
+ integer value
+ integer t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s1(t1,value)
+ integer*1 value
+ integer*1 t1
+cf2py intent(out) t1
+ t1 = value
+ end
+ subroutine s2(t2,value)
+ integer*2 value
+ integer*2 t2
+cf2py intent(out) t2
+ t2 = value
+ end
+ subroutine s4(t4,value)
+ integer*4 value
+ integer*4 t4
+cf2py intent(out) t4
+ t4 = value
+ end
+ subroutine s8(t8,value)
+ integer*8 value
+ integer*8 t8
+cf2py intent(out) t8
+ t8 = value
+ end
+
+''','f77_ext_return_integer',f2py_opts,source_fn='f77_ret_int.f')
+
+ from f77_ext_return_integer import t0,t1,t2,t4,t8,s0,s1,s2,s4,s8
+ test_functions = [t0,t1,t2,t4,t8,s0,s1,s2,s4,s8]
+ return test_functions
+
+def runtest(t):
+ import sys
+ assert t(123)==123,`t(123)`
+ assert t(123.6)==123
+ assert t(123l)==123
+ if sys.version[:3]<'2.3':
+ assert t(123.6+3j)==123
+ assert t('123')==123
+ assert t(-123)==-123
+ assert t([123])==123
+ assert t((123,))==123
+ assert t(array(123))==123
+ assert t(array([123]))==123
+ assert t(array([[123]]))==123
+ assert t(array([123],'b'))==123
+ assert t(array([123],'h'))==123
+ assert t(array([123],'i'))==123
+ assert t(array([123],'l'))==123
+ assert t(array([123],'B'))==123
+ assert t(array([123],'f'))==123
+ assert t(array([123],'d'))==123
+ if sys.version[:3]<'2.3':
+ assert t(array([123+3j],'F'))==123
+ assert t(array([123],'D'))==123
+
+
+ try: raise RuntimeError,`t(array([123],'c'))`
+ except ValueError: pass
+ try: raise RuntimeError,`t('abc')`
+ except ValueError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ if t.__doc__.split()[0] in ['t8','s8']:
+ try: raise RuntimeError,`t(100000000000000000000000l)`
+ except OverflowError: pass
+ try: raise RuntimeError,`t(10000000011111111111111.23)`
+ except OverflowError: pass
+ else:
+ if sys.version[:3]<'2.3':
+ try: raise RuntimeError,`t(10000000000000l)`
+ except OverflowError: pass
+ try: raise RuntimeError,`t(10000000000.23)`
+ except OverflowError: pass
+
+if __name__=='__main__':
+ #import libwadpy
+ status = 1
+ try:
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
+ status = 0
+ finally:
+ if status:
+ print '*'*20
+ print 'Running f2py2e.diagnose'
+ import scipy.f2py.diagnose as diagnose
+ #diagnose.run()
diff --git a/numpy/f2py/tests/f77/return_logical.py b/numpy/f2py/tests/f77/return_logical.py
new file mode 100644
index 000000000..e252e03a6
--- /dev/null
+++ b/numpy/f2py/tests/f77/return_logical.py
@@ -0,0 +1,135 @@
+
+__usage__ = """
+Run:
+ python return_logical.py [<f2py options>]
+Examples:
+ python return_logical.py --fcompiler=Gnu --no-wrap-functions
+ python return_logical.py --quiet
+"""
+
+import f2py2e
+from Numeric import array
+try: True
+except NameError:
+ True = 1
+ False = 0
+
+def build(f2py_opts):
+ try:
+ import f77_ext_return_logical
+ except ImportError:
+ assert not f2py2e.compile('''\
+ function t0(value)
+ logical value
+ logical t0
+ t0 = value
+ end
+ function t1(value)
+ logical*1 value
+ logical*1 t1
+ t1 = value
+ end
+ function t2(value)
+ logical*2 value
+ logical*2 t2
+ t2 = value
+ end
+ function t4(value)
+ logical*4 value
+ logical*4 t4
+ t4 = value
+ end
+c function t8(value)
+c logical*8 value
+c logical*8 t8
+c t8 = value
+c end
+
+ subroutine s0(t0,value)
+ logical value
+ logical t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s1(t1,value)
+ logical*1 value
+ logical*1 t1
+cf2py intent(out) t1
+ t1 = value
+ end
+ subroutine s2(t2,value)
+ logical*2 value
+ logical*2 t2
+cf2py intent(out) t2
+ t2 = value
+ end
+ subroutine s4(t4,value)
+ logical*4 value
+ logical*4 t4
+cf2py intent(out) t4
+ t4 = value
+ end
+c subroutine s8(t8,value)
+c logical*8 value
+c logical*8 t8
+cf2py intent(out) t8
+c t8 = value
+c end
+''','f77_ext_return_logical',f2py_opts)
+
+ #from f77_ext_return_logical import t0,t1,t2,t4,t8,s0,s1,s2,s4,s8
+ #test_functions = [t0,t1,t2,t4,t8,s0,s1,s2,s4,s8]
+ from f77_ext_return_logical import t0,t1,t2,t4,s0,s1,s2,s4
+ test_functions = [t0,t1,t2,t4,s0,s1,s2,s4]
+ return test_functions
+
+def runtest(t):
+ assert t(True)==1,`t(True)`
+ assert t(False)==0,`t(False)`
+ assert t(0)==0
+ assert t(None)==0
+ assert t(0.0)==0
+ assert t(0j)==0
+ assert t(1j)==1
+ assert t(234)==1
+ assert t(234.6)==1
+ assert t(234l)==1
+ assert t(234.6+3j)==1
+ assert t('234')==1
+ assert t('aaa')==1
+ assert t('')==0
+ assert t([])==0
+ assert t(())==0
+ assert t({})==0
+ assert t(t)==1
+ assert t(-234)==1
+ assert t(10l**100)==1
+ assert t([234])==1
+ assert t((234,))==1
+ assert t(array(234))==1
+ assert t(array([234]))==1
+ assert t(array([[234]]))==1
+ assert t(array([234],'1'))==1
+ assert t(array([234],'s'))==1
+ assert t(array([234],'i'))==1
+ assert t(array([234],'l'))==1
+ assert t(array([234],'b'))==1
+ assert t(array([234],'f'))==1
+ assert t(array([234],'d'))==1
+ assert t(array([234+3j],'F'))==1
+ assert t(array([234],'D'))==1
+ assert t(array(0))==0
+ assert t(array([0]))==0
+ assert t(array([[0]]))==0
+ assert t(array([0j]))==0
+ assert t(array([1]))==1
+ assert t(array([0,0]))==0
+ assert t(array([0,1]))==1 #XXX: is this expected?
+
+if __name__=='__main__':
+ #import libwadpy
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
+
diff --git a/numpy/f2py/tests/f77/return_real.py b/numpy/f2py/tests/f77/return_real.py
new file mode 100644
index 000000000..37f97a06d
--- /dev/null
+++ b/numpy/f2py/tests/f77/return_real.py
@@ -0,0 +1,127 @@
+__usage__ = """
+Run:
+ python return_real.py [<f2py options>]
+Examples:
+ python return_real.py --fcompiler=Gnu --no-wrap-functions
+ python return_real.py --quiet
+"""
+
+
+import scipy.f2py as f2py2e
+from scipy.base import array
+
+def build(f2py_opts):
+ # Build (once) the f77_ext_return_real extension module from inline
+ # Fortran 77 source and return the list of wrapped functions to test.
+ # If a previous run already built it, the import succeeds and the
+ # compile step is skipped.
+ try:
+ import f77_ext_return_real
+ except ImportError:
+ # f2py2e.compile returns the build's exit status; 0 means success,
+ # hence the `assert not`.
+ assert not f2py2e.compile('''\
+ function t0(value)
+ real value
+ real t0
+ t0 = value
+ end
+ function t4(value)
+ real*4 value
+ real*4 t4
+ t4 = value
+ end
+ function t8(value)
+ real*8 value
+ real*8 t8
+ t8 = value
+ end
+ function td(value)
+ double precision value
+ double precision td
+ td = value
+ end
+
+ subroutine s0(t0,value)
+ real value
+ real t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s4(t4,value)
+ real*4 value
+ real*4 t4
+cf2py intent(out) t4
+ t4 = value
+ end
+ subroutine s8(t8,value)
+ real*8 value
+ real*8 t8
+cf2py intent(out) t8
+ t8 = value
+ end
+ subroutine sd(td,value)
+ double precision value
+cf2py intent(out) td
+ td = value
+ end
+''','f77_ext_return_real',f2py_opts,source_fn='f77_ret_real.f')
+
+ # t* are Fortran functions, s* subroutines with intent(out) results;
+ # both should behave identically from Python.
+ from f77_ext_return_real import t0,t4,t8,td,s0,s4,s8,sd
+ test_functions = [t0,t4,t8,td,s0,s4,s8,sd]
+ return test_functions
+
+def runtest(t):
+ # Check a wrapped REAL-returning routine `t`: scalars, longs, strings,
+ # sequences and arrays of various typecodes must all round-trip through
+ # Fortran and come back (approximately) unchanged.
+ import sys
+ # Single-precision wrappers (real / real*4) lose digits, so allow a
+ # relative slop; double-precision ones must be exact.
+ if t.__doc__.split()[0] in ['t0','t4','s0','s4']:
+ err = 1e-5
+ else:
+ err = 0.0
+ assert abs(t(234)-234.0)<=err
+ assert abs(t(234.6)-234.6)<=err
+ assert abs(t(234l)-234.0)<=err
+ if sys.version[:3]<'2.3':
+ assert abs(t(234.6+3j)-234.6)<=err
+ assert abs(t('234')-234)<=err
+ assert abs(t('234.6')-234.6)<=err
+ assert abs(t(-234)+234)<=err
+ assert abs(t([234])-234)<=err
+ assert abs(t((234,))-234.)<=err
+ assert abs(t(array(234))-234.)<=err
+ assert abs(t(array([234]))-234.)<=err
+ assert abs(t(array([[234]]))-234.)<=err
+ # Signed-char array: 234 wraps to 234-256 == -22, hence the +22 check.
+ assert abs(t(array([234],'b'))+22)<=err
+ assert abs(t(array([234],'h'))-234.)<=err
+ assert abs(t(array([234],'i'))-234.)<=err
+ assert abs(t(array([234],'l'))-234.)<=err
+ assert abs(t(array([234],'B'))-234.)<=err
+ assert abs(t(array([234],'f'))-234.)<=err
+ assert abs(t(array([234],'d'))-234.)<=err
+ if sys.version[:3]<'2.3':
+ assert abs(t(array([234+3j],'F'))-234.)<=err
+ assert abs(t(array([234],'D'))-234.)<=err
+ if t.__doc__.split()[0] in ['t0','t4','s0','s4']:
+ assert t(1e200)==t(1e300) # inf
+
+ # Idiom below: the call is expected to raise the named exception; if it
+ # instead returns, the RuntimeError (carrying the repr of the result)
+ # propagates and fails the test.
+ try: raise RuntimeError,`t(array([234],'c'))`
+ except ValueError: pass
+ try: raise RuntimeError,`t('abc')`
+ except ValueError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ # 10**400 overflows a C double; either the conversion raises
+ # OverflowError or the platform returns an IEEE infinity.
+ try:
+ try: raise RuntimeError,`t(10l**400)`
+ except OverflowError: pass
+ except RuntimeError:
+ r = t(10l**400); assert `r` in ['inf','Infinity'],`r`
+
+if __name__=='__main__':
+ #import libwadpy
+ # Standard f2py test driver: parse repeat count and f2py options from
+ # the command line, build the extension, run all checks `repeat` times.
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/f90/return_character.py b/numpy/f2py/tests/f90/return_character.py
new file mode 100644
index 000000000..0bd7be701
--- /dev/null
+++ b/numpy/f2py/tests/f90/return_character.py
@@ -0,0 +1,100 @@
+
+__usage__ = """
+Run:
+ python return_character.py [<f2py options>]
+Examples:
+ python return_character.py --fcompiler=Gnu --no-wrap-functions
+ python return_character.py --quiet
+"""
+
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ # Build (once) the f90_ext_return_character extension from inline
+ # Fortran 90 module source and return the wrapped routines.
+ try:
+ import f90_ext_return_character
+ except ImportError:
+ # compile() returns the build's exit status; 0 means success.
+ assert not f2py2e.compile('''\
+module f90_return_char
+ contains
+ function t0(value)
+ character :: value
+ character :: t0
+ t0 = value
+ end function t0
+ function t1(value)
+ character(len=1) :: value
+ character(len=1) :: t1
+ t1 = value
+ end function t1
+ function t5(value)
+ character(len=5) :: value
+ character(len=5) :: t5
+ t5 = value
+ end function t5
+ function ts(value)
+ character(len=*) :: value
+ character(len=10) :: ts
+ ts = value
+ end function ts
+
+ subroutine s0(t0,value)
+ character :: value
+ character :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s1(t1,value)
+ character(len=1) :: value
+ character(len=1) :: t1
+!f2py intent(out) t1
+ t1 = value
+ end subroutine s1
+ subroutine s5(t5,value)
+ character(len=5) :: value
+ character(len=5) :: t5
+!f2py intent(out) t5
+ t5 = value
+ end subroutine s5
+ subroutine ss(ts,value)
+ character(len=*) :: value
+ character(len=10) :: ts
+!f2py intent(out) ts
+ ts = value
+ end subroutine ss
+end module f90_return_char
+''','f90_ext_return_character',f2py_opts,source_fn='f90_ret_char.f90')
+
+ # F90 module contents are exposed as attributes of a module-named
+ # object on the extension module.
+ from f90_ext_return_character import f90_return_char as m
+ test_functions = [m.t0,m.t1,m.t5,m.ts,m.s0,m.s1,m.s5,m.ss]
+ return test_functions
+
+
+def runtest(t):
+ # Check CHARACTER-returning routines; expected results depend on the
+ # declared result length, recovered from the routine name in __doc__.
+ tname = t.__doc__.split()[0]
+ if tname in ['t0','t1','s0','s1']:
+ # len=1 results: inputs are stringified/truncated to one character;
+ # 23 -> '2', and chr(77) == 'M' for the signed-char array case.
+ assert t(23)=='2'
+ r = t('ab');assert r=='a',`r`
+ r = t(array('ab'));assert r=='a',`r`
+ r = t(array(77,'1'));assert r=='M',`r`
+ try: raise RuntimeError,`t(array([77,87]))`
+ except ValueError: pass
+ try: raise RuntimeError,`t(array(77))`
+ except ValueError: pass
+ elif tname in ['ts','ss']:
+ # len=10 results: blank-padded / truncated to 10 characters.
+ assert t(23)=='23 ',`t(23)`
+ assert t('123456789abcdef')=='123456789a',`t('123456789abcdef')`
+ elif tname in ['t5','s5']:
+ # len=5 results: blank-padded / truncated to 5 characters.
+ assert t(23)=='23 '
+ assert t('ab')=='ab '
+ assert t('123456789abcdef')=='12345'
+ else:
+ raise NotImplementedError
+
+if __name__=='__main__':
+ #import libwadpy
+ # Standard f2py test driver: build, then run all checks `repeat` times.
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
+
diff --git a/numpy/f2py/tests/f90/return_complex.py b/numpy/f2py/tests/f90/return_complex.py
new file mode 100644
index 000000000..36d8707fc
--- /dev/null
+++ b/numpy/f2py/tests/f90/return_complex.py
@@ -0,0 +1,127 @@
+__usage__ = """
+Run:
+ python return_complex.py [<f2py options>]
+Examples:
+ python return_complex.py --quiet
+"""
+
+
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ # Build (once) the f90_ext_return_complex extension from inline
+ # Fortran 90 module source and return the wrapped routines.
+ try:
+ import f90_ext_return_complex
+ except ImportError:
+ # compile() returns the build's exit status; 0 means success.
+ assert not f2py2e.compile('''\
+module f90_return_complex
+ contains
+ function t0(value)
+ complex :: value
+ complex :: t0
+ t0 = value
+ end function t0
+ function t8(value)
+ complex(kind=4) :: value
+ complex(kind=4) :: t8
+ t8 = value
+ end function t8
+ function t16(value)
+ complex(kind=8) :: value
+ complex(kind=8) :: t16
+ t16 = value
+ end function t16
+ function td(value)
+ double complex :: value
+ double complex :: td
+ td = value
+ end function td
+
+ subroutine s0(t0,value)
+ complex :: value
+ complex :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s8(t8,value)
+ complex(kind=4) :: value
+ complex(kind=4) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+ subroutine s16(t16,value)
+ complex(kind=8) :: value
+ complex(kind=8) :: t16
+!f2py intent(out) t16
+ t16 = value
+ end subroutine s16
+ subroutine sd(td,value)
+ double complex :: value
+ double complex :: td
+!f2py intent(out) td
+ td = value
+ end subroutine sd
+end module f90_return_complex
+''','f90_ext_return_complex',f2py_opts,source_fn='f90_ret_cmlx.f90')
+
+ # F90 module contents are exposed via the module-named attribute.
+ from f90_ext_return_complex import f90_return_complex as m
+ test_functions = [m.t0,m.t8,m.t16,m.td,m.s0,m.s8,m.s16,m.sd]
+ return test_functions
+
+
+def runtest(t):
+ # Check a wrapped COMPLEX-returning routine `t`: scalars, sequences and
+ # arrays must round-trip through Fortran (approximately) unchanged.
+ tname = t.__doc__.split()[0]
+ # Single-precision complex wrappers get a slop; double-precision exact.
+ if tname in ['t0','t8','s0','s8']:
+ err = 1e-5
+ else:
+ err = 0.0
+ #assert abs(t(234j)-234.0j)<=err
+ assert abs(t(234.6)-234.6)<=err
+ assert abs(t(234l)-234.0)<=err
+ assert abs(t(234.6+3j)-(234.6+3j))<=err
+ #assert abs(t('234')-234.)<=err
+ #assert abs(t('234.6')-234.6)<=err
+ assert abs(t(-234)+234.)<=err
+ assert abs(t([234])-234.)<=err
+ assert abs(t((234,))-234.)<=err
+ assert abs(t(array(234))-234.)<=err
+ assert abs(t(array(23+4j,'F'))-(23+4j))<=err
+ assert abs(t(array([234]))-234.)<=err
+ assert abs(t(array([[234]]))-234.)<=err
+ # Signed-char ('1') array: 234 wraps to -22, hence the +22 check.
+ assert abs(t(array([234],'1'))+22.)<=err
+ assert abs(t(array([234],'s'))-234.)<=err
+ assert abs(t(array([234],'i'))-234.)<=err
+ assert abs(t(array([234],'l'))-234.)<=err
+ assert abs(t(array([234],'b'))-234.)<=err
+ assert abs(t(array([234],'f'))-234.)<=err
+ assert abs(t(array([234],'d'))-234.)<=err
+ assert abs(t(array([234+3j],'F'))-(234+3j))<=err
+ assert abs(t(array([234],'D'))-234.)<=err
+
+ # Idiom: the call must raise the named exception; otherwise the
+ # RuntimeError carrying the result's repr fails the test.
+ try: raise RuntimeError,`t(array([234],'c'))`
+ except TypeError: pass
+ try: raise RuntimeError,`t('abc')`
+ except TypeError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ # 10**400 overflows a C double; either conversion raises OverflowError
+ # or the platform produces a complex infinity.
+ try:
+ try: raise RuntimeError,`t(10l**400)`
+ except OverflowError: pass
+ except RuntimeError:
+ r = t(10l**400); assert `r` in ['(inf+0j)','(Infinity+0j)'],`r`
+
+if __name__=='__main__':
+ #import libwadpy
+ # Standard f2py test driver: build, then run all checks `repeat` times.
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/f90/return_integer.py b/numpy/f2py/tests/f90/return_integer.py
new file mode 100644
index 000000000..635252ce9
--- /dev/null
+++ b/numpy/f2py/tests/f90/return_integer.py
@@ -0,0 +1,152 @@
+
+# XXX: investigate cases that are disabled under win32
+#
+
+__usage__ = """
+Run:
+ python return_integer.py [<f2py options>]
+Examples:
+ python return_integer.py --quiet
+"""
+
+import sys
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ # Build (once) the f90_ext_return_integer extension from inline
+ # Fortran 90 module source and return the wrapped routines.
+ try:
+ import f90_ext_return_integer
+ except ImportError:
+ # compile() returns the build's exit status; 0 means success.
+ assert not f2py2e.compile('''\
+module f90_return_integer
+ contains
+ function t0(value)
+ integer :: value
+ integer :: t0
+ t0 = value
+ end function t0
+ function t1(value)
+ integer(kind=1) :: value
+ integer(kind=1) :: t1
+ t1 = value
+ end function t1
+ function t2(value)
+ integer(kind=2) :: value
+ integer(kind=2) :: t2
+ t2 = value
+ end function t2
+ function t4(value)
+ integer(kind=4) :: value
+ integer(kind=4) :: t4
+ t4 = value
+ end function t4
+ function t8(value)
+ integer(kind=8) :: value
+ integer(kind=8) :: t8
+ t8 = value
+ end function t8
+
+ subroutine s0(t0,value)
+ integer :: value
+ integer :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s1(t1,value)
+ integer(kind=1) :: value
+ integer(kind=1) :: t1
+!f2py intent(out) t1
+ t1 = value
+ end subroutine s1
+ subroutine s2(t2,value)
+ integer(kind=2) :: value
+ integer(kind=2) :: t2
+!f2py intent(out) t2
+ t2 = value
+ end subroutine s2
+ subroutine s4(t4,value)
+ integer(kind=4) :: value
+ integer(kind=4) :: t4
+!f2py intent(out) t4
+ t4 = value
+ end subroutine s4
+ subroutine s8(t8,value)
+ integer(kind=8) :: value
+ integer(kind=8) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+end module f90_return_integer
+''','f90_ext_return_integer',f2py_opts,source_fn='f90_ret_int.f90')
+
+ # F90 module contents are exposed via the module-named attribute.
+ from f90_ext_return_integer import f90_return_integer as m
+ test_functions = [m.t0,m.t1,m.t2,m.t4,m.t8,m.s0,m.s1,m.s2,m.s4,m.s8]
+ return test_functions
+
+def runtest(t):
+ # Check a wrapped INTEGER-returning routine `t`: numeric scalars,
+ # strings, sequences and arrays must all convert to the exact integer.
+ tname = t.__doc__.split()[0]
+ assert t(123)==123
+ # Floats are truncated toward zero on conversion.
+ assert t(123.6)==123
+ assert t(123l)==123
+ if sys.version[:3]<='2.2':
+ assert t(123.6+3j)==123
+ assert t('123')==123
+ assert t(-123)==-123
+ assert t([123])==123
+ assert t((123,))==123
+ assert t(array(123))==123
+ assert t(array([123]))==123
+ assert t(array([[123]]))==123
+ # Numeric-era typecodes: '1' signed char, 's' short, 'F'/'D' complex.
+ assert t(array([123],'1'))==123
+ assert t(array([123],'s'))==123
+ assert t(array([123],'i'))==123
+ assert t(array([123],'l'))==123
+ assert t(array([123],'b'))==123
+ assert t(array([123],'f'))==123
+ assert t(array([123],'d'))==123
+ if sys.version[:3]<='2.2':
+ assert t(array([123+3j],'F'))==123
+ assert t(array([123],'D'))==123
+
+ # Idiom: the call must raise the named exception; otherwise the
+ # RuntimeError carrying the result's repr fails the test.
+ try: raise RuntimeError,`t(array([123],'c'))`
+ except ValueError: pass
+ try: raise RuntimeError,`t('abc')`
+ except ValueError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ # Out-of-range values must overflow: 64-bit kinds only for huge
+ # inputs, narrower kinds already for ~1e13 (Python <= 2.2 behavior).
+ if tname in ['t8','s8']:
+ try: raise RuntimeError,`t(100000000000000000000000l)`
+ except OverflowError: pass
+ try: raise RuntimeError,`t(10000000011111111111111.23)`
+ except OverflowError: pass
+ else:
+ if sys.version[:3]<='2.2':
+ try: raise RuntimeError,`t(10000000000000l)`
+ except OverflowError: pass
+ try: raise RuntimeError,`t(10000000000.23)`
+ except OverflowError: pass
+
+if __name__=='__main__':
+ #import libwadpy
+ # status stays 1 unless the full run completes; on any failure the
+ # finally-block dumps f2py diagnostics to help debug the build.
+ status = 1
+ try:
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
+ status = 0
+ finally:
+ if status:
+ print '*'*20
+ print 'Running f2py2e.diagnose'
+ import f2py2e.diagnose
+ f2py2e.diagnose.run()
diff --git a/numpy/f2py/tests/f90/return_logical.py b/numpy/f2py/tests/f90/return_logical.py
new file mode 100644
index 000000000..c9479edaf
--- /dev/null
+++ b/numpy/f2py/tests/f90/return_logical.py
@@ -0,0 +1,138 @@
+
+__usage__ = """
+Run:
+ python return_logical.py [<f2py options>]
+Examples:
+ python return_logical.py --quiet
+"""
+
+import f2py2e
+from Numeric import array
+
+try: True
+except NameError:
+ True = 1
+ False = 0
+
+def build(f2py_opts):
+ # Build (once) the f90_ext_return_logical extension from inline
+ # Fortran 90 module source and return the wrapped routines.
+ try:
+ import f90_ext_return_logical
+ except ImportError:
+ # compile() returns the build's exit status; 0 means success.
+ assert not f2py2e.compile('''\
+module f90_return_logical
+ contains
+ function t0(value)
+ logical :: value
+ logical :: t0
+ t0 = value
+ end function t0
+ function t1(value)
+ logical(kind=1) :: value
+ logical(kind=1) :: t1
+ t1 = value
+ end function t1
+ function t2(value)
+ logical(kind=2) :: value
+ logical(kind=2) :: t2
+ t2 = value
+ end function t2
+ function t4(value)
+ logical(kind=4) :: value
+ logical(kind=4) :: t4
+ t4 = value
+ end function t4
+ function t8(value)
+ logical(kind=8) :: value
+ logical(kind=8) :: t8
+ t8 = value
+ end function t8
+
+ subroutine s0(t0,value)
+ logical :: value
+ logical :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s1(t1,value)
+ logical(kind=1) :: value
+ logical(kind=1) :: t1
+!f2py intent(out) t1
+ t1 = value
+ end subroutine s1
+ subroutine s2(t2,value)
+ logical(kind=2) :: value
+ logical(kind=2) :: t2
+!f2py intent(out) t2
+ t2 = value
+ end subroutine s2
+ subroutine s4(t4,value)
+ logical(kind=4) :: value
+ logical(kind=4) :: t4
+!f2py intent(out) t4
+ t4 = value
+ end subroutine s4
+ subroutine s8(t8,value)
+ logical(kind=8) :: value
+ logical(kind=8) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+end module f90_return_logical
+''','f90_ext_return_logical',f2py_opts,source_fn='f90_ret_log.f90')
+
+ # F90 module contents are exposed via the module-named attribute.
+ from f90_ext_return_logical import f90_return_logical as m
+ test_functions = [m.t0,m.t1,m.t2,m.t4,m.t8,m.s0,m.s1,m.s2,m.s4,m.s8]
+ return test_functions
+
+
+
+
+def runtest(t):
+ # Exercise a wrapped Fortran 90 LOGICAL routine `t`: any Python object
+ # should be coerced via Python truth value to 1 (.TRUE.) or 0 (.FALSE.).
+ assert t(True)==1,`t(True)`
+ assert t(False)==0,`t(False)`
+ # Falsy objects map to 0 ...
+ assert t(0)==0
+ assert t(None)==0
+ assert t(0.0)==0
+ assert t(0j)==0
+ # ... truthy objects (numbers, strings, containers, callables) to 1.
+ assert t(1j)==1
+ assert t(234)==1
+ assert t(234.6)==1
+ assert t(234l)==1
+ assert t(234.6+3j)==1
+ assert t('234')==1
+ assert t('aaa')==1
+ assert t('')==0
+ assert t([])==0
+ assert t(())==0
+ assert t({})==0
+ assert t(t)==1
+ assert t(-234)==1
+ assert t(10l**100)==1
+ assert t([234])==1
+ assert t((234,))==1
+ # Arrays of every Numeric-era typecode; truth from the element(s).
+ assert t(array(234))==1
+ assert t(array([234]))==1
+ assert t(array([[234]]))==1
+ assert t(array([234],'1'))==1
+ assert t(array([234],'s'))==1
+ assert t(array([234],'i'))==1
+ assert t(array([234],'l'))==1
+ assert t(array([234],'b'))==1
+ assert t(array([234],'f'))==1
+ assert t(array([234],'d'))==1
+ assert t(array([234+3j],'F'))==1
+ assert t(array([234],'D'))==1
+ assert t(array(0))==0
+ assert t(array([0]))==0
+ assert t(array([[0]]))==0
+ assert t(array([0j]))==0
+ assert t(array([1]))==1
+ # Multi-element arrays: apparently only the first element decides.
+ assert t(array([0,0]))==0
+ assert t(array([0,1]))==1 #XXX: is this expected?
+
+if __name__=='__main__':
+ #import libwadpy
+ # Standard f2py test driver: build, then run all checks `repeat` times.
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/f90/return_real.py b/numpy/f2py/tests/f90/return_real.py
new file mode 100644
index 000000000..263e28165
--- /dev/null
+++ b/numpy/f2py/tests/f90/return_real.py
@@ -0,0 +1,129 @@
+__usage__ = """
+Run:
+ python return_real.py [<f2py options>]
+Examples:
+ python return_real.py --quiet
+"""
+
+import sys
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ # Build (once) the f90_ext_return_real extension from inline
+ # Fortran 90 module source and return the wrapped routines.
+ try:
+ import f90_ext_return_real
+ except ImportError:
+ # compile() returns the build's exit status; 0 means success.
+ assert not f2py2e.compile('''\
+module f90_return_real
+ contains
+ function t0(value)
+ real :: value
+ real :: t0
+ t0 = value
+ end function t0
+ function t4(value)
+ real(kind=4) :: value
+ real(kind=4) :: t4
+ t4 = value
+ end function t4
+ function t8(value)
+ real(kind=8) :: value
+ real(kind=8) :: t8
+ t8 = value
+ end function t8
+ function td(value)
+ double precision :: value
+ double precision :: td
+ td = value
+ end function td
+
+ subroutine s0(t0,value)
+ real :: value
+ real :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s4(t4,value)
+ real(kind=4) :: value
+ real(kind=4) :: t4
+!f2py intent(out) t4
+ t4 = value
+ end subroutine s4
+ subroutine s8(t8,value)
+ real(kind=8) :: value
+ real(kind=8) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+ subroutine sd(td,value)
+ double precision :: value
+ double precision :: td
+!f2py intent(out) td
+ td = value
+ end subroutine sd
+end module f90_return_real
+''','f90_ext_return_real',f2py_opts,source_fn='f90_ret_real.f90')
+
+ # F90 module contents are exposed via the module-named attribute.
+ from f90_ext_return_real import f90_return_real as m
+ test_functions = [m.t0,m.t4,m.t8,m.td,m.s0,m.s4,m.s8,m.sd]
+ return test_functions
+
+def runtest(t):
+ # Check a wrapped F90 REAL routine `t`: scalars, longs, strings,
+ # sequences and arrays must round-trip (approximately) unchanged.
+ tname = t.__doc__.split()[0]
+ # Single-precision kinds get a relative slop; double-precision exact.
+ if tname in ['t0','t4','s0','s4']:
+ err = 1e-5
+ else:
+ err = 0.0
+ assert abs(t(234)-234.0)<=err
+ assert abs(t(234.6)-234.6)<=err
+ assert abs(t(234l)-234.0)<=err
+ if sys.version[:3]<='2.2':
+ assert abs(t(234.6+3j)-234.6)<=err,`t(234.6+3j)`
+ assert abs(t('234')-234)<=err
+ assert abs(t('234.6')-234.6)<=err
+ assert abs(t(-234)+234)<=err
+ assert abs(t([234])-234)<=err
+ assert abs(t((234,))-234.)<=err
+ assert abs(t(array(234))-234.)<=err
+ assert abs(t(array([234]))-234.)<=err
+ assert abs(t(array([[234]]))-234.)<=err
+ # Signed-char ('1') array: 234 wraps to -22, hence the +22 check.
+ assert abs(t(array([234],'1'))+22)<=err
+ assert abs(t(array([234],'s'))-234.)<=err
+ assert abs(t(array([234],'i'))-234.)<=err
+ assert abs(t(array([234],'l'))-234.)<=err
+ assert abs(t(array([234],'b'))-234.)<=err
+ assert abs(t(array([234],'f'))-234.)<=err
+ assert abs(t(array([234],'d'))-234.)<=err
+ if sys.version[:3]<='2.2':
+ assert abs(t(array([234+3j],'F'))-234.)<=err,`t(array([234+3j],'F'))`
+ assert abs(t(array([234],'D'))-234.)<=err,`t(array([234],'D'))`
+ if tname in ['t0','t4','s0','s4']:
+ assert t(1e200)==t(1e300) # inf
+
+ # Idiom: the call must raise the named exception; otherwise the
+ # RuntimeError carrying the result's repr fails the test.
+ try: raise RuntimeError,`t(array([234],'c'))`
+ except ValueError: pass
+ try: raise RuntimeError,`t('abc')`
+ except ValueError: pass
+
+ try: raise RuntimeError,`t([])`
+ except IndexError: pass
+ try: raise RuntimeError,`t(())`
+ except IndexError: pass
+
+ try: raise RuntimeError,`t(t)`
+ except TypeError: pass
+ try: raise RuntimeError,`t({})`
+ except TypeError: pass
+
+ # 10**400 overflows a C double; either conversion raises OverflowError
+ # or the platform returns an IEEE infinity.
+ try:
+ try: raise RuntimeError,`t(10l**400)`
+ except OverflowError: pass
+ except RuntimeError:
+ r = t(10l**400); assert `r` in ['inf','Infinity'],`r`
+
+if __name__=='__main__':
+ #import libwadpy
+ # Standard f2py test driver: build, then run all checks `repeat` times.
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/mixed/foo.f b/numpy/f2py/tests/mixed/foo.f
new file mode 100644
index 000000000..c34742578
--- /dev/null
+++ b/numpy/f2py/tests/mixed/foo.f
@@ -0,0 +1,5 @@
+ subroutine bar11(a)
+cf2py intent(out) a
+ integer a
+ a = 11
+ end
diff --git a/numpy/f2py/tests/mixed/foo_fixed.f90 b/numpy/f2py/tests/mixed/foo_fixed.f90
new file mode 100644
index 000000000..7543a6acb
--- /dev/null
+++ b/numpy/f2py/tests/mixed/foo_fixed.f90
@@ -0,0 +1,8 @@
+ module foo_fixed
+ contains
+ subroutine bar12(a)
+!f2py intent(out) a
+ integer a
+ a = 12
+ end subroutine bar12
+ end module foo_fixed
diff --git a/numpy/f2py/tests/mixed/foo_free.f90 b/numpy/f2py/tests/mixed/foo_free.f90
new file mode 100644
index 000000000..c1b641f13
--- /dev/null
+++ b/numpy/f2py/tests/mixed/foo_free.f90
@@ -0,0 +1,8 @@
+module foo_free
+contains
+ subroutine bar13(a)
+ !f2py intent(out) a
+ integer a
+ a = 13
+ end subroutine bar13
+end module foo_free
diff --git a/numpy/f2py/tests/mixed/run.py b/numpy/f2py/tests/mixed/run.py
new file mode 100644
index 000000000..6f4b7d444
--- /dev/null
+++ b/numpy/f2py/tests/mixed/run.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+__usage__ = """
+Run:
+ python run.py [<f2py options>]
+Examples:
+ python run.py --quiet
+"""
+
+import os
+import sys
+import string
+import f2py2e
+from Numeric import array
+
+def build(f2py_opts):
+ # Build (once) a single extension from mixed-form sources: fixed-form
+ # f77, fixed-form f90 and free-form f90, by shelling out to
+ # `python -c "import f2py2e;f2py2e.main()" -c -m ...`.
+ try:
+ import mixed_f77_f90
+ except:
+ # Source files live next to this script; sys.argv[0] locates them.
+ d,b=os.path.split(sys.argv[0])
+ files = ['foo.f','foo_fixed.f90','foo_free.f90']
+ files = [os.path.join(d,f) for f in files]
+ files = string.join(files)
+ args = ' -c -m mixed_f77_f90 %s %s'%(files,f2py_opts)
+ c = '%s -c "import f2py2e;f2py2e.main()" %s' %(sys.executable,args)
+ # os.system returns the shell's exit status; 0 means success.
+ s = os.system(c)
+ assert not s
+ # bar11 is a plain f77 routine; bar12/bar13 live inside F90 modules
+ # and are reached through the module-named attributes.
+ from mixed_f77_f90 import bar11
+ test_functions = [bar11]
+ from mixed_f77_f90 import foo_fixed as m
+ test_functions.append(m.bar12)
+ from mixed_f77_f90 import foo_free as m
+ test_functions.append(m.bar13)
+ return test_functions
+
+def runtest(t):
+ # Each barNN routine simply returns its constant NN; dispatch on the
+ # routine name extracted from the f2py-generated docstring.
+ tname = t.__doc__.split()[0]
+ if tname=='bar11':
+ assert t()==11
+ elif tname=='bar12':
+ assert t()==12
+ elif tname=='bar13':
+ assert t()==13
+ else:
+ raise NotImplementedError
+
+if __name__=='__main__':
+ # Standard f2py test driver: build, then run all checks `repeat` times.
+ repeat,f2py_opts = f2py2e.f2py_testing.cmdline()
+ test_functions = build(f2py_opts)
+ f2py2e.f2py_testing.run(runtest,test_functions,repeat)
+ print 'ok'
diff --git a/numpy/f2py/tests/run_all.py b/numpy/f2py/tests/run_all.py
new file mode 100755
index 000000000..016e68c29
--- /dev/null
+++ b/numpy/f2py/tests/run_all.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+import os,sys
+
+# Driver that runs every f2py return-type test script in a subprocess.
+# Extra command-line arguments are forwarded to each test; by default each
+# test repeats 10 times quietly.
+opts = sys.argv[1:]
+if not opts:
+ opts = ['10','--quiet']
+
+# Passing -DNUMARRAY selects the numarray backend, which lacks f90 and
+# character-array support, so those test files are excluded below.
+NUMARRAY = "-DNUMARRAY" in sys.argv
+
+test_f77_files = [\
+ 'f77/return_integer.py',
+ 'f77/return_logical.py',
+ 'f77/return_real.py',
+ 'f77/return_complex.py',
+ 'f77/callback.py',
+ ]
+
+if not NUMARRAY: # no support for character yet in numarray
+ test_f77_files.append('f77/return_character.py')
+
+test_f90_files = [\
+ 'f90/return_integer.py',
+ 'f90/return_logical.py',
+ 'f90/return_real.py',
+ 'f90/return_complex.py',
+ 'f90/return_character.py',
+ 'mixed/run.py',
+ ]
+
+test_files = test_f77_files
+
+if NUMARRAY:
+ print >>sys.stderr,"NOTE: f2py for numarray does not support"\
+ " f90 or character arrays."
+else:
+ test_files += test_f90_files
+
+# Prepend '.' to PYTHONPATH so each child can import the extension module
+# it just built into its working directory.
+py_path = os.environ.get('PYTHONPATH')
+if py_path is None:
+ py_path = '.'
+else:
+ py_path = os.pathsep.join(['.',py_path])
+os.environ['PYTHONPATH'] = py_path
+
+for f in test_files:
+ print "**********************************************"
+ # sys.path[0] is the directory of this script; tests are relative to it.
+ ff = os.path.join(sys.path[0],f)
+ args = [sys.executable,ff]+opts
+ print "Running",' '.join(args)
+ # spawnve waits for the child and yields its exit status (0 == ok).
+ status = os.spawnve(os.P_WAIT,sys.executable,args,os.environ)
+ if status:
+ print 'TEST FAILURE (status=%s)' % (status)
+ # NOTE(review): aborts the whole run only for this one test —
+ # presumably a debugging leftover; confirm intent.
+ if f=='f90/return_integer.py':
+ sys.exit()
diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py
new file mode 100644
index 000000000..8affcbe41
--- /dev/null
+++ b/numpy/f2py/use_rules.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+"""
+
+Build 'use others module data' mechanism for f2py2e.
+
+Unfinished.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the LGPL. See http://www.fsf.org
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2000/09/10 12:35:43 $
+Pearu Peterson
+"""
+
+__version__ = "$Revision: 1.3 $"[10:-1]
+
+f2py_version='See `f2py -v`'
+
+import pprint
+import sys,string,time,types,copy
+errmess=sys.stderr.write
+outmess=sys.stdout.write
+show=pprint.pprint
+
+from auxfuncs import *
+import capi_maps
+import cfuncs
+##############
+
+# Rule template consumed by applyrules(): #name#-style placeholders are
+# substituted to generate the C wrapper that exposes a Fortran module
+# variable as a get_<name>() Python function.  (Feature is unfinished —
+# the generated body only printf's the value.)
+usemodule_rules={
+ 'body':"""
+#begintitle#
+static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\
+\t #name# = get_#name#()\\n\\
+Arguments:\\n\\
+#docstr#\";
+extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#);
+static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) {
+/*#decl#*/
+\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail;
+printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#));
+\treturn Py_BuildValue(\"\");
+capi_fail:
+\treturn NULL;
+}
+""",
+ 'method':'\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},',
+ 'need':['F_MODFUNC']
+ }
+
+################
+
+def buildusevars(m,r):
+ # Build wrapper hooks for variables pulled in via a Fortran `use`
+ # statement.  `m` is the used module's block dict (with 'name' and
+ # 'vars'); `r` is the use-clause dict, optionally carrying a 'map' of
+ # local=>real renames and an 'only' flag.  Returns accumulated rules.
+ ret={}
+ outmess('\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n'%(m['name']))
+ varsmap={}
+ # revmap inverts r['map'] (realname -> local name), warning on
+ # duplicate targets so only the first mapping wins.
+ revmap={}
+ if r.has_key('map'):
+ for k in r['map'].keys():
+ if revmap.has_key(r['map'][k]):
+ outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n'%(r['map'][k],k,revmap[r['map'][k]]))
+ else:
+ revmap[r['map'][k]]=k
+ if r.has_key('only') and r['only']:
+ # `use mod, only: ...` — expose just the mapped names that the
+ # module actually defines.
+ for v in r['map'].keys():
+ if m['vars'].has_key(r['map'][v]):
+
+ if revmap[r['map'][v]]==v:
+ varsmap[v]=r['map'][v]
+ else:
+ outmess('\t\t\tIgnoring map "%s=>%s". See above.\n'%(v,r['map'][v]))
+ else:
+ outmess('\t\t\tNo definition for variable "%s=>%s". Skipping.\n'%(v,r['map'][v]))
+ else:
+ # Plain `use mod` — expose every module variable, renamed where
+ # the map says so.
+ for v in m['vars'].keys():
+ if revmap.has_key(v):
+ varsmap[v]=revmap[v]
+ else:
+ varsmap[v]=v
+ for v in varsmap.keys():
+ ret=dictappend(ret,buildusevar(v,varsmap[v],m['vars'],m['name']))
+ return ret
+def buildusevar(name,realname,vars,usemodulename):
+ # Construct the rule dictionary for one use-variable wrapper by filling
+ # the #placeholder# slots of usemodule_rules.  `name` is the local
+ # alias, `realname` the symbol inside module `usemodulename`.
+ outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n'%(name,realname))
+ ret={}
+ vrd={'name':name,
+ 'realname':realname,
+ 'REALNAME':string.upper(realname),
+ 'usemodulename':usemodulename,
+ 'USEMODULENAME':string.upper(usemodulename),
+ 'texname':string.replace(name,'_','\\_'),
+ 'begintitle':gentitle('%s=>%s'%(name,realname)),
+ 'endtitle':gentitle('end of %s=>%s'%(name,realname)),
+ 'apiname':'#modulename#_use_%s_from_%s'%(realname,usemodulename)
+ }
+ # For LaTeX docs: spell out digits in the name (0->Ro, 1->Ri, ...) —
+ # presumably to keep TeX macro names letter-only; confirm.
+ nummap={0:'Ro',1:'Ri',2:'Rii',3:'Riii',4:'Riv',5:'Rv',6:'Rvi',7:'Rvii',8:'Rviii',9:'Rix'}
+ vrd['texnamename']=name
+ for i in nummap.keys():
+ vrd['texnamename']=string.replace(vrd['texnamename'],`i`,nummap[i])
+ if hasnote(vars[realname]): vrd['note']=vars[realname]['note']
+ rd=dictappend({},vrd)
+ var=vars[realname]
+
+ # NOTE(review): debugging print left in — feature is marked unfinished.
+ print name,realname,vars[realname]
+ ret=applyrules(usemodule_rules,rd)
+ return ret
+
+
+
+
+
+
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
new file mode 100644
index 000000000..39607f993
--- /dev/null
+++ b/numpy/random/__init__.py
@@ -0,0 +1,17 @@
+# To get sub-modules
+from info import __doc__, __all__
+from mtrand import *
+
+# Some aliases: ranf/random/sample are all the uniform [0,1) sampler.
+ranf = random = sample = random_sample
+__all__.extend(['ranf','random','sample'])
+
+def __RandomState_ctor():
+    """Return a RandomState instance.
+
+    This function exists solely to assist (un)pickling.
+    """
+    # A fresh, default-seeded RandomState; pickle protocol then restores
+    # the actual generator state on top of it.
+    return RandomState()
+
+from scipy.testing import ScipyTest
+test = ScipyTest().test
diff --git a/numpy/random/info.py b/numpy/random/info.py
new file mode 100644
index 000000000..44864495e
--- /dev/null
+++ b/numpy/random/info.py
@@ -0,0 +1,56 @@
+"""\
+Core Random Tools
+=================
+
+"""
+
+# Package metadata consumed by the import machinery/doc tools.
+depends = ['base']
+# Names hoisted into the top-level namespace.
+global_symbols = ['rand','randn']
+
+# Public API of numpy.random (re-exported by __init__ via `from mtrand import *`).
+__all__ = [
+    'beta',
+    'binomial',
+    'bytes',
+    'chisquare',
+    'exponential',
+    'f',
+    'gamma',
+    'geometric',
+    'get_state',
+    'gumbel',
+    'hypergeometric',
+    'laplace',
+    'logistic',
+    'lognormal',
+    'logseries',
+    'multinomial',
+    'multivariate_normal',
+    'negative_binomial',
+    'noncentral_chisquare',
+    'noncentral_f',
+    'normal',
+    'pareto',
+    'permutation',
+    'poisson',
+    'power',
+    'rand',
+    'randint',
+    'randn',
+    'random_integers',
+    'random_sample',
+    'rayleigh',
+    'seed',
+    'set_state',
+    'shuffle',
+    'standard_cauchy',
+    'standard_exponential',
+    'standard_gamma',
+    'standard_normal',
+    'standard_t',
+    'triangular',
+    'uniform',
+    'vonmises',
+    'wald',
+    'weibull',
+    'zipf'
+]
diff --git a/numpy/random/mtrand/Python.pxi b/numpy/random/mtrand/Python.pxi
new file mode 100644
index 000000000..d82b6dc4f
--- /dev/null
+++ b/numpy/random/mtrand/Python.pxi
@@ -0,0 +1,24 @@
+# :Author: Robert Kern
+# :Copyright: 2004, Enthought, Inc.
+# :License: BSD Style
+
+
+# Minimal C-API declarations needed by mtrand.pyx.
+cdef extern from "Python.h":
+    # Declared as int — Pyrex only needs an integral type here; the real
+    # width comes from the C header at compile time (presumably; this is
+    # the usual Pyrex idiom).
+    ctypedef int size_t
+    char* PyString_AsString(object string)
+    object PyString_FromString(char* c_string)
+
+    # Python-heap allocation (must be paired with PyMem_Free).
+    void* PyMem_Malloc(size_t n)
+    void* PyMem_Realloc(void* buf, size_t n)
+    void PyMem_Free(void* buf)
+
+    # Manual refcounting; X-variants tolerate NULL.
+    void Py_DECREF(object obj)
+    void Py_XDECREF(object obj)
+    void Py_INCREF(object obj)
+    void Py_XINCREF(object obj)
+
+cdef extern from "string.h":
+    void *memcpy(void *s1, void *s2, int n)
+
+cdef extern from "math.h":
+    double fabs(double x)
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
new file mode 100644
index 000000000..3f5ff2355
--- /dev/null
+++ b/numpy/random/mtrand/distributions.c
@@ -0,0 +1,845 @@
+/* Copyright 2005 Robert Kern (robert.kern@gmail.com)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* The implementations of rk_hypergeometric_hyp(), rk_hypergeometric_hrua(),
+ * and rk_triangular() were adapted from Ivan Frohne's rv.py which has this
+ * license:
+ *
+ * Copyright 1998 by Ivan Frohne; Wasilla, Alaska, U.S.A.
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation for any purpose, free of charge, is granted subject to the
+ * following conditions:
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the software.
+ *
+ * THE SOFTWARE AND DOCUMENTATION IS PROVIDED WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR
+ * OR COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM OR DAMAGES IN A CONTRACT
+ * ACTION, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR ITS DOCUMENTATION.
+ */
+
+#include <math.h>
+#include "distributions.h"
+#include <stdio.h>
+
+#ifndef min
+#define min(x,y) ((x<y)?x:y)
+#define max(x,y) ((x>y)?x:y)
+#endif
+
+/* log-gamma function to support some of these distributions. The
+ * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their
+ * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc.
+ */
+extern double loggam(double x);
+double loggam(double x)
+{
+ double x0, x2, xp, gl, gl0;
+ long k, n;
+
+ static double a[10] = {8.333333333333333e-02,-2.777777777777778e-03,
+ 7.936507936507937e-04,-5.952380952380952e-04,
+ 8.417508417508418e-04,-1.917526917526918e-03,
+ 6.410256410256410e-03,-2.955065359477124e-02,
+ 1.796443723688307e-01,-1.39243221690590e+00};
+ x0 = x;
+ n = 0;
+ if ((x == 1.0) || (x == 2.0))
+ {
+ return 0.0;
+ }
+ else if (x <= 7.0)
+ {
+ n = (long)(7 - x);
+ x0 = x + n;
+ }
+ x2 = 1.0/(x0*x0);
+ xp = 2*M_PI;
+ gl0 = a[9];
+ for (k=8; k>=0; k--)
+ {
+ gl0 *= x2;
+ gl0 += a[k];
+ }
+ gl = gl0/x0 + 0.5*log(xp) + (x0-0.5)*log(x0) - x0;
+ if (x <= 7.0)
+ {
+ for (k=1; k<=n; k++)
+ {
+ gl -= log(x0-1.0);
+ x0 -= 1.0;
+ }
+ }
+ return gl;
+}
+
+double rk_normal(rk_state *state, double loc, double scale)
+{
+ return loc + scale*rk_gauss(state);
+}
+
+double rk_standard_exponential(rk_state *state)
+{
+ /* We use -log(1-U) since U is [0, 1) */
+ return -log(1.0 - rk_double(state));
+}
+
+double rk_exponential(rk_state *state, double scale)
+{
+ return scale * rk_standard_exponential(state);
+}
+
+double rk_uniform(rk_state *state, double loc, double scale)
+{
+ return loc + scale*rk_double(state);
+}
+
+double rk_standard_gamma(rk_state *state, double shape)
+{
+ double b, c;
+ double U, V, X, Y;
+
+ if (shape == 1.0)
+ {
+ return rk_standard_exponential(state);
+ }
+ else if (shape < 1.0)
+ {
+ for (;;)
+ {
+ U = rk_double(state);
+ V = rk_standard_exponential(state);
+ if (U <= 1.0 - shape)
+ {
+ X = pow(U, 1./shape);
+ if (X <= V)
+ {
+ return X;
+ }
+ }
+ else
+ {
+ Y = -log((1-U)/shape);
+ X = pow(1.0 - shape + shape*Y, 1./shape);
+ if (X <= (V + Y))
+ {
+ return X;
+ }
+ }
+ }
+ }
+ else
+ {
+ b = shape - 1./3.;
+ c = 1./sqrt(9*b);
+ for (;;)
+ {
+ do
+ {
+ X = rk_gauss(state);
+ V = 1.0 + c*X;
+ } while (V <= 0.0);
+
+ V = V*V*V;
+ U = rk_double(state);
+ if (U < 1.0 - 0.0331*(X*X)*(X*X)) return (b*V);
+ if (log(U) < 0.5*X*X + b*(1. - V + log(V))) return (b*V);
+ }
+ }
+}
+
+double rk_gamma(rk_state *state, double shape, double scale)
+{
+ return scale * rk_standard_gamma(state, shape);
+}
+
+double rk_beta(rk_state *state, double a, double b)
+{
+ double Ga, Gb;
+
+ if ((a <= 1.0) && (b <= 1.0))
+ {
+ double U, V, X, Y;
+        /* Use Johnk's algorithm */
+
+ while (1)
+ {
+ U = rk_double(state);
+ V = rk_double(state);
+ X = pow(U, 1.0/a);
+ Y = pow(V, 1.0/b);
+
+ if ((X + Y) <= 1.0)
+ {
+ return X;
+ }
+ }
+ }
+ else
+ {
+ Ga = rk_standard_gamma(state, a);
+ Gb = rk_standard_gamma(state, b);
+ return Ga/(Ga + Gb);
+ }
+}
+
+double rk_chisquare(rk_state *state, double df)
+{
+ return 2.0*rk_standard_gamma(state, df/2.0);
+}
+
+double rk_noncentral_chisquare(rk_state *state, double df, double nonc)
+{
+ double Chi2, N;
+
+ Chi2 = rk_chisquare(state, df-1);
+ N = rk_gauss(state) + sqrt(nonc);
+ return Chi2 + N*N;
+}
+
+double rk_f(rk_state *state, double dfnum, double dfden)
+{
+ return rk_chisquare(state, dfnum) / rk_chisquare(state, dfden);
+}
+
+double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc)
+{
+ return ((rk_noncentral_chisquare(state, dfnum, nonc)*dfden) /
+ (rk_chisquare(state, dfden)*dfnum));
+}
+
+long rk_binomial_btpe(rk_state *state, long n, double p)
+{
+ double r,q,fm,p1,xm,xl,xr,c,laml,lamr,p2,p3,p4;
+ double a,u,v,s,F,rho,t,A,nrq,x1,x2,f1,f2,z,z2,w,w2,x;
+ long m,y,k,i;
+
+ if (!(state->has_binomial) ||
+ (state->nsave != n) ||
+ (state->psave != p))
+ {
+ /* initialize */
+ state->nsave = n;
+ state->psave = p;
+ state->has_binomial = 1;
+ state->r = r = min(p, 1.0-p);
+ state->q = q = 1.0 - r;
+ state->fm = fm = n*r+r;
+ state->m = m = (long)floor(state->fm);
+ state->p1 = p1 = floor(2.195*sqrt(n*r*q)-4.6*q) + 0.5;
+ state->xm = xm = m + 0.5;
+ state->xl = xl = xm - p1;
+ state->xr = xr = xm + p1;
+ state->c = c = 0.134 + 20.5/(15.3 + m);
+ a = (fm - xl)/(fm-xl*r);
+ state->laml = laml = a*(1.0 + a/2.0);
+ a = (xr - fm)/(xr*q);
+ state->lamr = lamr = a*(1.0 + a/2.0);
+ state->p2 = p2 = p1*(1.0 + 2.0*c);
+ state->p3 = p3 = p2 + c/laml;
+ state->p4 = p4 = p3 + c/lamr;
+ }
+ else
+ {
+ r = state->r;
+ q = state->q;
+ fm = state->fm;
+ m = state->m;
+ p1 = state->p1;
+ xm = state->xm;
+ xl = state->xl;
+ xr = state->xr;
+ c = state->c;
+ laml = state->laml;
+ lamr = state->lamr;
+ p2 = state->p2;
+ p3 = state->p3;
+ p4 = state->p4;
+ }
+
+ /* sigh ... */
+ Step10:
+ nrq = n*r*q;
+ u = rk_double(state)*p4;
+ v = rk_double(state);
+ if (u > p1) goto Step20;
+ y = (long)floor(xm - p1*v + u);
+ goto Step60;
+
+ Step20:
+ if (u > p2) goto Step30;
+ x = xl + (u - p1)/c;
+ v = v*c + 1.0 - fabs(m - x + 0.5)/p1;
+ if (v > 1.0) goto Step10;
+ y = (long)floor(x);
+ goto Step50;
+
+ Step30:
+ if (u > p3) goto Step40;
+ y = (long)floor(xl + log(v)/laml);
+ if (y < 0) goto Step10;
+ v = v*(u-p2)*laml;
+ goto Step50;
+
+ Step40:
+ y = (int)floor(xr - log(v)/lamr);
+ if (y > n) goto Step10;
+ v = v*(u-p3)*lamr;
+
+ Step50:
+ k = fabs(y - m);
+ if ((k > 20) && (k < ((nrq)/2.0 - 1))) goto Step52;
+
+ s = r/q;
+ a = s*(n+1);
+ F = 1.0;
+ if (m < y)
+ {
+ for (i=m; i<=y; i++)
+ {
+ F *= (a/i - s);
+ }
+ }
+ else if (m > y)
+ {
+ for (i=y; i<=m; i++)
+ {
+ F /= (a/i - s);
+ }
+ }
+ else
+ {
+ if (v > F) goto Step10;
+ goto Step60;
+ }
+
+ Step52:
+ rho = (k/(nrq))*((k*(k/3.0 + 0.625) + 0.16666666666666666)/nrq + 0.5);
+ t = -k*k/(2*nrq);
+ A = log(v);
+ if (A < (t - rho)) goto Step60;
+ if (A > (t + rho)) goto Step10;
+
+ x1 = y+1;
+ f1 = m+1;
+ z = n+1-m;
+ w = n-y+1;
+ x2 = x1*x1;
+ f2 = f1*f1;
+ z2 = z*z;
+ w2 = w*w;
+ if (A > (xm*log(f1/x1)
+ + (n-m+0.5)*log(z/w)
+ + (y-m)*log(w*r/(x1*q))
+ + (13680.-(462.-(132.-(99.-140./f2)/f2)/f2)/f2)/f1/166320.
+ + (13680.-(462.-(132.-(99.-140./z2)/z2)/z2)/z2)/z/166320.
+ + (13680.-(462.-(132.-(99.-140./x2)/x2)/x2)/x2)/x1/166320.
+ + (13680.-(462.-(132.-(99.-140./w2)/w2)/w2)/w2)/w/166320.))
+ {
+ goto Step10;
+ }
+
+ Step60:
+ if (p > 0.5)
+ {
+ y = n - y;
+ }
+
+ return y;
+}
+
+long rk_binomial_inversion(rk_state *state, long n, double p)
+{
+ double q, qn, np, px, U;
+ long X, bound;
+
+ if (!(state->has_binomial) ||
+ (state->nsave != n) ||
+ (state->psave != p))
+ {
+ state->nsave = n;
+ state->psave = p;
+ state->has_binomial = 1;
+ state->q = q = 1.0 - p;
+ state->r = qn = exp(n * log(q));
+ state->c = np = n*p;
+ state->m = bound = min(n, np + 10.0*sqrt(np));
+ } else
+ {
+ q = state->q;
+ qn = state->r;
+ np = state->c;
+ bound = state->m;
+ }
+ X = 0;
+ px = qn;
+ U = rk_double(state);
+ while (U > px)
+ {
+ X++;
+ if (X > bound)
+ {
+ X = 0;
+ px = qn;
+ U = rk_double(state);
+ } else
+ {
+ U -= px;
+ px = ((n-X+1) * p * px)/(X*q);
+ }
+ }
+ return X;
+}
+
+long rk_binomial(rk_state *state, long n, double p)
+{
+ double q;
+
+ if (p <= 0.5)
+ {
+ if (p*n <= 30.0)
+ {
+ return rk_binomial_inversion(state, n, p);
+ }
+ else
+ {
+ return rk_binomial_btpe(state, n, p);
+ }
+ }
+ else
+ {
+ q = 1.0-p;
+ if (q*n <= 30.0)
+ {
+ return n - rk_binomial_inversion(state, n, q);
+ }
+ else
+ {
+ return n - rk_binomial_btpe(state, n, q);
+ }
+ }
+
+}
+
+long rk_negative_binomial(rk_state *state, long n, double p)
+{
+ double Y;
+
+ Y = rk_gamma(state, n, (1-p)/p);
+ return rk_poisson(state, Y);
+}
+
+long rk_poisson_mult(rk_state *state, double lam)
+{
+ long X;
+ double prod, U, enlam;
+
+ enlam = exp(-lam);
+ X = 0;
+ prod = 1.0;
+ while (1)
+ {
+ U = rk_double(state);
+ prod *= U;
+ if (prod > enlam)
+ {
+ X += 1;
+ }
+ else
+ {
+ return X;
+ }
+ }
+}
+
+#define LS2PI 0.91893853320467267
+#define TWELFTH 0.083333333333333333333333
+long rk_poisson_ptrs(rk_state *state, double lam)
+{
+ long k;
+ double U, V, slam, loglam, a, b, invalpha, vr, us;
+
+ slam = sqrt(lam);
+ loglam = log(lam);
+ b = 0.931 + 2.53*slam;
+ a = -0.059 + 0.02483*b;
+ invalpha = 1.1239 + 1.1328/(b-3.4);
+ vr = 0.9277 - 3.6224/(b-2);
+
+ while (1)
+ {
+ U = rk_double(state) - 0.5;
+ V = rk_double(state);
+ us = 0.5 - fabs(U);
+ k = (long)floor((2*a/us + b)*U + lam + 0.43);
+ if ((us >= 0.07) && (V <= vr))
+ {
+ return k;
+ }
+ if ((k < 0) ||
+ ((us < 0.013) && (V > us)))
+ {
+ continue;
+ }
+ if ((log(V) + log(invalpha) - log(a/(us*us)+b)) <=
+ (-lam + k*loglam - loggam(k+1)))
+ {
+ return k;
+ }
+
+
+ }
+
+}
+
+long rk_poisson(rk_state *state, double lam)
+{
+ if (lam >= 10)
+ {
+ return rk_poisson_ptrs(state, lam);
+ }
+ else
+ {
+ return rk_poisson_mult(state, lam);
+ }
+}
+
+double rk_standard_cauchy(rk_state *state)
+{
+ return rk_gauss(state) / rk_gauss(state);
+}
+
+double rk_standard_t(rk_state *state, double df)
+{
+ double N, G, X;
+
+ N = rk_gauss(state);
+ G = rk_standard_gamma(state, df/2);
+ X = sqrt(df/2)*N/sqrt(G);
+ return X;
+}
+
+double rk_vonmises(rk_state *state, double mu, double kappa)
+{
+ double r, rho, s;
+ double U, V, W, Y, Z;
+ double result, mod;
+
+ if (kappa < 1e-8)
+ {
+ return M_PI * (2*rk_double(state)-1);
+ }
+ else
+ {
+ r = 1 + sqrt(1 + 4*kappa*kappa);
+ rho = (r - sqrt(2*r))/(2*kappa);
+ s = (1 + rho*rho)/(2*rho);
+
+ while (1)
+ {
+ U = 2*rk_double(state) - 1;
+ V = 2*rk_double(state) - 1;
+ Z = cos(M_PI*U);
+ W = (1 + s*Z)/(s + Z);
+ Y = kappa * (s - W);
+ if ((Y*(2-Y) - V >= 0) || (log(Y/V)+1 - Y >= 0))
+ {
+ break;
+ }
+ }
+
+ if (U < 0)
+ {
+ result = acos(W);
+ }
+ else
+ {
+ result = -acos(W);
+ }
+ result += mu + M_PI;
+ mod = fmod(result, 2*M_PI);
+ if (mod && (mod < 0))
+ {
+ mod += 2*M_PI;
+ }
+ return mod - M_PI;
+ }
+}
+
+double rk_pareto(rk_state *state, double a)
+{
+ return exp(rk_standard_exponential(state)/a) - 1;
+}
+
+double rk_weibull(rk_state *state, double a)
+{
+ return pow(rk_standard_exponential(state), 1./a);
+}
+
+double rk_power(rk_state *state, double a)
+{
+ return pow(1 - exp(-rk_standard_exponential(state)), 1./a);
+}
+
+double rk_laplace(rk_state *state, double loc, double scale)
+{
+ double U;
+
+ U = rk_double(state);
+ if (U < 0.5)
+ {
+ U = loc + scale * log(U + U);
+ } else
+ {
+ U = loc - scale * log(2.0 - U - U);
+ }
+ return U;
+}
+
+double rk_gumbel(rk_state *state, double loc, double scale)
+{
+ double U;
+
+ U = 1.0 - rk_double(state);
+ return loc - scale * log(-log(U));
+}
+
+double rk_logistic(rk_state *state, double loc, double scale)
+{
+ double U;
+
+ U = rk_double(state);
+ return loc + scale * log(U/(1.0 - U));
+}
+
+double rk_lognormal(rk_state *state, double mean, double sigma)
+{
+ return exp(rk_normal(state, mean, sigma));
+}
+
+double rk_rayleigh(rk_state *state, double mode)
+{
+ return mode*sqrt(-2.0 * log(1.0 - rk_double(state)));
+}
+
+double rk_wald(rk_state *state, double mean, double scale)
+{
+ double U, X, Y;
+ double mu_2l;
+
+ mu_2l = mean / (2*scale);
+ Y = rk_gauss(state);
+ Y = mean*Y*Y;
+ X = mean + mu_2l*(Y - sqrt(4*scale*Y + Y*Y));
+ U = rk_double(state);
+ if (U <= mean/(mean+X))
+ {
+ return X;
+ } else
+ {
+ return mean*mean/X;
+ }
+}
+
+long rk_zipf(rk_state *state, double a)
+{
+ double T, U, V;
+ long X;
+ double b;
+
+ b = pow(2.0, a-1.0);
+ do
+ {
+ U = rk_double(state);
+ V = rk_double(state);
+ X = (long)floor(pow(U, -1.0/(a-1.0)));
+ T = pow(1.0 + 1.0/X, a-1.0);
+ } while ((V *X*(T-1.0)/(b-1.0)) > (T/b));
+ return X;
+}
+
+long rk_geometric_search(rk_state *state, double p)
+{
+ double U;
+ long X;
+ double sum, prod, q;
+
+ X = 1;
+ sum = prod = p;
+ q = 1.0 - p;
+ U = rk_double(state);
+ while (U > sum)
+ {
+ prod *= q;
+ sum += prod;
+ X++;
+ }
+ return X;
+}
+
+long rk_geometric_inversion(rk_state *state, double p)
+{
+ return (long)ceil(log(1.0-rk_double(state))/log(1.0-p));
+}
+
+long rk_geometric(rk_state *state, double p)
+{
+ if (p >= 0.333333333333333333333333)
+ {
+ return rk_geometric_search(state, p);
+ } else
+ {
+ return rk_geometric_inversion(state, p);
+ }
+}
+
+long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample)
+{
+ long d1, K, Z;
+ double d2, U, Y;
+
+ d1 = bad + good - sample;
+ d2 = (double)min(bad, good);
+
+ Y = d2;
+ K = sample;
+ while (Y > 0.0)
+ {
+ U = rk_double(state);
+ Y -= (long)floor(U + Y/(d1 + K));
+ K--;
+ if (K == 0) break;
+ }
+ Z = (long)(d2 - Y);
+ if (bad > good) Z = sample - Z;
+ return Z;
+}
+
+/* D1 = 2*sqrt(2/e) */
+/* D2 = 3 - 2*sqrt(3/e) */
+#define D1 1.7155277699214135
+#define D2 0.8989161620588988
+long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample)
+{
+ long mingoodbad, maxgoodbad, popsize, m, d9;
+ double d4, d5, d6, d7, d8, d10, d11;
+ long Z;
+ double T, W, X, Y;
+
+ mingoodbad = min(good, bad);
+ popsize = good + bad;
+ maxgoodbad = max(good, bad);
+ m = min(sample, popsize - sample);
+ d4 = ((double)mingoodbad) / popsize;
+ d5 = 1.0 - d4;
+ d6 = m*d4 + 0.5;
+ d7 = sqrt((popsize - m) * sample * d4 *d5 / (popsize-1) + 0.5);
+ d8 = D1*d7 + D2;
+ d9 = (long)floor((double)((m+1)*(mingoodbad+1))/(popsize+2));
+ d10 = (loggam(d9+1) + loggam(mingoodbad-d9+1) + loggam(m-d9+1) +
+ loggam(maxgoodbad-m+d9+1));
+ d11 = min(min(m, mingoodbad)+1.0, floor(d6+16*d7));
+ /* 16 for 16-decimal-digit precision in D1 and D2 */
+
+ while (1)
+ {
+ X = rk_double(state);
+ Y = rk_double(state);
+ W = d6 + d8*(Y- 0.5)/X;
+
+ /* fast rejection: */
+ if ((W < 0.0) || (W >= d11)) continue;
+
+ Z = (long)floor(W);
+ T = d10 - (loggam(Z+1) + loggam(mingoodbad-Z+1) + loggam(m-Z+1) +
+ loggam(maxgoodbad-m+Z+1));
+
+ /* fast acceptance: */
+ if ((X*(4.0-X)-3.0) <= T) break;
+
+ /* fast rejection: */
+ if (X*(X-T) >= 1) continue;
+
+ if (2.0*log(X) <= T) break; /* acceptance */
+ }
+
+ /* this is a correction to HRUA* by Ivan Frohne in rv.py */
+ if (bad > good) Z = m - Z;
+
+ /* another fix from rv.py to allow sample to exceed popsize/2 */
+ if (m < sample) Z = bad - Z;
+
+ return Z;
+}
+#undef D1
+#undef D2
+
+long rk_hypergeometric(rk_state *state, long good, long bad, long sample)
+{
+ if (sample > 10)
+ {
+ return rk_hypergeometric_hrua(state, good, bad, sample);
+ } else
+ {
+ return rk_hypergeometric_hyp(state, good, bad, sample);
+ }
+}
+
+double rk_triangular(rk_state *state, double left, double mode, double right)
+{
+ double base, leftbase, ratio, leftprod, rightprod;
+ double U;
+
+ base = right - left;
+ leftbase = mode - left;
+ ratio = leftbase / base;
+ leftprod = leftbase*base;
+ rightprod = (right - mode)*base;
+
+ U = rk_double(state);
+ if (U <= ratio)
+ {
+ return left + sqrt(U*leftprod);
+ } else
+ {
+ return right - sqrt((1.0 - U) * rightprod);
+ }
+}
+
+long rk_logseries(rk_state *state, double p)
+{
+ double q, r, U, V;
+
+ r = log(1.0 - p);
+
+ V = rk_double(state);
+ if (V >= p) return 1;
+ U = rk_double(state);
+ q = 1.0 - exp(r*U);
+ if (V <= q*q) return (long)floor(1 + log(V)/log(q));
+ if (V <= q) return 1;
+ return 2;
+}
diff --git a/numpy/random/mtrand/distributions.h b/numpy/random/mtrand/distributions.h
new file mode 100644
index 000000000..0d6de639d
--- /dev/null
+++ b/numpy/random/mtrand/distributions.h
@@ -0,0 +1,185 @@
+/* Copyright 2005 Robert Kern (robert.kern@gmail.com)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _RK_DISTR_
+#define _RK_DISTR_
+
+#include "randomkit.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* References:
+ *
+ * Devroye, Luc. _Non-Uniform Random Variate Generation_.
+ * Springer-Verlag, New York, 1986.
+ * http://cgm.cs.mcgill.ca/~luc/rnbookindex.html
+ *
+ * Kachitvichyanukul, V. and Schmeiser, B. W. Binomial Random Variate
+ * Generation. Communications of the ACM, 31, 2 (February, 1988) 216.
+ *
+ * Hoermann, W. The Transformed Rejection Method for Generating Poisson Random
+ * Variables. Insurance: Mathematics and Economics, (to appear)
+ * http://citeseer.csail.mit.edu/151115.html
+ *
+ * Marsaglia, G. and Tsang, W. W. A Simple Method for Generating Gamma
+ * Variables. ACM Transactions on Mathematical Software, Vol. 26, No. 3,
+ * September 2000, Pages 363–372.
+ */
+
+/* Normal distribution with mean=loc and standard deviation=scale. */
+extern double rk_normal(rk_state *state, double loc, double scale);
+
+/* Standard exponential distribution (mean=1) computed by inversion of the
+ * CDF. */
+extern double rk_standard_exponential(rk_state *state);
+
+/* Exponential distribution with mean=scale. */
+extern double rk_exponential(rk_state *state, double scale);
+
+/* Uniform distribution on interval [loc, loc+scale). */
+extern double rk_uniform(rk_state *state, double loc, double scale);
+
+/* Standard gamma distribution with shape parameter.
+ * When shape < 1, the algorithm given by (Devroye p. 304) is used.
+ * When shape == 1, an Exponential variate is generated.
+ * When shape > 1, the small and fast method of (Marsaglia and Tsang 2000)
+ * is used.
+ */
+extern double rk_standard_gamma(rk_state *state, double shape);
+
+/* Gamma distribution with shape and scale. */
+extern double rk_gamma(rk_state *state, double shape, double scale);
+
+/* Beta distribution computed by combining two gamma variates (Devroye p. 432).
+ */
+extern double rk_beta(rk_state *state, double a, double b);
+
+/* Chi^2 distribution computed by transforming a gamma variate (it being a
+ * special case, Gamma(df/2, 2)). */
+extern double rk_chisquare(rk_state *state, double df);
+
+/* Noncentral Chi^2 distribution computed by modifying a Chi^2 variate. */
+extern double rk_noncentral_chisquare(rk_state *state, double df, double nonc);
+
+/* F distribution computed by taking the ratio of two Chi^2 variates. */
+extern double rk_f(rk_state *state, double dfnum, double dfden);
+
+/* Noncentral F distribution computed by taking the ratio of a noncentral Chi^2
+ * and a Chi^2 variate. */
+extern double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc);
+
+/* Binomial distribution with n Bernoulli trials with success probability p.
+ * When n*p <= 30, the "Second waiting time method" given by (Devroye p. 525) is
+ * used. Otherwise, the BTPE algorithm of (Kachitvichyanukul and Schmeiser 1988)
+ * is used. */
+extern long rk_binomial(rk_state *state, long n, double p);
+
+/* Binomial distribution using BTPE. */
+extern long rk_binomial_btpe(rk_state *state, long n, double p);
+
+/* Binomial distribution using inversion and chop-down */
+extern long rk_binomial_inversion(rk_state *state, long n, double p);
+
+/* Negative binomial distribution computed by generating a Gamma(n, (1-p)/p)
+ * variate Y and returning a Poisson(Y) variate (Devroye p. 543). */
+extern long rk_negative_binomial(rk_state *state, long n, double p);
+
+/* Poisson distribution with mean=lam.
+ * When lam < 10, a basic algorithm using repeated multiplications of uniform
+ * variates is used (Devroye p. 504).
+ * When lam >= 10, algorithm PTRS from (Hoermann 1992) is used.
+ */
+extern long rk_poisson(rk_state *state, double lam);
+
+/* Poisson distribution computed by repeated multiplication of uniform variates.
+ */
+extern long rk_poisson_mult(rk_state *state, double lam);
+
+/* Poisson distribution computed by the PTRS algorithm. */
+extern long rk_poisson_ptrs(rk_state *state, double lam);
+
+/* Standard Cauchy distribution computed by dividing standard gaussians
+ * (Devroye p. 451). */
+extern double rk_standard_cauchy(rk_state *state);
+
+/* Standard t-distribution with df degrees of freedom (Devroye p. 445 as
+ * corrected in the Errata). */
+extern double rk_standard_t(rk_state *state, double df);
+
+/* von Mises circular distribution with center mu and shape kappa on [-pi,pi]
+ * (Devroye p. 476 as corrected in the Errata). */
+extern double rk_vonmises(rk_state *state, double mu, double kappa);
+
+/* Pareto distribution via inversion (Devroye p. 262) */
+extern double rk_pareto(rk_state *state, double a);
+
+/* Weibull distribution via inversion (Devroye p. 262) */
+extern double rk_weibull(rk_state *state, double a);
+
+/* Power distribution via inversion (Devroye p. 262) */
+extern double rk_power(rk_state *state, double a);
+
+/* Laplace distribution */
+extern double rk_laplace(rk_state *state, double loc, double scale);
+
+/* Gumbel distribution */
+extern double rk_gumbel(rk_state *state, double loc, double scale);
+
+/* Logistic distribution */
+extern double rk_logistic(rk_state *state, double loc, double scale);
+
+/* Log-normal distribution */
+extern double rk_lognormal(rk_state *state, double mean, double sigma);
+
+/* Rayleigh distribution */
+extern double rk_rayleigh(rk_state *state, double mode);
+
+/* Wald distribution */
+extern double rk_wald(rk_state *state, double mean, double scale);
+
+/* Zipf distribution */
+extern long rk_zipf(rk_state *state, double a);
+
+/* Geometric distribution */
+extern long rk_geometric(rk_state *state, double p);
+extern long rk_geometric_search(rk_state *state, double p);
+extern long rk_geometric_inversion(rk_state *state, double p);
+
+/* Hypergeometric distribution */
+extern long rk_hypergeometric(rk_state *state, long good, long bad, long sample);
+extern long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample);
+extern long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample);
+
+/* Triangular distribution */
+extern double rk_triangular(rk_state *state, double left, double mode, double right);
+
+/* Logarithmic series distribution */
+extern long rk_logseries(rk_state *state, double p);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RK_DISTR_ */
diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py
new file mode 100644
index 000000000..3f440985f
--- /dev/null
+++ b/numpy/random/mtrand/generate_mtrand_c.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+import sys
+import re
+import os
+
+unused_internal_funcs = ['__Pyx_PrintItem',
+ '__Pyx_PrintNewline',
+ '__Pyx_ReRaise',
+ '__Pyx_GetExcValue',
+ '__Pyx_ArgTypeTest',
+ '__Pyx_TypeTest',
+ '__Pyx_SetVtable',
+ '__Pyx_GetVtable',
+ '__Pyx_CreateClass']
+
+if __name__ == '__main__':
+ os.system('pyrexc mtrand.pyx')
+ mtrand_c = open('mtrand.c', 'r')
+ processed = open('mtrand_pp.c', 'w')
+ unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'
+ uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/')
+ for linenum, line in enumerate(mtrand_c):
+ m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]',
+ line)
+ if m:
+ line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])
+ m = uifpat.match(line)
+ if m:
+ line = ''
+ m = re.search(unused_funcs_str, line)
+ if m:
+ print >>sys.stderr, \
+ "%s was declared unused, but is used at line %d" % (m.group(),
+ linenum+1)
+ processed.write(line)
+ mtrand_c.close()
+ processed.close()
+ os.rename('mtrand_pp.c', 'mtrand.c')
diff --git a/numpy/random/mtrand/initarray.c b/numpy/random/mtrand/initarray.c
new file mode 100644
index 000000000..9bf0ef286
--- /dev/null
+++ b/numpy/random/mtrand/initarray.c
@@ -0,0 +1,134 @@
+/* These functions have been adapted from Python 2.4.1's _randommodule.c
+
+ The following changes have been made to it in 2005 by Robert Kern:
+
+ * init_by_array has been declared extern, has a void return, and uses the
+ rk_state structure to hold its data.
+
+ The original file has the following verbatim comments:
+
+ ------------------------------------------------------------------
+ The code in this module was based on a download from:
+ http://www.math.keio.ac.jp/~matumoto/MT2002/emt19937ar.html
+
+ It was modified in 2002 by Raymond Hettinger as follows:
+
+ * the principal computational lines untouched except for tabbing.
+
+ * renamed genrand_res53() to random_random() and wrapped
+ in python calling/return code.
+
+ * genrand_int32() and the helper functions, init_genrand()
+ and init_by_array(), were declared static, wrapped in
+ Python calling/return code. also, their global data
+ references were replaced with structure references.
+
+ * unused functions from the original were deleted.
+ new, original C python code was added to implement the
+ Random() interface.
+
+ The following are the verbatim comments from the original code:
+
+ A C-program for MT19937, with initialization improved 2002/1/26.
+ Coded by Takuji Nishimura and Makoto Matsumoto.
+
+ Before using, initialize the state by using init_genrand(seed)
+ or init_by_array(init_key, key_length).
+
+ Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Any feedback is very welcome.
+ http://www.math.keio.ac.jp/matumoto/emt.html
+ email: matumoto@math.keio.ac.jp
+*/
+
+#include "initarray.h"
+
+static void
+init_genrand(rk_state *self, unsigned long s);
+
+/* initializes mt[RK_STATE_LEN] with a seed */
+static void
+init_genrand(rk_state *self, unsigned long s)
+{
+ int mti;
+ unsigned long *mt;
+
+ mt = self->key;
+ mt[0]= s & 0xffffffffUL;
+ for (mti=1; mti<RK_STATE_LEN; mti++) {
+ mt[mti] =
+ (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
+ /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
+ /* In the previous versions, MSBs of the seed affect */
+ /* only MSBs of the array mt[]. */
+ /* 2002/01/09 modified by Makoto Matsumoto */
+ mt[mti] &= 0xffffffffUL;
+ /* for >32 bit machines */
+ }
+ self->pos = mti;
+ return;
+}
+
+
+/* initialize by an array with array-length */
+/* init_key is the array for initializing keys */
+/* key_length is its length */
+extern void
+init_by_array(rk_state *self, unsigned long init_key[], unsigned long key_length)
+{
+ unsigned int i, j, k; /* was signed in the original code. RDH 12/16/2002 */
+ unsigned long *mt;
+
+ mt = self->key;
+ init_genrand(self, 19650218UL);
+ i=1; j=0;
+ k = (RK_STATE_LEN>key_length ? RK_STATE_LEN : key_length);
+ for (; k; k--) {
+ mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL))
+ + init_key[j] + j; /* non linear */
+ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+ i++; j++;
+ if (i>=RK_STATE_LEN) { mt[0] = mt[RK_STATE_LEN-1]; i=1; }
+ if (j>=key_length) j=0;
+ }
+ for (k=RK_STATE_LEN-1; k; k--) {
+ mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL))
+ - i; /* non linear */
+ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+ i++;
+ if (i>=RK_STATE_LEN) { mt[0] = mt[RK_STATE_LEN-1]; i=1; }
+ }
+
+ mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
+}
+
diff --git a/numpy/random/mtrand/initarray.h b/numpy/random/mtrand/initarray.h
new file mode 100644
index 000000000..a4ac210f4
--- /dev/null
+++ b/numpy/random/mtrand/initarray.h
@@ -0,0 +1,6 @@
+#include "randomkit.h"
+
+extern void
+init_by_array(rk_state *self, unsigned long init_key[],
+ unsigned long key_length);
+
diff --git a/numpy/random/mtrand/mtrand.c b/numpy/random/mtrand/mtrand.c
new file mode 100644
index 000000000..995e4871d
--- /dev/null
+++ b/numpy/random/mtrand/mtrand.c
@@ -0,0 +1,5990 @@
+/* Generated by Pyrex 0.9.3.1 on Sun Dec 25 23:14:18 2005 */
+
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#include "string.h"
+#include "math.h"
+#include "scipy/arrayobject.h"
+#include "randomkit.h"
+#include "distributions.h"
+#include "initarray.h"
+
+
+typedef struct {PyObject **p; char *s;} __Pyx_InternTabEntry; /*proto*/
+typedef struct {PyObject **p; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+static PyObject *__Pyx_UnpackItem(PyObject *, int); /*proto*/
+static int __Pyx_EndUnpack(PyObject *, int); /*proto*/
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+static int __Pyx_GetStarArgs(PyObject **args, PyObject **kwds, char *kwd_list[], int nargs, PyObject **args2, PyObject **kwds2); /*proto*/
+static void __Pyx_WriteUnraisable(char *name); /*proto*/
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, long size); /*proto*/
+static int __Pyx_InternStrings(__Pyx_InternTabEntry *t); /*proto*/
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+staticforward char **__pyx_f;
+
+/* Declarations from mtrand */
+
+staticforward PyTypeObject __pyx_type_6mtrand_RandomState;
+
+struct __pyx_obj_6mtrand_RandomState {
+ PyObject_HEAD
+ rk_state (*internal_state);
+};
+
+static PyTypeObject *__pyx_ptype_6mtrand_dtypedescr = 0;
+static PyTypeObject *__pyx_ptype_6mtrand_ndarray = 0;
+static PyTypeObject *__pyx_ptype_6mtrand_RandomState = 0;
+static PyObject *__pyx_k2;
+static PyObject *__pyx_k3;
+static PyObject *__pyx_k4;
+static PyObject *__pyx_k5;
+static PyObject *__pyx_k6;
+static PyObject *__pyx_k7;
+static double __pyx_k8;
+static double __pyx_k9;
+static PyObject *__pyx_k10;
+static PyObject *__pyx_k11;
+static PyObject *__pyx_k12;
+static PyObject *__pyx_k13;
+static double __pyx_k14;
+static double __pyx_k15;
+static PyObject *__pyx_k16;
+static PyObject *__pyx_k17;
+static double __pyx_k18;
+static PyObject *__pyx_k19;
+static PyObject *__pyx_k20;
+static PyObject *__pyx_k21;
+static double __pyx_k22;
+static PyObject *__pyx_k23;
+static PyObject *__pyx_k24;
+static PyObject *__pyx_k25;
+static PyObject *__pyx_k26;
+static PyObject *__pyx_k27;
+static PyObject *__pyx_k28;
+static PyObject *__pyx_k29;
+static PyObject *__pyx_k30;
+static PyObject *__pyx_k31;
+static PyObject *__pyx_k32;
+static PyObject *__pyx_k33;
+static double __pyx_k34;
+static double __pyx_k35;
+static PyObject *__pyx_k36;
+static double __pyx_k37;
+static double __pyx_k38;
+static PyObject *__pyx_k39;
+static double __pyx_k40;
+static double __pyx_k41;
+static PyObject *__pyx_k42;
+static double __pyx_k43;
+static double __pyx_k44;
+static PyObject *__pyx_k45;
+static double __pyx_k46;
+static PyObject *__pyx_k47;
+static PyObject *__pyx_k48;
+static PyObject *__pyx_k49;
+static PyObject *__pyx_k50;
+static PyObject *__pyx_k51;
+static double __pyx_k52;
+static PyObject *__pyx_k53;
+static PyObject *__pyx_k54;
+static PyObject *__pyx_k55;
+static PyObject *__pyx_k56;
+static PyObject *__pyx_k57;
+static PyObject *__pyx_k58;
+static PyObject *__pyx_k59;
+static PyObject *(__pyx_f_6mtrand_cont0_array(rk_state (*),double ((*)(rk_state (*))),PyObject *)); /*proto*/
+static PyObject *(__pyx_f_6mtrand_cont1_array(rk_state (*),double ((*)(rk_state (*),double )),PyObject *,double )); /*proto*/
+static PyObject *(__pyx_f_6mtrand_cont2_array(rk_state (*),double ((*)(rk_state (*),double ,double )),PyObject *,double ,double )); /*proto*/
+static PyObject *(__pyx_f_6mtrand_cont3_array(rk_state (*),double ((*)(rk_state (*),double ,double ,double )),PyObject *,double ,double ,double )); /*proto*/
+static PyObject *(__pyx_f_6mtrand_disc0_array(rk_state (*),long ((*)(rk_state (*))),PyObject *)); /*proto*/
+static PyObject *(__pyx_f_6mtrand_discnp_array(rk_state (*),long ((*)(rk_state (*),long ,double )),PyObject *,long ,double )); /*proto*/
+static PyObject *(__pyx_f_6mtrand_discnmN_array(rk_state (*),long ((*)(rk_state (*),long ,long ,long )),PyObject *,long ,long ,long )); /*proto*/
+static PyObject *(__pyx_f_6mtrand_discd_array(rk_state (*),long ((*)(rk_state (*),double )),PyObject *,double )); /*proto*/
+static double (__pyx_f_6mtrand_kahan_sum(double (*),long )); /*proto*/
+
+/* Implementation of mtrand */
+
+
+static PyObject *__pyx_n__sp;
+static PyObject *__pyx_n__rand;
+static PyObject *__pyx_n_seed;
+static PyObject *__pyx_n_get_state;
+static PyObject *__pyx_n_set_state;
+static PyObject *__pyx_n_random_sample;
+static PyObject *__pyx_n_randint;
+static PyObject *__pyx_n_bytes;
+static PyObject *__pyx_n_uniform;
+static PyObject *__pyx_n_rand;
+static PyObject *__pyx_n_randn;
+static PyObject *__pyx_n_random_integers;
+static PyObject *__pyx_n_standard_normal;
+static PyObject *__pyx_n_normal;
+static PyObject *__pyx_n_beta;
+static PyObject *__pyx_n_exponential;
+static PyObject *__pyx_n_standard_exponential;
+static PyObject *__pyx_n_standard_gamma;
+static PyObject *__pyx_n_gamma;
+static PyObject *__pyx_n_f;
+static PyObject *__pyx_n_noncentral_f;
+static PyObject *__pyx_n_chisquare;
+static PyObject *__pyx_n_noncentral_chisquare;
+static PyObject *__pyx_n_standard_cauchy;
+static PyObject *__pyx_n_standard_t;
+static PyObject *__pyx_n_vonmises;
+static PyObject *__pyx_n_pareto;
+static PyObject *__pyx_n_weibull;
+static PyObject *__pyx_n_power;
+static PyObject *__pyx_n_laplace;
+static PyObject *__pyx_n_gumbel;
+static PyObject *__pyx_n_logistic;
+static PyObject *__pyx_n_lognormal;
+static PyObject *__pyx_n_rayleigh;
+static PyObject *__pyx_n_wald;
+static PyObject *__pyx_n_triangular;
+static PyObject *__pyx_n_binomial;
+static PyObject *__pyx_n_negative_binomial;
+static PyObject *__pyx_n_poisson;
+static PyObject *__pyx_n_zipf;
+static PyObject *__pyx_n_geometric;
+static PyObject *__pyx_n_hypergeometric;
+static PyObject *__pyx_n_logseries;
+static PyObject *__pyx_n_multivariate_normal;
+static PyObject *__pyx_n_multinomial;
+static PyObject *__pyx_n_shuffle;
+static PyObject *__pyx_n_permutation;
+static PyObject *__pyx_n_scipy;
+
+static PyObject *__pyx_n_empty;
+static PyObject *__pyx_n_Float64;
+
+static PyObject *__pyx_f_6mtrand_cont0_array(rk_state (*__pyx_v_state),double ((*__pyx_v_func)(rk_state (*))),PyObject *__pyx_v_size) {
+ double (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":128 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":129 */
+ __pyx_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":131 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":132 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":133 */
+ __pyx_v_array_data = ((double (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":134 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":135 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":136 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.cont0_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_cont1_array(rk_state (*__pyx_v_state),double ((*__pyx_v_func)(rk_state (*),double )),PyObject *__pyx_v_size,double __pyx_v_a) {
+ double (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":144 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":145 */
+ __pyx_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state,__pyx_v_a)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":147 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":148 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":149 */
+ __pyx_v_array_data = ((double (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":150 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":151 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state,__pyx_v_a);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":152 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.cont1_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_cont2_array(rk_state (*__pyx_v_state),double ((*__pyx_v_func)(rk_state (*),double ,double )),PyObject *__pyx_v_size,double __pyx_v_a,double __pyx_v_b) {
+ double (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":161 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":162 */
+ __pyx_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state,__pyx_v_a,__pyx_v_b)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":164 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":165 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":166 */
+ __pyx_v_array_data = ((double (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":167 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":168 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state,__pyx_v_a,__pyx_v_b);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":169 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.cont2_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_cont3_array(rk_state (*__pyx_v_state),double ((*__pyx_v_func)(rk_state (*),double ,double ,double )),PyObject *__pyx_v_size,double __pyx_v_a,double __pyx_v_b,double __pyx_v_c) {
+ double (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":179 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":180 */
+ __pyx_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state,__pyx_v_a,__pyx_v_b,__pyx_v_c)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":182 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":183 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":184 */
+ __pyx_v_array_data = ((double (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":185 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":186 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state,__pyx_v_a,__pyx_v_b,__pyx_v_c);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":187 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.cont3_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_n_Int;
+
+static PyObject *__pyx_f_6mtrand_disc0_array(rk_state (*__pyx_v_state),long ((*__pyx_v_func)(rk_state (*))),PyObject *__pyx_v_size) {
+ long (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":195 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":196 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":198 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Int); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":199 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":200 */
+ __pyx_v_array_data = ((long (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":201 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":202 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":203 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.disc0_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_discnp_array(rk_state (*__pyx_v_state),long ((*__pyx_v_func)(rk_state (*),long ,double )),PyObject *__pyx_v_size,long __pyx_v_n,double __pyx_v_p) {
+ long (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":211 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":212 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state,__pyx_v_n,__pyx_v_p)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":214 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Int); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":215 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":216 */
+ __pyx_v_array_data = ((long (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":217 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":218 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state,__pyx_v_n,__pyx_v_p);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":219 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.discnp_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state (*__pyx_v_state),long ((*__pyx_v_func)(rk_state (*),long ,long ,long )),PyObject *__pyx_v_size,long __pyx_v_n,long __pyx_v_m,long __pyx_v_N) {
+ long (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":228 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":229 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state,__pyx_v_n,__pyx_v_m,__pyx_v_N)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":231 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Int); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":232 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":233 */
+ __pyx_v_array_data = ((long (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":234 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":235 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state,__pyx_v_n,__pyx_v_m,__pyx_v_N);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":236 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.discnmN_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_discd_array(rk_state (*__pyx_v_state),long ((*__pyx_v_func)(rk_state (*),double )),PyObject *__pyx_v_size,double __pyx_v_a) {
+ long (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":244 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":245 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state,__pyx_v_a)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":247 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_empty); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_Int); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":248 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":249 */
+ __pyx_v_array_data = ((long (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":250 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":251 */
+ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state,__pyx_v_a);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":252 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.discd_array");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static double __pyx_f_6mtrand_kahan_sum(double (*__pyx_v_darr),long __pyx_v_n) {
+ double __pyx_v_c;
+ double __pyx_v_y;
+ double __pyx_v_t;
+ double __pyx_v_sum;
+ long __pyx_v_i;
+ double __pyx_r;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":257 */
+ __pyx_v_sum = (__pyx_v_darr[0]);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":258 */
+ __pyx_v_c = 0.0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":259 */
+ for (__pyx_v_i = 1; __pyx_v_i < __pyx_v_n; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":260 */
+ __pyx_v_y = ((__pyx_v_darr[__pyx_v_i]) - __pyx_v_c);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":261 */
+ __pyx_v_t = (__pyx_v_sum + __pyx_v_y);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":262 */
+ __pyx_v_c = ((__pyx_v_t - __pyx_v_sum) - __pyx_v_y);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":263 */
+ __pyx_v_sum = __pyx_v_t;
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":264 */
+ __pyx_r = __pyx_v_sum;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __Pyx_WriteUnraisable("mtrand.kahan_sum");
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+static int __pyx_f_6mtrand_11RandomState___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6mtrand_11RandomState___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_seed = 0;
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"seed",0};
+ __pyx_v_seed = __pyx_k2;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_seed)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_seed);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":287 */
+ ((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state = ((rk_state (*))PyMem_Malloc((sizeof(rk_state ))));
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":289 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_seed); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_seed);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_seed);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("mtrand.RandomState.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_seed);
+ return __pyx_r;
+}
+
+static void __pyx_f_6mtrand_11RandomState___dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_f_6mtrand_11RandomState___dealloc__(PyObject *__pyx_v_self) {
+ int __pyx_1;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":292 */
+ __pyx_1 = (((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state != 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":293 */
+ PyMem_Free(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state);
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ goto __pyx_L0;
+ __Pyx_AddTraceback("mtrand.RandomState.__dealloc__");
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+}
+
+static PyObject *__pyx_n_type;
+static PyObject *__pyx_n_int;
+
+static PyObject *__pyx_f_6mtrand_11RandomState_seed(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_seed[] = "Seed the generator.\n\n seed(seed=None)\n\n seed can be an integer, an array (or other sequence) of integers of any\n length, or None. If seed is None, then RandomState will try to read data\n from /dev/urandom (or the Windows analogue) if available or seed from\n the clock otherwise.\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_seed(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_seed = 0;
+ rk_error __pyx_v_errcode;
+ PyArrayObject *arrayObject_obj;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ unsigned long __pyx_5;
+ static char *__pyx_argnames[] = {"seed",0};
+ __pyx_v_seed = __pyx_k3;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_seed)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_seed);
+ arrayObject_obj = (void *)Py_None; Py_INCREF((PyObject *) arrayObject_obj);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":307 */
+ __pyx_1 = __pyx_v_seed == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":308 */
+ __pyx_v_errcode = rk_randomseed(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state);
+ goto __pyx_L2;
+ }
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_type); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_seed);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_seed);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_int); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; goto __pyx_L1;}
+ __pyx_1 = __pyx_4 == __pyx_2;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":310 */
+ __pyx_5 = PyLong_AsUnsignedLong(__pyx_v_seed); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 310; goto __pyx_L1;}
+ rk_seed(__pyx_5,((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state);
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":312 */
+ __pyx_3 = ((PyObject *)PyArray_ContiguousFromObject(__pyx_v_seed,PyArray_LONG,1,1)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_3));
+ Py_DECREF(((PyObject *)arrayObject_obj));
+ arrayObject_obj = ((PyArrayObject *)__pyx_3);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":313 */
+ init_by_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,((unsigned long (*))arrayObject_obj->data),(arrayObject_obj->dimensions[0]));
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.seed");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject_obj);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_seed);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_n_MT19937;
+
+
+static PyObject *__pyx_f_6mtrand_11RandomState_get_state(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_get_state[] = "Return a tuple representing the internal state of the generator.\n\n get_state() -> (\'MT19937\', int key[624], int pos)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_get_state(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyArrayObject *arrayObject_state;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ arrayObject_state = (void *)Py_None; Py_INCREF((PyObject *) arrayObject_state);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":322 */
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_empty); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyInt_FromLong(624); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_3, __pyx_n_Int); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_4);
+ __pyx_1 = 0;
+ __pyx_4 = 0;
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_INCREF(((PyObject *)__pyx_1));
+ Py_DECREF(((PyObject *)arrayObject_state));
+ arrayObject_state = ((PyArrayObject *)__pyx_1);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":323 */
+ memcpy(((void (*))arrayObject_state->data),((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state->key,(624 * (sizeof(long ))));
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":324 */
+ __pyx_4 = PyInt_FromLong(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state->pos); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_MT19937);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_n_MT19937);
+ Py_INCREF(((PyObject *)arrayObject_state));
+ PyTuple_SET_ITEM(__pyx_2, 1, ((PyObject *)arrayObject_state));
+ PyTuple_SET_ITEM(__pyx_2, 2, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.get_state");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject_state);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_n_ValueError;
+
+static PyObject *__pyx_k62p;
+static PyObject *__pyx_k63p;
+
+static char (__pyx_k62[]) = "algorithm must be 'MT19937'";
+static char (__pyx_k63[]) = "state must be 624 longs";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_set_state(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_set_state[] = "Set the state from a tuple.\n \n state = (\'MT19937\', int key[624], int pos)\n \n set_state(state)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_set_state(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_state = 0;
+ PyArrayObject *arrayObject_obj;
+ int __pyx_v_pos;
+ PyObject *__pyx_v_algorithm_name;
+ PyObject *__pyx_v_key;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"state",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_state)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_state);
+ arrayObject_obj = (void *)Py_None; Py_INCREF((PyObject *) arrayObject_obj);
+ __pyx_v_algorithm_name = Py_None; Py_INCREF(__pyx_v_algorithm_name);
+ __pyx_v_key = Py_None; Py_INCREF(__pyx_v_key);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":335 */
+ __pyx_1 = PyInt_FromLong(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetItem(__pyx_v_state, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_algorithm_name);
+ __pyx_v_algorithm_name = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":336 */
+ if (PyObject_Cmp(__pyx_v_algorithm_name, __pyx_n_MT19937, &__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; goto __pyx_L1;}
+ __pyx_3 = __pyx_3 != 0;
+ if (__pyx_3) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":337 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; goto __pyx_L1;}
+ Py_INCREF(__pyx_k62p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k62p);
+ __pyx_4 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":338 */
+ __pyx_1 = PySequence_GetSlice(__pyx_v_state, 1, 0x7fffffff); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; goto __pyx_L1;}
+ __pyx_2 = __Pyx_UnpackItem(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_key);
+ __pyx_v_key = __pyx_2;
+ __pyx_2 = 0;
+ __pyx_4 = __Pyx_UnpackItem(__pyx_1, 1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; goto __pyx_L1;}
+ __pyx_3 = PyInt_AsLong(__pyx_4); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_v_pos = __pyx_3;
+ if (__Pyx_EndUnpack(__pyx_1, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":339 */
+ __pyx_4 = ((PyObject *)PyArray_ContiguousFromObject(__pyx_v_key,PyArray_LONG,1,1)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject_obj));
+ arrayObject_obj = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":340 */
+ __pyx_3 = ((arrayObject_obj->dimensions[0]) != 624);
+ if (__pyx_3) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":341 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; goto __pyx_L1;}
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; goto __pyx_L1;}
+ Py_INCREF(__pyx_k63p);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_k63p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":342 */
+ memcpy(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state->key,((void (*))arrayObject_obj->data),(624 * (sizeof(long ))));
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":343 */
+ ((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state->pos = __pyx_v_pos;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.set_state");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject_obj);
+ Py_DECREF(__pyx_v_algorithm_name);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_state);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_11RandomState___getstate__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6mtrand_11RandomState___getstate__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":347 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_get_state); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 347; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 347; goto __pyx_L1;}
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 347; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("mtrand.RandomState.__getstate__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_11RandomState___setstate__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6mtrand_11RandomState___setstate__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"state",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_state)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_state);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":350 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_set_state); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_state);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("mtrand.RandomState.__setstate__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_state);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_n_random;
+static PyObject *__pyx_n___RandomState_ctor;
+
+static PyObject *__pyx_f_6mtrand_11RandomState___reduce__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6mtrand_11RandomState___reduce__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":353 */
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_random); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n___RandomState_ctor); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n_get_state); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_5);
+ __pyx_1 = 0;
+ __pyx_2 = 0;
+ __pyx_5 = 0;
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("mtrand.RandomState.__reduce__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_11RandomState_random_sample(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_random_sample[] = "Return random floats in the half-open interval [0.0, 1.0).\n\n random_sample(size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_random_sample(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"size",0};
+ __pyx_v_size = __pyx_k4;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":361 */
+ __pyx_1 = __pyx_f_6mtrand_cont0_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_double,__pyx_v_size); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.random_sample");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_11RandomState_tomaxint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_tomaxint[] = "Returns random integers x such that 0 <= x <= sys.maxint.\n\n tomaxint(size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_tomaxint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"size",0};
+ __pyx_v_size = __pyx_k5;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":368 */
+ __pyx_1 = __pyx_f_6mtrand_disc0_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_long,__pyx_v_size); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 368; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.tomaxint");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k64p;
+
+static char (__pyx_k64[]) = "low >= high";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_randint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_randint[] = "Return random integers x such that low <= x < high.\n\n randint(low, high=None, size=None) -> random values\n\n If high is None, then 0 <= x < low.\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_randint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_low = 0;
+ PyObject *__pyx_v_high = 0;
+ PyObject *__pyx_v_size = 0;
+ long __pyx_v_lo;
+ long __pyx_v_hi;
+ long __pyx_v_diff;
+ long (*__pyx_v_array_data);
+ PyArrayObject *arrayObject;
+ long __pyx_v_length;
+ long __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ long __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ static char *__pyx_argnames[] = {"low","high","size",0};
+ __pyx_v_high = __pyx_k6;
+ __pyx_v_size = __pyx_k7;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|OO", __pyx_argnames, &__pyx_v_low, &__pyx_v_high, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_low);
+ Py_INCREF(__pyx_v_high);
+ Py_INCREF(__pyx_v_size);
+ arrayObject = (void *)Py_None; Py_INCREF((PyObject *) arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":383 */
+ __pyx_1 = __pyx_v_high == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":384 */
+ __pyx_v_lo = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":385 */
+ __pyx_2 = PyInt_AsLong(__pyx_v_low); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; goto __pyx_L1;}
+ __pyx_v_hi = __pyx_2;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":387 */
+ __pyx_2 = PyInt_AsLong(__pyx_v_low); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L1;}
+ __pyx_v_lo = __pyx_2;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":388 */
+ __pyx_2 = PyInt_AsLong(__pyx_v_high); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; goto __pyx_L1;}
+ __pyx_v_hi = __pyx_2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":390 */
+ __pyx_v_diff = ((__pyx_v_hi - __pyx_v_lo) - 1);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":391 */
+ __pyx_1 = (__pyx_v_diff < 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":392 */
+ __pyx_3 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; goto __pyx_L1;}
+ Py_INCREF(__pyx_k64p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k64p);
+ __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_5, 0, 0);
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":394 */
+ __pyx_1 = __pyx_v_size == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":395 */
+ __pyx_3 = PyLong_FromUnsignedLong(rk_interval(__pyx_v_diff,((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; goto __pyx_L1;}
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":397 */
+ __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_empty); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetAttr(__pyx_3, __pyx_n_Int); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_5, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_INCREF(((PyObject *)__pyx_4));
+ Py_DECREF(((PyObject *)arrayObject));
+ arrayObject = ((PyArrayObject *)__pyx_4);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":398 */
+ __pyx_v_length = PyArray_SIZE(arrayObject);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":399 */
+ __pyx_v_array_data = ((long (*))arrayObject->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":400 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_length; ++__pyx_v_i) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":401 */
+ (__pyx_v_array_data[__pyx_v_i]) = (__pyx_v_lo + ((long )rk_interval(__pyx_v_diff,((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state)));
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":402 */
+ Py_INCREF(((PyObject *)arrayObject));
+ __pyx_r = ((PyObject *)arrayObject);
+ goto __pyx_L0;
+ }
+ __pyx_L4:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("mtrand.RandomState.randint");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_low);
+ Py_DECREF(__pyx_v_high);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_11RandomState_bytes(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_bytes[] = "Return random bytes.\n\n bytes(length) -> str\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_bytes(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ unsigned int __pyx_v_length;
+ void (*__pyx_v_bytes);
+ PyObject *__pyx_v_bytestring;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"length",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "i", __pyx_argnames, &__pyx_v_length)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_bytestring = Py_None; Py_INCREF(__pyx_v_bytestring);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":410 */
+ __pyx_v_bytes = PyMem_Malloc(__pyx_v_length);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":411 */
+ rk_fill(__pyx_v_bytes,__pyx_v_length,((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":412 */
+ __pyx_1 = PyString_FromString(((char (*))__pyx_v_bytes)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_bytestring);
+ __pyx_v_bytestring = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":413 */
+ PyMem_Free(__pyx_v_bytes);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":414 */
+ Py_INCREF(__pyx_v_bytestring);
+ __pyx_r = __pyx_v_bytestring;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.bytes");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_bytestring);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+/* mtrand.RandomState.uniform -- Pyrex-generated wrapper.
+   Parses (low, high, size) and delegates to cont2_array with rk_uniform,
+   passing `low` and the interval width (high - low). */
+static PyObject *__pyx_f_6mtrand_11RandomState_uniform(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_uniform[] = "Uniform distribution over [low, high).\n\n uniform(low=0.0, high=1.0, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_uniform(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_low;
+ double __pyx_v_high;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"low","high","size",0};
+ /* Compile-time defaults (0.0, 1.0, None per the docstring). */
+ __pyx_v_low = __pyx_k8;
+ __pyx_v_high = __pyx_k9;
+ __pyx_v_size = __pyx_k10;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|ddO", __pyx_argnames, &__pyx_v_low, &__pyx_v_high, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":421 */
+ __pyx_1 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_uniform,__pyx_v_size,__pyx_v_low,(__pyx_v_high - __pyx_v_low)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.uniform");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_n_len;
+static PyObject *__pyx_n_size;
+
+
+/* mtrand.RandomState.rand -- Pyrex-generated wrapper.
+   Collects the dimension arguments into the *args tuple; with no
+   dimensions it calls self.random_sample(), otherwise it calls
+   self.random_sample(size=args). */
+static PyObject *__pyx_f_6mtrand_11RandomState_rand(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_rand[] = "Return an array of the given dimensions which is initialized to \n random numbers from a uniform distribution in the range [0,1).\n\n rand(d0, d1, ..., dn) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_rand(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_args = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ PyObject *__pyx_5 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (__Pyx_GetStarArgs(&__pyx_args, &__pyx_kwds, __pyx_argnames, 0, &__pyx_v_args, 0) < 0) return 0;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) {
+ Py_XDECREF(__pyx_args);
+ Py_XDECREF(__pyx_kwds);
+ Py_XDECREF(__pyx_v_args);
+ return 0;
+ }
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":430 */
+ /* Evaluate `len(args) == 0` via the builtin, as the .pyx source wrote it. */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_args);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_args);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyInt_FromLong(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ if (PyObject_Cmp(__pyx_3, __pyx_1, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ __pyx_4 = __pyx_4 == 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":431 */
+ /* No dimensions given: return a single sample via self.random_sample(). */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_random_sample); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 431; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 431; goto __pyx_L1;}
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 431; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":433 */
+ /* Dimensions given: call self.random_sample(size=args). */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_random_sample); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 433; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 433; goto __pyx_L1;}
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 433; goto __pyx_L1;}
+ if (PyDict_SetItem(__pyx_1, __pyx_n_size, __pyx_v_args) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 433; goto __pyx_L1;}
+ __pyx_5 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 433; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_5;
+ __pyx_5 = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("mtrand.RandomState.rand");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_XDECREF(__pyx_v_args);
+ Py_DECREF(__pyx_v_self);
+ Py_XDECREF(__pyx_args);
+ Py_XDECREF(__pyx_kwds);
+ return __pyx_r;
+}
+
+/* mtrand.RandomState.randn -- Pyrex-generated wrapper.
+   Collects the dimension arguments into *args; with no dimensions it
+   calls self.standard_normal(), otherwise self.standard_normal(args). */
+static PyObject *__pyx_f_6mtrand_11RandomState_randn(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_randn[] = "Returns zero-mean, unit-variance Gaussian random numbers in an \n array of shape (d0, d1, ..., dn).\n\n randn(d0, d1, ..., dn) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_randn(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_args = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ static char *__pyx_argnames[] = {0};
+ if (__Pyx_GetStarArgs(&__pyx_args, &__pyx_kwds, __pyx_argnames, 0, &__pyx_v_args, 0) < 0) return 0;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) {
+ Py_XDECREF(__pyx_args);
+ Py_XDECREF(__pyx_kwds);
+ Py_XDECREF(__pyx_v_args);
+ return 0;
+ }
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":441 */
+ /* Evaluate `len(args) == 0` via the builtin, as the .pyx source wrote it. */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_args);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_args);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyInt_FromLong(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ if (PyObject_Cmp(__pyx_3, __pyx_1, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ __pyx_4 = __pyx_4 == 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":442 */
+ /* No dimensions: single sample from self.standard_normal(). */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_standard_normal); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 442; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 442; goto __pyx_L1;}
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 442; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":444 */
+ /* Dimensions given: self.standard_normal(args), args as the size tuple. */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_standard_normal); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_args);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_args);
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("mtrand.RandomState.randn");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_XDECREF(__pyx_v_args);
+ Py_DECREF(__pyx_v_self);
+ Py_XDECREF(__pyx_args);
+ Py_XDECREF(__pyx_kwds);
+ return __pyx_r;
+}
+
+/* mtrand.RandomState.random_integers -- Pyrex-generated wrapper.
+   With high=None, shifts the range to 1..low; then delegates to
+   self.randint(low, high + 1, size) to make the upper bound inclusive. */
+static PyObject *__pyx_f_6mtrand_11RandomState_random_integers(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_random_integers[] = "Return random integers x such that low <= x <= high.\n\n random_integers(low, high=None, size=None) -> random values.\n\n If high is None, then 1 <= x <= low.\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_random_integers(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_low = 0;
+ PyObject *__pyx_v_high = 0;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"low","high","size",0};
+ /* Compile-time defaults (None, None per the docstring). */
+ __pyx_v_high = __pyx_k11;
+ __pyx_v_size = __pyx_k12;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|OO", __pyx_argnames, &__pyx_v_low, &__pyx_v_high, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_low);
+ Py_INCREF(__pyx_v_high);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":453 */
+ /* high is None -> reinterpret as high=low, low=1. */
+ __pyx_1 = __pyx_v_high == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":454 */
+ Py_INCREF(__pyx_v_low);
+ Py_DECREF(__pyx_v_high);
+ __pyx_v_high = __pyx_v_low;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":455 */
+ __pyx_2 = PyInt_FromLong(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_low);
+ __pyx_v_low = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":456 */
+ /* return self.randint(low, high + 1, size)  (inclusive upper bound) */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_randint); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; goto __pyx_L1;}
+ __pyx_4 = PyNumber_Add(__pyx_v_high, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_low);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_low);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_4);
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_size);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_4;
+ __pyx_4 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.random_integers");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_low);
+ Py_DECREF(__pyx_v_high);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* mtrand.RandomState.standard_normal -- Pyrex-generated wrapper.
+   Delegates to cont0_array with rk_gauss (mean 0, stdev 1). */
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_standard_normal[] = "Standard Normal distribution (mean=0, stdev=1).\n\n standard_normal(size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"size",0};
+ __pyx_v_size = __pyx_k13; /* compile-time default (None per docstring) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":464 */
+ __pyx_1 = __pyx_f_6mtrand_cont0_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_gauss,__pyx_v_size); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.standard_normal");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k66p;
+
+static char (__pyx_k66[]) = "scale <= 0"; /* ValueError message for normal() */
+
+/* mtrand.RandomState.normal -- Pyrex-generated wrapper.
+   Validates scale > 0, then delegates to cont2_array with rk_normal. */
+static PyObject *__pyx_f_6mtrand_11RandomState_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_normal[] = "Normal distribution (mean=loc, stdev=scale).\n\n normal(loc=0.0, scale=1.0, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_loc;
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"loc","scale","size",0};
+ /* Compile-time defaults (0.0, 1.0, None per the docstring). */
+ __pyx_v_loc = __pyx_k14;
+ __pyx_v_scale = __pyx_k15;
+ __pyx_v_size = __pyx_k16;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|ddO", __pyx_argnames, &__pyx_v_loc, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":471 */
+ /* raise ValueError("scale <= 0") for a non-positive standard deviation */
+ __pyx_1 = (__pyx_v_scale <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":472 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 472; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 472; goto __pyx_L1;}
+ Py_INCREF(__pyx_k66p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k66p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 472; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 472; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":473 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_normal,__pyx_v_size,__pyx_v_loc,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 473; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.normal");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k67p;
+static PyObject *__pyx_k68p;
+
+static char (__pyx_k67[]) = "a <= 0"; /* ValueError messages for beta() */
+static char (__pyx_k68[]) = "b <= 0";
+
+/* mtrand.RandomState.beta -- Pyrex-generated wrapper.
+   Validates a > 0 and b > 0, then delegates to cont2_array with rk_beta. */
+static PyObject *__pyx_f_6mtrand_11RandomState_beta(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_beta[] = "Beta distribution over [0, 1].\n\n beta(a, b, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_beta(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_a;
+ double __pyx_v_b;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"a","b","size",0};
+ __pyx_v_size = __pyx_k17; /* compile-time default (None per docstring) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "dd|O", __pyx_argnames, &__pyx_v_a, &__pyx_v_b, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":480 */
+ /* raise ValueError("a <= 0") for a non-positive first shape parameter */
+ __pyx_1 = (__pyx_v_a <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":481 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; goto __pyx_L1;}
+ Py_INCREF(__pyx_k67p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k67p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ /* raise ValueError("b <= 0") for a non-positive second shape parameter */
+ __pyx_1 = (__pyx_v_b <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":483 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; goto __pyx_L1;}
+ Py_INCREF(__pyx_k68p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k68p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":484 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_beta,__pyx_v_size,__pyx_v_a,__pyx_v_b); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.beta");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k69p;
+
+static char (__pyx_k69[]) = "scale <= 0"; /* ValueError message for exponential() */
+
+/* mtrand.RandomState.exponential -- Pyrex-generated wrapper.
+   Validates scale > 0, then delegates to cont1_array with rk_exponential. */
+static PyObject *__pyx_f_6mtrand_11RandomState_exponential(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_exponential[] = "Exponential distribution.\n\n exponential(scale=1.0, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_exponential(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"scale","size",0};
+ /* Compile-time defaults (1.0, None per the docstring). */
+ __pyx_v_scale = __pyx_k18;
+ __pyx_v_size = __pyx_k19;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|dO", __pyx_argnames, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":491 */
+ /* raise ValueError("scale <= 0") for a non-positive scale */
+ __pyx_1 = (__pyx_v_scale <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":492 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ Py_INCREF(__pyx_k69p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k69p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":493 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_exponential,__pyx_v_size,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 493; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.exponential");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* mtrand.RandomState.standard_exponential -- Pyrex-generated wrapper.
+   Delegates to cont0_array with rk_standard_exponential (scale=1). */
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_exponential(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_standard_exponential[] = "Standard exponential distribution (scale=1).\n\n standard_exponential(size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_exponential(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"size",0};
+ __pyx_v_size = __pyx_k20; /* compile-time default (None per docstring) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":500 */
+ __pyx_1 = __pyx_f_6mtrand_cont0_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_standard_exponential,__pyx_v_size); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.standard_exponential");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k70p;
+
+static char (__pyx_k70[]) = "shape <= 0"; /* ValueError message for standard_gamma() */
+
+/* mtrand.RandomState.standard_gamma -- Pyrex-generated wrapper.
+   Validates shape > 0, then delegates to cont1_array with rk_standard_gamma. */
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_gamma(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_standard_gamma[] = "Standard Gamma distribution.\n\n standard_gamma(shape, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_gamma(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_shape;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"shape","size",0};
+ __pyx_v_size = __pyx_k21; /* compile-time default (None per docstring) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_shape, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":507 */
+ /* raise ValueError("shape <= 0") for a non-positive shape parameter */
+ __pyx_1 = (__pyx_v_shape <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":508 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; goto __pyx_L1;}
+ Py_INCREF(__pyx_k70p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k70p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":509 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_standard_gamma,__pyx_v_size,__pyx_v_shape); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.standard_gamma");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k71p;
+static PyObject *__pyx_k72p;
+
+static char (__pyx_k71[]) = "shape <= 0"; /* ValueError messages for gamma() */
+static char (__pyx_k72[]) = "scale <= 0";
+
+/* mtrand.RandomState.gamma -- Pyrex-generated wrapper.
+   Validates shape > 0 and scale > 0, then delegates to cont2_array
+   with rk_gamma. */
+static PyObject *__pyx_f_6mtrand_11RandomState_gamma(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_gamma[] = "Gamma distribution.\n\n gamma(shape, scale=1.0, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_gamma(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_shape;
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"shape","scale","size",0};
+ /* Compile-time defaults (1.0, None per the docstring). */
+ __pyx_v_scale = __pyx_k22;
+ __pyx_v_size = __pyx_k23;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|dO", __pyx_argnames, &__pyx_v_shape, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":516 */
+ /* raise ValueError("shape <= 0") for a non-positive shape parameter */
+ __pyx_1 = (__pyx_v_shape <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":517 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; goto __pyx_L1;}
+ Py_INCREF(__pyx_k71p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k71p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ /* raise ValueError("scale <= 0") for a non-positive scale */
+ __pyx_1 = (__pyx_v_scale <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":519 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; goto __pyx_L1;}
+ Py_INCREF(__pyx_k72p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k72p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":520 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_gamma,__pyx_v_size,__pyx_v_shape,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.gamma");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k73p;
+static PyObject *__pyx_k74p;
+
+static char (__pyx_k73[]) = "dfnum <= 0"; /* ValueError messages for f() */
+static char (__pyx_k74[]) = "dfden <= 0";
+
+/* mtrand.RandomState.f -- Pyrex-generated wrapper.
+   Validates dfnum > 0 and dfden > 0, then delegates to cont2_array
+   with rk_f. */
+static PyObject *__pyx_f_6mtrand_11RandomState_f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_f[] = "F distribution.\n\n f(dfnum, dfden, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_dfnum;
+ double __pyx_v_dfden;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"dfnum","dfden","size",0};
+ __pyx_v_size = __pyx_k24; /* compile-time default (None per docstring) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "dd|O", __pyx_argnames, &__pyx_v_dfnum, &__pyx_v_dfden, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":527 */
+ /* raise ValueError("dfnum <= 0") for non-positive numerator d.o.f. */
+ __pyx_1 = (__pyx_v_dfnum <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":528 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 528; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 528; goto __pyx_L1;}
+ Py_INCREF(__pyx_k73p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k73p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 528; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 528; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ /* raise ValueError("dfden <= 0") for non-positive denominator d.o.f. */
+ __pyx_1 = (__pyx_v_dfden <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":530 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; goto __pyx_L1;}
+ Py_INCREF(__pyx_k74p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k74p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":531 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_f,__pyx_v_size,__pyx_v_dfnum,__pyx_v_dfden); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 531; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r); /* unreachable; generated fall-through */
+ goto __pyx_L0;
+ __pyx_L1:; /* error exit: clean up temporaries and record traceback */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.f");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k75p;
+static PyObject *__pyx_k76p;
+static PyObject *__pyx_k77p;
+
+static char (__pyx_k75[]) = "dfnum <= 1"; /* ValueError messages for the three parameter checks below */
+static char (__pyx_k76[]) = "dfden <= 0";
+static char (__pyx_k77[]) = "nonc < 0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_noncentral_f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_noncentral_f[] = "Noncentral F distribution.\n\n noncentral_f(dfnum, dfden, nonc, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_noncentral_f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates dfnum/dfden/nonc, then samples via cont3_array */
+ double __pyx_v_dfnum;
+ double __pyx_v_dfden;
+ double __pyx_v_nonc;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"dfnum","dfden","nonc","size",0};
+ __pyx_v_size = __pyx_k25; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "ddd|O", __pyx_argnames, &__pyx_v_dfnum, &__pyx_v_dfden, &__pyx_v_nonc, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":538 */
+ __pyx_1 = (__pyx_v_dfnum <= 1); /* raise ValueError("dfnum <= 1") for invalid numerator dof */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":539 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; goto __pyx_L1;}
+ Py_INCREF(__pyx_k75p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k75p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_dfden <= 0); /* raise ValueError("dfden <= 0") for invalid denominator dof */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":541 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 541; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 541; goto __pyx_L1;}
+ Py_INCREF(__pyx_k76p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k76p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 541; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 541; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_nonc < 0); /* raise ValueError("nonc < 0"): noncentrality must be non-negative */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":543 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ Py_INCREF(__pyx_k77p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k77p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":544 */
+ __pyx_2 = __pyx_f_6mtrand_cont3_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_noncentral_f,__pyx_v_size,__pyx_v_dfnum,__pyx_v_dfden,__pyx_v_nonc); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.noncentral_f");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k78p;
+
+static char (__pyx_k78[]) = "df <= 0"; /* ValueError message for the df check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_chisquare[] = "Chi^2 distribution.\n\n chisquare(df, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates df > 0, then samples via cont1_array */
+ double __pyx_v_df;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"df","size",0};
+ __pyx_v_size = __pyx_k26; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_df, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":552 */
+ __pyx_1 = (__pyx_v_df <= 0); /* raise ValueError("df <= 0") for invalid degrees of freedom */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":553 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; goto __pyx_L1;}
+ Py_INCREF(__pyx_k78p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k78p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":554 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_chisquare,__pyx_v_size,__pyx_v_df); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.chisquare");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k79p;
+static PyObject *__pyx_k80p;
+
+static char (__pyx_k79[]) = "df <= 1"; /* ValueError messages for the two parameter checks below */
+static char (__pyx_k80[]) = "nonc < 0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_noncentral_chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_noncentral_chisquare[] = "Noncentral Chi^2 distribution.\n\n noncentral_chisquare(df, nonc, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_noncentral_chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates df/nonc, then samples via cont2_array */
+ double __pyx_v_df;
+ double __pyx_v_nonc;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"df","nonc","size",0};
+ __pyx_v_size = __pyx_k27; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "dd|O", __pyx_argnames, &__pyx_v_df, &__pyx_v_nonc, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":561 */
+ __pyx_1 = (__pyx_v_df <= 1); /* raise ValueError("df <= 1"): this sampler requires df > 1 */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":562 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; goto __pyx_L1;}
+ Py_INCREF(__pyx_k79p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k79p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_nonc < 0); /* raise ValueError("nonc < 0"): noncentrality must be non-negative */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":564 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; goto __pyx_L1;}
+ Py_INCREF(__pyx_k80p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k80p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":565 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_noncentral_chisquare,__pyx_v_size,__pyx_v_df,__pyx_v_nonc); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.noncentral_chisquare");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_cauchy(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_standard_cauchy[] = "Standard Cauchy with mode=0.\n\n standard_cauchy(size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_cauchy(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: no parameters to validate; samples via cont0_array */
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"size",0};
+ __pyx_v_size = __pyx_k28; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":573 */
+ __pyx_1 = __pyx_f_6mtrand_cont0_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_standard_cauchy,__pyx_v_size); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("mtrand.RandomState.standard_cauchy");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k81p;
+
+static char (__pyx_k81[]) = "df <= 0"; /* ValueError message for the df check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_t(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_standard_t[] = "Standard Student\'s t distribution with df degrees of freedom.\n\n standard_t(df, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_standard_t(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates df > 0, then samples via cont1_array */
+ double __pyx_v_df;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"df","size",0};
+ __pyx_v_size = __pyx_k29; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_df, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":580 */
+ __pyx_1 = (__pyx_v_df <= 0); /* raise ValueError("df <= 0") for invalid degrees of freedom */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":581 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; goto __pyx_L1;}
+ Py_INCREF(__pyx_k81p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k81p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":582 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_standard_t,__pyx_v_size,__pyx_v_df); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.standard_t");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k82p;
+
+static char (__pyx_k82[]) = "kappa < 0"; /* ValueError message for the kappa check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_vonmises(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_vonmises[] = "von Mises circular distribution with mode mu and dispersion parameter\n kappa on [-pi, pi].\n\n vonmises(mu, kappa, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_vonmises(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates kappa >= 0, then samples via cont2_array */
+ double __pyx_v_mu;
+ double __pyx_v_kappa;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"mu","kappa","size",0};
+ __pyx_v_size = __pyx_k30; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "dd|O", __pyx_argnames, &__pyx_v_mu, &__pyx_v_kappa, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":590 */
+ __pyx_1 = (__pyx_v_kappa < 0); /* raise ValueError("kappa < 0"): dispersion must be non-negative */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":591 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; goto __pyx_L1;}
+ Py_INCREF(__pyx_k82p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k82p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":592 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_vonmises,__pyx_v_size,__pyx_v_mu,__pyx_v_kappa); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.vonmises");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k83p;
+
+static char (__pyx_k83[]) = "a <= 0"; /* ValueError message for the shape-parameter check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_pareto(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_pareto[] = "Pareto distribution.\n\n pareto(a, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_pareto(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates a > 0, then samples via cont1_array */
+ double __pyx_v_a;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"a","size",0};
+ __pyx_v_size = __pyx_k31; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_a, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":599 */
+ __pyx_1 = (__pyx_v_a <= 0); /* raise ValueError("a <= 0") for invalid shape parameter */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":600 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; goto __pyx_L1;}
+ Py_INCREF(__pyx_k83p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k83p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":601 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_pareto,__pyx_v_size,__pyx_v_a); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 601; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.pareto");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k84p;
+
+static char (__pyx_k84[]) = "a <= 0"; /* ValueError message for the shape-parameter check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_weibull(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_weibull[] = "Weibull distribution.\n\n weibull(a, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_weibull(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates a > 0, then samples via cont1_array */
+ double __pyx_v_a;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"a","size",0};
+ __pyx_v_size = __pyx_k32; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_a, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":608 */
+ __pyx_1 = (__pyx_v_a <= 0); /* raise ValueError("a <= 0") for invalid shape parameter */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":609 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; goto __pyx_L1;}
+ Py_INCREF(__pyx_k84p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k84p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":610 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_weibull,__pyx_v_size,__pyx_v_a); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 610; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.weibull");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k85p;
+
+static char (__pyx_k85[]) = "a <= 0"; /* ValueError message for the shape-parameter check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_power(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_power[] = "Power distribution.\n\n power(a, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_power(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates a > 0, then samples via cont1_array */
+ double __pyx_v_a;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"a","size",0};
+ __pyx_v_size = __pyx_k33; /* default for size (None captured at module init) */
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_a, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":617 */
+ __pyx_1 = (__pyx_v_a <= 0); /* raise ValueError("a <= 0") for invalid shape parameter */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":618 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; goto __pyx_L1;}
+ Py_INCREF(__pyx_k85p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k85p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":619 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_power,__pyx_v_size,__pyx_v_a); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 619; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.power");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k86p;
+
+static char (__pyx_k86[]) = "scale <= 0.0"; /* ValueError message for the scale check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_laplace(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_laplace[] = "Laplace distribution.\n \n laplace(loc=0.0, scale=1.0, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_laplace(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates scale > 0, then samples via cont2_array */
+ double __pyx_v_loc;
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"loc","scale","size",0};
+ __pyx_v_loc = __pyx_k34; /* defaults loc=0.0, scale=1.0, size=None captured at module init */
+ __pyx_v_scale = __pyx_k35;
+ __pyx_v_size = __pyx_k36;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|ddO", __pyx_argnames, &__pyx_v_loc, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":626 */
+ __pyx_1 = (__pyx_v_scale <= 0.0); /* raise ValueError("scale <= 0.0") for invalid scale */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":627 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; goto __pyx_L1;}
+ Py_INCREF(__pyx_k86p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k86p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":628 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_laplace,__pyx_v_size,__pyx_v_loc,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 628; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.laplace");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k87p;
+
+static char (__pyx_k87[]) = "scale <= 0.0"; /* ValueError message for the scale check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_gumbel(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_gumbel[] = "Gumbel distribution.\n \n gumbel(loc=0.0, scale=1.0, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_gumbel(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates scale > 0, then samples via cont2_array */
+ double __pyx_v_loc;
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"loc","scale","size",0};
+ __pyx_v_loc = __pyx_k37; /* defaults loc=0.0, scale=1.0, size=None captured at module init */
+ __pyx_v_scale = __pyx_k38;
+ __pyx_v_size = __pyx_k39;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|ddO", __pyx_argnames, &__pyx_v_loc, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":635 */
+ __pyx_1 = (__pyx_v_scale <= 0.0); /* raise ValueError("scale <= 0.0") for invalid scale */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":636 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; goto __pyx_L1;}
+ Py_INCREF(__pyx_k87p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k87p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":637 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_gumbel,__pyx_v_size,__pyx_v_loc,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.gumbel");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k88p;
+
+static char (__pyx_k88[]) = "scale <= 0.0"; /* ValueError message for the scale check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_logistic(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_logistic[] = "Logistic distribution.\n \n logistic(loc=0.0, scale=1.0, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_logistic(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates scale > 0, then samples via cont2_array */
+ double __pyx_v_loc;
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"loc","scale","size",0};
+ __pyx_v_loc = __pyx_k40; /* defaults loc=0.0, scale=1.0, size=None captured at module init */
+ __pyx_v_scale = __pyx_k41;
+ __pyx_v_size = __pyx_k42;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|ddO", __pyx_argnames, &__pyx_v_loc, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":644 */
+ __pyx_1 = (__pyx_v_scale <= 0.0); /* raise ValueError("scale <= 0.0") for invalid scale */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":645 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; goto __pyx_L1;}
+ Py_INCREF(__pyx_k88p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k88p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":646 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_logistic,__pyx_v_size,__pyx_v_loc,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.logistic");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_k89p;
+
+static char (__pyx_k89[]) = "sigma <= 0.0"; /* ValueError message for the sigma check below */
+
+static PyObject *__pyx_f_6mtrand_11RandomState_lognormal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_lognormal[] = "Log-normal distribution.\n \n Note that the mean parameter is not the mean of this distribution, but \n the underlying normal distribution.\n \n lognormal(mean, sigma) <=> exp(normal(mean, sigma))\n \n lognormal(mean=0.0, sigma=1.0, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_lognormal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { /* Pyrex-generated wrapper: validates sigma > 0, then samples via cont2_array */
+ double __pyx_v_mean;
+ double __pyx_v_sigma;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"mean","sigma","size",0};
+ __pyx_v_mean = __pyx_k43; /* defaults mean=0.0, sigma=1.0, size=None captured at module init */
+ __pyx_v_sigma = __pyx_k44;
+ __pyx_v_size = __pyx_k45;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|ddO", __pyx_argnames, &__pyx_v_mean, &__pyx_v_sigma, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":658 */
+ __pyx_1 = (__pyx_v_sigma <= 0.0); /* raise ValueError("sigma <= 0.0") for invalid sigma */
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":659 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; goto __pyx_L1;}
+ Py_INCREF(__pyx_k89p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k89p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":660 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_lognormal,__pyx_v_size,__pyx_v_mean,__pyx_v_sigma); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; goto __pyx_L1;} /* draw samples (scalar or array per size) */
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.lognormal");
+ __pyx_r = 0; /* NULL return propagates the pending exception */
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.rayleigh (Pyrex-generated) ----
+   Validates scale > 0.0, then samples rk_rayleigh via the one-parameter
+   helper cont1_array.  Returns a new reference, or 0 on error. */
+static PyObject *__pyx_k90p;
+
+/* ValueError message raised for a non-positive scale argument. */
+static char (__pyx_k90[]) = "scale <= 0.0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_rayleigh(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_rayleigh[] = "Rayleigh distribution.\n \n rayleigh(scale=1.0, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_rayleigh(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"scale","size",0};
+ /* Defaults (scale=1.0, size=None) are module-level constants. */
+ __pyx_v_scale = __pyx_k46;
+ __pyx_v_size = __pyx_k47;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|dO", __pyx_argnames, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: raise ValueError("scale <= 0.0") on bad input. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":667 */
+ __pyx_1 = (__pyx_v_scale <= 0.0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":668 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 668; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 668; goto __pyx_L1;}
+ Py_INCREF(__pyx_k90p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k90p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 668; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 668; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_rayleigh(scale), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":669 */
+ __pyx_2 = __pyx_f_6mtrand_cont1_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_rayleigh,__pyx_v_size,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 669; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.rayleigh");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.wald (Pyrex-generated) ----
+   Wald (inverse Gaussian) sampler: validates mean > 0.0 and scale > 0.0,
+   then delegates to rk_wald via cont2_array.  Returns a new reference,
+   or 0 on error. */
+static PyObject *__pyx_k91p;
+static PyObject *__pyx_k92p;
+
+/* ValueError messages for invalid mean / scale arguments. */
+static char (__pyx_k91[]) = "mean <= 0.0";
+static char (__pyx_k92[]) = "scale <= 0.0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_wald(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_wald[] = "Wald (inverse Gaussian) distribution.\n \n wald(mean, scale, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_wald(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_mean;
+ double __pyx_v_scale;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"mean","scale","size",0};
+ /* mean and scale are required ("dd|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k48;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "dd|O", __pyx_argnames, &__pyx_v_mean, &__pyx_v_scale, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: raise ValueError on non-positive mean or scale. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":676 */
+ __pyx_1 = (__pyx_v_mean <= 0.0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":677 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; goto __pyx_L1;}
+ Py_INCREF(__pyx_k91p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k91p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_scale <= 0.0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":679 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; goto __pyx_L1;}
+ Py_INCREF(__pyx_k92p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k92p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_wald(mean, scale), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":680 */
+ __pyx_2 = __pyx_f_6mtrand_cont2_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_wald,__pyx_v_size,__pyx_v_mean,__pyx_v_scale); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.wald");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.triangular (Pyrex-generated) ----
+   Validates left <= mode <= right and left != right, then delegates to
+   rk_triangular via the three-parameter helper cont3_array.  Returns a
+   new reference, or 0 on error. */
+static PyObject *__pyx_k93p;
+static PyObject *__pyx_k94p;
+static PyObject *__pyx_k95p;
+
+/* ValueError messages for each ordering violation of (left, mode, right). */
+static char (__pyx_k93[]) = "left > mode";
+static char (__pyx_k94[]) = "mode > right";
+static char (__pyx_k95[]) = "left == right";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_triangular(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_triangular[] = "Triangular distribution starting at left, peaking at mode, and \n ending at right (left <= mode <= right).\n \n triangular(left, mode, right, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_triangular(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_left;
+ double __pyx_v_mode;
+ double __pyx_v_right;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"left","mode","right","size",0};
+ /* left/mode/right are required ("ddd|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k49;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "ddd|O", __pyx_argnames, &__pyx_v_left, &__pyx_v_mode, &__pyx_v_right, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: three ordered checks, each raising ValueError. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":688 */
+ __pyx_1 = (__pyx_v_left > __pyx_v_mode);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":689 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; goto __pyx_L1;}
+ Py_INCREF(__pyx_k93p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k93p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_mode > __pyx_v_right);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":691 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; goto __pyx_L1;}
+ Py_INCREF(__pyx_k94p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k94p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_left == __pyx_v_right);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":693 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; goto __pyx_L1;}
+ Py_INCREF(__pyx_k95p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k95p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_triangular(left, mode, right), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":694 */
+ __pyx_2 = __pyx_f_6mtrand_cont3_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_triangular,__pyx_v_size,__pyx_v_left,__pyx_v_mode,__pyx_v_right); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.triangular");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.binomial (Pyrex-generated) ----
+   Validates n > 0 and 0 <= p <= 1, then delegates to rk_binomial via the
+   discrete (long n, double p) helper discnp_array.  Returns a new
+   reference, or 0 on error. */
+static PyObject *__pyx_k96p;
+static PyObject *__pyx_k97p;
+static PyObject *__pyx_k98p;
+
+/* ValueError messages for invalid n / p arguments. */
+static char (__pyx_k96[]) = "n <= 0";
+static char (__pyx_k97[]) = "p < 0";
+static char (__pyx_k98[]) = "p > 1";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_binomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_binomial[] = "Binomial distribution of n trials and p probability of success.\n\n binomial(n, p, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_binomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ long __pyx_v_n;
+ double __pyx_v_p;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"n","p","size",0};
+ /* n and p are required ("ld|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k50;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "ld|O", __pyx_argnames, &__pyx_v_n, &__pyx_v_p, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: n > 0 and p within [0, 1]. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":703 */
+ __pyx_1 = (__pyx_v_n <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":704 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; goto __pyx_L1;}
+ Py_INCREF(__pyx_k96p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k96p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_p < 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":706 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; goto __pyx_L1;}
+ Py_INCREF(__pyx_k97p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k97p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_p > 1);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":708 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; goto __pyx_L1;}
+ Py_INCREF(__pyx_k98p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k98p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_binomial(n, p), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":709 */
+ __pyx_2 = __pyx_f_6mtrand_discnp_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_binomial,__pyx_v_size,__pyx_v_n,__pyx_v_p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.binomial");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.negative_binomial (Pyrex-generated) ----
+   Same validation pattern as binomial (n > 0, 0 <= p <= 1), delegating to
+   rk_negative_binomial via discnp_array.  Returns a new reference, or 0
+   on error. */
+static PyObject *__pyx_k99p;
+static PyObject *__pyx_k100p;
+static PyObject *__pyx_k101p;
+
+/* ValueError messages for invalid n / p arguments. */
+static char (__pyx_k99[]) = "n <= 0";
+static char (__pyx_k100[]) = "p < 0";
+static char (__pyx_k101[]) = "p > 1";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_negative_binomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_negative_binomial[] = "Negative Binomial distribution.\n\n negative_binomial(n, p, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_negative_binomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ long __pyx_v_n;
+ double __pyx_v_p;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"n","p","size",0};
+ /* n and p are required ("ld|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k51;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "ld|O", __pyx_argnames, &__pyx_v_n, &__pyx_v_p, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: n > 0 and p within [0, 1]. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":716 */
+ __pyx_1 = (__pyx_v_n <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":717 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ Py_INCREF(__pyx_k99p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k99p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_p < 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":719 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; goto __pyx_L1;}
+ Py_INCREF(__pyx_k100p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k100p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_p > 1);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":721 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; goto __pyx_L1;}
+ Py_INCREF(__pyx_k101p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k101p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_negative_binomial(n, p), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":722 */
+ __pyx_2 = __pyx_f_6mtrand_discnp_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_negative_binomial,__pyx_v_size,__pyx_v_n,__pyx_v_p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 722; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.negative_binomial");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.poisson (Pyrex-generated) ----
+   Validates lam > 0, then delegates to rk_poisson via the discrete
+   single-double helper discd_array.  Returns a new reference, or 0 on
+   error. */
+static PyObject *__pyx_k102p;
+
+/* ValueError message raised for a non-positive lam argument. */
+static char (__pyx_k102[]) = "lam <= 0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_poisson(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_poisson[] = "Poisson distribution.\n\n poisson(lam=1.0, size=None) -> random values\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_poisson(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_lam;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"lam","size",0};
+ /* Defaults (lam=1.0, size=None) are module-level constants. */
+ __pyx_v_lam = __pyx_k52;
+ __pyx_v_size = __pyx_k53;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|dO", __pyx_argnames, &__pyx_v_lam, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: raise ValueError("lam <= 0") on bad input. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":730 */
+ __pyx_1 = (__pyx_v_lam <= 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":731 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; goto __pyx_L1;}
+ Py_INCREF(__pyx_k102p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k102p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_poisson(lam), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":732 */
+ __pyx_2 = __pyx_f_6mtrand_discd_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_poisson,__pyx_v_size,__pyx_v_lam); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 732; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.poisson");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.zipf (Pyrex-generated) ----
+   Validates a > 1.0 (the Zipf exponent must exceed 1 for the distribution
+   to be normalizable), then delegates to rk_zipf via discd_array.
+   Returns a new reference, or 0 on error. */
+static PyObject *__pyx_k103p;
+
+/* ValueError message raised when the exponent a is not > 1.0. */
+static char (__pyx_k103[]) = "a <= 1.0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_zipf(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_zipf[] = "Zipf distribution.\n \n zipf(a, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_zipf(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_a;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"a","size",0};
+ /* a is required ("d|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k54;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_a, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: raise ValueError("a <= 1.0") on bad input. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":739 */
+ __pyx_1 = (__pyx_v_a <= 1.0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":740 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 740; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 740; goto __pyx_L1;}
+ Py_INCREF(__pyx_k103p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k103p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 740; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 740; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_zipf(a), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":741 */
+ __pyx_2 = __pyx_f_6mtrand_discd_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_zipf,__pyx_v_size,__pyx_v_a); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.zipf");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.geometric (Pyrex-generated) ----
+   Validates 0.0 <= p <= 1.0, then delegates to rk_geometric via
+   discd_array.  Returns a new reference, or 0 on error. */
+static PyObject *__pyx_k104p;
+static PyObject *__pyx_k105p;
+
+/* ValueError messages for a probability outside [0.0, 1.0]. */
+static char (__pyx_k104[]) = "p < 0.0";
+static char (__pyx_k105[]) = "p > 1.0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_geometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_geometric[] = "Geometric distribution with p being the probability of \"success\" on\n an individual trial.\n \n geometric(p, size=None)\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_geometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_p;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"p","size",0};
+ /* p is required ("d|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k55;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_p, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: p within [0.0, 1.0]. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":749 */
+ __pyx_1 = (__pyx_v_p < 0.0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":750 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; goto __pyx_L1;}
+ Py_INCREF(__pyx_k104p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k104p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_p > 1.0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":752 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 752; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 752; goto __pyx_L1;}
+ Py_INCREF(__pyx_k105p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k105p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 752; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 752; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_geometric(p), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":753 */
+ __pyx_2 = __pyx_f_6mtrand_discd_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_geometric,__pyx_v_size,__pyx_v_p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 753; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.geometric");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+/* ---- RandomState.hypergeometric (Pyrex-generated) ----
+   Validates ngood >= 1, nbad >= 1, nsample >= 1 and
+   ngood + nbad >= nsample, then delegates to rk_hypergeometric via the
+   three-long helper discnmN_array.  Returns a new reference, or 0 on
+   error. */
+static PyObject *__pyx_k106p;
+static PyObject *__pyx_k107p;
+static PyObject *__pyx_k108p;
+static PyObject *__pyx_k109p;
+
+/* ValueError messages, one per validation check below. */
+static char (__pyx_k106[]) = "ngood < 1";
+static char (__pyx_k107[]) = "nbad < 1";
+static char (__pyx_k108[]) = "ngood + nbad < nsample";
+static char (__pyx_k109[]) = "nsample < 1";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_hypergeometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_hypergeometric[] = "Hypergeometric distribution.\n \n Consider an urn with ngood \"good\" balls and nbad \"bad\" balls. If one \n were to draw nsample balls from the urn without replacement, then \n the hypergeometric distribution describes the distribution of \"good\" \n balls in the sample.\n \n hypergeometric(ngood, nbad, nsample, size=None) \n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_hypergeometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ long __pyx_v_ngood;
+ long __pyx_v_nbad;
+ long __pyx_v_nsample;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"ngood","nbad","nsample","size",0};
+ /* ngood/nbad/nsample are required ("lll|O"); only size defaults (None). */
+ __pyx_v_size = __pyx_k56;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "lll|O", __pyx_argnames, &__pyx_v_ngood, &__pyx_v_nbad, &__pyx_v_nsample, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Argument validation: four checks, each raising ValueError. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":765 */
+ __pyx_1 = (__pyx_v_ngood < 1);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":766 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; goto __pyx_L1;}
+ Py_INCREF(__pyx_k106p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k106p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_nbad < 1);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":768 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; goto __pyx_L1;}
+ Py_INCREF(__pyx_k107p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k107p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = ((__pyx_v_ngood + __pyx_v_nbad) < __pyx_v_nsample);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":770 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; goto __pyx_L1;}
+ Py_INCREF(__pyx_k108p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k108p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_1 = (__pyx_v_nsample < 1);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":772 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; goto __pyx_L1;}
+ Py_INCREF(__pyx_k109p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k109p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Sample rk_hypergeometric(ngood, nbad, nsample), shaped by `size`. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":773 */
+ __pyx_2 = __pyx_f_6mtrand_discnmN_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_hypergeometric,__pyx_v_size,__pyx_v_ngood,__pyx_v_nbad,__pyx_v_nsample); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 773; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ /* Error path: release temporaries, record traceback, return 0. */
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.hypergeometric");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
+ /* Pyrex-generated implementation of RandomState.logseries(p, size=None).
+  * NOTE(review): machine-generated from mtrand.pyx — do not hand-edit logic;
+  * regenerate from the .pyx source instead. */
+ static PyObject *__pyx_k110p;
+ static PyObject *__pyx_k111p;
+
+ /* Interned message strings for the two ValueError branches below. */
+ static char (__pyx_k110[]) = "p < 0";
+ static char (__pyx_k111[]) = "p > 1";
+
+ static PyObject *__pyx_f_6mtrand_11RandomState_logseries(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+ static char __pyx_doc_6mtrand_11RandomState_logseries[] = "Logarithmic series distribution.\n \n logseries(p, size=None)\n ";
+ static PyObject *__pyx_f_6mtrand_11RandomState_logseries(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ double __pyx_v_p;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"p","size",0};
+ /* __pyx_k57 holds the default value for `size` (presumably None — set at
+  * module init; confirm in the module-init code, which is outside this view). */
+ __pyx_v_size = __pyx_k57;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "d|O", __pyx_argnames, &__pyx_v_p, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_size);
+
+ /* Validate p >= 0: raise ValueError("p < 0") otherwise. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":781 */
+ __pyx_1 = (__pyx_v_p < 0);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":782 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 782; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 782; goto __pyx_L1;}
+ Py_INCREF(__pyx_k110p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k110p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 782; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 782; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ /* Validate p <= 1: raise ValueError("p > 1") otherwise. */
+ __pyx_1 = (__pyx_v_p > 1);
+ if (__pyx_1) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":784 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 784; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 784; goto __pyx_L1;}
+ Py_INCREF(__pyx_k111p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k111p);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 784; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 784; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* Delegate to the discrete-distribution helper, filling an array of shape
+  * `size` with rk_logseries(p) draws from this RandomState's internal state. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":785 */
+ __pyx_2 = __pyx_f_6mtrand_discd_array(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,rk_logseries,__pyx_v_size,__pyx_v_p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 785; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ /* Fallthrough default return (unreachable here; standard Pyrex epilogue). */
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ /* Error path: drop any live temporaries and record the traceback frame. */
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("mtrand.RandomState.logseries");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+ }
+
+ /* Pyrex-generated implementation of RandomState.multivariate_normal(mean, cov, size=None).
+  * NOTE(review): machine-generated from mtrand.pyx — do not hand-edit logic;
+  * regenerate from the .pyx source instead.
+  * Interned attribute/name objects used by the function body below. */
+ static PyObject *__pyx_n_array;
+ static PyObject *__pyx_n_shape;
+ static PyObject *__pyx_n_ArgumentError;
+ static PyObject *__pyx_n_isinstance;
+ static PyObject *__pyx_n_list;
+ static PyObject *__pyx_n_append;
+ static PyObject *__pyx_n_multiply;
+ static PyObject *__pyx_n_reduce;
+ static PyObject *__pyx_n_svd;
+ static PyObject *__pyx_n_matrixmultiply;
+ static PyObject *__pyx_n_sqrt;
+ static PyObject *__pyx_n_add;
+ static PyObject *__pyx_n_tuple;
+
+ static PyObject *__pyx_k112p;
+ static PyObject *__pyx_k113p;
+ static PyObject *__pyx_k114p;
+ static PyObject *__pyx_k115p;
+
+ /* Error messages for the argument checks, plus the module name ("scipy.corelinalg")
+  * imported at runtime to obtain svd. */
+ static char (__pyx_k112[]) = "mean must be 1 dimensional";
+ static char (__pyx_k113[]) = "cov must be 2 dimensional and square";
+ static char (__pyx_k114[]) = "mean and cov must have same length";
+ static char (__pyx_k115[]) = "scipy.corelinalg";
+
+ static PyObject *__pyx_f_6mtrand_11RandomState_multivariate_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+ static char __pyx_doc_6mtrand_11RandomState_multivariate_normal[] = "Return an array containing multivariate normally distributed random numbers\n with specified mean and covariance.\n\n multivariate_normal(mean, cov) -> random values\n multivariate_normal(mean, cov, [m, n, ...]) -> random values\n\n mean must be a 1 dimensional array. cov must be a square two dimensional\n array with the same number of rows and columns as mean has elements.\n\n The first form returns a single 1-D array containing a multivariate\n normal.\n\n The second form returns an array of shape (m, n, ..., cov.shape[0]).\n In this case, output[i,j,...,:] is a 1-D array containing a multivariate\n normal.\n ";
+ static PyObject *__pyx_f_6mtrand_11RandomState_multivariate_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_mean = 0;
+ PyObject *__pyx_v_cov = 0;
+ PyObject *__pyx_v_size = 0;
+ PyObject *__pyx_v_shape;
+ PyObject *__pyx_v_final_shape;
+ PyObject *__pyx_v_x;
+ PyObject *__pyx_v_svd;
+ PyObject *__pyx_v_u;
+ PyObject *__pyx_v_s;
+ PyObject *__pyx_v_v;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ PyObject *__pyx_5 = 0;
+ static char *__pyx_argnames[] = {"mean","cov","size",0};
+ /* __pyx_k58 holds the default value for `size` (presumably None — set at
+  * module init, outside this view). */
+ __pyx_v_size = __pyx_k58;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO|O", __pyx_argnames, &__pyx_v_mean, &__pyx_v_cov, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_mean);
+ Py_INCREF(__pyx_v_cov);
+ Py_INCREF(__pyx_v_size);
+ /* Initialize all Pyrex locals to None so the epilogue can DECREF unconditionally. */
+ __pyx_v_shape = Py_None; Py_INCREF(__pyx_v_shape);
+ __pyx_v_final_shape = Py_None; Py_INCREF(__pyx_v_final_shape);
+ __pyx_v_x = Py_None; Py_INCREF(__pyx_v_x);
+ __pyx_v_svd = Py_None; Py_INCREF(__pyx_v_svd);
+ __pyx_v_u = Py_None; Py_INCREF(__pyx_v_u);
+ __pyx_v_s = Py_None; Py_INCREF(__pyx_v_s);
+ __pyx_v_v = Py_None; Py_INCREF(__pyx_v_v);
+
+ /* mean = _sp.array(mean) — coerce the mean argument to an array. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":806 */
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 806; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 806; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 806; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_mean);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_mean);
+ __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 806; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_mean);
+ __pyx_v_mean = __pyx_3;
+ __pyx_3 = 0;
+
+ /* cov = _sp.array(cov) — coerce the covariance argument to an array. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":807 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_array); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_cov);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_cov);
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_cov);
+ __pyx_v_cov = __pyx_2;
+ __pyx_2 = 0;
+
+ /* shape = [] if size is None else size */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":808 */
+ __pyx_4 = __pyx_v_size == Py_None;
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":809 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 809; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_shape);
+ __pyx_v_shape = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":811 */
+ Py_INCREF(__pyx_v_size);
+ Py_DECREF(__pyx_v_shape);
+ __pyx_v_shape = __pyx_v_size;
+ }
+ __pyx_L2:;
+
+ /* if len(mean.shape) != 1: raise ArgumentError("mean must be 1 dimensional")
+  * NOTE(review): ArgumentError is looked up among builtins but is not a Python
+  * builtin, so this branch would actually raise NameError — defect in the
+  * upstream .pyx (later versions use ValueError); not fixable in generated code. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":812 */
+ __pyx_3 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_shape); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; goto __pyx_L1;}
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_3, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_3 = PyInt_FromLong(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; goto __pyx_L1;}
+ if (PyObject_Cmp(__pyx_2, __pyx_3, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; goto __pyx_L1;}
+ __pyx_4 = __pyx_4 != 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":813 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_ArgumentError); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; goto __pyx_L1;}
+ Py_INCREF(__pyx_k112p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k112p);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* if len(cov.shape) != 2 or cov.shape[0] != cov.shape[1]:
+  *     raise ArgumentError("cov must be 2 dimensional and square")
+  * (short-circuit: the square check runs only when the rank check passed) */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":814 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_shape); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_1 = PyInt_FromLong(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ if (PyObject_Cmp(__pyx_2, __pyx_1, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_4 = __pyx_4 != 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (!__pyx_4) {
+ __pyx_3 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_shape); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_2 = PyInt_FromLong(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetItem(__pyx_3, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_3 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_shape); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_2 = PyInt_FromLong(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_5 = PyObject_GetItem(__pyx_3, __pyx_2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ if (PyObject_Cmp(__pyx_1, __pyx_5, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; goto __pyx_L1;}
+ __pyx_4 = __pyx_4 != 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ }
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":815 */
+ __pyx_3 = __Pyx_GetName(__pyx_b, __pyx_n_ArgumentError); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; goto __pyx_L1;}
+ Py_INCREF(__pyx_k113p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k113p);
+ __pyx_1 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_1, 0, 0);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* if mean.shape[0] != cov.shape[0]: raise ArgumentError("mean and cov must have same length") */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":816 */
+ __pyx_5 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_shape); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetItem(__pyx_5, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_shape); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ __pyx_5 = PyInt_FromLong(0); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetItem(__pyx_1, __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ if (PyObject_Cmp(__pyx_2, __pyx_3, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 816; goto __pyx_L1;}
+ __pyx_4 = __pyx_4 != 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":817 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_ArgumentError); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 817; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 817; goto __pyx_L1;}
+ Py_INCREF(__pyx_k114p);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_k114p);
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 817; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 817; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* if isinstance(shape, int): shape = [shape] — allow a scalar size. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":819 */
+ __pyx_3 = __Pyx_GetName(__pyx_b, __pyx_n_isinstance); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 819; goto __pyx_L1;}
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_int); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 819; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 819; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_5, 1, __pyx_1);
+ __pyx_1 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_3, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 819; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_4 = PyObject_IsTrue(__pyx_2); if (__pyx_4 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 819; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ if (__pyx_4) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":820 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_shape);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_v_shape);
+ Py_DECREF(__pyx_v_shape);
+ __pyx_v_shape = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* final_shape = list(shape[:]) — copy so the caller's size is not mutated. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":821 */
+ __pyx_3 = __Pyx_GetName(__pyx_b, __pyx_n_list); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 821; goto __pyx_L1;}
+ __pyx_5 = PySequence_GetSlice(__pyx_v_shape, 0, 0x7fffffff); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 821; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 821; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_5);
+ __pyx_5 = 0;
+ __pyx_1 = PyObject_CallObject(__pyx_3, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 821; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_final_shape);
+ __pyx_v_final_shape = __pyx_1;
+ __pyx_1 = 0;
+
+ /* final_shape.append(mean.shape[0]) — last axis holds one multivariate draw. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":822 */
+ __pyx_5 = PyObject_GetAttr(__pyx_v_final_shape, __pyx_n_append); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_shape); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; goto __pyx_L1;}
+ __pyx_2 = PyInt_FromLong(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetItem(__pyx_3, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1);
+ __pyx_1 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_5, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* x = standard_normal(_sp.multiply.reduce(final_shape)) — flat vector of iid N(0,1). */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":826 */
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_standard_normal); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_multiply); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_2 = PyObject_GetAttr(__pyx_3, __pyx_n_reduce); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_final_shape);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_final_shape);
+ __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_5 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_x);
+ __pyx_v_x = __pyx_5;
+ __pyx_5 = 0;
+
+ /* x.shape = (_sp.multiply.reduce(final_shape[:len(final_shape)-1]), mean.shape[0])
+  * — reshape the flat vector to (n_draws, dim) for the matrix multiply below. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":827 */
+ __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetAttr(__pyx_3, __pyx_n_multiply); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_reduce); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_5 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_final_shape);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_final_shape);
+ __pyx_1 = PyObject_CallObject(__pyx_5, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_5 = PyInt_FromLong(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Subtract(__pyx_1, __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_4 = PyInt_AsLong(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_1 = PySequence_GetSlice(__pyx_v_final_shape, 0, __pyx_4); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_1);
+ __pyx_1 = 0;
+ __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_shape); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; goto __pyx_L1;}
+ __pyx_2 = PyInt_FromLong(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; goto __pyx_L1;}
+ __pyx_5 = PyObject_GetItem(__pyx_1, __pyx_2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3);
+ PyTuple_SET_ITEM(__pyx_1, 1, __pyx_5);
+ __pyx_3 = 0;
+ __pyx_5 = 0;
+ if (PyObject_SetAttr(__pyx_v_x, __pyx_n_shape, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* from scipy.corelinalg import svd — lazy import to avoid a hard dependency at module load. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":836 */
+ __pyx_2 = PyList_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_svd);
+ PyList_SET_ITEM(__pyx_2, 0, __pyx_n_svd);
+ __pyx_3 = __Pyx_Import(__pyx_k115p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_5 = PyObject_GetAttr(__pyx_3, __pyx_n_svd); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_svd);
+ __pyx_v_svd = __pyx_5;
+ __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* (u, s, v) = svd(cov) — factor the covariance; only s and v are used below. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":838 */
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_cov);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_cov);
+ __pyx_2 = PyObject_CallObject(__pyx_v_svd, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_2, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_u);
+ __pyx_v_u = __pyx_3;
+ __pyx_3 = 0;
+ __pyx_5 = __Pyx_UnpackItem(__pyx_2, 1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_s);
+ __pyx_v_s = __pyx_5;
+ __pyx_5 = 0;
+ __pyx_1 = __Pyx_UnpackItem(__pyx_2, 2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_v);
+ __pyx_v_v = __pyx_1;
+ __pyx_1 = 0;
+ if (__Pyx_EndUnpack(__pyx_2, 3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* x = _sp.matrixmultiply(x * _sp.sqrt(s), v) — color the white noise so its
+  * covariance becomes cov (x row-scaled by sqrt(singular values), then times v). */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":839 */
+ __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ __pyx_5 = PyObject_GetAttr(__pyx_3, __pyx_n_matrixmultiply); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_sqrt); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_s);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_s);
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyNumber_Multiply(__pyx_v_x, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ Py_INCREF(__pyx_v_v);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_v);
+ __pyx_2 = 0;
+ __pyx_1 = PyObject_CallObject(__pyx_5, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_x);
+ __pyx_v_x = __pyx_1;
+ __pyx_1 = 0;
+
+ /* _sp.add(mean, x, x) — add the mean in place (third argument is the output array). */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":842 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; goto __pyx_L1;}
+ __pyx_5 = PyObject_GetAttr(__pyx_2, __pyx_n_add); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_mean);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_mean);
+ Py_INCREF(__pyx_v_x);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_x);
+ Py_INCREF(__pyx_v_x);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_x);
+ __pyx_1 = PyObject_CallObject(__pyx_5, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* x.shape = tuple(final_shape) — restore the caller-requested output shape. */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":843 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_tuple); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 843; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 843; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_final_shape);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_final_shape);
+ __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 843; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ if (PyObject_SetAttr(__pyx_v_x, __pyx_n_shape, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 843; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* return x */
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":844 */
+ Py_INCREF(__pyx_v_x);
+ __pyx_r = __pyx_v_x;
+ goto __pyx_L0;
+
+ /* Fallthrough default return (unreachable here; standard Pyrex epilogue). */
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ /* Error path: drop live temporaries and record the traceback frame. */
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("mtrand.RandomState.multivariate_normal");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_shape);
+ Py_DECREF(__pyx_v_final_shape);
+ Py_DECREF(__pyx_v_x);
+ Py_DECREF(__pyx_v_svd);
+ Py_DECREF(__pyx_v_u);
+ Py_DECREF(__pyx_v_s);
+ Py_DECREF(__pyx_v_v);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_mean);
+ Py_DECREF(__pyx_v_cov);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+ }
+
+ /* Interned name and message string used by RandomState.multinomial (defined
+  * below, truncated in this view). The k116 index is skipped — presumably an
+  * unreferenced constant elided by the Pyrex code generator. */
+ static PyObject *__pyx_n_zeros;
+
+ static PyObject *__pyx_k117p;
+
+ static char (__pyx_k117[]) = "sum(pvals) > 1.0";
+
+static PyObject *__pyx_f_6mtrand_11RandomState_multinomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6mtrand_11RandomState_multinomial[] = "Multinomial distribution.\n \n multinomial(n, pvals, size=None) -> random values\n\n pvals is a sequence of probabilities that should sum to 1 (however, the\n last element is always assumed to account for the remaining probability\n as long as sum(pvals[:-1]) <= 1).\n ";
+static PyObject *__pyx_f_6mtrand_11RandomState_multinomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ long __pyx_v_n;
+ PyObject *__pyx_v_pvals = 0;
+ PyObject *__pyx_v_size = 0;
+ long __pyx_v_d;
+ PyArrayObject *arrayObject_parr;
+ PyArrayObject *arrayObject_mnarr;
+ double (*__pyx_v_pix);
+ long (*__pyx_v_mnix);
+ long __pyx_v_i;
+ long __pyx_v_j;
+ long __pyx_v_dn;
+ double __pyx_v_Sum;
+ PyObject *__pyx_v_shape;
+ PyObject *__pyx_v_multin;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ long __pyx_4;
+ int __pyx_5;
+ static char *__pyx_argnames[] = {"n","pvals","size",0};
+ __pyx_v_size = __pyx_k59;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "lO|O", __pyx_argnames, &__pyx_v_n, &__pyx_v_pvals, &__pyx_v_size)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_pvals);
+ Py_INCREF(__pyx_v_size);
+ arrayObject_parr = (void *)Py_None; Py_INCREF((PyObject *) arrayObject_parr);
+ arrayObject_mnarr = (void *)Py_None; Py_INCREF((PyObject *) arrayObject_mnarr);
+ __pyx_v_shape = Py_None; Py_INCREF(__pyx_v_shape);
+ __pyx_v_multin = Py_None; Py_INCREF(__pyx_v_multin);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":862 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 862; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 862; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_pvals);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_pvals);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 862; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = PyInt_AsLong(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 862; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_d = __pyx_4;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":863 */
+ __pyx_1 = ((PyObject *)PyArray_ContiguousFromObject(__pyx_v_pvals,PyArray_DOUBLE,1,1)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 863; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_1));
+ Py_DECREF(((PyObject *)arrayObject_parr));
+ arrayObject_parr = ((PyArrayObject *)__pyx_1);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":864 */
+ __pyx_v_pix = ((double (*))arrayObject_parr->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":866 */
+ __pyx_5 = (__pyx_f_6mtrand_kahan_sum(__pyx_v_pix,(__pyx_v_d - 1)) > 1.0);
+ if (__pyx_5) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":867 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_ValueError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 867; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 867; goto __pyx_L1;}
+ Py_INCREF(__pyx_k117p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k117p);
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 867; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_1, 0, 0);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 867; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":869 */
+ __pyx_5 = __pyx_v_size == Py_None;
+ if (__pyx_5) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":870 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_d); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ Py_DECREF(__pyx_v_shape);
+ __pyx_v_shape = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L3;
+ }
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_type); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 871; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 871; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_size);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 871; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_int); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 871; goto __pyx_L1;}
+ __pyx_5 = __pyx_3 == __pyx_1;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (__pyx_5) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":872 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_d); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 872; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 872; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_2);
+ __pyx_2 = 0;
+ Py_DECREF(__pyx_v_shape);
+ __pyx_v_shape = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":874 */
+ __pyx_1 = PyInt_FromLong(__pyx_v_d); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 874; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 874; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_1);
+ __pyx_1 = 0;
+ __pyx_3 = PyNumber_Add(__pyx_v_size, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 874; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_shape);
+ __pyx_v_shape = __pyx_3;
+ __pyx_3 = 0;
+ }
+ __pyx_L3:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":876 */
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_zeros); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetAttr(__pyx_3, __pyx_n_Int); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_1);
+ __pyx_1 = 0;
+ __pyx_1 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_multin);
+ __pyx_v_multin = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":877 */
+ Py_INCREF(((PyObject *)__pyx_v_multin));
+ Py_DECREF(((PyObject *)arrayObject_mnarr));
+ arrayObject_mnarr = ((PyArrayObject *)__pyx_v_multin);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":878 */
+ __pyx_v_mnix = ((long (*))arrayObject_mnarr->data);
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":879 */
+ __pyx_v_i = 0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":880 */
+ while (1) {
+ __pyx_5 = (__pyx_v_i < PyArray_SIZE(arrayObject_mnarr));
+ if (!__pyx_5) break;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":881 */
+ __pyx_v_Sum = 1.0;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":882 */
+ __pyx_v_dn = __pyx_v_n;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":883 */
+ __pyx_4 = (__pyx_v_d - 1);
+ for (__pyx_v_j = 0; __pyx_v_j < __pyx_4; ++__pyx_v_j) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":884 */
+ (__pyx_v_mnix[(__pyx_v_i + __pyx_v_j)]) = rk_binomial(((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state,__pyx_v_dn,((__pyx_v_pix[__pyx_v_j]) / __pyx_v_Sum));
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":885 */
+ __pyx_v_dn = (__pyx_v_dn - (__pyx_v_mnix[(__pyx_v_i + __pyx_v_j)]));
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":886 */
+ __pyx_5 = (__pyx_v_dn <= 0);
+ if (__pyx_5) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":887 */
+ goto __pyx_L7;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":888 */
+ __pyx_v_Sum = (__pyx_v_Sum - (__pyx_v_pix[__pyx_v_j]));
+ }
+ __pyx_L7:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":889 */
+ __pyx_5 = (__pyx_v_dn > 0);
+ if (__pyx_5) {
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":890 */
+ (__pyx_v_mnix[((__pyx_v_i + __pyx_v_d) - 1)]) = __pyx_v_dn;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":892 */
+ __pyx_v_i = (__pyx_v_i + __pyx_v_d);
+ }
+
+ /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":894 */
+ Py_INCREF(__pyx_v_multin);
+ __pyx_r = __pyx_v_multin;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(__pyx_r);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("mtrand.RandomState.multinomial");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(arrayObject_parr);
+ Py_DECREF(arrayObject_mnarr);
+ Py_DECREF(__pyx_v_shape);
+ Py_DECREF(__pyx_v_multin);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_pvals);
+ Py_DECREF(__pyx_v_size);
+ return __pyx_r;
+}
+
/* RandomState.shuffle(x): in-place Fisher-Yates shuffle of an arbitrary
   mutable Python sequence, driven by this RandomState's internal PRNG state.
   Pyrex-generated code: __pyx_1..__pyx_3 are owned temporaries, and every
   failure path jumps to __pyx_L1 for cleanup and traceback recording. */
static PyObject *__pyx_f_6mtrand_11RandomState_shuffle(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6mtrand_11RandomState_shuffle[] = "Modify a sequence in-place by shuffling its contents.\n    \n    shuffle(x)\n    ";
static PyObject *__pyx_f_6mtrand_11RandomState_shuffle(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_x = 0;      /* the sequence to shuffle (borrowed from args, then INCREF'd) */
  long __pyx_v_i;               /* index walking down from len(x)-1 */
  long __pyx_v_j;               /* random partner index in [0, i] */
  PyObject *__pyx_r;            /* return value (always None on success) */
  PyObject *__pyx_1 = 0;
  PyObject *__pyx_2 = 0;
  PyObject *__pyx_3 = 0;
  long __pyx_4;
  int __pyx_5;
  static char *__pyx_argnames[] = {"x",0};
  if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_x)) return 0;
  Py_INCREF(__pyx_v_self);
  Py_INCREF(__pyx_v_x);

  /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":905 */
  /* i = len(x) - 1 : start at the last element. */
  __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_len); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; goto __pyx_L1;}
  __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; goto __pyx_L1;}
  Py_INCREF(__pyx_v_x);
  PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_x);
  __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; goto __pyx_L1;}
  Py_DECREF(__pyx_1); __pyx_1 = 0;
  Py_DECREF(__pyx_2); __pyx_2 = 0;
  __pyx_1 = PyInt_FromLong(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; goto __pyx_L1;}
  __pyx_2 = PyNumber_Subtract(__pyx_3, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; goto __pyx_L1;}
  Py_DECREF(__pyx_3); __pyx_3 = 0;
  Py_DECREF(__pyx_1); __pyx_1 = 0;
  __pyx_4 = PyInt_AsLong(__pyx_2); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; goto __pyx_L1;}
  Py_DECREF(__pyx_2); __pyx_2 = 0;
  __pyx_v_i = __pyx_4;

  /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":906 */
  /* while i > 0: pick j = rk_interval(i, state) and swap x[i] <-> x[j]. */
  while (1) {
    __pyx_5 = (__pyx_v_i > 0);
    if (!__pyx_5) break;

    /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":907 */
    __pyx_v_j = rk_interval(__pyx_v_i,((struct __pyx_obj_6mtrand_RandomState *)__pyx_v_self)->internal_state);

    /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":908 */
    /* Tuple-style swap: __pyx_1 = x[j], __pyx_3 = x[i], then
       x[i] = old x[j] and x[j] = old x[i]. */
    __pyx_3 = PyInt_FromLong(__pyx_v_j); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    __pyx_1 = PyObject_GetItem(__pyx_v_x, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    Py_DECREF(__pyx_3); __pyx_3 = 0;
    __pyx_2 = PyInt_FromLong(__pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    __pyx_3 = PyObject_GetItem(__pyx_v_x, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    Py_DECREF(__pyx_2); __pyx_2 = 0;
    __pyx_2 = PyInt_FromLong(__pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    if (PyObject_SetItem(__pyx_v_x, __pyx_2, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    Py_DECREF(__pyx_2); __pyx_2 = 0;
    Py_DECREF(__pyx_1); __pyx_1 = 0;
    __pyx_2 = PyInt_FromLong(__pyx_v_j); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    if (PyObject_SetItem(__pyx_v_x, __pyx_2, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
    Py_DECREF(__pyx_2); __pyx_2 = 0;
    Py_DECREF(__pyx_3); __pyx_3 = 0;

    /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":909 */
    __pyx_v_i = (__pyx_v_i - 1);
  }

  /* Success: an in-place mutator returns None. */
  __pyx_r = Py_None; Py_INCREF(__pyx_r);
  goto __pyx_L0;
  __pyx_L1:;
  /* Error path: drop any live temporaries and record the traceback. */
  Py_XDECREF(__pyx_1);
  Py_XDECREF(__pyx_2);
  Py_XDECREF(__pyx_3);
  __Pyx_AddTraceback("mtrand.RandomState.shuffle");
  __pyx_r = 0;
  __pyx_L0:;
  Py_DECREF(__pyx_v_self);
  Py_DECREF(__pyx_v_x);
  return __pyx_r;
}
+
/* Interned name object for "arange"; populated by __Pyx_InternStrings()
   from __pyx_intern_tab during module initialization. */
static PyObject *__pyx_n_arange;
+
/* RandomState.permutation(x): if x is an int, build arange(x); otherwise
   copy x with _sp.array(x).  Then shuffle the result in place via
   self.shuffle() and return it.  The input x itself is never mutated. */
static PyObject *__pyx_f_6mtrand_11RandomState_permutation(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6mtrand_11RandomState_permutation[] = "Given an integer, return a shuffled sequence of integers >= 0 and \n    < x; given a sequence, return a shuffled array copy.\n\n    permutation(x)\n    ";
static PyObject *__pyx_f_6mtrand_11RandomState_permutation(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_x = 0;      /* int length or arbitrary sequence */
  PyObject *__pyx_v_arr;        /* freshly-built array that gets shuffled and returned */
  PyObject *__pyx_r;
  PyObject *__pyx_1 = 0;
  PyObject *__pyx_2 = 0;
  PyObject *__pyx_3 = 0;
  int __pyx_4;
  static char *__pyx_argnames[] = {"x",0};
  if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_x)) return 0;
  Py_INCREF(__pyx_v_self);
  Py_INCREF(__pyx_v_x);
  __pyx_v_arr = Py_None; Py_INCREF(__pyx_v_arr);

  /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":917 */
  /* Compiled form of `type(x) == int` (type-object identity compare).
     NOTE(review): this matches only exact ints, not Python longs or
     int subclasses — longs fall through to the array(x) branch; confirm
     whether that is intended. */
  __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_type); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; goto __pyx_L1;}
  __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; goto __pyx_L1;}
  Py_INCREF(__pyx_v_x);
  PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_x);
  __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; goto __pyx_L1;}
  Py_DECREF(__pyx_1); __pyx_1 = 0;
  Py_DECREF(__pyx_2); __pyx_2 = 0;
  __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_int); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; goto __pyx_L1;}
  __pyx_4 = __pyx_3 == __pyx_1;
  Py_DECREF(__pyx_3); __pyx_3 = 0;
  Py_DECREF(__pyx_1); __pyx_1 = 0;
  if (__pyx_4) {

    /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":918 */
    /* arr = _sp.arange(x) */
    __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 918; goto __pyx_L1;}
    __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_arange); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 918; goto __pyx_L1;}
    Py_DECREF(__pyx_2); __pyx_2 = 0;
    __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 918; goto __pyx_L1;}
    Py_INCREF(__pyx_v_x);
    PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_x);
    __pyx_2 = PyObject_CallObject(__pyx_3, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 918; goto __pyx_L1;}
    Py_DECREF(__pyx_3); __pyx_3 = 0;
    Py_DECREF(__pyx_1); __pyx_1 = 0;
    Py_DECREF(__pyx_v_arr);
    __pyx_v_arr = __pyx_2;
    __pyx_2 = 0;
    goto __pyx_L2;
  }
  /*else*/ {

    /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":920 */
    /* arr = _sp.array(x)  (copies, so the caller's sequence is untouched) */
    __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n__sp); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; goto __pyx_L1;}
    __pyx_1 = PyObject_GetAttr(__pyx_3, __pyx_n_array); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; goto __pyx_L1;}
    Py_DECREF(__pyx_3); __pyx_3 = 0;
    __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; goto __pyx_L1;}
    Py_INCREF(__pyx_v_x);
    PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_x);
    __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; goto __pyx_L1;}
    Py_DECREF(__pyx_1); __pyx_1 = 0;
    Py_DECREF(__pyx_2); __pyx_2 = 0;
    Py_DECREF(__pyx_v_arr);
    __pyx_v_arr = __pyx_3;
    __pyx_3 = 0;
  }
  __pyx_L2:;

  /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":921 */
  /* self.shuffle(arr) — shuffles in place; its None result is discarded. */
  __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_shuffle); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 921; goto __pyx_L1;}
  __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 921; goto __pyx_L1;}
  Py_INCREF(__pyx_v_arr);
  PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_arr);
  __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 921; goto __pyx_L1;}
  Py_DECREF(__pyx_1); __pyx_1 = 0;
  Py_DECREF(__pyx_2); __pyx_2 = 0;
  Py_DECREF(__pyx_3); __pyx_3 = 0;

  /* "/home/oliphant/core/scipy/lib/mtrand/mtrand.pyx":922 */
  Py_INCREF(__pyx_v_arr);
  __pyx_r = __pyx_v_arr;
  goto __pyx_L0;

  __pyx_r = Py_None; Py_INCREF(__pyx_r);
  goto __pyx_L0;
  __pyx_L1:;
  Py_XDECREF(__pyx_1);
  Py_XDECREF(__pyx_2);
  Py_XDECREF(__pyx_3);
  __Pyx_AddTraceback("mtrand.RandomState.permutation");
  __pyx_r = 0;
  __pyx_L0:;
  Py_DECREF(__pyx_v_arr);
  Py_DECREF(__pyx_v_self);
  Py_DECREF(__pyx_v_x);
  return __pyx_r;
}
+
/* Table of attribute/global names used by the generated code.  Each entry
   pairs a module-static PyObject* slot with its C-string spelling; the
   slots are filled with interned string objects by __Pyx_InternStrings()
   in initmtrand().  The list is NULL-terminated. */
static __Pyx_InternTabEntry __pyx_intern_tab[] = {
  {&__pyx_n_ArgumentError, "ArgumentError"},
  {&__pyx_n_Float64, "Float64"},
  {&__pyx_n_Int, "Int"},
  {&__pyx_n_MT19937, "MT19937"},
  {&__pyx_n_ValueError, "ValueError"},
  {&__pyx_n___RandomState_ctor, "__RandomState_ctor"},
  {&__pyx_n__rand, "_rand"},
  {&__pyx_n__sp, "_sp"},
  {&__pyx_n_add, "add"},
  {&__pyx_n_append, "append"},
  {&__pyx_n_arange, "arange"},
  {&__pyx_n_array, "array"},
  {&__pyx_n_beta, "beta"},
  {&__pyx_n_binomial, "binomial"},
  {&__pyx_n_bytes, "bytes"},
  {&__pyx_n_chisquare, "chisquare"},
  {&__pyx_n_empty, "empty"},
  {&__pyx_n_exponential, "exponential"},
  {&__pyx_n_f, "f"},
  {&__pyx_n_gamma, "gamma"},
  {&__pyx_n_geometric, "geometric"},
  {&__pyx_n_get_state, "get_state"},
  {&__pyx_n_gumbel, "gumbel"},
  {&__pyx_n_hypergeometric, "hypergeometric"},
  {&__pyx_n_int, "int"},
  {&__pyx_n_isinstance, "isinstance"},
  {&__pyx_n_laplace, "laplace"},
  {&__pyx_n_len, "len"},
  {&__pyx_n_list, "list"},
  {&__pyx_n_logistic, "logistic"},
  {&__pyx_n_lognormal, "lognormal"},
  {&__pyx_n_logseries, "logseries"},
  {&__pyx_n_matrixmultiply, "matrixmultiply"},
  {&__pyx_n_multinomial, "multinomial"},
  {&__pyx_n_multiply, "multiply"},
  {&__pyx_n_multivariate_normal, "multivariate_normal"},
  {&__pyx_n_negative_binomial, "negative_binomial"},
  {&__pyx_n_noncentral_chisquare, "noncentral_chisquare"},
  {&__pyx_n_noncentral_f, "noncentral_f"},
  {&__pyx_n_normal, "normal"},
  {&__pyx_n_pareto, "pareto"},
  {&__pyx_n_permutation, "permutation"},
  {&__pyx_n_poisson, "poisson"},
  {&__pyx_n_power, "power"},
  {&__pyx_n_rand, "rand"},
  {&__pyx_n_randint, "randint"},
  {&__pyx_n_randn, "randn"},
  {&__pyx_n_random, "random"},
  {&__pyx_n_random_integers, "random_integers"},
  {&__pyx_n_random_sample, "random_sample"},
  {&__pyx_n_rayleigh, "rayleigh"},
  {&__pyx_n_reduce, "reduce"},
  {&__pyx_n_scipy, "scipy"},
  {&__pyx_n_seed, "seed"},
  {&__pyx_n_set_state, "set_state"},
  {&__pyx_n_shape, "shape"},
  {&__pyx_n_shuffle, "shuffle"},
  {&__pyx_n_size, "size"},
  {&__pyx_n_sqrt, "sqrt"},
  {&__pyx_n_standard_cauchy, "standard_cauchy"},
  {&__pyx_n_standard_exponential, "standard_exponential"},
  {&__pyx_n_standard_gamma, "standard_gamma"},
  {&__pyx_n_standard_normal, "standard_normal"},
  {&__pyx_n_standard_t, "standard_t"},
  {&__pyx_n_svd, "svd"},
  {&__pyx_n_triangular, "triangular"},
  {&__pyx_n_tuple, "tuple"},
  {&__pyx_n_type, "type"},
  {&__pyx_n_uniform, "uniform"},
  {&__pyx_n_vonmises, "vonmises"},
  {&__pyx_n_wald, "wald"},
  {&__pyx_n_weibull, "weibull"},
  {&__pyx_n_zeros, "zeros"},
  {&__pyx_n_zipf, "zipf"},
  {0, 0}
};
+
/* Table of non-interned string constants (mostly error-message text such
   as the ValueError strings raised by the distribution methods).  Each
   entry maps a PyObject* slot (__pyx_kNNp) to its C char array and byte
   length; __Pyx_InitStrings() in initmtrand() builds the string objects.
   Gaps in the numbering (k65, k116) are constants the generator folded
   away.  The list is NULL-terminated. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_k62p, __pyx_k62, sizeof(__pyx_k62)},
  {&__pyx_k63p, __pyx_k63, sizeof(__pyx_k63)},
  {&__pyx_k64p, __pyx_k64, sizeof(__pyx_k64)},
  {&__pyx_k66p, __pyx_k66, sizeof(__pyx_k66)},
  {&__pyx_k67p, __pyx_k67, sizeof(__pyx_k67)},
  {&__pyx_k68p, __pyx_k68, sizeof(__pyx_k68)},
  {&__pyx_k69p, __pyx_k69, sizeof(__pyx_k69)},
  {&__pyx_k70p, __pyx_k70, sizeof(__pyx_k70)},
  {&__pyx_k71p, __pyx_k71, sizeof(__pyx_k71)},
  {&__pyx_k72p, __pyx_k72, sizeof(__pyx_k72)},
  {&__pyx_k73p, __pyx_k73, sizeof(__pyx_k73)},
  {&__pyx_k74p, __pyx_k74, sizeof(__pyx_k74)},
  {&__pyx_k75p, __pyx_k75, sizeof(__pyx_k75)},
  {&__pyx_k76p, __pyx_k76, sizeof(__pyx_k76)},
  {&__pyx_k77p, __pyx_k77, sizeof(__pyx_k77)},
  {&__pyx_k78p, __pyx_k78, sizeof(__pyx_k78)},
  {&__pyx_k79p, __pyx_k79, sizeof(__pyx_k79)},
  {&__pyx_k80p, __pyx_k80, sizeof(__pyx_k80)},
  {&__pyx_k81p, __pyx_k81, sizeof(__pyx_k81)},
  {&__pyx_k82p, __pyx_k82, sizeof(__pyx_k82)},
  {&__pyx_k83p, __pyx_k83, sizeof(__pyx_k83)},
  {&__pyx_k84p, __pyx_k84, sizeof(__pyx_k84)},
  {&__pyx_k85p, __pyx_k85, sizeof(__pyx_k85)},
  {&__pyx_k86p, __pyx_k86, sizeof(__pyx_k86)},
  {&__pyx_k87p, __pyx_k87, sizeof(__pyx_k87)},
  {&__pyx_k88p, __pyx_k88, sizeof(__pyx_k88)},
  {&__pyx_k89p, __pyx_k89, sizeof(__pyx_k89)},
  {&__pyx_k90p, __pyx_k90, sizeof(__pyx_k90)},
  {&__pyx_k91p, __pyx_k91, sizeof(__pyx_k91)},
  {&__pyx_k92p, __pyx_k92, sizeof(__pyx_k92)},
  {&__pyx_k93p, __pyx_k93, sizeof(__pyx_k93)},
  {&__pyx_k94p, __pyx_k94, sizeof(__pyx_k94)},
  {&__pyx_k95p, __pyx_k95, sizeof(__pyx_k95)},
  {&__pyx_k96p, __pyx_k96, sizeof(__pyx_k96)},
  {&__pyx_k97p, __pyx_k97, sizeof(__pyx_k97)},
  {&__pyx_k98p, __pyx_k98, sizeof(__pyx_k98)},
  {&__pyx_k99p, __pyx_k99, sizeof(__pyx_k99)},
  {&__pyx_k100p, __pyx_k100, sizeof(__pyx_k100)},
  {&__pyx_k101p, __pyx_k101, sizeof(__pyx_k101)},
  {&__pyx_k102p, __pyx_k102, sizeof(__pyx_k102)},
  {&__pyx_k103p, __pyx_k103, sizeof(__pyx_k103)},
  {&__pyx_k104p, __pyx_k104, sizeof(__pyx_k104)},
  {&__pyx_k105p, __pyx_k105, sizeof(__pyx_k105)},
  {&__pyx_k106p, __pyx_k106, sizeof(__pyx_k106)},
  {&__pyx_k107p, __pyx_k107, sizeof(__pyx_k107)},
  {&__pyx_k108p, __pyx_k108, sizeof(__pyx_k108)},
  {&__pyx_k109p, __pyx_k109, sizeof(__pyx_k109)},
  {&__pyx_k110p, __pyx_k110, sizeof(__pyx_k110)},
  {&__pyx_k111p, __pyx_k111, sizeof(__pyx_k111)},
  {&__pyx_k112p, __pyx_k112, sizeof(__pyx_k112)},
  {&__pyx_k113p, __pyx_k113, sizeof(__pyx_k113)},
  {&__pyx_k114p, __pyx_k114, sizeof(__pyx_k114)},
  {&__pyx_k115p, __pyx_k115, sizeof(__pyx_k115)},
  {&__pyx_k117p, __pyx_k117, sizeof(__pyx_k117)},
  {0, 0, 0}
};
+
+static PyObject *__pyx_tp_new_6mtrand_RandomState(PyTypeObject *t, PyObject *a, PyObject *k) {
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ struct __pyx_obj_6mtrand_RandomState *p = (struct __pyx_obj_6mtrand_RandomState *)o;
+ return o;
+}
+
/* tp_dealloc slot: run the user-level __dealloc__ (which releases the
   C-side generator state), then free the object's memory. */
static void __pyx_tp_dealloc_6mtrand_RandomState(PyObject *o) {
  {
    /* Standard Pyrex dealloc guard: save any exception already in
       flight so __dealloc__ runs with a clean error state, and bump
       the refcount temporarily so code reached from __dealloc__ cannot
       trigger a second, recursive deallocation of `o`. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++o->ob_refcnt;
    __pyx_f_6mtrand_11RandomState___dealloc__(o);
    /* __dealloc__ must not leak exceptions out of tp_dealloc; report
       any it raised as unraisable instead. */
    if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
    --o->ob_refcnt;
    PyErr_Restore(etype, eval, etb);
  }
  (*o->ob_type->tp_free)(o);
}
+
/* tp_traverse slot: RandomState stores no PyObject* members (only the
   C rk_state pointer), so there is nothing for the cyclic GC to visit. */
static int __pyx_tp_traverse_6mtrand_RandomState(PyObject *o, visitproc v, void *a) {
  return 0;
}
+
/* tp_clear slot: no PyObject* members to drop, so clearing is a no-op. */
static int __pyx_tp_clear_6mtrand_RandomState(PyObject *o) {
  return 0;
}
+
/* Method table for mtrand.RandomState.  Every entry is exposed with
   METH_VARARGS|METH_KEYWORDS (all wrappers take (self, args, kwds)).
   The pickle-protocol methods (__getstate__/__setstate__/__reduce__)
   carry no docstring; all others reference their __pyx_doc_* string.
   NULL-terminated. */
static struct PyMethodDef __pyx_methods_6mtrand_RandomState[] = {
  {"seed", (PyCFunction)__pyx_f_6mtrand_11RandomState_seed, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_seed},
  {"get_state", (PyCFunction)__pyx_f_6mtrand_11RandomState_get_state, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_get_state},
  {"set_state", (PyCFunction)__pyx_f_6mtrand_11RandomState_set_state, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_set_state},
  {"__getstate__", (PyCFunction)__pyx_f_6mtrand_11RandomState___getstate__, METH_VARARGS|METH_KEYWORDS, 0},
  {"__setstate__", (PyCFunction)__pyx_f_6mtrand_11RandomState___setstate__, METH_VARARGS|METH_KEYWORDS, 0},
  {"__reduce__", (PyCFunction)__pyx_f_6mtrand_11RandomState___reduce__, METH_VARARGS|METH_KEYWORDS, 0},
  {"random_sample", (PyCFunction)__pyx_f_6mtrand_11RandomState_random_sample, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_random_sample},
  {"tomaxint", (PyCFunction)__pyx_f_6mtrand_11RandomState_tomaxint, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_tomaxint},
  {"randint", (PyCFunction)__pyx_f_6mtrand_11RandomState_randint, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_randint},
  {"bytes", (PyCFunction)__pyx_f_6mtrand_11RandomState_bytes, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_bytes},
  {"uniform", (PyCFunction)__pyx_f_6mtrand_11RandomState_uniform, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_uniform},
  {"rand", (PyCFunction)__pyx_f_6mtrand_11RandomState_rand, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_rand},
  {"randn", (PyCFunction)__pyx_f_6mtrand_11RandomState_randn, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_randn},
  {"random_integers", (PyCFunction)__pyx_f_6mtrand_11RandomState_random_integers, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_random_integers},
  {"standard_normal", (PyCFunction)__pyx_f_6mtrand_11RandomState_standard_normal, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_standard_normal},
  {"normal", (PyCFunction)__pyx_f_6mtrand_11RandomState_normal, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_normal},
  {"beta", (PyCFunction)__pyx_f_6mtrand_11RandomState_beta, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_beta},
  {"exponential", (PyCFunction)__pyx_f_6mtrand_11RandomState_exponential, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_exponential},
  {"standard_exponential", (PyCFunction)__pyx_f_6mtrand_11RandomState_standard_exponential, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_standard_exponential},
  {"standard_gamma", (PyCFunction)__pyx_f_6mtrand_11RandomState_standard_gamma, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_standard_gamma},
  {"gamma", (PyCFunction)__pyx_f_6mtrand_11RandomState_gamma, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_gamma},
  {"f", (PyCFunction)__pyx_f_6mtrand_11RandomState_f, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_f},
  {"noncentral_f", (PyCFunction)__pyx_f_6mtrand_11RandomState_noncentral_f, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_noncentral_f},
  {"chisquare", (PyCFunction)__pyx_f_6mtrand_11RandomState_chisquare, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_chisquare},
  {"noncentral_chisquare", (PyCFunction)__pyx_f_6mtrand_11RandomState_noncentral_chisquare, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_noncentral_chisquare},
  {"standard_cauchy", (PyCFunction)__pyx_f_6mtrand_11RandomState_standard_cauchy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_standard_cauchy},
  {"standard_t", (PyCFunction)__pyx_f_6mtrand_11RandomState_standard_t, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_standard_t},
  {"vonmises", (PyCFunction)__pyx_f_6mtrand_11RandomState_vonmises, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_vonmises},
  {"pareto", (PyCFunction)__pyx_f_6mtrand_11RandomState_pareto, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_pareto},
  {"weibull", (PyCFunction)__pyx_f_6mtrand_11RandomState_weibull, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_weibull},
  {"power", (PyCFunction)__pyx_f_6mtrand_11RandomState_power, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_power},
  {"laplace", (PyCFunction)__pyx_f_6mtrand_11RandomState_laplace, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_laplace},
  {"gumbel", (PyCFunction)__pyx_f_6mtrand_11RandomState_gumbel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_gumbel},
  {"logistic", (PyCFunction)__pyx_f_6mtrand_11RandomState_logistic, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_logistic},
  {"lognormal", (PyCFunction)__pyx_f_6mtrand_11RandomState_lognormal, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_lognormal},
  {"rayleigh", (PyCFunction)__pyx_f_6mtrand_11RandomState_rayleigh, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_rayleigh},
  {"wald", (PyCFunction)__pyx_f_6mtrand_11RandomState_wald, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_wald},
  {"triangular", (PyCFunction)__pyx_f_6mtrand_11RandomState_triangular, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_triangular},
  {"binomial", (PyCFunction)__pyx_f_6mtrand_11RandomState_binomial, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_binomial},
  {"negative_binomial", (PyCFunction)__pyx_f_6mtrand_11RandomState_negative_binomial, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_negative_binomial},
  {"poisson", (PyCFunction)__pyx_f_6mtrand_11RandomState_poisson, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_poisson},
  {"zipf", (PyCFunction)__pyx_f_6mtrand_11RandomState_zipf, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_zipf},
  {"geometric", (PyCFunction)__pyx_f_6mtrand_11RandomState_geometric, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_geometric},
  {"hypergeometric", (PyCFunction)__pyx_f_6mtrand_11RandomState_hypergeometric, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_hypergeometric},
  {"logseries", (PyCFunction)__pyx_f_6mtrand_11RandomState_logseries, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_logseries},
  {"multivariate_normal", (PyCFunction)__pyx_f_6mtrand_11RandomState_multivariate_normal, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_multivariate_normal},
  {"multinomial", (PyCFunction)__pyx_f_6mtrand_11RandomState_multinomial, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_multinomial},
  {"shuffle", (PyCFunction)__pyx_f_6mtrand_11RandomState_shuffle, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_shuffle},
  {"permutation", (PyCFunction)__pyx_f_6mtrand_11RandomState_permutation, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6mtrand_11RandomState_permutation},
  {0, 0, 0, 0}
};
+
/* Number-protocol table: RandomState implements no numeric operations,
   so every slot is NULL.  The (empty) table is still installed because
   Pyrex always wires tp_as_number for extension types. */
static PyNumberMethods __pyx_tp_as_number_RandomState = {
  0, /*nb_add*/
  0, /*nb_subtract*/
  0, /*nb_multiply*/
  0, /*nb_divide*/
  0, /*nb_remainder*/
  0, /*nb_divmod*/
  0, /*nb_power*/
  0, /*nb_negative*/
  0, /*nb_positive*/
  0, /*nb_absolute*/
  0, /*nb_nonzero*/
  0, /*nb_invert*/
  0, /*nb_lshift*/
  0, /*nb_rshift*/
  0, /*nb_and*/
  0, /*nb_xor*/
  0, /*nb_or*/
  0, /*nb_coerce*/
  0, /*nb_int*/
  0, /*nb_long*/
  0, /*nb_float*/
  0, /*nb_oct*/
  0, /*nb_hex*/
  0, /*nb_inplace_add*/
  0, /*nb_inplace_subtract*/
  0, /*nb_inplace_multiply*/
  0, /*nb_inplace_divide*/
  0, /*nb_inplace_remainder*/
  0, /*nb_inplace_power*/
  0, /*nb_inplace_lshift*/
  0, /*nb_inplace_rshift*/
  0, /*nb_inplace_and*/
  0, /*nb_inplace_xor*/
  0, /*nb_inplace_or*/
  0, /*nb_floor_divide*/
  0, /*nb_true_divide*/
  0, /*nb_inplace_floor_divide*/
  0, /*nb_inplace_true_divide*/
};
+
/* Sequence-protocol table: unused by RandomState; all slots NULL. */
static PySequenceMethods __pyx_tp_as_sequence_RandomState = {
  0, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  0, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
+
/* Mapping-protocol table: unused by RandomState; all slots NULL. */
static PyMappingMethods __pyx_tp_as_mapping_RandomState = {
  0, /*mp_length*/
  0, /*mp_subscript*/
  0, /*mp_ass_subscript*/
};
+
/* Buffer-protocol table: unused by RandomState; all slots NULL. */
static PyBufferProcs __pyx_tp_as_buffer_RandomState = {
  0, /*bf_getreadbuffer*/
  0, /*bf_getwritebuffer*/
  0, /*bf_getsegcount*/
  0, /*bf_getcharbuffer*/
};
+
/* The mtrand.RandomState type object.  Key wiring:
   - tp_new/tp_init/tp_dealloc point at the Pyrex-generated slots above;
   - the four protocol tables are installed even though empty;
   - ob_type is left 0 here and filled in by PyType_Ready() at init time;
   - Py_TPFLAGS_BASETYPE allows Python-level subclassing. */
statichere PyTypeObject __pyx_type_6mtrand_RandomState = {
  PyObject_HEAD_INIT(0)
  0, /*ob_size*/
  "mtrand.RandomState", /*tp_name*/
  sizeof(struct __pyx_obj_6mtrand_RandomState), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_6mtrand_RandomState, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  0, /*tp_compare*/
  0, /*tp_repr*/
  &__pyx_tp_as_number_RandomState, /*tp_as_number*/
  &__pyx_tp_as_sequence_RandomState, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_RandomState, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_RandomState, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE, /*tp_flags*/
  "Container for the Mersenne Twister PRNG.\n\n    Constructor\n    -----------\n    RandomState(seed=None): initializes the PRNG with the given seed. See the\n    seed() method for details.\n\n    Distribution Methods\n    -----------------\n    RandomState exposes a number of methods for generating random numbers drawn\n    from a variety of probability distributions. In addition to the\n    distribution-specific arguments, each method takes a keyword argument\n    size=None. If size is None, then a single value is generated and returned.\n    If size is an integer, then a 1-D scipy array filled with generated values\n    is returned. If size is a tuple, then a scipy array with that shape is\n    filled and returned.\n    ", /*tp_doc*/
  __pyx_tp_traverse_6mtrand_RandomState, /*tp_traverse*/
  __pyx_tp_clear_6mtrand_RandomState, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_6mtrand_RandomState, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_f_6mtrand_11RandomState___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_6mtrand_RandomState, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
};
+
+/* Module-level method table: deliberately empty (sentinel only).  The
+   module's public functions are not C-level methods; they are attached
+   dynamically in initmtrand() by re-exporting attributes of the shared
+   `_rand` RandomState instance. */
+static struct PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+/* Helper for initmtrand(): fetch the attribute *attr_name* from the
+   module-level `_rand` RandomState instance and publish it under the same
+   name as a module-level attribute (so e.g. mtrand.seed is _rand.seed).
+   Returns 0 on success, -1 with a Python exception set on failure. */
+static int __pyx_reexport_rand_attr(PyObject *attr_name) {
+ PyObject *rand_obj;
+ PyObject *value;
+ rand_obj = __Pyx_GetName(__pyx_m, __pyx_n__rand);
+ if (!rand_obj)
+  return -1;
+ value = PyObject_GetAttr(rand_obj, attr_name);
+ Py_DECREF(rand_obj);
+ if (!value)
+  return -1;
+ if (PyObject_SetAttr(__pyx_m, attr_name, value) < 0) {
+  Py_DECREF(value);
+  return -1;
+ }
+ Py_DECREF(value);
+ return 0;
+}
+
+DL_EXPORT(void) initmtrand(void); /*proto*/
+/* Module initialization: create the module object, import the scipy C API
+   and the scipy module, register the RandomState type, set the
+   default-argument constants, create the shared `_rand` instance, and
+   re-export its methods as module-level functions.  The original generated
+   code repeated the same 5-line export stanza ~45 times; this version
+   drives the identical logic from data tables while preserving the exact
+   __pyx_filename/__pyx_lineno error attribution for each step. */
+DL_EXPORT(void) initmtrand(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_m = Py_InitModule4("mtrand", __pyx_methods, 0, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;};
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;};
+ if (__Pyx_InternStrings(__pyx_intern_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;};
+ __pyx_ptype_6mtrand_dtypedescr = __Pyx_ImportType("scipy", "dtypedescr", sizeof(PyArray_Descr)); if (!__pyx_ptype_6mtrand_dtypedescr) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 32; goto __pyx_L1;}
+ __pyx_ptype_6mtrand_ndarray = __Pyx_ImportType("scipy", "ndarray", sizeof(PyArrayObject)); if (!__pyx_ptype_6mtrand_ndarray) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 36; goto __pyx_L1;}
+ if (PyType_Ready(&__pyx_type_6mtrand_RandomState) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "RandomState", (PyObject *)&__pyx_type_6mtrand_RandomState) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; goto __pyx_L1;}
+ __pyx_ptype_6mtrand_RandomState = &__pyx_type_6mtrand_RandomState;
+
+ /* Initialize the scipy array C API ("mtrand.pyx":118). */
+ import_array();
+
+ /* Import scipy and bind it as the module attribute _sp ("mtrand.pyx":120). */
+ __pyx_1 = __Pyx_Import(__pyx_n_scipy, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n__sp, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* Default keyword-argument constants __pyx_k2..__pyx_k59 for the
+    RandomState method signatures ("mtrand.pyx":286-846).  Most default to
+    None; a few are 0.0/1.0 numeric defaults.  The original code set each
+    one in its own stanza — here the None defaults are driven from a table
+    of the same globals in the same order. */
+ {
+  static PyObject **const __pyx_none_defaults[] = {
+   &__pyx_k2, &__pyx_k3, &__pyx_k4, &__pyx_k5, &__pyx_k6, &__pyx_k7,
+   &__pyx_k10, &__pyx_k11, &__pyx_k12, &__pyx_k13, &__pyx_k16, &__pyx_k17,
+   &__pyx_k19, &__pyx_k20, &__pyx_k21, &__pyx_k23, &__pyx_k24, &__pyx_k25,
+   &__pyx_k26, &__pyx_k27, &__pyx_k28, &__pyx_k29, &__pyx_k30, &__pyx_k31,
+   &__pyx_k32, &__pyx_k33, &__pyx_k36, &__pyx_k39, &__pyx_k42, &__pyx_k45,
+   &__pyx_k47, &__pyx_k48, &__pyx_k49, &__pyx_k50, &__pyx_k51, &__pyx_k53,
+   &__pyx_k54, &__pyx_k55, &__pyx_k56, &__pyx_k57, &__pyx_k58, &__pyx_k59
+  };
+  size_t i;
+  for (i = 0; i < sizeof(__pyx_none_defaults)/sizeof(__pyx_none_defaults[0]); i++) {
+   Py_INCREF(Py_None);
+   *__pyx_none_defaults[i] = Py_None;
+  }
+ }
+
+ /* Numeric defaults (same values and targets as the generated code). */
+ __pyx_k8 = 0.0;
+ __pyx_k9 = 1.0;
+ __pyx_k14 = 0.0;
+ __pyx_k15 = 1.0;
+ __pyx_k18 = 1.0;
+ __pyx_k22 = 1.0;
+ __pyx_k34 = 0.0;
+ __pyx_k35 = 1.0;
+ __pyx_k37 = 0.0;
+ __pyx_k38 = 1.0;
+ __pyx_k40 = 0.0;
+ __pyx_k41 = 1.0;
+ __pyx_k43 = 0.0;
+ __pyx_k44 = 1.0;
+ __pyx_k46 = 1.0;
+ __pyx_k52 = 1.0;
+
+ /* _rand = RandomState() ("mtrand.pyx":924): the shared instance that
+    backs all the module-level convenience functions below. */
+ __pyx_1 = PyTuple_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(((PyObject*)__pyx_ptype_6mtrand_RandomState), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n__rand, __pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* Re-export the attributes of _rand as module-level names
+    ("mtrand.pyx":925-972).  Each entry records the interned attribute name
+    and the .pyx line number to report if that particular export fails,
+    matching the generated code's per-stanza error attribution. */
+ {
+  static const struct { PyObject **name; int lineno; } __pyx_rand_exports[] = {
+   { &__pyx_n_seed, 925 },
+   { &__pyx_n_get_state, 926 },
+   { &__pyx_n_set_state, 927 },
+   { &__pyx_n_random_sample, 928 },
+   { &__pyx_n_randint, 929 },
+   { &__pyx_n_bytes, 930 },
+   { &__pyx_n_uniform, 931 },
+   { &__pyx_n_rand, 932 },
+   { &__pyx_n_randn, 933 },
+   { &__pyx_n_random_integers, 934 },
+   { &__pyx_n_standard_normal, 935 },
+   { &__pyx_n_normal, 936 },
+   { &__pyx_n_beta, 937 },
+   { &__pyx_n_exponential, 938 },
+   { &__pyx_n_standard_exponential, 939 },
+   { &__pyx_n_standard_gamma, 940 },
+   { &__pyx_n_gamma, 941 },
+   { &__pyx_n_f, 942 },
+   { &__pyx_n_noncentral_f, 943 },
+   { &__pyx_n_chisquare, 944 },
+   { &__pyx_n_noncentral_chisquare, 945 },
+   { &__pyx_n_standard_cauchy, 946 },
+   { &__pyx_n_standard_t, 947 },
+   { &__pyx_n_vonmises, 948 },
+   { &__pyx_n_pareto, 949 },
+   { &__pyx_n_weibull, 950 },
+   { &__pyx_n_power, 951 },
+   { &__pyx_n_laplace, 952 },
+   { &__pyx_n_gumbel, 953 },
+   { &__pyx_n_logistic, 954 },
+   { &__pyx_n_lognormal, 955 },
+   { &__pyx_n_rayleigh, 956 },
+   { &__pyx_n_wald, 957 },
+   { &__pyx_n_triangular, 958 },
+   { &__pyx_n_binomial, 960 },
+   { &__pyx_n_negative_binomial, 961 },
+   { &__pyx_n_poisson, 962 },
+   { &__pyx_n_zipf, 963 },
+   { &__pyx_n_geometric, 964 },
+   { &__pyx_n_hypergeometric, 965 },
+   { &__pyx_n_logseries, 966 },
+   { &__pyx_n_multivariate_normal, 968 },
+   { &__pyx_n_multinomial, 969 },
+   { &__pyx_n_shuffle, 971 },
+   { &__pyx_n_permutation, 972 }
+  };
+  size_t i;
+  for (i = 0; i < sizeof(__pyx_rand_exports)/sizeof(__pyx_rand_exports[0]); i++) {
+   if (__pyx_reexport_rand_attr(*__pyx_rand_exports[i].name) < 0) {
+    __pyx_filename = __pyx_f[0];
+    __pyx_lineno = __pyx_rand_exports[i].lineno;
+    goto __pyx_L1;
+   }
+  }
+ }
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("mtrand");
+}
+
+/* Source filenames used by the error-reporting machinery: __pyx_f[0] is
+   the .pyx file itself, __pyx_f[1] the included scipy.pxi declarations. */
+static char *__pyx_filenames[] = {
+ "mtrand.pyx",
+ "scipy.pxi",
+};
+statichere char **__pyx_f = __pyx_filenames;
+
+/* Runtime support code */
+
+/* Split the raw (args, kwds) of a Python-level call into the declared part
+   and the *args / **kwds overflow part.
+
+   On entry *args holds the call tuple and *kwds the keyword dict (which may
+   be NULL).  kwd_list is a NULL-terminated array of declared keyword names
+   and nargs the number of declared positional parameters.  On success:
+     - *args is replaced by a new reference to the first nargs positionals;
+     - *kwds is replaced by a new dict of only the declared keywords
+       (or left NULL if there were none);
+     - if args2 is non-NULL, *args2 receives the extra positionals;
+     - if kwds2 is non-NULL, *kwds2 receives the extra keywords.
+   Returns 0 on success, -1 with an exception set on failure, in which case
+   *args2 and *kwds2 (when supplied) are cleared to 0. */
+static int __Pyx_GetStarArgs(
+ PyObject **args,
+ PyObject **kwds,
+ char *kwd_list[],
+ int nargs,
+ PyObject **args2,
+ PyObject **kwds2)
+{
+ PyObject *x = 0, *args1 = 0, *kwds1 = 0;
+
+ if (args2)
+  *args2 = 0;
+ if (kwds2)
+  *kwds2 = 0;
+
+ if (args2) {
+  args1 = PyTuple_GetSlice(*args, 0, nargs);
+  if (!args1)
+   goto bad;
+  *args2 = PyTuple_GetSlice(*args, nargs, PyTuple_Size(*args));
+  if (!*args2)
+   goto bad;
+ }
+ else {
+  args1 = *args;
+  Py_INCREF(args1);
+ }
+
+ if (kwds2) {
+  if (*kwds) {
+   char **p;
+   kwds1 = PyDict_New();
+   /* BUG FIX: the generated code tested `!kwds` (the out-parameter,
+      never NULL here) instead of the PyDict_New() result, so an
+      allocation failure went undetected and kwds1 == NULL leaked
+      into the item-copy loop below. */
+   if (!kwds1)
+    goto bad;
+   *kwds2 = PyDict_Copy(*kwds);
+   if (!*kwds2)
+    goto bad;
+   /* Move each declared keyword from the copy into kwds1, leaving
+      only the undeclared extras behind in *kwds2. */
+   for (p = kwd_list; *p; p++) {
+    x = PyDict_GetItemString(*kwds, *p); /* borrowed reference */
+    if (x) {
+     if (PyDict_SetItemString(kwds1, *p, x) < 0)
+      goto bad;
+     if (PyDict_DelItemString(*kwds2, *p) < 0)
+      goto bad;
+    }
+   }
+  }
+  else {
+   /* No keywords passed at all: the **kwds parameter still gets a
+      (new, empty) dict. */
+   *kwds2 = PyDict_New();
+   if (!*kwds2)
+    goto bad;
+  }
+ }
+ else {
+  kwds1 = *kwds;
+  Py_XINCREF(kwds1);
+ }
+
+ *args = args1;
+ *kwds = kwds1;
+ return 0;
+bad:
+ Py_XDECREF(args1);
+ Py_XDECREF(kwds1);
+ /* BUG FIX: only touch the out-parameters the caller actually supplied;
+    the original unconditionally read *args2 and *kwds2 here, crashing
+    when either pointer was NULL.  Also reset them so the caller never
+    sees a pointer to an already-released object. */
+ if (args2) {
+  Py_XDECREF(*args2);
+  *args2 = 0;
+ }
+ if (kwds2) {
+  Py_XDECREF(*kwds2);
+  *kwds2 = 0;
+ }
+ return -1;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+    /* Import a module by calling the builtin __import__, mimicking an
+       "import" / "from ... import ..." statement.  Returns a new
+       reference, or NULL with an exception set.  Note the success path
+       deliberately falls through the "bad" label: it only releases
+       temporaries and returns module (which is NULL if any step failed). */
+    PyObject *__import__ = 0;
+    PyObject *empty_list = 0;
+    PyObject *module = 0;
+    PyObject *global_dict = 0;
+    PyObject *empty_dict = 0;
+    PyObject *list;
+    __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+    if (!__import__)
+        goto bad;
+    if (from_list)
+        list = from_list;
+    else {
+        /* plain "import x": __import__ expects an empty fromlist */
+        empty_list = PyList_New(0);
+        if (!empty_list)
+            goto bad;
+        list = empty_list;
+    }
+    /* borrowed reference; must not be DECREFed below */
+    global_dict = PyModule_GetDict(__pyx_m);
+    if (!global_dict)
+        goto bad;
+    empty_dict = PyDict_New();
+    if (!empty_dict)
+        goto bad;
+    module = PyObject_CallFunction(__import__, "OOOO",
+        name, global_dict, empty_dict, list);
+bad:
+    Py_XDECREF(empty_list);
+    Py_XDECREF(__import__);
+    Py_XDECREF(empty_dict);
+    return module;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+    /* Look up an attribute (used for module/builtins name lookups) and
+       map a failed lookup to NameError, matching Python name-resolution
+       semantics.  Returns a new reference or NULL. */
+    PyObject *result;
+    result = PyObject_GetAttr(dict, name);
+    if (!result)
+        PyErr_SetObject(PyExc_NameError, name);
+    return result;
+}
+
+static void __Pyx_WriteUnraisable(char *name) {
+    /* Report the pending exception via PyErr_WriteUnraisable, labelled
+       with `name` as context (used where an exception cannot propagate,
+       e.g. in __dealloc__).  The pending exception is preserved around
+       the PyString_FromString call via Fetch/Restore. */
+    PyObject *old_exc, *old_val, *old_tb;
+    PyObject *ctx;
+    PyErr_Fetch(&old_exc, &old_val, &old_tb);
+    ctx = PyString_FromString(name);
+    PyErr_Restore(old_exc, old_val, old_tb);
+    if (!ctx)
+        ctx = Py_None;  /* borrowed; fall back if the string could not be made */
+    PyErr_WriteUnraisable(ctx);
+    /* fix: the context string was leaked -- PyErr_WriteUnraisable only
+       borrows its argument, so release the reference we own. */
+    if (ctx != Py_None)
+        Py_DECREF(ctx);
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+    /* Implement Python 2 "raise type, value, tb" statement semantics
+       for string, class and instance exceptions.  On success the three
+       INCREFed references are stolen by PyErr_Restore; on error they
+       are released and a TypeError is left set instead. */
+    Py_XINCREF(type);
+    Py_XINCREF(value);
+    Py_XINCREF(tb);
+    /* First, check the traceback argument, replacing None with NULL. */
+    if (tb == Py_None) {
+        Py_DECREF(tb);
+        tb = 0;
+    }
+    else if (tb != NULL && !PyTraceBack_Check(tb)) {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: arg 3 must be a traceback or None");
+        goto raise_error;
+    }
+    /* Next, replace a missing value with None */
+    if (value == NULL) {
+        value = Py_None;
+        Py_INCREF(value);
+    }
+    /* Next, repeatedly, replace a tuple exception with its first item */
+    while (PyTuple_Check(type) && PyTuple_Size(type) > 0) {
+        PyObject *tmp = type;
+        type = PyTuple_GET_ITEM(type, 0);
+        Py_INCREF(type);
+        Py_DECREF(tmp);
+    }
+    if (PyString_Check(type))
+        ;  /* string exceptions pass through unchanged (Py2 only) */
+    else if (PyClass_Check(type))
+        ; /*PyErr_NormalizeException(&type, &value, &tb);*/
+    else if (PyInstance_Check(type)) {
+        /* Raising an instance. The value should be a dummy. */
+        if (value != Py_None) {
+            PyErr_SetString(PyExc_TypeError,
+                "instance exception may not have a separate value");
+            goto raise_error;
+        }
+        else {
+            /* Normalize to raise <class>, <instance> */
+            Py_DECREF(value);
+            value = type;
+            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+            Py_INCREF(type);
+        }
+    }
+    else {
+        /* Not something you can raise. You get an exception
+           anyway, just not what you specified :-) */
+        PyErr_Format(PyExc_TypeError,
+            "exceptions must be strings, classes, or "
+            "instances, not %s", type->ob_type->tp_name);
+        goto raise_error;
+    }
+    PyErr_Restore(type, value, tb);  /* steals all three references */
+    return;
+raise_error:
+    Py_XDECREF(value);
+    Py_XDECREF(type);
+    Py_XDECREF(tb);
+    return;
+}
+
+/* Set the ValueError raised when tuple/sequence unpacking finds the
+   wrong number of items. */
+static void __Pyx_UnpackError(void) {
+    PyErr_SetString(PyExc_ValueError, "unpack sequence of wrong size");
+}
+
+static PyObject *__Pyx_UnpackItem(PyObject *seq, int i) {
+    /* Fetch item i during sequence unpacking.  IndexError means the
+       sequence was too short, so it is converted to the standard
+       unpack-size ValueError; any other exception propagates as-is.
+       Returns a new reference or NULL. */
+    PyObject *item = PySequence_GetItem(seq, i);
+    if (!item) {
+        if (PyErr_ExceptionMatches(PyExc_IndexError))
+            __Pyx_UnpackError();
+    }
+    return item;
+}
+
+static int __Pyx_EndUnpack(PyObject *seq, int i) {
+    /* Verify the sequence is exhausted after unpacking i items: getting
+       item i must FAIL for the unpack to be the right size.
+       Returns 0 if exhausted, -1 (with ValueError set) if too long.
+       NOTE(review): PyErr_Clear() assumes the failure was IndexError;
+       a different exception from PySequence_GetItem is swallowed here. */
+    PyObject *item = PySequence_GetItem(seq, i);
+    if (item) {
+        Py_DECREF(item);
+        __Pyx_UnpackError();
+        return -1;
+    }
+    PyErr_Clear();
+    return 0;
+}
+
+static int __Pyx_InternStrings(__Pyx_InternTabEntry *t) {
+    /* Intern every C string in the (NULL-terminated) table, storing the
+       resulting PyString through each entry's target pointer.
+       Returns 0 on success, -1 on allocation failure. */
+    while (t->p) {
+        *t->p = PyString_InternFromString(t->s);
+        if (!*t->p)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+    /* Build a PyString for every entry in the (NULL-terminated) table.
+       t->n is sizeof the C literal, so n - 1 drops the trailing NUL.
+       Returns 0 on success, -1 on allocation failure. */
+    while (t->p) {
+        *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        if (!*t->p)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name,
+    long size)
+{
+    /* Import module_name.class_name and verify it is a type object
+       whose tp_basicsize matches `size` (a layout-compatibility check
+       for cross-module extension-type sharing).  Returns a new
+       reference to the type, or NULL with an exception set. */
+    PyObject *py_module_name = 0;
+    PyObject *py_class_name = 0;
+    PyObject *py_name_list = 0;
+    PyObject *py_module = 0;
+    PyObject *result = 0;
+
+    py_module_name = PyString_FromString(module_name);
+    if (!py_module_name)
+        goto bad;
+    py_class_name = PyString_FromString(class_name);
+    if (!py_class_name)
+        goto bad;
+    /* fromlist = [class_name] so __import__ returns the leaf module */
+    py_name_list = PyList_New(1);
+    if (!py_name_list)
+        goto bad;
+    Py_INCREF(py_class_name);  /* PyList_SetItem steals a reference */
+    if (PyList_SetItem(py_name_list, 0, py_class_name) < 0)
+        goto bad;
+    py_module = __Pyx_Import(py_module_name, py_name_list);
+    if (!py_module)
+        goto bad;
+    result = PyObject_GetAttr(py_module, py_class_name);
+    if (!result)
+        goto bad;
+    if (!PyType_Check(result)) {
+        PyErr_Format(PyExc_TypeError,
+            "%s.%s is not a type object",
+            module_name, class_name);
+        goto bad;
+    }
+    if (((PyTypeObject *)result)->tp_basicsize != size) {
+        PyErr_Format(PyExc_ValueError,
+            "%s.%s does not appear to be the correct type object",
+            module_name, class_name);
+        goto bad;
+    }
+    goto done;
+bad:
+    Py_XDECREF(result);
+    result = 0;
+done:
+    Py_XDECREF(py_module_name);
+    Py_XDECREF(py_class_name);
+    Py_XDECREF(py_name_list);
+    return (PyTypeObject *)result;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+    /* Append a synthetic frame (file __pyx_filename, line __pyx_lineno,
+       function `funcname`) to the current exception's traceback, so
+       errors in generated C code point back to the .pyx source.  A
+       dummy empty code object is fabricated just to carry the
+       filename/name/line. Errors while building it are silently
+       ignored -- the original exception stays pending either way. */
+    PyObject *py_srcfile = 0;
+    PyObject *py_funcname = 0;
+    PyObject *py_globals = 0;
+    PyObject *empty_tuple = 0;
+    PyObject *empty_string = 0;
+    PyCodeObject *py_code = 0;
+    PyFrameObject *py_frame = 0;
+
+    py_srcfile = PyString_FromString(__pyx_filename);
+    if (!py_srcfile) goto bad;
+    py_funcname = PyString_FromString(funcname);
+    if (!py_funcname) goto bad;
+    py_globals = PyModule_GetDict(__pyx_m);  /* borrowed; not DECREFed below */
+    if (!py_globals) goto bad;
+    empty_tuple = PyTuple_New(0);
+    if (!empty_tuple) goto bad;
+    empty_string = PyString_FromString("");
+    if (!empty_string) goto bad;
+    py_code = PyCode_New(
+        0,            /*int argcount,*/
+        0,            /*int nlocals,*/
+        0,            /*int stacksize,*/
+        0,            /*int flags,*/
+        empty_string, /*PyObject *code,*/
+        empty_tuple,  /*PyObject *consts,*/
+        empty_tuple,  /*PyObject *names,*/
+        empty_tuple,  /*PyObject *varnames,*/
+        empty_tuple,  /*PyObject *freevars,*/
+        empty_tuple,  /*PyObject *cellvars,*/
+        py_srcfile,   /*PyObject *filename,*/
+        py_funcname,  /*PyObject *name,*/
+        __pyx_lineno, /*int firstlineno,*/
+        empty_string  /*PyObject *lnotab*/
+    );
+    if (!py_code) goto bad;
+    py_frame = PyFrame_New(
+        PyThreadState_Get(), /*PyThreadState *tstate,*/
+        py_code,             /*PyCodeObject *code,*/
+        py_globals,          /*PyObject *globals,*/
+        0                    /*PyObject *locals*/
+    );
+    if (!py_frame) goto bad;
+    py_frame->f_lineno = __pyx_lineno;
+    PyTraceBack_Here(py_frame);
+bad:
+    Py_XDECREF(py_srcfile);
+    Py_XDECREF(py_funcname);
+    Py_XDECREF(empty_tuple);
+    Py_XDECREF(empty_string);
+    Py_XDECREF(py_code);
+    Py_XDECREF(py_frame);
+}
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
new file mode 100644
index 000000000..bf20ac913
--- /dev/null
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -0,0 +1,972 @@
+# mtrand.pyx -- A Pyrex wrapper of Jean-Sebastien Roy's RandomKit
+#
+# Copyright 2005 Robert Kern (robert.kern@gmail.com)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+include "Python.pxi"
+include "scipy.pxi"
+
+cdef extern from "math.h":
+ double exp(double x)
+ double log(double x)
+ double floor(double x)
+ double sin(double x)
+ double cos(double x)
+
+cdef extern from "randomkit.h":
+
+ ctypedef struct rk_state:
+ unsigned long key[624]
+ int pos
+
+ ctypedef enum rk_error:
+ RK_NOERR = 0
+ RK_ENODEV = 1
+ RK_ERR_MAX = 2
+
+ char *rk_strerror[2]
+
+ # 0xFFFFFFFFUL
+ unsigned long RK_MAX
+
+ void rk_seed(unsigned long seed, rk_state *state)
+ rk_error rk_randomseed(rk_state *state)
+ unsigned long rk_random(rk_state *state)
+ long rk_long(rk_state *state)
+ unsigned long rk_ulong(rk_state *state)
+ unsigned long rk_interval(unsigned long max, rk_state *state)
+ double rk_double(rk_state *state)
+ void rk_fill(void *buffer, size_t size, rk_state *state)
+ rk_error rk_devfill(void *buffer, size_t size, int strong)
+ rk_error rk_altfill(void *buffer, size_t size, int strong,
+ rk_state *state)
+ double rk_gauss(rk_state *state)
+
+cdef extern from "distributions.h":
+
+ double rk_normal(rk_state *state, double loc, double scale)
+ double rk_standard_exponential(rk_state *state)
+ double rk_exponential(rk_state *state, double scale)
+ double rk_uniform(rk_state *state, double loc, double scale)
+ double rk_standard_gamma(rk_state *state, double shape)
+ double rk_gamma(rk_state *state, double shape, double scale)
+ double rk_beta(rk_state *state, double a, double b)
+ double rk_chisquare(rk_state *state, double df)
+ double rk_noncentral_chisquare(rk_state *state, double df, double nonc)
+ double rk_f(rk_state *state, double dfnum, double dfden)
+ double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc)
+ double rk_standard_cauchy(rk_state *state)
+ double rk_standard_t(rk_state *state, double df)
+ double rk_vonmises(rk_state *state, double mu, double kappa)
+ double rk_pareto(rk_state *state, double a)
+ double rk_weibull(rk_state *state, double a)
+ double rk_power(rk_state *state, double a)
+ double rk_laplace(rk_state *state, double loc, double scale)
+ double rk_gumbel(rk_state *state, double loc, double scale)
+ double rk_logistic(rk_state *state, double loc, double scale)
+ double rk_lognormal(rk_state *state, double mode, double sigma)
+ double rk_rayleigh(rk_state *state, double mode)
+ double rk_wald(rk_state *state, double mean, double scale)
+ double rk_triangular(rk_state *state, double left, double mode, double right)
+
+ long rk_binomial(rk_state *state, long n, double p)
+ long rk_binomial_btpe(rk_state *state, long n, double p)
+ long rk_binomial_inversion(rk_state *state, long n, double p)
+ long rk_negative_binomial(rk_state *state, long n, double p)
+ long rk_poisson(rk_state *state, double lam)
+ long rk_poisson_mult(rk_state *state, double lam)
+ long rk_poisson_ptrs(rk_state *state, double lam)
+ long rk_zipf(rk_state *state, double a)
+ long rk_geometric(rk_state *state, double p)
+ long rk_hypergeometric(rk_state *state, long good, long bad, long sample)
+ long rk_logseries(rk_state *state, double p)
+
+ctypedef double (* rk_cont0)(rk_state *state)
+ctypedef double (* rk_cont1)(rk_state *state, double a)
+ctypedef double (* rk_cont2)(rk_state *state, double a, double b)
+ctypedef double (* rk_cont3)(rk_state *state, double a, double b, double c)
+
+ctypedef long (* rk_disc0)(rk_state *state)
+ctypedef long (* rk_discnp)(rk_state *state, long n, double p)
+ctypedef long (* rk_discnmN)(rk_state *state, long n, long m, long N)
+ctypedef long (* rk_discd)(rk_state *state, double a)
+
+
+cdef extern from "initarray.h":
+ void init_by_array(rk_state *self, unsigned long *init_key,
+ unsigned long key_length)
+
+# Initialize scipy
+import_array()
+
+import scipy as _sp
+
+cdef object cont0_array(rk_state *state, rk_cont0 func, object size):
+    # Draw from a zero-parameter continuous distribution: a Python float
+    # when size is None, otherwise a Float64 array of the requested
+    # shape filled by repeated calls to func on the PRNG state.
+    cdef double *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Float64)
+        length = PyArray_SIZE(array)
+        array_data = <double *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state)
+        return array
+
+cdef object cont1_array(rk_state *state, rk_cont1 func, object size, double a):
+    # One-parameter analogue of cont0_array: scalar if size is None,
+    # else a Float64 array filled with func(state, a).
+    cdef double *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state, a)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Float64)
+        length = PyArray_SIZE(array)
+        array_data = <double *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state, a)
+        return array
+
+cdef object cont2_array(rk_state *state, rk_cont2 func, object size, double a,
+                        double b):
+    # Two-parameter analogue of cont0_array: scalar if size is None,
+    # else a Float64 array filled with func(state, a, b).
+    cdef double *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state, a, b)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Float64)
+        length = PyArray_SIZE(array)
+        array_data = <double *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state, a, b)
+        return array
+
+cdef object cont3_array(rk_state *state, rk_cont3 func, object size, double a,
+                        double b, double c):
+    # Three-parameter analogue of cont0_array: scalar if size is None,
+    # else a Float64 array filled with func(state, a, b, c).
+    cdef double *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state, a, b, c)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Float64)
+        length = PyArray_SIZE(array)
+        array_data = <double *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state, a, b, c)
+        return array
+
+cdef object disc0_array(rk_state *state, rk_disc0 func, object size):
+    # Discrete counterpart of cont0_array: a Python int when size is
+    # None, otherwise an Int array filled with func(state).
+    cdef long *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Int)
+        length = PyArray_SIZE(array)
+        array_data = <long *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state)
+        return array
+
+cdef object discnp_array(rk_state *state, rk_discnp func, object size, long n, double p):
+    # Discrete distribution parameterized by (n, p), e.g. binomial:
+    # scalar if size is None, else an Int array of func(state, n, p).
+    cdef long *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state, n, p)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Int)
+        length = PyArray_SIZE(array)
+        array_data = <long *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state, n, p)
+        return array
+
+cdef object discnmN_array(rk_state *state, rk_discnmN func, object size,
+                          long n, long m, long N):
+    # Discrete distribution parameterized by (n, m, N), e.g.
+    # hypergeometric: scalar if size is None, else an Int array.
+    cdef long *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state, n, m, N)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Int)
+        length = PyArray_SIZE(array)
+        array_data = <long *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state, n, m, N)
+        return array
+
+cdef object discd_array(rk_state *state, rk_discd func, object size, double a):
+    # Discrete distribution with one double parameter, e.g. Poisson:
+    # scalar if size is None, else an Int array of func(state, a).
+    cdef long *array_data
+    cdef ndarray array "arrayObject"
+    cdef long length
+    cdef long i
+
+    if size is None:
+        return func(state, a)
+    else:
+        array = <ndarray>_sp.empty(size, _sp.Int)
+        length = PyArray_SIZE(array)
+        array_data = <long *>array.data
+        for i from 0 <= i < length:
+            array_data[i] = func(state, a)
+        return array
+
+cdef double kahan_sum(double *darr, long n):
+    # Compensated (Kahan) summation of n doubles: c carries the
+    # low-order bits lost in each addition, keeping the running total
+    # accurate for long sums of probabilities.
+    # NOTE(review): reads darr[0] unconditionally -- assumes n >= 1.
+    cdef double c, y, t, sum
+    cdef long i
+    sum = darr[0]
+    c = 0.0
+    for i from 1 <= i < n:
+        y = darr[i] - c
+        t = sum + y
+        c = (t-sum) - y
+        sum = t
+    return sum
+
+cdef class RandomState:
+ """Container for the Mersenne Twister PRNG.
+
+ Constructor
+ -----------
+ RandomState(seed=None): initializes the PRNG with the given seed. See the
+ seed() method for details.
+
+ Distribution Methods
+ -----------------
+ RandomState exposes a number of methods for generating random numbers drawn
+ from a variety of probability distributions. In addition to the
+ distribution-specific arguments, each method takes a keyword argument
+ size=None. If size is None, then a single value is generated and returned.
+ If size is an integer, then a 1-D scipy array filled with generated values
+ is returned. If size is a tuple, then a scipy array with that shape is
+ filled and returned.
+ """
+ cdef rk_state *internal_state
+
+    def __init__(self, seed=None):
+        """Allocate the Mersenne Twister state and seed it (see seed())."""
+        self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state))
+        # fix: a failed allocation previously went unnoticed and would
+        # crash later when the state pointer was dereferenced.
+        if self.internal_state == NULL:
+            raise MemoryError("could not allocate Mersenne Twister state")
+        self.seed(seed)
+
+    def __dealloc__(self):
+        # Release the C-level PRNG state allocated in __init__.
+        if self.internal_state != NULL:
+            PyMem_Free(self.internal_state)
+
+    def seed(self, seed=None):
+        """Seed the generator.
+
+        seed(seed=None)
+
+        seed can be an integer, an array (or other sequence) of integers of any
+        length, or None. If seed is None, then RandomState will try to read data
+        from /dev/urandom (or the Windows analogue) if available or seed from
+        the clock otherwise.
+        """
+        cdef rk_error errcode
+        cdef ndarray obj "arrayObject_obj"
+        if seed is None:
+            # errcode (RK_ENODEV on fallback to clock seeding) is ignored
+            errcode = rk_randomseed(self.internal_state)
+        elif type(seed) is int:
+            # NOTE(review): `type(seed) is int` excludes Python longs and
+            # int subclasses; those fall through to the array path below.
+            rk_seed(seed, self.internal_state)
+        else:
+            # Sequence of integers: use the init_by_array scheme.
+            obj = <ndarray>PyArray_ContiguousFromObject(seed, PyArray_LONG, 1, 1)
+            init_by_array(self.internal_state, <unsigned long *>(obj.data),
+                obj.dimensions[0])
+
+    def get_state(self):
+        """Return a tuple representing the internal state of the generator.
+
+        get_state() -> ('MT19937', int key[624], int pos)
+        """
+        cdef ndarray state "arrayObject_state"
+        state = <ndarray>_sp.empty(624, _sp.Int)
+        # Copy the 624-word Mersenne Twister key out of the C state.
+        memcpy(<void*>(state.data), self.internal_state.key, 624*sizeof(long))
+        return ('MT19937', state, self.internal_state.pos)
+
+    def set_state(self, state):
+        """Set the state from a tuple.
+
+        state = ('MT19937', int key[624], int pos)
+
+        set_state(state)
+
+        Raises ValueError if the algorithm tag is not 'MT19937' or the
+        key does not contain exactly 624 integers.
+        """
+        cdef ndarray obj "arrayObject_obj"
+        cdef int pos
+        algorithm_name = state[0]
+        if algorithm_name != 'MT19937':
+            raise ValueError("algorithm must be 'MT19937'")
+        key, pos = state[1:]
+        obj = <ndarray>PyArray_ContiguousFromObject(key, PyArray_LONG, 1, 1)
+        if obj.dimensions[0] != 624:
+            raise ValueError("state must be 624 longs")
+        memcpy(self.internal_state.key, <void*>(obj.data), 624*sizeof(long))
+        self.internal_state.pos = pos
+
+ # Pickling support:
+    def __getstate__(self):
+        # Pickling: the picklable state is exactly the get_state() tuple.
+        return self.get_state()
+
+    def __setstate__(self, state):
+        # Unpickling: restore via set_state (validates the tuple).
+        self.set_state(state)
+
+    def __reduce__(self):
+        # Pickle via the module-level reconstruction helper so extension
+        # type instances can be recreated, then restored by __setstate__.
+        return (_sp.random.__RandomState_ctor, (), self.get_state())
+
+ # Basic distributions:
+    def random_sample(self, size=None):
+        """Return random floats in the half-open interval [0.0, 1.0).
+
+        random_sample(size=None) -> random values
+        """
+        return cont0_array(self.internal_state, rk_double, size)
+
+    def tomaxint(self, size=None):
+        """Returns random integers x such that 0 <= x <= sys.maxint.
+
+        tomaxint(size=None) -> random values
+        """
+        return disc0_array(self.internal_state, rk_long, size)
+
+    def randint(self, low, high=None, size=None):
+        """Return random integers x such that low <= x < high.
+
+        randint(low, high=None, size=None) -> random values
+
+        If high is None, then 0 <= x < low.
+        Raises ValueError if the interval [low, high) is empty.
+        """
+        cdef long lo, hi, diff
+        cdef long *array_data
+        cdef ndarray array "arrayObject"
+        cdef long length
+        cdef long i
+
+        if high is None:
+            lo = 0
+            hi = low
+        else:
+            lo = low
+            hi = high
+
+        # rk_interval(diff, ...) draws uniformly from [0, diff], so diff
+        # is the inclusive width of the half-open range [lo, hi).
+        diff = hi - lo - 1
+        if diff < 0:
+            raise ValueError("low >= high")
+
+        if size is None:
+            # fix: the scalar path failed to add the lower bound, so e.g.
+            # randint(5, 10) could return values below 5.
+            return lo + <long>rk_interval(diff, self.internal_state)
+        else:
+            array = <ndarray>_sp.empty(size, _sp.Int)
+            length = PyArray_SIZE(array)
+            array_data = <long *>array.data
+            for i from 0 <= i < length:
+                array_data[i] = lo + <long>rk_interval(diff, self.internal_state)
+            return array
+
+    def bytes(self, unsigned int length):
+        """Return `length` random bytes.
+
+        bytes(length) -> str
+        """
+        cdef void *bytes
+        # NOTE(review): PyMem_Malloc result is not checked for NULL.
+        bytes = PyMem_Malloc(length)
+        rk_fill(bytes, length, self.internal_state)
+        # fix: PyString_FromString stops at the first NUL byte, truncating
+        # the random buffer (and reading past it when no NUL occurs);
+        # use the sized constructor so exactly `length` bytes are returned.
+        bytestring = PyString_FromStringAndSize(<char*>bytes, length)
+        PyMem_Free(bytes)
+        return bytestring
+
+    def uniform(self, double low=0.0, double high=1.0, size=None):
+        """Uniform distribution over [low, high).
+
+        uniform(low=0.0, high=1.0, size=None) -> random values
+        """
+        # rk_uniform takes (loc, scale), hence the width high-low.
+        return cont2_array(self.internal_state, rk_uniform, size, low,
+                           high-low)
+
+    def rand(self, *args):
+        """Return an array of the given dimensions which is initialized to
+        random numbers from a uniform distribution in the range [0,1).
+
+        rand(d0, d1, ..., dn) -> random values
+        """
+        # With no dimensions, hand back a single scalar sample.
+        if not args:
+            return self.random_sample()
+        return self.random_sample(size=args)
+
+    def randn(self, *args):
+        """Returns zero-mean, unit-variance Gaussian random numbers in an
+        array of shape (d0, d1, ..., dn).
+
+        randn(d0, d1, ..., dn) -> random values
+        """
+        # With no dimensions, hand back a single scalar sample.
+        if not args:
+            return self.standard_normal()
+        return self.standard_normal(args)
+
+    def random_integers(self, low, high=None, size=None):
+        """Return random integers x such that low <= x <= high.
+
+        random_integers(low, high=None, size=None) -> random values.
+
+        If high is None, then 1 <= x <= low.
+        """
+        if high is None:
+            high = low
+            low = 1
+        # Closed interval: shift the exclusive upper bound of randint.
+        return self.randint(low, high+1, size)
+
+ # Complicated, continuous distributions:
+    def standard_normal(self, size=None):
+        """Standard Normal distribution (mean=0, stdev=1).
+
+        standard_normal(size=None) -> random values
+        """
+        return cont0_array(self.internal_state, rk_gauss, size)
+
+    def normal(self, double loc=0.0, double scale=1.0, size=None):
+        """Normal distribution (mean=loc, stdev=scale).
+
+        normal(loc=0.0, scale=1.0, size=None) -> random values
+
+        Raises ValueError if scale <= 0.
+        """
+        if scale <= 0:
+            raise ValueError("scale <= 0")
+        return cont2_array(self.internal_state, rk_normal, size, loc, scale)
+
+    def beta(self, double a, double b, size=None):
+        """Beta distribution over [0, 1].
+
+        beta(a, b, size=None) -> random values
+
+        Raises ValueError unless a > 0 and b > 0.
+        """
+        if a <= 0:
+            raise ValueError("a <= 0")
+        elif b <= 0:
+            raise ValueError("b <= 0")
+        return cont2_array(self.internal_state, rk_beta, size, a, b)
+
+    def exponential(self, double scale=1.0, size=None):
+        """Exponential distribution.
+
+        exponential(scale=1.0, size=None) -> random values
+
+        Raises ValueError if scale <= 0.
+        """
+        if scale <= 0:
+            raise ValueError("scale <= 0")
+        return cont1_array(self.internal_state, rk_exponential, size, scale)
+
+    def standard_exponential(self, size=None):
+        """Standard exponential distribution (scale=1).
+
+        standard_exponential(size=None) -> random values
+        """
+        return cont0_array(self.internal_state, rk_standard_exponential, size)
+
+    def standard_gamma(self, double shape, size=None):
+        """Standard Gamma distribution (scale=1).
+
+        standard_gamma(shape, size=None) -> random values
+
+        Raises ValueError if shape <= 0.
+        """
+        if shape <= 0:
+            raise ValueError("shape <= 0")
+        return cont1_array(self.internal_state, rk_standard_gamma, size, shape)
+
+    def gamma(self, double shape, double scale=1.0, size=None):
+        """Gamma distribution.
+
+        gamma(shape, scale=1.0, size=None) -> random values
+
+        Raises ValueError unless shape > 0 and scale > 0.
+        """
+        if shape <= 0:
+            raise ValueError("shape <= 0")
+        elif scale <= 0:
+            raise ValueError("scale <= 0")
+        return cont2_array(self.internal_state, rk_gamma, size, shape, scale)
+
+    def f(self, double dfnum, double dfden, size=None):
+        """F distribution.
+
+        f(dfnum, dfden, size=None) -> random values
+
+        Raises ValueError unless dfnum > 0 and dfden > 0.
+        """
+        if dfnum <= 0:
+            raise ValueError("dfnum <= 0")
+        elif dfden <= 0:
+            raise ValueError("dfden <= 0")
+        return cont2_array(self.internal_state, rk_f, size, dfnum, dfden)
+
+    def noncentral_f(self, double dfnum, double dfden, double nonc, size=None):
+        """Noncentral F distribution.
+
+        noncentral_f(dfnum, dfden, nonc, size=None) -> random values
+
+        Raises ValueError unless dfnum > 1, dfden > 0 and nonc >= 0.
+        """
+        if dfnum <= 1:
+            raise ValueError("dfnum <= 1")
+        elif dfden <= 0:
+            raise ValueError("dfden <= 0")
+        elif nonc < 0:
+            raise ValueError("nonc < 0")
+        return cont3_array(self.internal_state, rk_noncentral_f, size, dfnum,
+                           dfden, nonc)
+
+    def chisquare(self, double df, size=None):
+        """Chi^2 distribution.
+
+        chisquare(df, size=None) -> random values
+
+        Raises ValueError if df <= 0.
+        """
+        if df <= 0:
+            raise ValueError("df <= 0")
+        return cont1_array(self.internal_state, rk_chisquare, size, df)
+
+    def noncentral_chisquare(self, double df, double nonc, size=None):
+        """Noncentral Chi^2 distribution.
+
+        noncentral_chisquare(df, nonc, size=None) -> random values
+
+        Raises ValueError unless df > 1 and nonc >= 0.
+        """
+        if df <= 1:
+            raise ValueError("df <= 1")
+        elif nonc < 0:
+            raise ValueError("nonc < 0")
+        return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
+                           df, nonc)
+
+    def standard_cauchy(self, size=None):
+        """Standard Cauchy with mode=0.
+
+        standard_cauchy(size=None)
+        """
+        return cont0_array(self.internal_state, rk_standard_cauchy, size)
+
+    def standard_t(self, double df, size=None):
+        """Standard Student's t distribution with df degrees of freedom.
+
+        standard_t(df, size=None)
+
+        Raises ValueError if df <= 0.
+        """
+        if df <= 0:
+            raise ValueError("df <= 0")
+        return cont1_array(self.internal_state, rk_standard_t, size, df)
+
+    def vonmises(self, double mu, double kappa, size=None):
+        """von Mises circular distribution with mode mu and dispersion parameter
+        kappa on [-pi, pi].
+
+        vonmises(mu, kappa, size=None)
+
+        Raises ValueError if kappa < 0.
+        """
+        if kappa < 0:
+            raise ValueError("kappa < 0")
+        return cont2_array(self.internal_state, rk_vonmises, size, mu, kappa)
+
+    def pareto(self, double a, size=None):
+        """Pareto distribution.
+
+        pareto(a, size=None)
+
+        Raises ValueError if a <= 0.
+        """
+        if a <= 0:
+            raise ValueError("a <= 0")
+        return cont1_array(self.internal_state, rk_pareto, size, a)
+
+    def weibull(self, double a, size=None):
+        """Weibull distribution.
+
+        weibull(a, size=None)
+
+        Raises ValueError if a <= 0.
+        """
+        if a <= 0:
+            raise ValueError("a <= 0")
+        return cont1_array(self.internal_state, rk_weibull, size, a)
+
+    def power(self, double a, size=None):
+        """Power distribution.
+
+        power(a, size=None)
+
+        Raises ValueError if a <= 0.
+        """
+        if a <= 0:
+            raise ValueError("a <= 0")
+        return cont1_array(self.internal_state, rk_power, size, a)
+
+    def laplace(self, double loc=0.0, double scale=1.0, size=None):
+        """Laplace distribution.
+
+        laplace(loc=0.0, scale=1.0, size=None)
+
+        Raises ValueError if scale <= 0.
+        """
+        if scale <= 0.0:
+            raise ValueError("scale <= 0.0")
+        return cont2_array(self.internal_state, rk_laplace, size, loc, scale)
+
+    def gumbel(self, double loc=0.0, double scale=1.0, size=None):
+        """Gumbel distribution.
+
+        gumbel(loc=0.0, scale=1.0, size=None)
+
+        Raises ValueError if scale <= 0.
+        """
+        if scale <= 0.0:
+            raise ValueError("scale <= 0.0")
+        return cont2_array(self.internal_state, rk_gumbel, size, loc, scale)
+
+    def logistic(self, double loc=0.0, double scale=1.0, size=None):
+        """Logistic distribution.
+
+        logistic(loc=0.0, scale=1.0, size=None)
+
+        Raises ValueError if scale <= 0.
+        """
+        if scale <= 0.0:
+            raise ValueError("scale <= 0.0")
+        return cont2_array(self.internal_state, rk_logistic, size, loc, scale)
+
+    def lognormal(self, double mean=0.0, double sigma=1.0, size=None):
+        """Log-normal distribution.
+
+        Note that the mean parameter is not the mean of this distribution, but
+        the underlying normal distribution.
+
+        lognormal(mean, sigma) <=> exp(normal(mean, sigma))
+
+        lognormal(mean=0.0, sigma=1.0, size=None)
+
+        Raises ValueError if sigma <= 0.
+        """
+        if sigma <= 0.0:
+            raise ValueError("sigma <= 0.0")
+        return cont2_array(self.internal_state, rk_lognormal, size, mean, sigma)
+
+    def rayleigh(self, double scale=1.0, size=None):
+        """Rayleigh distribution.
+
+        rayleigh(scale=1.0, size=None)
+
+        Raises ValueError if scale <= 0.
+        """
+        if scale <= 0.0:
+            raise ValueError("scale <= 0.0")
+        return cont1_array(self.internal_state, rk_rayleigh, size, scale)
+
+    def wald(self, double mean, double scale, size=None):
+        """Wald (inverse Gaussian) distribution.
+
+        wald(mean, scale, size=None)
+
+        Raises ValueError unless mean > 0 and scale > 0.
+        """
+        if mean <= 0.0:
+            raise ValueError("mean <= 0.0")
+        elif scale <= 0.0:
+            raise ValueError("scale <= 0.0")
+        return cont2_array(self.internal_state, rk_wald, size, mean, scale)
+
+    def triangular(self, double left, double mode, double right, size=None):
+        """Triangular distribution starting at left, peaking at mode, and
+        ending at right (left <= mode <= right).
+
+        triangular(left, mode, right, size=None)
+
+        Raises ValueError unless left <= mode <= right and left < right.
+        """
+        if left > mode:
+            raise ValueError("left > mode")
+        elif mode > right:
+            raise ValueError("mode > right")
+        elif left == right:
+            raise ValueError("left == right")
+        return cont3_array(self.internal_state, rk_triangular, size, left,
+                           mode, right)
+
+ # Complicated, discrete distributions:
+    def binomial(self, long n, double p, size=None):
+        """Binomial distribution of n trials and p probability of success.
+
+        binomial(n, p, size=None) -> random values
+
+        Raises ValueError unless n > 0 and 0 <= p <= 1.
+        """
+        if n <= 0:
+            raise ValueError("n <= 0")
+        elif p < 0:
+            raise ValueError("p < 0")
+        elif p > 1:
+            raise ValueError("p > 1")
+        return discnp_array(self.internal_state, rk_binomial, size, n, p)
+
+    def negative_binomial(self, long n, double p, size=None):
+        """Negative Binomial distribution.
+
+        negative_binomial(n, p, size=None) -> random values
+
+        Raises ValueError unless n > 0 and 0 <= p <= 1.
+        """
+        if n <= 0:
+            raise ValueError("n <= 0")
+        elif p < 0:
+            raise ValueError("p < 0")
+        elif p > 1:
+            raise ValueError("p > 1")
+        return discnp_array(self.internal_state, rk_negative_binomial, size, n,
+                            p)
+
+    def poisson(self, double lam=1.0, size=None):
+        """Poisson distribution.
+
+        poisson(lam=1.0, size=None) -> random values
+
+        Raises ValueError if lam <= 0.
+        """
+        if lam <= 0:
+            raise ValueError("lam <= 0")
+        return discd_array(self.internal_state, rk_poisson, size, lam)
+
+    def zipf(self, double a, size=None):
+        """Zipf distribution.
+
+        zipf(a, size=None)
+
+        Raises ValueError if a <= 1 (the distribution requires a > 1).
+        """
+        if a <= 1.0:
+            raise ValueError("a <= 1.0")
+        return discd_array(self.internal_state, rk_zipf, size, a)
+
+    def geometric(self, double p, size=None):
+        """Geometric distribution with p being the probability of "success" on
+        an individual trial.
+
+        geometric(p, size=None)
+
+        Raises ValueError unless 0 <= p <= 1.
+        """
+        if p < 0.0:
+            raise ValueError("p < 0.0")
+        elif p > 1.0:
+            raise ValueError("p > 1.0")
+        return discd_array(self.internal_state, rk_geometric, size, p)
+
+    def hypergeometric(self, long ngood, long nbad, long nsample, size=None):
+        """Hypergeometric distribution.
+
+        Consider an urn with ngood "good" balls and nbad "bad" balls. If one
+        were to draw nsample balls from the urn without replacement, then
+        the hypergeometric distribution describes the distribution of "good"
+        balls in the sample.
+
+        hypergeometric(ngood, nbad, nsample, size=None)
+
+        Raises ValueError unless ngood >= 1, nbad >= 1 and
+        1 <= nsample <= ngood + nbad.
+        """
+        if ngood < 1:
+            raise ValueError("ngood < 1")
+        elif nbad < 1:
+            raise ValueError("nbad < 1")
+        elif ngood + nbad < nsample:
+            raise ValueError("ngood + nbad < nsample")
+        elif nsample < 1:
+            raise ValueError("nsample < 1")
+        return discnmN_array(self.internal_state, rk_hypergeometric, size,
+                             ngood, nbad, nsample)
+
+    def logseries(self, double p, size=None):
+        """Logarithmic series distribution.
+
+        logseries(p, size=None)
+
+        Raises ValueError unless 0 <= p <= 1.
+        """
+        if p < 0:
+            raise ValueError("p < 0")
+        elif p > 1:
+            raise ValueError("p > 1")
+        return discd_array(self.internal_state, rk_logseries, size, p)
+
+ # Multivariate distributions:
+    def multivariate_normal(self, mean, cov, size=None):
+        """Return an array containing multivariate normally distributed random numbers
+        with specified mean and covariance.
+
+        multivariate_normal(mean, cov) -> random values
+        multivariate_normal(mean, cov, [m, n, ...]) -> random values
+
+        mean must be a 1 dimensional array. cov must be a square two dimensional
+        array with the same number of rows and columns as mean has elements.
+
+        The first form returns a single 1-D array containing a multivariate
+        normal.
+
+        The second form returns an array of shape (m, n, ..., cov.shape[0]).
+        In this case, output[i,j,...,:] is a 1-D array containing a multivariate
+        normal.
+        """
+        # Check preconditions on arguments
+        mean = _sp.array(mean)
+        cov = _sp.array(cov)
+        if size is None:
+            shape = []
+        else:
+            shape = size
+        # fix: ArgumentError is not a defined name (raising it would give
+        # a NameError); ValueError is the appropriate exception.
+        if len(mean.shape) != 1:
+            raise ValueError("mean must be 1 dimensional")
+        if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
+            raise ValueError("cov must be 2 dimensional and square")
+        if mean.shape[0] != cov.shape[0]:
+            raise ValueError("mean and cov must have same length")
+        # Compute shape of output
+        if isinstance(shape, int):
+            shape = [shape]
+        final_shape = list(shape[:])
+        final_shape.append(mean.shape[0])
+        # Create a matrix of independent standard normally distributed random
+        # numbers. The matrix has rows with the same length as mean and as
+        # many rows are necessary to form a matrix of shape final_shape.
+        # fix: standard_normal is a method of this object, not a module-level
+        # function; the bare name would raise NameError at runtime.
+        x = self.standard_normal(_sp.multiply.reduce(final_shape))
+        x.shape = (_sp.multiply.reduce(final_shape[0:len(final_shape)-1]),
+                   mean.shape[0])
+        # Transform matrix of standard normals into matrix where each row
+        # contains multivariate normals with the desired covariance.
+        # Compute A such that matrixmultiply(transpose(A),A) == cov.
+        # Then the matrix products of the rows of x and A has the desired
+        # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
+        # decomposition of cov is such an A.
+
+        from scipy.corelinalg import svd
+        # XXX: we really should be doing this by Cholesky decomposition
+        (u,s,v) = svd(cov)
+        x = _sp.matrixmultiply(x*_sp.sqrt(s),v)
+        # The rows of x now have the correct covariance but mean 0. Add
+        # mean to each row. Then each row will have mean mean.
+        _sp.add(mean,x,x)
+        x.shape = tuple(final_shape)
+        return x
+
+ def multinomial(self, long n, object pvals, size=None):
+ """Multinomial distribution.
+
+ multinomial(n, pvals, size=None) -> random values
+
+ pvals is a sequence of probabilities that should sum to 1 (however, the
+ last element is always assumed to account for the remaining probability
+ as long as sum(pvals[:-1]) <= 1).
+ """
+ cdef long d
+ cdef ndarray parr "arrayObject_parr", mnarr "arrayObject_mnarr"
+ cdef double *pix
+ cdef long *mnix
+ cdef long i, j, dn
+ cdef double Sum
+
+ d = len(pvals)
+ parr = <ndarray>PyArray_ContiguousFromObject(pvals, PyArray_DOUBLE, 1, 1)
+ pix = <double*>parr.data
+
+ if kahan_sum(pix, d-1) > 1.0:
+ raise ValueError("sum(pvals) > 1.0")
+
+ if size is None:
+ shape = (d,)
+ elif type(size) is int:
+ shape = (size, d)
+ else:
+ shape = size + (d,)
+
+ multin = _sp.zeros(shape, _sp.Int)
+ mnarr = <ndarray>multin
+ mnix = <long*>mnarr.data
+ i = 0
+ while i < PyArray_SIZE(mnarr):
+ Sum = 1.0
+ dn = n
+ for j from 0 <= j < d-1:
+ mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum)
+ dn = dn - mnix[i+j]
+ if dn <= 0:
+ break
+ Sum = Sum - pix[j]
+ if dn > 0:
+ mnix[i+d-1] = dn
+
+ i = i + d
+
+ return multin
+
+ # Shuffling and permutations:
+ def shuffle(self, object x):
+ """Modify a sequence in-place by shuffling its contents.
+
+ shuffle(x)
+ """
+ cdef long i, j
+
+ # adaptation of random.shuffle()
+ i = len(x) - 1
+ while i > 0:
+ j = rk_interval(i, self.internal_state)
+ x[i], x[j] = x[j], x[i]
+ i = i - 1
+
+ def permutation(self, object x):
+ """Given an integer, return a shuffled sequence of integers >= 0 and
+ < x; given a sequence, return a shuffled array copy.
+
+ permutation(x)
+ """
+ if type(x) is int:
+ arr = _sp.arange(x)
+ else:
+ arr = _sp.array(x)
+ self.shuffle(arr)
+ return arr
+
+_rand = RandomState()
+seed = _rand.seed
+get_state = _rand.get_state
+set_state = _rand.set_state
+random_sample = _rand.random_sample
+randint = _rand.randint
+bytes = _rand.bytes
+uniform = _rand.uniform
+rand = _rand.rand
+randn = _rand.randn
+random_integers = _rand.random_integers
+standard_normal = _rand.standard_normal
+normal = _rand.normal
+beta = _rand.beta
+exponential = _rand.exponential
+standard_exponential = _rand.standard_exponential
+standard_gamma = _rand.standard_gamma
+gamma = _rand.gamma
+f = _rand.f
+noncentral_f = _rand.noncentral_f
+chisquare = _rand.chisquare
+noncentral_chisquare = _rand.noncentral_chisquare
+standard_cauchy = _rand.standard_cauchy
+standard_t = _rand.standard_t
+vonmises = _rand.vonmises
+pareto = _rand.pareto
+weibull = _rand.weibull
+power = _rand.power
+laplace = _rand.laplace
+gumbel = _rand.gumbel
+logistic = _rand.logistic
+lognormal = _rand.lognormal
+rayleigh = _rand.rayleigh
+wald = _rand.wald
+triangular = _rand.triangular
+
+binomial = _rand.binomial
+negative_binomial = _rand.negative_binomial
+poisson = _rand.poisson
+zipf = _rand.zipf
+geometric = _rand.geometric
+hypergeometric = _rand.hypergeometric
+logseries = _rand.logseries
+
+multivariate_normal = _rand.multivariate_normal
+multinomial = _rand.multinomial
+
+shuffle = _rand.shuffle
+permutation = _rand.permutation
diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c
new file mode 100644
index 000000000..6a58f2dba
--- /dev/null
+++ b/numpy/random/mtrand/randomkit.c
@@ -0,0 +1,355 @@
+/* Random kit 1.3 */
+
+/*
+ * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org)
+ *
+ * The rk_random and rk_seed functions algorithms and the original design of
+ * the Mersenne Twister RNG:
+ *
+ * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Original algorithm for the implementation of rk_interval function from
+ * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by
+ * Magnus Jonsson.
+ *
+ * Constants used in the rk_double implementation by Isaku Wada.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* static char const rcsid[] =
+ "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <time.h>
+#include <limits.h>
+#include <math.h>
+
+#ifdef _WIN32
+/* Windows */
+#include <sys/timeb.h>
+#ifndef RK_NO_WINCRYPT
+/* Windows crypto */
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0400
+#endif
+#include <windows.h>
+#include <wincrypt.h>
+#endif
+#else
+/* Unix */
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include "randomkit.h"
+
+#ifndef RK_DEV_URANDOM
+#define RK_DEV_URANDOM "/dev/urandom"
+#endif
+
+#ifndef RK_DEV_RANDOM
+#define RK_DEV_RANDOM "/dev/random"
+#endif
+
+char *rk_strerror[RK_ERR_MAX] =
+{
+ "no error",
+ "random device unvavailable"
+};
+
+/* static functions */
+static unsigned long rk_hash(unsigned long key);
+
+void rk_seed(unsigned long seed, rk_state *state)
+{
+ int pos;
+ seed &= 0xffffffffUL;
+
+ /* Knuth's PRNG as used in the Mersenne Twister reference implementation */
+ for (pos=0; pos<RK_STATE_LEN; pos++)
+ {
+ state->key[pos] = seed;
+ seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL;
+ }
+
+ state->pos = RK_STATE_LEN;
+ state->has_gauss = 0;
+}
+
+/* Thomas Wang 32 bits integer hash function */
+unsigned long rk_hash(unsigned long key)
+{
+ key += ~(key << 15);
+ key ^= (key >> 10);
+ key += (key << 3);
+ key ^= (key >> 6);
+ key += ~(key << 11);
+ key ^= (key >> 16);
+ return key;
+}
+
+rk_error rk_randomseed(rk_state *state)
+{
+#ifndef _WIN32
+ struct timeval tv;
+#else
+ struct _timeb tv;
+#endif
+ int i;
+
+ if(rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR)
+ {
+ state->key[0] |= 0x80000000UL; /* ensures non-zero key */
+ state->pos = RK_STATE_LEN;
+ state->has_gauss = 0;
+
+ for (i=0; i<624; i++)
+ {
+ state->key[i] &= 0xffffffffUL;
+ }
+
+ return RK_NOERR;
+ }
+
+#ifndef _WIN32
+ gettimeofday(&tv, NULL);
+ rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec)
+ ^ rk_hash(clock()), state);
+#else
+ _ftime(&tv);
+ rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state);
+#endif
+
+ return RK_ENODEV;
+}
+
+/* Magic Mersenne Twister constants */
+#define N 624
+#define M 397
+#define MATRIX_A 0x9908b0dfUL
+#define UPPER_MASK 0x80000000UL
+#define LOWER_MASK 0x7fffffffUL
+
+/* Slightly optimised reference implementation of the Mersenne Twister */
+unsigned long rk_random(rk_state *state)
+{
+ unsigned long y;
+
+ if (state->pos == RK_STATE_LEN)
+ {
+ int i;
+
+ for (i=0;i<N-M;i++)
+ {
+ y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK);
+ state->key[i] = state->key[i+M] ^ (y>>1) ^ (-(y & 1) & MATRIX_A);
+ }
+ for (;i<N-1;i++)
+ {
+ y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK);
+ state->key[i] = state->key[i+(M-N)] ^ (y>>1) ^ (-(y & 1) & MATRIX_A);
+ }
+ y = (state->key[N-1] & UPPER_MASK) | (state->key[0] & LOWER_MASK);
+ state->key[N-1] = state->key[M-1] ^ (y>>1) ^ (-(y & 1) & MATRIX_A);
+
+ state->pos = 0;
+ }
+
+ y = state->key[state->pos++];
+
+ /* Tempering */
+ y ^= (y >> 11);
+ y ^= (y << 7) & 0x9d2c5680UL;
+ y ^= (y << 15) & 0xefc60000UL;
+ y ^= (y >> 18);
+
+ return y;
+}
+
+long rk_long(rk_state *state)
+{
+ return rk_ulong(state) >> 1;
+}
+
+unsigned long rk_ulong(rk_state *state)
+{
+#if ULONG_MAX <= 0xffffffffUL
+ return rk_random(state);
+#else
+ return (rk_random(state) << 32) | (rk_random(state));
+#endif
+}
+
+unsigned long rk_interval(unsigned long max, rk_state *state)
+{
+ unsigned long mask = max, value;
+
+ if (max == 0) return 0;
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+#if ULONG_MAX > 0xffffffffUL
+ mask |= mask >> 32;
+#endif
+
+ /* Search a random value in [0..mask] <= max */
+ while ((value = (rk_ulong(state) & mask)) > max);
+
+ return value;
+}
+
+double rk_double(rk_state *state)
+{
+ /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */
+ long a = rk_random(state) >> 5, b = rk_random(state) >> 6;
+ return (a * 67108864.0 + b) / 9007199254740992.0;
+}
+
+void rk_fill(void *buffer, size_t size, rk_state *state)
+{
+ unsigned long r;
+ unsigned char *buf = buffer;
+
+ for (; size >= 4; size -= 4)
+ {
+ r = rk_random(state);
+ *(buf++) = r & 0xFF;
+ *(buf++) = (r >> 8) & 0xFF;
+ *(buf++) = (r >> 16) & 0xFF;
+ *(buf++) = (r >> 24) & 0xFF;
+ }
+
+ if (!size) return;
+
+ r = rk_random(state);
+
+ for (; size; r >>= 8, size --)
+ *(buf++) = (unsigned char)(r & 0xFF);
+}
+
+rk_error rk_devfill(void *buffer, size_t size, int strong)
+{
+#ifndef _WIN32
+ FILE *rfile;
+ int done;
+
+ if (strong)
+ rfile = fopen(RK_DEV_RANDOM, "rb");
+ else
+ rfile = fopen(RK_DEV_URANDOM, "rb");
+ if (rfile == NULL)
+ return RK_ENODEV;
+ done = fread(buffer, size, 1, rfile);
+ fclose(rfile);
+ if (done)
+ return RK_NOERR;
+#else
+
+#ifndef RK_NO_WINCRYPT
+ HCRYPTPROV hCryptProv;
+ BOOL done;
+
+ if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL,
+ CRYPT_VERIFYCONTEXT) || !hCryptProv)
+ return RK_ENODEV;
+ done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer);
+ CryptReleaseContext(hCryptProv, 0);
+ if (done)
+ return RK_NOERR;
+#endif
+
+#endif
+
+ return RK_ENODEV;
+}
+
+rk_error rk_altfill(void *buffer, size_t size, int strong, rk_state *state)
+{
+ rk_error err;
+
+ err = rk_devfill(buffer, size, strong);
+ if (err)
+ rk_fill(buffer, size, state);
+
+ return err;
+}
+
+double rk_gauss(rk_state *state)
+{
+ if (state->has_gauss)
+ {
+ state->has_gauss = 0;
+ return state->gauss;
+ }
+ else
+ {
+ double f, x1, x2, r2;
+ do
+ {
+ x1 = 2.0*rk_double(state) - 1.0;
+ x2 = 2.0*rk_double(state) - 1.0;
+ r2 = x1*x1 + x2*x2;
+ }
+ while (r2 >= 1.0 || r2 == 0.0);
+
+ f = sqrt(-2.0*log(r2)/r2); /* Box-Muller transform */
+ state->has_gauss = 1;
+ state->gauss = f*x1; /* Keep for next call */
+ return f*x2;
+ }
+}
+
+
diff --git a/numpy/random/mtrand/randomkit.h b/numpy/random/mtrand/randomkit.h
new file mode 100644
index 000000000..389666854
--- /dev/null
+++ b/numpy/random/mtrand/randomkit.h
@@ -0,0 +1,189 @@
+/* Random kit 1.3 */
+
+/*
+ * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */
+
+/*
+ * Typical use:
+ *
+ * {
+ * rk_state state;
+ * unsigned long seed = 1, random_value;
+ *
+ * rk_seed(seed, &state); // Initialize the RNG
+ * ...
+ * random_value = rk_random(&state); // Generate random values in [0..RK_MAX]
+ * }
+ *
+ * Instead of rk_seed, you can use rk_randomseed which will get a random seed
+ * from /dev/urandom (or the clock, if /dev/urandom is unavailable):
+ *
+ * {
+ * rk_state state;
+ * unsigned long random_value;
+ *
+ * rk_randomseed(&state); // Initialize the RNG with a random seed
+ * ...
+ * random_value = rk_random(&state); // Generate random values in [0..RK_MAX]
+ * }
+ */
+
+/*
+ * Useful macro:
+ * RK_DEV_RANDOM: the device used for random seeding.
+ * defaults to "/dev/urandom"
+ */
+
+#include <stddef.h>
+
+#ifndef _RANDOMKIT_
+#define _RANDOMKIT_
+
+#define RK_STATE_LEN 624
+
+typedef struct rk_state_
+{
+ unsigned long key[RK_STATE_LEN];
+ int pos;
+ int has_gauss; /* !=0: gauss contains a gaussian deviate */
+ double gauss;
+
+ /* The rk_state structure has been extended to store the following
+ * information for the binomial generator. If the input values of n or p
+ * are different than nsave and psave, then the other parameters will be
+ * recomputed. RTK 2005-09-02 */
+
+ int has_binomial; /* !=0: following parameters initialized for
+ binomial */
+ double psave;
+ long nsave;
+ double r;
+ double q;
+ double fm;
+ long m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+
+}
+rk_state;
+
+typedef enum {
+ RK_NOERR = 0, /* no error */
+ RK_ENODEV = 1, /* no RK_DEV_RANDOM device */
+ RK_ERR_MAX = 2
+} rk_error;
+
+/* error strings */
+extern char *rk_strerror[RK_ERR_MAX];
+
+/* Maximum generated random value */
+#define RK_MAX 0xFFFFFFFFUL
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Initialize the RNG state using the given seed.
+ */
+extern void rk_seed(unsigned long seed, rk_state *state);
+
+/*
+ * Initialize the RNG state using a random seed.
+ * Uses /dev/urandom or, when unavailable, the clock (see randomkit.c).
+ * Returns RK_NOERR when no errors occurs.
+ * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because
+ * there is no such device). In this case, the RNG was initialized using the
+ * clock.
+ */
+extern rk_error rk_randomseed(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and RK_MAX inclusive
+ */
+extern unsigned long rk_random(rk_state *state);
+
+/*
+ * Returns a random long between 0 and LONG_MAX inclusive
+ */
+extern long rk_long(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and ULONG_MAX inclusive
+ */
+extern unsigned long rk_ulong(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and max inclusive.
+ */
+extern unsigned long rk_interval(unsigned long max, rk_state *state);
+
+/*
+ * Returns a random double between 0.0 and 1.0, 1.0 excluded.
+ */
+extern double rk_double(rk_state *state);
+
+/*
+ * fill the buffer with size random bytes
+ */
+extern void rk_fill(void *buffer, size_t size, rk_state *state);
+
+/*
+ * fill the buffer with random bytes from the random device
+ * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
+ * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM
+ * is used instead. This parameter has no effect on Windows.
+ * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer
+ * which can take a very long time on quiet systems.
+ */
+extern rk_error rk_devfill(void *buffer, size_t size, int strong);
+
+/*
+ * fill the buffer using rk_devfill if the random device is available and using
+ * rk_fill if it is not
+ * parameters have the same meaning as rk_fill and rk_devfill
+ * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
+ */
+extern rk_error rk_altfill(void *buffer, size_t size, int strong,
+ rk_state *state);
+
+/*
+ * return a random gaussian deviate with variance unity and zero mean.
+ */
+extern double rk_gauss(rk_state *state);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RANDOMKIT_ */
diff --git a/numpy/random/mtrand/scipy.pxi b/numpy/random/mtrand/scipy.pxi
new file mode 100644
index 000000000..9c7b4d01c
--- /dev/null
+++ b/numpy/random/mtrand/scipy.pxi
@@ -0,0 +1,52 @@
+# :Author: Robert Kern
+# :Copyright: 2004, Enthought, Inc.
+# :License: BSD Style
+
+
+cdef extern from "scipy/arrayobject.h":
+ ctypedef enum PyArray_TYPES:
+ PyArray_BOOL
+ PyArray_BYTE
+ PyArray_UBYTE
+ PyArray_SHORT
+ PyArray_USHORT
+ PyArray_INT
+ PyArray_UINT
+ PyArray_LONG
+ PyArray_ULONG
+ PyArray_FLOAT
+ PyArray_DOUBLE
+ PyArray_LONGDOUBLE
+ PyArray_CFLOAT
+ PyArray_CDOUBLE
+ PyArray_CLONGDOUBLE
+ PyArray_OBJECT
+ PyArray_STRING
+ PyArray_UNICODE
+ PyArray_VOID
+ PyArray_NTYPES
+ PyArray_NOTYPE
+
+ ctypedef int intp
+
+ ctypedef extern class scipy.dtypedescr [object PyArray_Descr]:
+ cdef int type_num, elsize
+ cdef char type
+
+ ctypedef extern class scipy.ndarray [object PyArrayObject]:
+ cdef char *data
+ cdef int nd
+ cdef intp *dimensions
+ cdef intp *strides
+ cdef object base
+ cdef dtypedescr descr
+ cdef int flags
+
+ ndarray PyArray_SimpleNew(int ndims, intp* dims, int item_type)
+ int PyArray_Check(object obj)
+ ndarray PyArray_ContiguousFromObject(object obj, PyArray_TYPES type,
+ int mindim, int maxdim)
+ intp PyArray_SIZE(ndarray arr)
+ void *PyArray_DATA(ndarray arr)
+
+ void import_array()
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
new file mode 100644
index 000000000..17e5e0133
--- /dev/null
+++ b/numpy/random/setup.py
@@ -0,0 +1,24 @@
+
+from os.path import join
+
+def configuration(parent_package='',top_path=None):
+ from scipy.distutils.misc_util import Configuration
+ config = Configuration('random',parent_package,top_path)
+
+ # Configure mtrand
+ config.add_extension('mtrand',
+ sources=[join('mtrand', x) for x in
+ ['mtrand.c', 'randomkit.c', 'initarray.c',
+ 'distributions.c']],
+ libraries=['m'],
+ depends = [join('mtrand','*.h'),
+ join('mtrand','*.pyx'),
+ join('mtrand','*.pxi'),
+ ]
+ )
+
+ return config
+
+if __name__ == '__main__':
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/numpy/setup.py b/numpy/setup.py
new file mode 100644
index 000000000..491e288a9
--- /dev/null
+++ b/numpy/setup.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+import os
+
+def configuration(parent_package='',top_path=None):
+ from scipy.distutils.misc_util import Configuration
+ config = Configuration('scipy',parent_package,top_path)
+ config.add_subpackage('distutils')
+ config.add_subpackage('testing')
+ config.add_subpackage('f2py')
+ config.add_subpackage('base')
+ config.add_subpackage('corefft')
+ config.add_subpackage('corelinalg')
+ config.add_subpackage('random')
+ config.add_data_dir('doc')
+ config.make_config_py(name='__core_config__') # installs __core_config__.py
+
+ return config.todict()
+
+if __name__ == '__main__':
+ # Remove current working directory from sys.path
+ # to avoid importing scipy.distutils as Python std. distutils:
+ import os, sys
+ sys.path.remove(os.getcwd())
+
+ from scipy.distutils.core import setup
+ setup(**configuration(top_path=''))
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
new file mode 100644
index 000000000..a0a1405f0
--- /dev/null
+++ b/numpy/testing/__init__.py
@@ -0,0 +1,4 @@
+
+from info import __doc__
+from scipytest import *
+from utils import *
diff --git a/numpy/testing/info.py b/numpy/testing/info.py
new file mode 100644
index 000000000..9b5caa074
--- /dev/null
+++ b/numpy/testing/info.py
@@ -0,0 +1,30 @@
+"""
+Scipy testing tools
+===================
+
+Scipy-style unit-testing
+------------------------
+
+ ScipyTest -- Scipy tests site manager
+ ScipyTestCase -- unittest.TestCase with measure method
+ IgnoreException -- raise when checking disabled feature, it'll be ignored
+ set_package_path -- prepend package build directory to path
+ set_local_path -- prepend local directory (to tests files) to path
+ restore_path -- restore path after set_package_path
+
+Utility functions
+-----------------
+
+ jiffies -- return 1/100ths of a second that the current process has used
+ memusage -- virtual memory size in bytes of the running python [linux]
+ rand -- array of random numbers from given shape
+ assert_equal -- assert equality
+ assert_almost_equal -- assert equality with decimal tolerance
+ assert_approx_equal -- assert equality with significant digits tolerance
+ assert_array_equal -- assert arrays equality
+ assert_array_almost_equal -- assert arrays equality with decimal tolerance
+ assert_array_less -- assert arrays less-ordering
+
+"""
+
+global_symbols = ['ScipyTest']
diff --git a/numpy/testing/scipytest.py b/numpy/testing/scipytest.py
new file mode 100644
index 000000000..b2aea7b72
--- /dev/null
+++ b/numpy/testing/scipytest.py
@@ -0,0 +1,385 @@
+
+import os
+import sys
+import imp
+import types
+import unittest
+import traceback
+
+__all__ = ['set_package_path', 'set_local_path', 'restore_path',
+ 'IgnoreException', 'ScipyTestCase', 'ScipyTest']
+
+DEBUG=0
+get_frame = sys._getframe
+from utils import jiffies
+
+
+class IgnoreException(Exception):
+ "Ignoring this exception due to disabled feature"
+
+
+def set_package_path(level=1):
+ """ Prepend package directory to sys.path.
+
+ set_package_path should be called from a test_file.py that
+ satisfies the following tree structure:
+
+ <somepath>/<somedir>/test_file.py
+
+ Then the first existing path name from the following list
+
+ <somepath>/build/lib.<platform>-<version>
+ <somepath>/..
+
+ is prepended to sys.path.
+ The caller is responsible for removing this path by using
+
+ restore_path()
+ """
+ from distutils.util import get_platform
+ f = get_frame(level)
+ if f.f_locals['__name__']=='__main__':
+ testfile = sys.argv[0]
+ else:
+ testfile = f.f_locals['__file__']
+ d = os.path.dirname(os.path.dirname(os.path.abspath(testfile)))
+ d1 = os.path.join(d,'build','lib.%s-%s'%(get_platform(),sys.version[:3]))
+ if not os.path.isdir(d1):
+ d1 = os.path.dirname(d)
+ if DEBUG:
+ print 'Inserting %r to sys.path' % (d1)
+ sys.path.insert(0,d1)
+ return
+
+
+def set_local_path(reldir='', level=1):
+ """ Prepend local directory to sys.path.
+
+ The caller is responsible for removing this path by using
+
+ restore_path()
+ """
+ f = get_frame(level)
+ if f.f_locals['__name__']=='__main__':
+ testfile = sys.argv[0]
+ else:
+ testfile = f.f_locals['__file__']
+ local_path = os.path.join(os.path.dirname(os.path.abspath(testfile)),reldir)
+ if DEBUG:
+ print 'Inserting %r to sys.path' % (local_path)
+ sys.path.insert(0,local_path)
+ return
+
+
+def restore_path():
+ if DEBUG:
+ print 'Removing %r from sys.path' % (sys.path[0])
+ del sys.path[0]
+ return
+
+
+def output_exception():
+ try:
+ type, value, tb = sys.exc_info()
+ info = traceback.extract_tb(tb)
+ #this is more verbose
+ #traceback.print_exc()
+ filename, lineno, function, text = info[-1] # last line only
+ print "%s:%d: %s: %s (in %s)" %\
+ (filename, lineno, type.__name__, str(value), function)
+ finally:
+ type = value = tb = None # clean up
+ return
+
+
+class _dummy_stream:
+ def __init__(self,stream):
+ self.data = []
+ self.stream = stream
+ def write(self,message):
+ if not self.data and not message.startswith('E'):
+ self.stream.write(message)
+ self.stream.flush()
+ message = ''
+ self.data.append(message)
+ def writeln(self,message):
+ self.write(message+'\n')
+
+
+class ScipyTestCase (unittest.TestCase):
+
+ def measure(self,code_str,times=1):
+ """ Return elapsed time for executing code_str in the
+ namespace of the caller for given times.
+ """
+ frame = get_frame(1)
+ locs,globs = frame.f_locals,frame.f_globals
+ code = compile(code_str,
+ 'ScipyTestCase runner for '+self.__class__.__name__,
+ 'exec')
+ i = 0
+ elapsed = jiffies()
+ while i<times:
+ i += 1
+ exec code in globs,locs
+ elapsed = jiffies() - elapsed
+ return 0.01*elapsed
+
+ def __call__(self, result=None):
+ if result is None:
+ return unittest.TestCase.__call__(self, result)
+
+ nof_errors = len(result.errors)
+ save_stream = result.stream
+ result.stream = _dummy_stream(save_stream)
+ unittest.TestCase.__call__(self, result)
+ if nof_errors != len(result.errors):
+ test, errstr = result.errors[-1]
+ if type(errstr) is type(()):
+ errstr = str(errstr[0])
+ else:
+ errstr = errstr.split('\n')[-2]
+ l = len(result.stream.data)
+ if errstr.startswith('IgnoreException:'):
+ if l==1:
+ assert result.stream.data[-1]=='E',`result.stream.data`
+ result.stream.data[-1] = 'i'
+ else:
+ assert result.stream.data[-1]=='ERROR\n',`result.stream.data`
+ result.stream.data[-1] = 'ignoring\n'
+ del result.errors[-1]
+ map(save_stream.write, result.stream.data)
+ result.stream = save_stream
+
+def _get_all_method_names(cls):
+ names = dir(cls)
+ if sys.version[:3]<='2.1':
+ for b in cls.__bases__:
+ for n in dir(b)+_get_all_method_names(b):
+ if n not in names:
+ names.append(n)
+ return names
+
+
+# for debug build--check for memory leaks during the test.
+class _SciPyTextTestResult(unittest._TextTestResult):
+ def startTest(self, test):
+ unittest._TextTestResult.startTest(self, test)
+ if self.showAll:
+ N = len(sys.getobjects(0))
+ self._totnumobj = N
+ self._totrefcnt = sys.gettotalrefcount()
+ return
+
+ def stopTest(self, test):
+ if self.showAll:
+ N = len(sys.getobjects(0))
+ self.stream.write("objects: %d ===> %d; " % (self._totnumobj, N))
+ self.stream.write("refcnts: %d ===> %d\n" % (self._totrefcnt,
+ sys.gettotalrefcount()))
+ return
+
+class SciPyTextTestRunner(unittest.TextTestRunner):
+ def _makeResult(self):
+ return _SciPyTextTestResult(self.stream, self.descriptions, self.verbosity)
+
+
+class ScipyTest:
+ """ Scipy tests site manager.
+
+ Usage:
+ >>> ScipyTest(<package>).test(level=1,verbosity=2)
+
+ <package> is package name or its module object.
+
+ Package is supposed to contain a directory tests/
+ with test_*.py files where * refers to the names of submodules.
+
+    test_*.py files are supposed to define classes, derived
+ from ScipyTestCase or unittest.TestCase, with methods having
+ names starting with test or bench or check.
+
+ And that is it! No need to implement test or test_suite functions
+ in each .py file.
+
+ Also old styled test_suite(level=1) hooks are supported but
+ soon to be removed.
+ """
+ def __init__(self, package=None):
+ if package is None:
+ from scipy.distutils.misc_util import get_frame
+ f = get_frame(1)
+ package = f.f_locals['__name__']
+ self.package = package
+
+ def _module_str(self, module):
+ filename = module.__file__[-30:]
+ if filename!=module.__file__:
+ filename = '...'+filename
+ return '<module %s from %s>' % (`module.__name__`, `filename`)
+
+ def _get_method_names(self,clsobj,level):
+ names = []
+ for mthname in _get_all_method_names(clsobj):
+ if mthname[:5] not in ['bench','check'] \
+ and mthname[:4] not in ['test']:
+ continue
+ mth = getattr(clsobj, mthname)
+ if type(mth) is not types.MethodType:
+ continue
+ d = mth.im_func.func_defaults
+ if d is not None:
+ mthlevel = d[0]
+ else:
+ mthlevel = 1
+ if level>=mthlevel:
+ if mthname not in names:
+ names.append(mthname)
+ for base in clsobj.__bases__:
+ for n in self._get_method_names(base,level):
+ if n not in names:
+ names.append(n)
+ return names
+
+ def _get_module_tests(self,module,level,verbosity):
+ mstr = self._module_str
+ d,f = os.path.split(module.__file__)
+
+ short_module_name = os.path.splitext(os.path.basename(f))[0]
+ if short_module_name=='__init__':
+ short_module_name = module.__name__.split('.')[-1]
+
+ test_dir = os.path.join(d,'tests')
+ test_file = os.path.join(test_dir,'test_'+short_module_name+'.py')
+
+ local_test_dir = os.path.join(os.getcwd(),'tests')
+ local_test_file = os.path.join(local_test_dir,
+ 'test_'+short_module_name+'.py')
+ if os.path.basename(os.path.dirname(local_test_dir)) \
+ == os.path.basename(os.path.dirname(test_dir)) \
+ and os.path.isfile(local_test_file):
+ test_file = local_test_file
+
+ if not os.path.isfile(test_file):
+ if short_module_name[:5]=='info_' \
+ and short_module_name[5:]==module.__name__.split('.')[-2]:
+ return []
+ if short_module_name in ['__cvs_version__','__svn_version__']:
+ return []
+ if short_module_name[-8:]=='_version' \
+ and short_module_name[:-8]==module.__name__.split('.')[-2]:
+ return []
+ if verbosity>1:
+ print test_file
+ print ' !! No test file %r found for %s' \
+ % (os.path.basename(test_file), mstr(module))
+ return []
+
+ try:
+ if sys.version[:3]=='2.1':
+ # Workaround for Python 2.1 .pyc file generator bug
+ import random
+ pref = '-nopyc'+`random.randint(1,100)`
+ else:
+ pref = ''
+ f = open(test_file,'r')
+ test_module = imp.load_module(\
+ module.__name__+'.test_'+short_module_name+pref,
+ f, test_file+pref,('.py', 'r', 1))
+ f.close()
+ if sys.version[:3]=='2.1' and os.path.isfile(test_file+pref+'c'):
+ os.remove(test_file+pref+'c')
+ except:
+ print ' !! FAILURE importing tests for ', mstr(module)
+ print ' ',
+ output_exception()
+ return []
+ return self._get_suite_list(test_module, level, module.__name__)
+
+ def _get_suite_list(self, test_module, level, module_name='__main__'):
+ mstr = self._module_str
+ if hasattr(test_module,'test_suite'):
+ # Using old styled test suite
+ try:
+ total_suite = test_module.test_suite(level)
+ return total_suite._tests
+ except:
+ print ' !! FAILURE building tests for ', mstr(test_module)
+ print ' ',
+ output_exception()
+ return []
+ suite_list = []
+ for name in dir(test_module):
+ obj = getattr(test_module, name)
+ if type(obj) is not type(unittest.TestCase) \
+ or not issubclass(obj, unittest.TestCase) \
+ or obj.__name__[:4] != 'test':
+ continue
+ for mthname in self._get_method_names(obj,level):
+ suite = obj(mthname)
+ if getattr(suite,'isrunnable',lambda mthname:1)(mthname):
+ suite_list.append(suite)
+ print ' Found',len(suite_list),'tests for',module_name
+ return suite_list
+
+ def test(self,level=1,verbosity=1):
+ """ Run Scipy module test suite with level and verbosity.
+ """
+ if type(self.package) is type(''):
+ exec 'import %s as this_package' % (self.package)
+ else:
+ this_package = self.package
+
+ package_name = this_package.__name__
+
+ suites = []
+ for name, module in sys.modules.items():
+ if package_name != name[:len(package_name)] \
+ or module is None:
+ continue
+ if os.path.basename(os.path.dirname(module.__file__))=='tests':
+ continue
+ suites.extend(self._get_module_tests(module, level, verbosity))
+
+ suites.extend(self._get_suite_list(sys.modules[package_name], level))
+
+ all_tests = unittest.TestSuite(suites)
+ #if hasattr(sys,'getobjects'):
+ # runner = SciPyTextTestRunner(verbosity=verbosity)
+ #else:
+ runner = unittest.TextTestRunner(verbosity=verbosity)
+ # Use the builtin displayhook. If the tests are being run
+ # under IPython (for instance), any doctest test suites will
+ # fail otherwise.
+ old_displayhook = sys.displayhook
+ sys.displayhook = sys.__displayhook__
+ try:
+ runner.run(all_tests)
+ finally:
+ sys.displayhook = old_displayhook
+ return runner
+
+ def run(self):
+ """ Run Scipy module test suite with level and verbosity
+ taken from sys.argv. Requires optparse module.
+ """
+ try:
+ from optparse import OptionParser
+ except ImportError:
+ print 'Failed to import optparse module, ignoring.'
+ return self.test()
+ usage = r'usage: %prog [-v <verbosity>] [-l <level>]'
+ parser = OptionParser(usage)
+ parser.add_option("-v", "--verbosity",
+ action="store",
+ dest="verbosity",
+ default=1,
+ type='int')
+ parser.add_option("-l", "--level",
+ action="store",
+ dest="level",
+ default=1,
+ type='int')
+ (options, args) = parser.parse_args()
+ self.test(options.level,options.verbosity)
+ return
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
new file mode 100755
index 000000000..0f7764f07
--- /dev/null
+++ b/numpy/testing/setup.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
def configuration(parent_package='',top_path=None):
    """Return the scipy.distutils configuration for the 'testing' subpackage."""
    from scipy.distutils.misc_util import Configuration
    return Configuration('testing', parent_package, top_path)
+
if __name__ == '__main__':
    # Allow this setup.py to be run standalone to build/install only the
    # testing subpackage via scipy.distutils.
    from scipy.distutils.core import setup
    setup(maintainer = "SciPy Developers",
          maintainer_email = "scipy-dev@scipy.org",
          description = "SciPy test module",
          url = "http://www.scipy.org",
          license = "SciPy License (BSD Style)",
          **configuration(top_path='').todict()
          )
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
new file mode 100644
index 000000000..362f30fec
--- /dev/null
+++ b/numpy/testing/utils.py
@@ -0,0 +1,210 @@
+"""
+Utility function to facilitate testing.
+"""
+
+import os
+import sys
+import time
+import math
+
+__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
+ 'assert_array_equal', 'assert_array_less',
+ 'assert_array_almost_equal', 'jiffies', 'memusage', 'rand']
+
def rand(*args):
    """Return an array of the given shape filled with uniform random numbers.

    Only the standard-library ``random`` module is used to generate the
    values, which makes this useful for testing purposes.
    """
    import random
    from scipy.base import zeros, Float64
    out = zeros(args, Float64)
    # NOTE: .flat here is an indexable flat view in this era's array API.
    flat_view = out.flat
    for pos in range(len(flat_view)):
        flat_view[pos] = random.random()
    return out
+
if sys.platform[:5]=='linux':
    def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
                _load_time=time.time()):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc.

        Falls back to an estimate derived from wall-clock time since
        module load when /proc is unavailable or unparseable.
        """
        try:
            f = open(_proc_pid_stat, 'r')
            try:
                # Field 14 (index 13) of /proc/<pid>/stat is utime.
                fields = f.readline().split(' ')
            finally:
                # Close even when readline() raises (handle leaked before).
                f.close()
            return int(fields[13])
        except (IOError, OSError, IndexError, ValueError):
            # Narrowed from a bare except: only I/O and parse failures
            # should trigger the wall-clock fallback.
            return int(100*(time.time()-_load_time))

    def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
        """ Return virtual memory size in bytes of the running python,
        or None when it cannot be determined.
        """
        try:
            f = open(_proc_pid_stat, 'r')
            try:
                # Field 23 (index 22) of /proc/<pid>/stat is vsize.
                fields = f.readline().split(' ')
            finally:
                f.close()
            return int(fields[22])
        except (IOError, OSError, IndexError, ValueError):
            return
else:
    # os.getpid is not available on all platforms.
    # Using time is safe but inaccurate, especially when the process
    # was suspended or sleeping.
    def jiffies(_load_time=time.time()):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. [Emulation with time.time]. """
        return int(100*(time.time()-_load_time))

    def memusage():
        """ Return memory usage of running python. [Not implemented]"""
        return
+
def assert_equal(actual,desired,err_msg='',verbose=1):
    """ Raise an AssertionError unless two items compare equal.

    Array arguments are delegated to assert_array_equal; all other
    values are compared with ==.
    """
    from scipy.base import ArrayType
    if isinstance(actual, ArrayType) or isinstance(desired, ArrayType):
        return assert_array_equal(actual, desired, err_msg)
    msg = '\nItems are not equal:\n' + err_msg
    # Include the two values in the message when verbose and the reprs
    # are short; if evaluating the condition fails, include them anyway.
    try:
        show_values = verbose and len(repr(desired)) < 100 and len(repr(actual))
    except:
        show_values = 1
    if show_values:
        msg = msg \
            + 'DESIRED: ' + repr(desired) \
            + '\nACTUAL: ' + repr(actual)
    assert desired == actual, msg
+
+
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=1):
    """ Raise an AssertionError unless two items agree to `decimal`
    decimal places.

    Array arguments are delegated to assert_array_almost_equal; scalars
    pass when round(abs(desired - actual), decimal) == 0.
    """
    from scipy.base import ArrayType
    if isinstance(actual, ArrayType) or isinstance(desired, ArrayType):
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    msg = '\nItems are not equal:\n' + err_msg
    # Include the two values in the message when verbose and the reprs
    # are short; if evaluating the condition fails, include them anyway.
    try:
        show_values = verbose and len(repr(desired)) < 100 and len(repr(actual))
    except:
        show_values = 1
    if show_values:
        msg = msg \
            + 'DESIRED: ' + repr(desired) \
            + '\nACTUAL: ' + repr(actual)
    assert round(abs(desired - actual),decimal) == 0, msg
+
+
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=1):
    """ Raise an AssertionError unless two scalars agree to the given
    number of significant digits.

    Parameters
    ----------
    actual, desired : number
        Values to compare; both are coerced with float().
    significant : int
        Number of significant decimal digits required to agree.
    err_msg : str
        Extra text appended to the failure message.
    verbose : int
        When true (and the reprs are short), include both values in the
        failure message.
    """
    msg = '\nItems are not equal to %d significant digits:\n' % significant
    msg += err_msg
    actual, desired = float(actual), float(desired)
    if desired == actual:
        return
    # Normalize both numbers into the range (-10.0, 10.0) by dividing by
    # a common power of ten derived from their average magnitude.
    scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
    try:
        sc_desired = desired/scale
    except ZeroDivisionError:
        # scale can underflow to 0.0 for denormal inputs.
        sc_desired = 0.0
    try:
        sc_actual = actual/scale
    except ZeroDivisionError:
        sc_actual = 0.0
    # Build the value dump once instead of duplicating it in an except
    # branch; narrowed from a bare except so control-flow exceptions
    # (KeyboardInterrupt, SystemExit) are not swallowed.
    try:
        show_values = verbose and len(repr(desired)) < 100 and len(repr(actual))
    except Exception:
        show_values = 1
    if show_values:
        msg = msg \
            + 'DESIRED: ' + repr(desired) \
            + '\nACTUAL: ' + repr(actual)
    assert math.fabs(sc_desired - sc_actual) < pow(10.,-1*significant), msg
+
+
def assert_array_equal(x,y,err_msg=''):
    """Assert that two array_like objects have compatible shapes and
    elementwise-equal contents."""
    from scipy.base import asarray, alltrue, equal, shape, ravel, array2string
    x, y = asarray(x), asarray(y)
    msg = '\nArrays are not equal'
    try:
        xshape, yshape = shape(x), shape(y)
        # A 0-d operand is always shape-compatible (it broadcasts).
        shapes_ok = (len(xshape) == 0 or len(yshape) == 0
                     or (len(xshape) == len(yshape)
                         and alltrue(equal(xshape, yshape))))
        assert shapes_ok, msg + ' (shapes %s, %s mismatch):\n\t' \
               % (xshape, yshape) + err_msg
        reduced = ravel(equal(x, y))
        cond = alltrue(reduced)
        if not cond:
            s1 = array2string(x, precision=16)
            s2 = array2string(y, precision=16)
            if len(s1) > 120:
                s1 = s1[:120] + '...'
            if len(s2) > 120:
                s2 = s2[:120] + '...'
            match = 100 - 100.0*reduced.tolist().count(1)/len(reduced)
            msg = msg + ' (mismatch %s%%):\n\tArray 1: %s\n\tArray 2: %s' \
                  % (match, s1, s2)
        assert cond, msg + '\n\t' + err_msg
    except ValueError:
        raise ValueError(msg)
+
+
def assert_array_almost_equal(x,y,decimal=6,err_msg=''):
    """Assert that two array_like objects have equal shapes and agree
    elementwise to `decimal` decimal places."""
    from scipy.base import asarray, alltrue, equal, shape, ravel,\
         array2string, less_equal, around
    x, y = asarray(x), asarray(y)
    msg = '\nArrays are not almost equal'
    try:
        shapes_match = alltrue(equal(shape(x), shape(y)))
        if not shapes_match:
            msg = msg + ' (shapes mismatch):\n\t'\
                  'Shape of array 1: %s\n\tShape of array 2: %s' % (shape(x),shape(y))
        assert shapes_match, msg + '\n\t' + err_msg
        tol = 10.0**(-decimal)
        reduced = ravel(equal(less_equal(around(abs(x-y),decimal), tol), 1))
        cond = alltrue(reduced)
        if not cond:
            s1 = array2string(x, precision=decimal+1)
            s2 = array2string(y, precision=decimal+1)
            if len(s1) > 120:
                s1 = s1[:120] + '...'
            if len(s2) > 120:
                s2 = s2[:120] + '...'
            match = 100 - 100.0*reduced.tolist().count(1)/len(reduced)
            msg = msg + ' (mismatch %s%%):\n\tArray 1: %s\n\tArray 2: %s' \
                  % (match, s1, s2)
        assert cond, msg + '\n\t' + err_msg
    except ValueError:
        # Dump the operands for diagnosis, then re-raise with a fixed text.
        print(sys.exc_value)
        print('%s %s' % (shape(x), shape(y)))
        print('%s %s' % (x, y))
        raise ValueError('arrays are not almost equal')
+
def assert_array_less(x,y,err_msg=''):
    """Assert that two array_like objects have equal shapes and that
    every element of x is strictly less than the matching element of y."""
    from scipy.base import asarray, alltrue, less, equal, shape, ravel, array2string
    x, y = asarray(x), asarray(y)
    msg = '\nArrays are not less-ordered'
    try:
        assert alltrue(equal(shape(x), shape(y))), \
               msg + ' (shapes mismatch):\n\t' + err_msg
        reduced = ravel(less(x, y))
        cond = alltrue(reduced)
        if not cond:
            s1 = array2string(x, precision=16)
            s2 = array2string(y, precision=16)
            if len(s1) > 120:
                s1 = s1[:120] + '...'
            if len(s2) > 120:
                s2 = s2[:120] + '...'
            match = 100 - 100.0*reduced.tolist().count(1)/len(reduced)
            msg = msg + ' (mismatch %s%%):\n\tArray 1: %s\n\tArray 2: %s' \
                  % (match, s1, s2)
        assert cond, msg + '\n\t' + err_msg
    except ValueError:
        print('%s %s' % (shape(x), shape(y)))
        raise ValueError('arrays are not less-ordered')